repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Im2Hands | Im2Hands-main/im2mesh/common.py | # import multiprocessing
import torch
from im2mesh.utils.libkdtree import KDTree
import numpy as np
def compute_iou(occ1, occ2):
    ''' Computes the Intersection over Union (IoU) value for two sets of
    occupancy values.
    Args:
        occ1 (tensor): first set of occupancy values
        occ2 (tensor): second set of occupancy values
    '''
    occ1 = np.asarray(occ1)
    occ2 = np.asarray(occ2)
    # Flatten everything but the batch axis so the IoU is computed per
    # sample; 1-dimensional inputs are treated as a single flat sample.
    if occ1.ndim >= 2:
        occ1 = occ1.reshape(occ1.shape[0], -1)
    if occ2.ndim >= 2:
        occ2 = occ2.reshape(occ2.shape[0], -1)
    # Binarize both occupancy fields at the 0.5 threshold
    occ1_bin = occ1 >= 0.5
    occ2_bin = occ2 >= 0.5
    # IoU = |intersection| / |union|, evaluated along the last axis
    area_union = np.sum((occ1_bin | occ2_bin).astype(np.float32), axis=-1)
    area_intersect = np.sum((occ1_bin & occ2_bin).astype(np.float32), axis=-1)
    return area_intersect / area_union
def chamfer_distance(points1, points2, use_kdtree=True, give_id=False):
    ''' Returns the chamfer distance for the sets of points.
    Args:
        points1 (numpy array): first point set
        points2 (numpy array): second point set
        use_kdtree (bool): whether to use a kdtree
        give_id (bool): whether to return the IDs of nearest points
    '''
    # The naive O(T^2) fallback does not support give_id; the KD-tree
    # accelerated variant is the default.
    if not use_kdtree:
        return chamfer_distance_naive(points1, points2)
    return chamfer_distance_kdtree(points1, points2, give_id=give_id)
def chamfer_distance_naive(points1, points2):
    ''' Naive implementation of the Chamfer distance.
    Args:
        points1 (numpy array): first point set
        points2 (numpy array): second point set
    '''
    assert points1.size() == points2.size()
    batch_size, n_pts, _ = points1.size()
    # Broadcast to a full (batch, T, T) pairwise squared-distance matrix
    diff = points1.view(batch_size, n_pts, 1, 3) \
        - points2.view(batch_size, 1, n_pts, 3)
    sq_dist = diff.pow(2).sum(-1)
    # Mean nearest-neighbor distance in both directions
    chamfer_21 = sq_dist.min(dim=1)[0].mean(dim=1)
    chamfer_12 = sq_dist.min(dim=2)[0].mean(dim=1)
    return chamfer_21 + chamfer_12
def chamfer_distance_kdtree(points1, points2, give_id=False):
    ''' KD-tree based implementation of the Chamfer distance.
    Args:
        points1 (numpy array): first point set
        points2 (numpy array): second point set
        give_id (bool): whether to return the IDs of the nearest points
    '''
    # Points have size batch_size x T x 3; KD-tree queries run on the CPU
    batch_size = points1.size(0)
    points1_np = points1.detach().cpu().numpy()
    points2_np = points2.detach().cpu().numpy()
    # Indices into points2 of the nearest neighbor of every point in points1
    idx_nn_12, _ = get_nearest_neighbors_indices_batch(points1_np, points2_np)
    idx_nn_12 = torch.LongTensor(idx_nn_12).to(points1.device)
    # Expand to batch_size x T x 3 so torch.gather can pick coordinates
    idx_nn_12_expand = idx_nn_12.view(batch_size, -1, 1).expand_as(points1)
    # Indices into points1 of the nearest neighbor of every point in points2
    idx_nn_21, _ = get_nearest_neighbors_indices_batch(points2_np, points1_np)
    idx_nn_21 = torch.LongTensor(idx_nn_21).to(points1.device)
    idx_nn_21_expand = idx_nn_21.view(batch_size, -1, 1).expand_as(points2)
    # Gather the nearest-neighbor coordinates:
    # points_12[i, j, k] = points2[i, idx_nn_12_expand[i, j, k], k]
    points_12 = torch.gather(points2, dim=1, index=idx_nn_12_expand)
    # points_21[i, j, k] = points1[i, idx_nn_21_expand[i, j, k], k]
    points_21 = torch.gather(points1, dim=1, index=idx_nn_21_expand)
    # Mean squared distance to the nearest neighbor, in both directions
    chamfer1 = (points1 - points_12).pow(2).sum(2).mean(1)
    chamfer2 = (points2 - points_21).pow(2).sum(2).mean(1)
    if give_id:
        # Return the two directed terms together with the NN indices
        return chamfer1, chamfer2, idx_nn_12, idx_nn_21
    return chamfer1 + chamfer2
def get_nearest_neighbors_indices_batch(points_src, points_tgt, k=1):
    ''' Returns the nearest neighbors for point sets batchwise.
    Args:
        points_src (numpy array): source points
        points_tgt (numpy array): target points
        k (int): number of nearest neighbors to return
    '''
    indices = []
    distances = []
    # Build one KD-tree per batch element over the target points and
    # query all source points at once.
    for src, tgt in zip(points_src, points_tgt):
        dist, idx = KDTree(tgt).query(src, k=k)
        indices.append(idx)
        distances.append(dist)
    return indices, distances
def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.
    Args:
        x (tensor): input images
    '''
    # Per-channel (RGB) ImageNet mean / std; operate on a clone so the
    # caller's tensor is left untouched.
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    out = x.clone()
    for c, (mean, std) in enumerate(zip(means, stds)):
        out[:, c] = (out[:, c] - mean) / std
    return out
def make_3d_grid(bb_min, bb_max, shape):
    ''' Makes a 3D grid.
    Args:
        bb_min (tuple): bounding box minimum
        bb_max (tuple): bounding box maximum
        shape (tuple): output shape
    '''
    n_pts = shape[0] * shape[1] * shape[2]
    # One linspace per axis, each broadcast over the full grid shape
    axes = [torch.linspace(bb_min[i], bb_max[i], shape[i]) for i in range(3)]
    xs = axes[0].view(-1, 1, 1).expand(*shape).contiguous().view(n_pts)
    ys = axes[1].view(1, -1, 1).expand(*shape).contiguous().view(n_pts)
    zs = axes[2].view(1, 1, -1).expand(*shape).contiguous().view(n_pts)
    # Stack into an (n_pts, 3) coordinate list; z varies fastest
    return torch.stack([xs, ys, zs], dim=1)
def transform_points(points, transform):
    ''' Transforms points with regard to passed camera information.
    Args:
        points (tensor): points tensor (batch x T x 3)
        transform (tensor): transformation matrices, either a 3x4 world
            matrix [R|t] or a 3x3 linear matrix (e.g. intrinsics K)
    '''
    assert points.size(2) == 3
    assert transform.size(1) == 3
    assert points.size(0) == transform.size(0)
    n_cols = transform.size(2)
    if n_cols == 4:
        # Affine case: rotation/scale part plus translation column
        R = transform[:, :, :3]
        t = transform[:, :, 3:]
        points_out = points @ R.transpose(1, 2) + t.transpose(1, 2)
    elif n_cols == 3:
        # Pure linear case
        points_out = points @ transform.transpose(1, 2)
    return points_out
def b_inv(b_mat):
    ''' Performs batch matrix inversion.
    Arguments:
        b_mat: the batch of matrices that should be inverted
    Returns:
        Tensor of the same shape holding the inverse of every matrix
        in the batch.
    '''
    # Solve b_mat @ X = I for X instead of forming the inverse explicitly.
    # torch.gesv was deprecated in PyTorch 1.2 and removed in 1.9;
    # torch.linalg.solve has identical semantics (and returns only the
    # solution, no LU factors).
    eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
    return torch.linalg.solve(b_mat, eye)
def transform_points_back(points, transform):
    ''' Inverts the transformation.
    Args:
        points (tensor): points tensor
        transform (tensor): transformation matrices
    '''
    assert points.size(2) == 3
    assert transform.size(1) == 3
    assert points.size(0) == transform.size(0)
    n_cols = transform.size(2)
    if n_cols == 4:
        # Undo the affine map: subtract translation, then apply (R^T)^-1
        R = transform[:, :, :3]
        t = transform[:, :, 3:]
        points_out = (points - t.transpose(1, 2)) @ b_inv(R.transpose(1, 2))
    elif n_cols == 3:
        # Undo the linear map (e.g. camera intrinsics)
        points_out = points @ b_inv(transform.transpose(1, 2))
    return points_out
def project_to_camera(points, transform):
    ''' Projects points to the camera plane.
    Args:
        points (tensor): points tensor
        transform (tensor): transformation matrices
    '''
    # Transform into camera space, then apply the perspective divide
    p_cam = transform_points(points, transform)
    return p_cam[..., :2] / p_cam[..., 2:]
def get_camera_args(data, loc_field=None, scale_field=None, device=None):
    ''' Returns dictionary of camera arguments.
    Args:
        data (dict): data dictionary
        loc_field (str): name of location field
        scale_field (str): name of scale field
        device (device): pytorch device
    '''
    Rt = data['inputs.world_mat'].to(device)
    K = data['inputs.camera_mat'].to(device)
    # Fall back to zero location / zero scale when no field name is given
    if loc_field is None:
        loc = torch.zeros(K.size(0), 3, device=K.device, dtype=K.dtype)
    else:
        loc = data[loc_field].to(device)
    if scale_field is None:
        scale = torch.zeros(K.size(0), device=K.device, dtype=K.dtype)
    else:
        scale = data[scale_field].to(device)
    # Bake location/scale into Rt and renormalize K to [-1, 1] image coords
    return {
        'Rt': fix_Rt_camera(Rt, loc, scale),
        'K': fix_K_camera(K, img_size=137.),
    }
def fix_Rt_camera(Rt, loc, scale):
    ''' Fixes Rt camera matrix.
    Args:
        Rt (tensor): Rt camera matrix (B x 3 x 4)
        loc (tensor): location (B x 3)
        scale (tensor): per-sample scale (B)
    '''
    batch_size = Rt.size(0)
    R = Rt[:, :, :3]
    t = Rt[:, :, 3:]
    # Fold the object scale into the rotation part and the location offset
    # into the translation column.
    R_scaled = R * scale.view(batch_size, 1, 1)
    t_shifted = t + R @ loc.unsqueeze(2)
    Rt_fixed = torch.cat([R_scaled, t_shifted], dim=2)
    assert Rt_fixed.size() == (batch_size, 3, 4)
    return Rt_fixed
def fix_K_camera(K, img_size=137):
    """Fix camera projection matrix.
    This changes a camera projection matrix that maps to
    [0, img_size] x [0, img_size] to one that maps to [-1, 1] x [-1, 1].
    Args:
        K (np.ndarray): Camera projection matrix.
        img_size (float): Size of image plane K projects to.
    """
    # Left-multiply by a matrix that rescales pixel coordinates to [-1, 1]
    # and recenters the principal point.
    scale_mat = torch.tensor([
        [2. / img_size, 0., -1.],
        [0., 2. / img_size, -1.],
        [0., 0., 1.],
    ], device=K.device, dtype=K.dtype)
    return scale_mat.view(1, 3, 3) @ K
| 9,273 | 28.163522 | 78 | py |
Im2Hands | Im2Hands-main/im2mesh/preprocess.py | import torch
from im2mesh import config
from im2mesh.checkpoints import CheckpointIO
from im2mesh.utils.io import export_pointcloud
class PSGNPreprocessor:
    ''' Point Set Generation Networks (PSGN) preprocessor class.
    Args:
        cfg_path (str): path to config file
        pointcloud_n (int): number of output points
        dataset (dataset): dataset
        device (device): pytorch device
        model_file (str): model file
    '''
    def __init__(self, cfg_path, pointcloud_n, dataset=None, device=None,
                 model_file=None):
        self.cfg = config.load_config(cfg_path, 'configs/default.yaml')
        self.pointcloud_n = pointcloud_n
        self.device = device
        self.dataset = dataset
        self.model = config.get_model(self.cfg, device, dataset)
        # Checkpoints live in the PSGN model's own output directory
        out_dir = self.cfg['training']['out_dir']
        # Default to the model file configured for testing
        if model_file is None:
            model_file = self.cfg['test']['model_file']
        # Load pretrained weights
        self.checkpoint_io = CheckpointIO(out_dir, model=self.model)
        self.checkpoint_io.load(model_file)

    def __call__(self, inputs):
        self.model.eval()
        with torch.no_grad():
            points = self.model(inputs)
        batch_size = points.size(0)
        n_pred = points.size(1)
        if n_pred == self.pointcloud_n:
            return points
        # Randomly subsample (with replacement) to the requested size
        idx = torch.randint(
            low=0, high=n_pred,
            size=(batch_size, self.pointcloud_n),
            device=self.device
        )
        idx = idx[:, :, None].expand(batch_size, self.pointcloud_n, 3)
        return torch.gather(points, dim=1, index=idx)
| 1,773 | 31.254545 | 74 | py |
Im2Hands | Im2Hands-main/im2mesh/r2n2/training.py | import os
from tqdm import trange
import numpy as np
import torch
import torch.nn.functional as F
from im2mesh.training import BaseTrainer
from im2mesh.common import compute_iou
from im2mesh.utils import visualize as vis
from im2mesh.utils.voxels import VoxelGrid
class Trainer(BaseTrainer):
    ''' Trainer class for the R2N2 model.
    It handles the training and evaluation steps as well as intermidiate
    visualizations.
    Args:
        model (nn.Module): R2N2 model
        optimizer (optimizer): pytorch optimizer
        device (device): pytorch device
        input_type (str): input type
        vis_dir (str): visualization directory
        threshold (float): threshold value for binarizing predicted
            occupancy probabilities
    '''

    def __init__(self, model, optimizer, device=None, input_type='img',
                 vis_dir=None, threshold=0.5):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.input_type = input_type
        self.vis_dir = vis_dir
        self.threshold = threshold
        # Create the visualization directory on first use
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)

    def train_step(self, data):
        ''' Performs a training step.
        Args:
            data (dict): data dictionary
        '''
        self.model.train()
        occ = data.get('voxels').to(self.device)
        inputs = data.get('inputs').to(self.device)
        loss = self.compute_loss(occ, inputs)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def eval_step(self, data):
        ''' Performs an evaluation step.
        Args:
            data (dict): data dictionary
        '''
        self.model.eval()
        device = self.device
        threshold = self.threshold
        occ = data.get('voxels').to(device)
        inputs = data.get('inputs').to(device)
        points = data.get('points_iou')
        points_occ = data.get('points_iou.occ')
        with torch.no_grad():
            # Model output has a singleton channel dim; values are raw logits
            occ_logits = self.model(inputs).squeeze(1)
        eval_dict = {}
        # Compute loss (BCE with logits against the GT voxel grid)
        occ_hat = torch.sigmoid(occ_logits)
        loss = F.binary_cross_entropy_with_logits(occ_logits, occ)
        eval_dict['loss'] = loss.item()
        # Compute discretized IOU on the voxel grid
        occ_np = (occ >= 0.5).cpu().numpy()
        occ_hat_np = (occ_hat >= threshold).cpu().numpy()
        iou_voxels = compute_iou(occ_np, occ_hat_np).mean()
        eval_dict['iou_voxels'] = iou_voxels
        # Compute continuous IOU (if possible) by evaluating the predicted
        # voxel grid at the sampled IoU points
        if points is not None:
            voxel_grids = [VoxelGrid(occ_hat_np_i)
                           for occ_hat_np_i in occ_hat_np]
            points_np = points.cpu().numpy()
            points_occ_np = (points_occ >= 0.5).cpu().numpy()
            points_occ_hat_np = np.stack(
                [vg.contains(p) for p, vg in zip(points_np, voxel_grids)])
            iou = compute_iou(points_occ_np, points_occ_hat_np).mean()
            eval_dict['iou'] = iou
        return eval_dict

    def visualize(self, data):
        ''' Performs an intermidiate visualization.
        Args:
            data (dict): data dictionary
        '''
        device = self.device
        occ = data.get('voxels').to(device)
        inputs = data.get('inputs').to(device)
        with torch.no_grad():
            occ_logits = self.model(inputs).squeeze(1)
        occ_hat = torch.sigmoid(occ_logits)
        # Binarize GT and prediction for voxel visualization
        voxels_gt = (occ >= self.threshold).cpu().numpy()
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()
        batch_size = occ.size(0)
        # Write input image, predicted voxels and GT voxels per sample
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(
                inputs[i].cpu(), self.input_type, input_img_path)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
            vis.visualize_voxels(
                voxels_gt[i], os.path.join(self.vis_dir, '%03d_gt.png' % i))

    def compute_loss(self, occ, inputs=None):
        ''' Computes the loss.
        Args:
            occ (tensor): GT occupancy values for the voxel grid
            inputs (tensor): input tensor
        '''
        # occ_hat holds logits here; BCE-with-logits applies the sigmoid
        occ_hat = self.model(inputs).squeeze(1)
        loss = F.binary_cross_entropy_with_logits(occ_hat, occ)
        return loss
| 4,360 | 31.066176 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/r2n2/config.py | import os
from im2mesh.encoder import encoder_dict
from im2mesh.r2n2 import models, training, generation
from im2mesh import data
def get_model(cfg, device=None, **kwargs):
    ''' Return the model.
    Args:
        cfg (dict): loaded yaml config
        device (device): pytorch device
    '''
    decoder_name = cfg['model']['decoder']
    encoder_name = cfg['model']['encoder']
    dim = cfg['data']['dim']
    c_dim = cfg['model']['c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    encoder_kwargs = cfg['model']['encoder_kwargs']
    # Instantiate decoder and encoder from their registries
    decoder = models.decoder_dict[decoder_name](
        dim=dim, c_dim=c_dim,
        **decoder_kwargs
    )
    encoder = encoder_dict[encoder_name](
        c_dim=c_dim,
        **encoder_kwargs
    )
    # Combine into the full R2N2 model and move it to the target device
    model = models.R2N2(decoder, encoder)
    return model.to(device)
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns the trainer object.
    Args:
        model (nn.Module): R2N2 model
        optimizer (optimizer): pytorch optimizer
        cfg (dict): loaded yaml config
        device (device): pytorch device
    '''
    # Intermediate visualizations are written to <out_dir>/vis
    vis_dir = os.path.join(cfg['training']['out_dir'], 'vis')
    return training.Trainer(
        model, optimizer, device=device,
        input_type=cfg['data']['input_type'],
        vis_dir=vis_dir,
        threshold=cfg['test']['threshold'],
    )
def get_generator(model, cfg, device, **kwargs):
    ''' Returns the generator object.
    Args:
        model (nn.Module): R2N2 model
        cfg (dict): loaded yaml config
        device (device): pytorch device
    '''
    return generation.VoxelGenerator3D(model, device=device)
def get_data_fields(split, cfg, **kwargs):
    ''' Returns the data fields.
    Args:
        split (str): the split which should be used
        cfg (dict): loaded yaml config
    '''
    with_transforms = cfg['data']['with_transforms']
    fields = {}
    if split == 'train':
        # Training supervises directly on the voxel grid
        fields['voxels'] = data.VoxelsField(cfg['data']['voxels_file'])
    elif split in ('val', 'test'):
        # Evaluation uses occupancy points sampled in the volume
        fields['points_iou'] = data.PointsField(
            cfg['data']['points_iou_file'],
            with_transforms=with_transforms,
            unpackbits=cfg['data']['points_unpackbits'],
        )
    return fields
| 2,505 | 24.571429 | 57 | py |
Im2Hands | Im2Hands-main/im2mesh/r2n2/generation.py | import torch
import numpy as np
from im2mesh.utils.voxels import VoxelGrid
class VoxelGenerator3D(object):
    ''' Generator class for R2N2 model.
    The output of the model is transformed to a voxel grid and returned as a
    mesh.
    Args:
        model (nn.Module): (trained) R2N2 model
        threshold (float): threshold value for deciding whether a voxel is
            occupied or not
        device (device): pytorch device
    '''

    def __init__(self, model, threshold=0.5, device=None):
        self.model = model.to(device)
        self.threshold = threshold
        self.device = device

    def generate_mesh(self, data):
        ''' Generates the output mesh.
        Args:
            data (dict): data dictionary
        '''
        self.model.eval()
        inputs = data.get('inputs', torch.empty(1, 0)).to(self.device)
        with torch.no_grad():
            # Drop channel and batch dimensions of the single prediction
            logits = self.model(inputs).squeeze(1).squeeze(0)
        return self.extract_mesh(logits.cpu().numpy())

    def extract_mesh(self, values):
        ''' Extracts the mesh.
        Args:
            values (numpy array): predicted values (logits)
        '''
        # The model outputs logits, so compare against the logit of the
        # probability threshold instead of applying a sigmoid first.
        logit_threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        return VoxelGrid(values >= logit_threshold).to_mesh()
| 1,414 | 24.267857 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/r2n2/models/decoder.py | import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
    ''' Decoder network class for the R2N2 model.
    It consists of 4 transposed 3D-convolutional layers.
    Args:
        dim (int): input dimension
        c_dim (int): dimension of latent conditioned code c
    '''

    def __init__(self, dim=3, c_dim=128):
        super().__init__()
        self.actvn = F.relu
        # Project the latent code to a 4^3 feature volume with 256 channels
        self.fc_in = nn.Linear(c_dim, 256 * 4 * 4 * 4)
        # Three upsampling steps: 4^3 -> 8^3 -> 16^3 -> 32^3
        self.convtrp_0 = nn.ConvTranspose3d(256, 128, 3, stride=2,
                                            padding=1, output_padding=1)
        self.convtrp_1 = nn.ConvTranspose3d(128, 64, 3, stride=2,
                                            padding=1, output_padding=1)
        self.convtrp_2 = nn.ConvTranspose3d(64, 32, 3, stride=2,
                                            padding=1, output_padding=1)
        # 1x1x1 convolution producing the single-channel occupancy logits
        self.conv_out = nn.Conv3d(32, 1, 1)

    def forward(self, c):
        batch_size = c.size(0)
        # Reshape the linear projection into a 3D feature volume
        net = self.fc_in(c).view(batch_size, 256, 4, 4, 4)
        for deconv in (self.convtrp_0, self.convtrp_1, self.convtrp_2):
            net = deconv(self.actvn(net))
        return self.conv_out(self.actvn(net))
| 1,260 | 31.333333 | 72 | py |
Im2Hands | Im2Hands-main/im2mesh/r2n2/models/__init__.py | import torch.nn as nn
from im2mesh.r2n2.models.decoder import Decoder
# Decoder dictionary
decoder_dict = {
'simple': Decoder,
}
class R2N2(nn.Module):
    ''' The 3D Recurrent Reconstruction Neural Network (3D-R2N2) model.
    For details regarding the model, please see
    https://arxiv.org/abs/1604.00449
    As single-view images are used as input, we do not use the recurrent
    module.
    Args:
        decoder (nn.Module): decoder network
        encoder (nn.Module): encoder network
    '''

    def __init__(self, decoder, encoder):
        super().__init__()
        self.decoder = decoder
        self.encoder = encoder

    def forward(self, x):
        # Encode the input to a latent code, then decode occupancy logits
        return self.decoder(self.encoder(x))
| 757 | 21.294118 | 72 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/training.py | import os
from tqdm import trange
import torch
from im2mesh.common import chamfer_distance
from im2mesh.training import BaseTrainer
from im2mesh.utils import visualize as vis
import numpy as np
import torch.nn.functional as F
import scipy.ndimage
from im2mesh.dmc.utils.util import gaussian_kernel, offset_to_normal
from im2mesh.dmc.ops.curvature_constraint import CurvatureConstraint
from im2mesh.dmc.ops.occupancy_connectivity import OccupancyConnectivity
from im2mesh.dmc.ops.point_triangle_distance import PointTriangleDistance
from im2mesh.dmc.ops.table import get_accept_topology
class Trainer(BaseTrainer):
    ''' Trainer class for the Deep Marching Cubes (DMC) model.

    It combines an expected point-to-mesh distance L^{mesh} with an
    occupancy prior L^{occ}, a smoothness term L^{smooth} and a
    curvature term L^{curve}.

    Args:
        model (nn.Module): DMC model
        optimizer (optimizer): pytorch optimizer
        device (device): pytorch device
        input_type (str): input type
        vis_dir (str): visualization directory
        num_voxels (int): number of grid cells along each axis
        weight_distance (float): weight of the point-to-mesh loss
        weight_prior_pos (float): weight of the occupied-subvolume prior
        weight_prior (float): weight of the overall occupancy prior
        weight_smoothness (float): weight of the smoothness loss
        weight_curvature (float): weight of the curvature loss
    '''
    def __init__(self, model, optimizer, device=None, input_type='pointcloud',
                 vis_dir=None, num_voxels=16, weight_distance=5.0, weight_prior_pos=0.2, weight_prior=10.0, weight_smoothness=3.0, weight_curvature=3.0):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.input_type = input_type
        self.vis_dir = vis_dir
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)
        self.num_cells = num_voxels  # - 1
        self.len_cell = 1.0
        # Grid line coordinates along each axis (num_cells + 1 values each)
        self.x_grids = np.arange(0, self.num_cells+1, self.len_cell)
        self.y_grids = np.arange(0, self.num_cells+1, self.len_cell)
        self.z_grids = np.arange(0, self.num_cells+1, self.len_cell)
        # Custom DMC ops
        self.distanceModule = PointTriangleDistance()
        self.curvatureLoss = CurvatureConstraint()
        self.occupancyConnectivity = OccupancyConnectivity()
        self.acceptTopology = torch.LongTensor(
            get_accept_topology()).to(device)
        flip_indices = torch.arange(
            self.acceptTopology.size()[0]-1, -1, -1).long()
        # Each accepted topology together with its inside/outside dual
        self.acceptTopologyWithFlip = torch.cat([
            self.acceptTopology, 255 - self.acceptTopology[flip_indices]], dim=0)
        self.visTopology = torch.LongTensor(get_accept_topology(4)).to(device)
        # assume that the outside __faces__ of the grid is always free
        W = len(self.x_grids)
        H = len(self.y_grids)
        D = len(self.z_grids)
        tmp_ = np.zeros((W, H, D))
        tmp_[0, :, :] = 1
        tmp_[W-1, :, :] = 1
        tmp_[:, :, 0] = 1
        tmp_[:, :, D-1] = 1
        tmp_[:, 0, :] = 1
        tmp_[:, H-1, :] = 1
        kern3 = gaussian_kernel(3)
        # Smooth the boundary mask into a soft "should be free" weight.
        # NOTE: scipy.ndimage.filters was deprecated and removed in
        # SciPy >= 1.14; scipy.ndimage.convolve is the same function.
        neg_weight = scipy.ndimage.convolve(tmp_, kern3)
        neg_weight = neg_weight/np.max(neg_weight)
        self.neg_weight = torch.from_numpy(neg_weight.astype(np.float32)).to(device)
        self.one = torch.ones(1, requires_grad=True).to(device)
        self.weight_distance = weight_distance
        self.weight_prior_pos = weight_prior_pos
        self.weight_prior = weight_prior
        self.weight_smoothness = weight_smoothness
        self.weight_curvature = weight_curvature

    def train_step(self, data):
        ''' Performs a training step and returns the scalar loss.
        Args:
            data (dict): data dictionary
        '''
        self.model.train()
        inputs = data.get('inputs').to(self.device)
        pointcloud = data.get('pointcloud').to(self.device)
        # Map input and GT point cloud into grid coordinates
        inputs = self.num_cells * (inputs / 1.2 + 0.5)
        pointcloud = self.num_cells * (pointcloud / 1.2 + 0.5)
        offset, topology, occupancy = self.model(inputs)
        loss, loss_stages = self.loss_train(
            offset, topology, pointcloud, occupancy)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def eval_step(self, data):
        ''' Performs an evaluation step and returns a metrics dictionary.
        Args:
            data (dict): data dictionary
        '''
        self.model.eval()
        device = self.device
        inputs = data.get('inputs').to(device)
        pointcloud = data.get('pointcloud').to(device)
        inputs = self.num_cells * (inputs / 1.2 + 0.5)
        pointcloud = self.num_cells * (pointcloud / 1.2 + 0.5)
        with torch.no_grad():
            offset, topology, occupancy = self.model(inputs)
            loss, loss_stages = self.loss_train(
                offset, topology, pointcloud, occupancy)
        loss = loss.item()
        # loss_stages holds the contribution of each term for sample 0
        eval_dict = {
            'loss': loss,
            'loss mesh': loss_stages[0],
            'loss occupancy': loss_stages[1],
            'loss smoothness': loss_stages[2],
            'loss curvature': loss_stages[3],
        }
        return eval_dict

    def visualize(self, data):
        ''' Visualizes inputs and predicted occupancies as voxel images.
        Args:
            data (dict): data dictionary
        '''
        device = self.device
        shape = (self.num_cells + 1,) * 3
        inputs = data.get('inputs').to(self.device)
        batch_size = inputs.size(0)
        inputs_norm = self.num_cells * (inputs / 1.2 + 0.5)
        with torch.no_grad():
            offset, topology, occupancy = self.model(inputs_norm)
        occupancy = occupancy.view(batch_size, *shape)
        voxels_out = (occupancy >= 0.5).cpu().numpy()
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(
                inputs[i].cpu(), self.input_type, input_img_path)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))

    def loss_train(self, offset, topology, pts, occupancy):
        """Compute the losses given a batch of point cloud and the predicted
        mesh during the training phase
        """
        loss = 0
        loss_stages = []
        batchsize = offset.size()[0]
        for i in range(batchsize):
            # L^{mesh}
            loss += self.loss_point_to_mesh(offset[i], topology[i], pts[i], 'train')
            if i == 0:
                loss_stages.append(loss.item())
            # L^{occ}
            loss += self.loss_on_occupancy(occupancy[i, 0])
            if i == 0:
                loss_stages.append(loss.item() - sum(loss_stages))
            # L^{smooth}
            loss += self.loss_on_smoothness(occupancy[i, 0])
            if i == 0:
                loss_stages.append(loss.item() - sum(loss_stages))
            # L^{curve}
            loss += self.loss_on_curvature(offset[i], topology[i])
            if i == 0:
                loss_stages.append(loss.item() - sum(loss_stages))
        loss = loss/batchsize
        return loss, loss_stages

    def loss_eval(self, offset, topology, pts):
        """Compute the point to mesh loss during validation phase"""
        loss = self.loss_point_to_mesh(offset, topology, pts, 'val')
        # Multiply by self.one to keep the result a graph-connected tensor
        return loss * self.one

    def loss_point_to_mesh(self, offset, topology, pts, phase='train'):
        """Compute the point to mesh distance"""
        # compute the distances between all topologies and a point set
        dis_sub = self.distanceModule(offset, pts)
        # dual topologies share the same point-to-triangle distance
        flip_indices = torch.arange(len(self.acceptTopology)-1, -1, -1).long()
        dis_accepted = torch.cat([dis_sub, dis_sub[:, flip_indices]], dim=1)
        topology_accepted = topology[:, self.acceptTopologyWithFlip]
        # renormalize all desired topologies so that they sum to 1
        prob_sum = torch.sum(
            topology_accepted, dim=1, keepdim=True).clamp(1e-6)
        topology_accepted = topology_accepted / prob_sum
        # compute the expected loss
        loss = torch.sum(
            topology_accepted.mul(dis_accepted)) / (self.num_cells**3)
        if phase == 'train':
            loss = loss * self.weight_distance
        return loss

    def loss_on_occupancy(self, occupancy):
        """Compute the loss given the prior that the 6 faces of the cube
        bounding the 3D scene are unoccupied and a sub-volume inside the
        scene is occupied
        """
        # loss on 6 faces of the cube
        loss_free = torch.sum(torch.mul(
            occupancy, self.neg_weight)) / torch.sum(self.neg_weight)
        W = occupancy.size()[0]
        H = occupancy.size()[1]
        D = occupancy.size()[2]
        # get occupancy.data as we don't want to backpropagate to the adaptive_weight
        sorted_cube, _ = torch.sort(
            occupancy.data.view(-1), 0, descending=True)
        # check the largest 1/30 value
        adaptive_weight = 1 - torch.mean(sorted_cube[0:int(sorted_cube.size()[0]/30)])
        # loss on a subvolume inside the cube, where the weight is assigned
        # adaptively w.r.t. the current occupancy status
        loss_occupied = self.weight_prior_pos * adaptive_weight * \
            (1-torch.mean(occupancy[int(0.2*W):int(0.8*W),
             int(0.2*H):int(0.8*H), int(0.2*D):int(0.8*D)]))
        return (loss_free + loss_occupied) * self.weight_prior

    def loss_on_smoothness(self, occupancy):
        """Compute the smoothness loss defined between neighboring occupancy
        variables
        """
        loss = (
            self.occupancyConnectivity(occupancy) / (self.num_cells**3)
            * self.weight_smoothness
        )
        return loss

    def loss_on_curvature(self, offset, topology):
        """Compute the curvature loss by measuring the smoothness of the
        predicted mesh geometry
        """
        topology_accepted = topology[:, self.acceptTopologyWithFlip]
        return self.weight_curvature*self.curvatureLoss(offset, \
            F.softmax(topology_accepted, dim=1)) / (self.num_cells**3)
| 9,194 | 35.78 | 153 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/generation.py | import torch
import numpy as np
import trimesh
from im2mesh.dmc.utils.pred2mesh import pred_to_mesh_max
from im2mesh.dmc.ops.occupancy_to_topology import OccupancyToTopology
from im2mesh.dmc.ops.table import get_accept_topology
class Generator3D(object):
    ''' Mesh generator for the Deep Marching Cubes model.
    Args:
        model (nn.Module): trained DMC model
        device (device): pytorch device
        num_voxels (int): number of grid cells along each axis
    '''

    def __init__(self, model, device=None, num_voxels=32):
        self.model = model.to(device)
        self.device = device
        self.num_voxels = num_voxels
        # Topologies accepted for mesh extraction
        self.vis_topology = torch.LongTensor(get_accept_topology(4))

    def generate_mesh(self, data):
        ''' Generates a triangle mesh from the model prediction.
        Args:
            data (dict): data dictionary
        '''
        self.model.eval()
        inputs = data.get('inputs', torch.empty(1, 0)).to(self.device)
        # Rescale the input point cloud into grid coordinates
        inputs = self.num_voxels * (inputs / 1.2 + 0.5)
        with torch.no_grad():
            offset, topology, occupancy = self.model(inputs)
        offset = offset.squeeze()
        topology = topology.squeeze()[:, self.vis_topology]
        vertices, faces = pred_to_mesh_max(offset, topology)
        faces = faces.astype(np.int64)
        # Map vertices from grid coordinates back to the original scale
        vertices = 1.2 * (vertices / self.num_voxels - 0.5)
        return trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
| 1,182 | 29.333333 | 77 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/models/encoder.py | import torch.nn as nn
import torch
from im2mesh.dmc.ops.grid_pooling import GridPooling
class PointNetLocal(nn.Module):
    ''' Point Net Local Conditional Network from the Deep Marching Cubes paper.
    It applies two fully connected layers to the input points (dim 3) in a
    1D Convolutional Layer fashion to avoid to specify the number of
    incoming points
    '''
    def __init__(self, c_dim=256, out_dim=16, cell_W=16, cell_H=16, cell_D=16):
        super().__init__()
        self.cell_W = cell_W
        self.cell_H = cell_H
        self.cell_D = cell_D
        # TODO change gridpooling input to be compatible to single values of W H D
        # NOTE(review): this hard-codes a CUDA tensor, so the module
        # requires a GPU at construction time — confirm before CPU use.
        self.gridshape = torch.cuda.LongTensor([cell_W, cell_H, cell_D])
        self.grid_pool = GridPooling(self.gridshape)
        # Per-point MLP implemented with 1x1 1D convolutions
        self.conv1 = nn.Sequential(
            nn.Conv1d(3, c_dim, 1), nn.ReLU()
        )
        self.conv2 = nn.Conv1d(c_dim, out_dim, 1)

    def forward(self, x):
        pts = x
        # (batch, n_pts, 3) -> (batch, 3, n_pts) for Conv1d
        feats = self.conv1(x.transpose(1, 2))   # b_size x c_dim x num_points
        feats = self.conv2(feats)               # b_size x out_dim x num_points
        # back to (batch, n_pts, out_dim)
        feats = feats.transpose(1, 2)
        return self.point_to_cell(pts, feats, self.cell_W, self.cell_H,
                                  self.cell_D)

    def point_to_cell(self, pts, feat, W, H, D, expand=1):
        """ perform maxpool on points in every cell set zero vector if cell is
        empty if expand=1 then return (N+1)x(N+1)x(N+1), for dmc expand=0 then
        return NxNxN, for occupancy/sdf baselines
        """
        batchsize = feat.size()[0]
        C = feat.size()[2]
        # Max-pool the per-point features into grid cells, one sample at a time
        pooled = [
            self.grid_pool(feat[k, :, :], pts[k, :, :])
            for k in range(batchsize)
        ]
        feat_cell = torch.stack(pooled, dim=0)
        # TODO check if this view is compatible to output of grid pool
        feat_cell = torch.transpose(feat_cell, 1, 2).contiguous().view(
            -1, C, W, H, D)
        if expand == 0:
            return feat_cell
        # Zero-pad one extra cell along each spatial axis -> (W+1, H+1, D+1)
        curr_size = feat_cell.size()
        feat_cell_exp = torch.zeros(
            curr_size[0], curr_size[1], curr_size[2]+1, curr_size[3]+1,
            curr_size[4]+1).to(pts.device)
        feat_cell_exp[:, :, :-1, :-1, :-1] = feat_cell
        return feat_cell_exp
| 2,574 | 35.785714 | 83 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/models/decoder.py | import torch.nn as nn
import torch
from im2mesh.dmc.ops.occupancy_to_topology import OccupancyToTopology
class UNetDecoder(nn.Module):
    ''' U-Net style wrapper combining a local encoder and a surface decoder.
    Args:
        input_dim (int): number of input feature channels
        T (int): number of topology classes
        W, H, D (int): grid resolution
        skip_connection (bool): whether to use encoder-decoder skip links
    '''
    def __init__(self, input_dim=16, T=256, W=32, H=32, D=32, skip_connection=True):
        super().__init__()
        self.skip_connection = skip_connection
        self.decoder = SurfaceDecoder(T, W, H, D, skip_connection)
        self.encoder = LocalEncoder(input_dim, skip_connection)

    def forward(self, c):
        if self.skip_connection:
            # Pass intermediate encoder features through to the decoder
            latent, skips = self.encoder(c)
            occupancy, offset, topology = self.decoder(latent, skips)
        else:
            latent = self.encoder(c)
            occupancy, offset, topology = self.decoder(latent)
        return offset, topology, occupancy
class SurfaceDecoder(nn.Module):
    """Decoder of the U-Net, estimate topology and offset with two headers"""
    def __init__(self, T=256, W=32, H=32, D=32, skip_connection=True):
        super(SurfaceDecoder, self).__init__()
        # Grid resolution (W x H x D) and number of topology classes T
        self.W = W
        self.H = H
        self.D = D
        self.T = T
        self.actvn = nn.ReLU()
        # Converts per-corner occupancies into per-cell topology distributions
        self.Occ2Top = OccupancyToTopology()
        # decoder
        self.deconv4 = nn.Conv3d(128, 64, 3, padding=1)
        self.deconv3_1 = nn.ConvTranspose3d(128, 128, 3, padding=1)
        self.deconv3_2 = nn.ConvTranspose3d(128, 32, 3, padding=1)
        # From here on the network splits into two heads: one for the
        # vertex offsets ("off") and one for the occupancies ("occ").
        self.deconv2_off_1 = nn.ConvTranspose3d(64, 64, 3, padding=1)
        self.deconv2_off_2 = nn.ConvTranspose3d(64, 16, 3, padding=1)
        self.deconv2_occ_1 = nn.ConvTranspose3d(64, 64, 3, padding=1)
        self.deconv2_occ_2 = nn.ConvTranspose3d(64, 16, 3, padding=1)
        self.deconv1_off_1 = nn.ConvTranspose3d(32, 32, 3, padding=1)
        self.deconv1_off_2 = nn.ConvTranspose3d(32, 3, 3, padding=3)
        self.deconv1_occ_1 = nn.ConvTranspose3d(32, 32, 3, padding=1)
        self.deconv1_occ_2 = nn.ConvTranspose3d(32, 1, 3, padding=3)
        # batchnorm
        self.deconv4_bn = nn.BatchNorm3d(64)
        self.deconv3_1_bn = nn.BatchNorm3d(128)
        self.deconv3_2_bn = nn.BatchNorm3d(32)
        self.deconv2_off_1_bn = nn.BatchNorm3d(64)
        self.deconv2_off_2_bn = nn.BatchNorm3d(16)
        self.deconv2_occ_1_bn = nn.BatchNorm3d(64)
        self.deconv2_occ_2_bn = nn.BatchNorm3d(16)
        self.deconv1_off_1_bn = nn.BatchNorm3d(32)
        self.deconv1_occ_1_bn = nn.BatchNorm3d(32)
        self.sigmoid = nn.Sigmoid()
        self.maxunpool = nn.MaxUnpool3d(2)
        self.skip_connection = skip_connection
    def decoder(self, x, intermediate_feat=None):
        # intermediate_feat carries the encoder's per-level features plus
        # the pooling sizes/indices needed by MaxUnpool3d.
        if self.skip_connection:
            feat1, size1, indices1, feat2, size2, indices2, feat3, size3, indices3 = intermediate_feat
        #
        x = self.actvn(self.deconv4_bn(self.deconv4(x)))
        #
        # Unpool to the next resolution and (optionally) concat skip features
        x = self.maxunpool(x, indices3, output_size=size3)
        if self.skip_connection:
            x = torch.cat((feat3, x), 1)
        x = self.actvn(self.deconv3_1_bn(self.deconv3_1(x)))
        x = self.actvn(self.deconv3_2_bn(self.deconv3_2(x)))
        #
        x = self.maxunpool(x, indices2, output_size=size2)
        if self.skip_connection:
            x = torch.cat((feat2, x), 1)
        # The two heads (occupancy / offset) diverge from this point on
        x_occupancy = self.actvn(self.deconv2_occ_1_bn(self.deconv2_occ_1(x)))
        x_occupancy = self.actvn(
            self.deconv2_occ_2_bn(self.deconv2_occ_2(x_occupancy)))
        x_offset = self.actvn(self.deconv2_off_1_bn(self.deconv2_off_1(x)))
        x_offset = self.actvn(
            self.deconv2_off_2_bn(self.deconv2_off_2(x_offset)))
        #
        x_occupancy = self.maxunpool(x_occupancy, indices1, output_size=size1)
        if self.skip_connection:
            x_occupancy = torch.cat((feat1, x_occupancy), 1)
        x_offset = self.maxunpool(x_offset, indices1, output_size=size1)
        if self.skip_connection:
            x_offset = torch.cat((feat1, x_offset), 1)
        # Occupancy head ends in a sigmoid -> values in (0, 1)
        x_occupancy = self.actvn(
            self.deconv1_occ_1_bn(self.deconv1_occ_1(x_occupancy)))
        x_occupancy = self.sigmoid(self.deconv1_occ_2(x_occupancy))
        # Offset head ends in sigmoid - 0.5 -> values in (-0.5, 0.5)
        x_offset = self.actvn(
            self.deconv1_off_1_bn(self.deconv1_off_1(x_offset)))
        x_offset = self.sigmoid(self.deconv1_off_2(x_offset)) - 0.5
        batchsize = x_occupancy.size()[0]
        # Derive the per-cell topology distribution from the occupancies,
        # one sample at a time
        topology = torch.zeros(batchsize, self.W*self.H*self.D, self.T).to(x.device)
        for k in range(batchsize):
            topology[k, :, :] = self.Occ2Top(x_occupancy[k, 0, :, :])
        return x_occupancy, x_offset, topology
    def forward(self, x, intermediate_feat=None):
        return self.decoder(x, intermediate_feat)
class LocalEncoder(nn.Module):
    """Encoder of the U-Net.

    Three conv blocks, each followed by max-pooling (keeping the pooling
    indices so the decoder can max-unpool), then a bottleneck convolution.

    Fix: the last line of ``forward`` was corrupted by stray dataset
    metadata fused onto ``return x``, which made the module unparsable;
    restored the plain ``return x``.

    Args:
        input_dim (int): number of channels of the input feature grid.
        skip_connection (bool): if True, forward() additionally returns the
            intermediate features/sizes/indices needed by the decoder.
    """

    def __init__(self, input_dim=16, skip_connection=True):
        super(LocalEncoder, self).__init__()
        self.actvn = nn.ReLU()
        # u-net; conv1_1 uses padding=3 with a 3x3x3 kernel, which grows the
        # spatial size by 4 (the decoder's last layers use padding=3 too,
        # presumably to shrink it back -- TODO confirm)
        self.conv1_1 = nn.Conv3d(input_dim, 16, 3, padding=3)
        self.conv1_2 = nn.Conv3d(16, 16, 3, padding=1)
        self.conv2_1 = nn.Conv3d(16, 32, 3, padding=1)
        self.conv2_2 = nn.Conv3d(32, 32, 3, padding=1)
        self.conv3_1 = nn.Conv3d(32, 64, 3, padding=1)
        self.conv3_2 = nn.Conv3d(64, 64, 3, padding=1)
        self.conv4 = nn.Conv3d(64, 128, 3, padding=1)
        # batchnorm
        self.conv1_1_bn = nn.BatchNorm3d(16)
        self.conv1_2_bn = nn.BatchNorm3d(16)
        self.conv2_1_bn = nn.BatchNorm3d(32)
        self.conv2_2_bn = nn.BatchNorm3d(32)
        self.conv3_1_bn = nn.BatchNorm3d(64)
        self.conv3_2_bn = nn.BatchNorm3d(64)
        self.conv4_bn = nn.BatchNorm3d(128)
        # return_indices=True so the decoder can max-unpool
        self.maxpool = nn.MaxPool3d(2, return_indices=True)
        self.skip_connection = skip_connection

    def encoder(self, x):
        """Run the encoder; returns the bottleneck plus all skip tensors."""
        x = self.actvn(self.conv1_1_bn(self.conv1_1(x)))
        x = self.actvn(self.conv1_2_bn(self.conv1_2(x)))
        feat1 = x
        size1 = x.size()
        x, indices1 = self.maxpool(x)
        #
        x = self.actvn(self.conv2_1_bn(self.conv2_1(x)))
        x = self.actvn(self.conv2_2_bn(self.conv2_2(x)))
        feat2 = x
        size2 = x.size()
        x, indices2 = self.maxpool(x)
        #
        x = self.actvn(self.conv3_1_bn(self.conv3_1(x)))
        x = self.actvn(self.conv3_2_bn(self.conv3_2(x)))
        feat3 = x
        size3 = x.size()
        x, indices3 = self.maxpool(x)
        #
        x = self.actvn(self.conv4_bn(self.conv4(x)))
        return x, feat1, size1, indices1, feat2, size2, indices2, feat3, size3, indices3

    def forward(self, x):
        x, feat1, size1, indices1, feat2, size2, indices2, feat3, size3, indices3 = self.encoder(x)
        if self.skip_connection:
            return x, (feat1, size1, indices1, feat2, size2, indices2, feat3, size3, indices3)
        else:
            return x
Im2Hands | Im2Hands-main/im2mesh/dmc/models/__init__.py | import torch.nn as nn
from im2mesh.dmc.models import encoder, decoder
# Registry of available decoder constructors, keyed by config name.
decoder_dict = {
    'unet': decoder.UNetDecoder
}

# Registry of available encoder constructors, keyed by config name.
encoder_dict = {
    'pointnet_local': encoder.PointNetLocal,
}
class DMC(nn.Module):
    """Deep Marching Cubes network: an encoder producing a feature grid and
    a decoder predicting (offset, topology, occupancy) from it."""

    def __init__(self, decoder, encoder):
        super().__init__()
        self.decoder = decoder
        self.encoder = encoder

    def forward(self, x):
        feats = self.encoder(x)
        off, topo, occ = self.decoder(feats)
        return off, topo, occ
| 495 | 19.666667 | 53 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/utils/pointTriangleDistance.py | #!/usr/bin/env python
#
# Tests distance between point and triangle in 3D. Aligns and uses 2D technique.
#
# Was originally some code on mathworks
#
# Implemented for pytorch Variable
# Adapted from https://gist.github.com/joshuashaffer/99d58e4ccbd37ca5d96e
import numpy as np
import torch
from torch.autograd import Variable
import time
one = Variable(torch.ones(1).type(torch.FloatTensor), requires_grad=True)
eps = 1e-8


def pointTriangleDistanceFast(TRI, P):
    """Batched, approximate squared point-to-triangle distance.

    Instead of the exact per-region analysis, the barycentric coordinates
    (s, t) are simply clamped, which keeps the computation vectorized over
    the whole point set.

    Input:
        TRI: 3x3 tensor, each column one triangle vertex.
        P:   Nx3 tensor, each row a query point.
    Output:
        Nx1 tensor of (approximate) squared distances.
    """
    assert(np.isnan(np.sum(TRI.data.cpu().numpy())) == 0)
    assert(np.isnan(np.sum(P.data.cpu().numpy())) == 0)

    base = TRI[:, 0]
    edge0 = TRI[:, 1] - base
    edge1 = TRI[:, 2] - base
    diff = base.unsqueeze(0).expand_as(P) - P

    d = diff.mm(edge0.unsqueeze(1))
    e = diff.mm(edge1.unsqueeze(1))
    f = torch.diag(diff.mm(torch.t(diff))).unsqueeze(1)
    a = torch.dot(edge0, edge0).expand_as(d)
    b = torch.dot(edge0, edge1).expand_as(d)
    c = torch.dot(edge1, edge1).expand_as(d)

    det = a * c - b * b
    # clamped barycentric coordinates (s, t >= 0)
    s = ((b * e - c * d) / (det + eps)).clamp(min=0)
    t = ((b * d - a * e) / (det + eps)).clamp(min=0)
    # project back so that s + t <= 1
    bary_sum = (s + t).clamp(min=1)
    s = s / bary_sum
    t = t / bary_sum

    sqrdist = s * (a * s + b * t + 2.0 * d) + t * (b * s + c * t + 2.0 * e) + f
    # squared distance is returned directly (no sqrt)
    return sqrdist.clamp(min=0)
def pointTriangleDistance(TRI, P):
    # function [dist,PP0] = pointTriangleDistance(TRI,P)
    # calculate distance between a point and a triangle in 3D
    # SYNTAX
    #   dist = pointTriangleDistance(TRI,P)
    #   [dist,PP0] = pointTriangleDistance(TRI,P)
    #
    # DESCRIPTION
    #   Calculate the distance of a given point P from a triangle TRI.
    #   Point P is a row vector of the form 1x3. The triangle is a matrix
    #   formed by three rows of points TRI = [P1;P2;P3] each of size 1x3.
    #   dist = pointTriangleDistance(TRI,P) returns the distance of the point P
    #   to the triangle TRI.
    #   [dist,PP0] = pointTriangleDistance(TRI,P) additionally returns the
    #   closest point PP0 to P on the triangle TRI.
    #
    # Author: Gwolyn Fischer
    # Release: 1.0
    # Release date: 09/02/02
    # Release: 1.1 Fixed Bug because of normalization
    # Release: 1.2 Fixed Bug because of typo in region 5 20101013
    # Release: 1.3 Fixed Bug because of typo in region 2 20101014
    # Possible extension could be a version tailored not to return the distance
    # and additionally the closest point, but instead return only the closest
    # point. Could lead to a small speed gain.
    # Example:
    # %% The Problem
    # P0 = [0.5 -0.3 0.5]
    #
    # P1 = [0 -1 0]
    # P2 = [1  0 0]
    # P3 = [0  0 0]
    #
    # vertices = [P1; P2; P3]
    # faces = [1 2 3]
    #
    # %% The Engine
    # [dist,PP0] = pointTriangleDistance([P1;P2;P3],P0)
    #
    # %% Visualization
    # [x,y,z] = sphere(20)
    # x = dist*x+P0(1)
    # y = dist*y+P0(2)
    # z = dist*z+P0(3)
    #
    # figure
    # hold all
    # patch('Vertices',vertices,'Faces',faces,'FaceColor','r','FaceAlpha',0.8)
    # plot3(P0(1),P0(2),P0(3),'b*')
    # plot3(PP0(1),PP0(2),PP0(3),'*g')
    # surf(x,y,z,'FaceColor','b','FaceAlpha',0.3)
    # view(3)

    # The algorithm is based on
    # "David Eberly, 'Distance Between Point and Triangle in 3D',
    # Geometric Tools, LLC, (1999)"
    # http:\\www.geometrictools.com/Documentation/DistancePoint3Triangle3.pdf
    #
    #        ^t
    #  \     |
    #   \reg2|
    #    \   |
    #     \  |
    #      \ |
    #       \|
    #        *P2
    #        |\
    #        | \
    #  reg3  |  \ reg1
    #        |   \
    #        |reg0\
    #        |     \
    #        |      \ P1
    # -------*-------*------->s
    #        |P0      \
    #  reg4  | reg5    \ reg6
    # rewrite triangle in normal form
    #
    # Note: TRI here holds the vertices as *columns* (3x3); P is a single
    # 3-vector. Returns (squared distance as a 1-element tensor, region id).
    reg = -1
    assert(np.isnan(np.sum(TRI.data.numpy()))==0)
    assert(np.isnan(np.sum(P.data.numpy()))==0)
    B = TRI[:, 0]
    E0 = TRI[:, 1] - B
    # E0 = E0/sqrt(sum(E0.^2)); %normalize vector
    E1 = TRI[:, 2] - B
    # E1 = E1/sqrt(sum(E1.^2)); %normalize vector
    D = B - P
    a = torch.dot(E0, E0)
    b = torch.dot(E0, E1)
    c = torch.dot(E1, E1)
    d = torch.dot(E0, D)
    e = torch.dot(E1, D)
    f = torch.dot(D, D)
    #print "{0} {1} {2} ".format(B,E1,E0)
    det = a * c - b * b
    s = b * e - c * d
    t = b * d - a * e

    # Terrible tree of conditionals to determine in which region of the
    # diagram shown above the projection of the point into the
    # triangle-plane lies.
    if (s.data[0] + t.data[0]) <= det.data[0]:
        if s.data[0] < 0.0:
            if t.data[0] < 0.0:
                # region4
                reg = 4
                if d.data[0] < 0:
                    t = 0.0
                    if -d.data[0] >= a.data[0]:
                        s = 1.0
                        sqrdistance = a + 2.0 * d + f
                    else:
                        s = -d / (a + eps)
                        sqrdistance = d * s + f
                else:
                    s.data[0] = 0.0
                    if e.data[0] >= 0.0:
                        t = 0.0
                        sqrdistance = f
                    else:
                        if -e.data[0] >= c.data[0]:
                            t = 1.0
                            sqrdistance = c + 2.0 * e + f
                        else:
                            t = -e / (c + eps)
                            sqrdistance = e * t + f
                # of region 4
            else:
                reg = 3
                # region 3
                s.data[0] = 0
                if e.data[0] >= 0:
                    t = 0
                    sqrdistance = f
                else:
                    if -e.data[0] >= c.data[0]:
                        t = 1
                        sqrdistance = c + 2.0 * e + f
                    else:
                        t = -e / (c + eps)
                        sqrdistance = e * t + f
                # of region 3
        else:
            if t.data[0] < 0:
                reg = 5
                # region 5
                t = 0
                if d.data[0] >= 0:
                    s = 0
                    sqrdistance = f
                else:
                    if -d.data[0] >= a.data[0]:
                        s = 1.0
                        sqrdistance = a + 2.0 * d + f;  # GF 20101013 fixed typo d*s ->2*d
                    else:
                        s = -d / (a + eps)
                        sqrdistance = d * s + f
            else:
                reg = 0
                # region 0 (projection falls inside the triangle)
                invDet = 1.0 / (det + eps)
                s = s * invDet
                t = t * invDet
                if s.data[0] == 0:
                    # NOTE(review): returning d (a dot product) as the squared
                    # distance for s == 0 looks suspicious -- confirm against
                    # the Eberly reference before relying on this branch.
                    sqrdistance = d
                else:
                    sqrdistance = s * (a * s + b * t + 2.0 * d) + t * (b * s + c * t + 2.0 * e) + f
    else:
        if s.data[0] < 0.0:
            reg = 2
            # region 2
            tmp0 = b + d
            tmp1 = c + e
            if tmp1.data[0] > tmp0.data[0]:  # minimum on edge s+t=1
                numer = tmp1 - tmp0
                denom = a - 2.0 * b + c
                if numer.data[0] >= denom.data[0]:
                    s = 1.0
                    t = 0.0
                    sqrdistance = a + 2.0 * d + f;  # GF 20101014 fixed typo 2*b -> 2*d
                else:
                    s = numer / (denom + eps)
                    t = 1 - s
                    sqrdistance = s * (a * s + b * t + 2 * d) + t * (b * s + c * t + 2 * e) + f
            else:  # minimum on edge s=0
                s = 0.0
                if tmp1.data[0] <= 0.0:
                    t = 1
                    sqrdistance = c + 2.0 * e + f
                else:
                    if e.data[0] >= 0.0:
                        t = 0.0
                        sqrdistance = f
                    else:
                        t = -e / (c + eps)
                        sqrdistance = e * t + f
            # of region 2
        else:
            if t.data[0] < 0.0:
                reg = 6
                # region6
                tmp0 = b + e
                tmp1 = a + d
                if tmp1.data[0] > tmp0.data[0]:
                    numer = tmp1 - tmp0
                    denom = a - 2.0 * b + c
                    if numer.data[0] >= denom.data[0]:
                        t = 1.0
                        s = 0
                        sqrdistance = c + 2.0 * e + f
                    else:
                        t = numer / (denom + eps)
                        s = 1 - t
                        sqrdistance = s * (a * s + b * t + 2.0 * d) + t * (b * s + c * t + 2.0 * e) + f
                else:
                    t = 0.0
                    if tmp1.data[0] <= 0.0:
                        s = 1
                        sqrdistance = a + 2.0 * d + f
                    else:
                        if d.data[0] >= 0.0:
                            s = 0.0
                            sqrdistance = f
                        else:
                            s = -d / (a + eps)
                            sqrdistance = d * s + f
            else:
                reg = 1
                # region 1
                numer = c + e - b - d
                if numer.data[0] <= 0:
                    s = 0.0
                    t = 1.0
                    sqrdistance = c + 2.0 * e + f
                else:
                    denom = a - 2.0 * b + c
                    if numer.data[0] >= denom.data[0]:
                        s = 1.0
                        t = 0.0
                        sqrdistance = a + 2.0 * d + f
                    else:
                        s = numer / (denom + eps)
                        t = 1 - s
                        sqrdistance = s * (a * s + b * t + 2.0 * d) + t * (b * s + c * t + 2.0 * e) + f

    # account for numerical round-off error
    #dist = torch.sqrt(torch.max(sqrdistance, 0*one))
    # directly return sqr distance
    dist = torch.max(sqrdistance, 0*one)
    #PP0 = B + s.expand_as(E0) * E0 + t.expand_as(E1) * E1
    assert(np.isnan(dist.data[0])==0)
    return dist, reg
if __name__ == '__main__':
    # Quick self-check: distance (and gradients) from one point to one
    # triangle using the fast, batched approximation.
    P = Variable(torch.FloatTensor([[-2.0,1.0,1.5]]).view(-1,3), requires_grad=True)
    TRI = Variable(torch.t(torch.FloatTensor([[0.0, 0.0,1.0],[1.0,0.0,0.0],[0.0,1.0,0.0]])), requires_grad=True)
    # approximate batch method
    #P = Variable(torch.randn(100, 3), requires_grad=True)
    #TRI = Variable(torch.rand(3, 3), requires_grad=True)
    t0 = time.time()
    dists = pointTriangleDistanceFast(TRI, P)
    t0 = time.time()-t0
    dists.backward()
    print(dists)
    print(TRI.grad.data.numpy())
    print(P.grad.data.numpy())

    ## accurate method (left for comparison; uses Python-2 print syntax)
    #t1 = time.time()
    #for i in range(P.size()[0]):
    #    dist, reg = pointTriangleDistance(TRI,P[i, :])
    #    print '%f' % (dist.data[0]-dists[i,0].data[0]), reg
    #t1 = time.time()-t1
    #print "Approximate method time: %f, accurate method time: %f" % (t0, t1)
| 11,593 | 32.031339 | 112 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/utils/util.py | import numpy as np
#import os
#import json
import torch
from torch.autograd import Variable
from im2mesh.dmc.utils.pointTriangleDistance import pointTriangleDistance, pointTriangleDistanceFast
from im2mesh.dmc.ops.table import get_triangle_table, get_unique_triangles, vertices_on_location
#from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# tolerance used to avoid divisions by zero in the distance computations
eps = 1e-6
# all marching-cubes topology types (lists of triangles), loaded once
topologys = get_triangle_table()
def dis_to_meshs(pts, pts_index, vectices, x_, y_, z_):
    """ Return the distances from a point set to all acceptable topology types
    in a single cell
    Input:
        pts, (1xNx3) a batch holding one set of points
        pts_index, indices of the points lying inside the cell
        vectices, (3x12) the 12 vertices on each edge of the cell
        x_, the offset of the cell in x direction
        y_, the offset of the cell in y direction
        z_, the offset of the cell in z direction
    Output:
        distances, one distance per topology type
    """
    distances = [dis_to_mesh(pts[0], pts_index, vectices, faces, x_, y_, z_) for faces in topologys]
    distances = torch.cat(distances)
    # Adaptively assign the cost for the empty case (last topology) so that
    # "empty" is clearly worse than any non-empty topology when points fall
    # inside the cell.
    # Fixed: the original assigned to the `.item` *attribute* of a temporary
    # tensor (`distances[-1].item = ...`), which never modified the tensor;
    # write into the element instead.
    if len(pts_index) != 0:
        distances[-1] = torch.max(distances[0:-1]).item() * 10.0
    return distances
def dis_to_mesh(pts, pts_index, vertices, faces, x_, y_, z_):
    """ Return the distance from a point set to a single topology type
    Input:
        pts, (Nx3) a set of points
        pts_index, (Nx1) indicating if a point is in the cell or not
        vertices, (3x12) the 12 vertices on each edge of the cell
        faces, (fx3) triangle list of this topology (indices into vertices)
        x_, the offset of the cell in x direction
        y_, the offset of the cell in y direction
        z_, the offset of the cell in z direction
    Output:
        distances
    """
    # pick CPU/CUDA tensor types to match the input
    if pts.is_cuda:
        dtype = torch.cuda.FloatTensor
        dtype_long = torch.cuda.LongTensor
    else:
        dtype = torch.FloatTensor
        dtype_long = torch.LongTensor
    one = Variable(torch.ones(1).type(dtype), requires_grad=True)
    # degenerate cases: no points and/or no triangles inside the cell
    if len(pts_index) == 0 and len(faces) == 0:
        return 0.0*one
    if len(pts_index) == 0 and len(faces) != 0:
        return 1.0*one
    if len(pts_index) != 0 and len(faces) == 0:
        return 1e+3 * one
    pts_index = Variable(dtype_long(pts_index))
    # for each triangles in each topology, face is a vector of 3
    dis_all_faces = []
    for face in faces:
        # assemble the triangle's three vertices as columns of a 3x3 matrix
        triangle = torch.cat((torch.cat(vertices[face[0]]).unsqueeze(1),
                              torch.cat(vertices[face[1]]).unsqueeze(1),
                              torch.cat(vertices[face[2]]).unsqueeze(1)), 1)
        # use the fast and approximated point to triangle distance; points
        # are shifted into the cell's local coordinate frame first
        dis_all_faces.append(pointTriangleDistanceFast(triangle, pts.index_select(0, pts_index)
            - Variable(dtype([x_, y_, z_])).unsqueeze(0).expand(pts_index.size()[0], 3)))
    # only count the nearest distance to the triangles
    dis_all_faces, _ = torch.min(torch.cat(dis_all_faces, dim=1), dim=1)
    return torch.mean(dis_all_faces).view(1)
def pts_in_cell(pts, cell):
    """ get the point indices inside of a given cell (pyTorch)
    Input:
        pts, a set of points in pytorch format, shape [1 x N x 3]
        cell, a list of 6 numbers {x1, y1, z1, x2, y2, z2}
    Output:
        inds, a list of indices for points strictly inside the cell
    """
    # Vectorized replacement of the original per-point Python loop (six
    # .item() calls per point); the strict < / > semantics are preserved.
    lower = torch.tensor(cell[:3], dtype=pts.dtype, device=pts.device)
    upper = torch.tensor(cell[3:], dtype=pts.dtype, device=pts.device)
    p = pts[0]
    inside = ((p > lower) & (p < upper)).all(dim=1)
    return torch.nonzero(inside).flatten().tolist()
def pts_in_cell_numpy(pts, cell):
    """ get the point indices inside of a given cell (numpy)
    Input:
        pts, a set of points as an Nx3 numpy array
        cell, a list of 6 numbers {x1, y1, z1, x2, y2, z2}
    Output:
        inds, a list of indices for points strictly inside the cell
    """
    # Vectorized replacement of the original per-point Python loop;
    # identical strict-inequality semantics.
    inside = ((pts[:, 0] > cell[0]) & (pts[:, 0] < cell[3]) &
              (pts[:, 1] > cell[1]) & (pts[:, 1] < cell[4]) &
              (pts[:, 2] > cell[2]) & (pts[:, 2] < cell[5]))
    return np.flatnonzero(inside).tolist()
def offset_to_vertices(offset, x, y, z):
    """ get 12 intersect points on each edge of a single cell

    offset is a [3 x W+1 x H+1 x D+1] tensor of per-axis vertex offsets;
    (x, y, z) selects the cell. Each returned entry is a 3-component point
    in the cell's local [0, 1]^3 frame; the free coordinate on each edge is
    0.5 - offset (offsets live in (-0.5, 0.5), so this stays within the
    edge). Edge numbering follows the marching-cubes convention used by the
    topology tables -- TODO confirm against table.py.
    """
    if offset.is_cuda:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor
    one = Variable(torch.ones(1).type(dtype), requires_grad=True)
    p = [ [(0.5-offset[0, x+1, y+1, z  ])*one, 1.0*one, 0.0*one],  #0
          [1.0*one, (0.5-offset[1, x+1, y+1, z  ])*one, 0.0*one],  #1
          [(0.5-offset[0, x+1, y  , z  ])*one, 0.0*one, 0.0*one],  #2
          [0.0*one, (0.5-offset[1, x  , y+1, z  ])*one, 0.0*one],  #3
          [(0.5-offset[0, x+1, y+1, z+1])*one, 1.0*one, 1.0*one],  #4
          [1.0*one, (0.5-offset[1, x+1, y+1,z+1])*one, 1.0*one],   #5
          [(0.5-offset[0, x+1, y  , z+1])*one, 0.0*one, 1.0*one],  #6
          [0.0*one, (0.5-offset[1, x  , y+1,z+1])*one, 1.0*one],   #7
          [0.0*one, 1.0*one, (0.5-offset[2, x  ,y+1,z+1])*one],    #8
          [1.0*one, 1.0*one, (0.5-offset[2, x+1,y+1,z+1])*one],    #9
          [1.0*one, 0.0*one, (0.5-offset[2, x+1,y  ,z+1])*one],    #10
          [0.0*one, 0.0*one, (0.5-offset[2, x  ,y  ,z+1])*one]]    #11
    return p
# get normal vector of all triangles dependent on the location of the cell
# 0: x1
# 1: x2
# 2: y1
# 3: y2
# 4: z1
# 5: z2
# 6: inner
def offset_to_normal(offset, x, y, z, location):
    """get normal vector of all triangles

    For the cell at (x, y, z), compute an (un-normalized) normal for every
    unique triangle in the topology table. When `location` selects a cell
    face (0-5, see legend above), triangles that do not have an edge on
    that face get a dummy all-ones normal instead.
    """
    p = offset_to_vertices(offset, x, y, z)
    # get unique triangles from all topologies
    triangles, classes = get_unique_triangles(symmetry=0)
    # get normal vector of each triangle
    # assign a dummy normal vector to the unconnected ones
    # the vertices we care on the specific face
    vertices = []
    if location<6:
        vertices = vertices_on_location()[location]
    normal = []
    if offset.is_cuda:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor
    for tri in triangles:
        # if the triangle doesn't has a line on the face we care about
        # simply assign a dummy normal vector
        intersection = [xx for xx in vertices if xx in tri]
        #print tri, intersection
        if location < 6 and len(intersection)!=2:
            normal.append(Variable(torch.ones(3, 1).type(dtype)))
        else:
            ### when inside/outside is considered
            # normal = (p[b]-p[a]) x (p[c]-p[a]); orientation follows the
            # vertex order stored in the table
            a=tri[0]
            b=tri[1]
            c=tri[2]
            normal.append(torch.cross(torch.cat(p[b])-torch.cat(p[a]), torch.cat(p[c])-torch.cat(p[a])).view(3, 1))
    return torch.cat(normal).view(-1,3)
def write_to_off(vertices, faces, filename):
    """Write the given vertices and faces to an OFF file.

    Args:
        vertices: (V, 3) array of vertex positions.
        faces: (F, 3) array of triangle vertex indices.
        filename: output path.

    Fix: the file handle was never closed; use a `with` block so the file
    is flushed and closed even on error.
    """
    with open(filename, 'w') as f:
        f.write('OFF\n')
        n_vertice = vertices.shape[0]
        n_face = faces.shape[0]
        f.write('%d %d 0\n' % (n_vertice, n_face))
        for nv in range(n_vertice):
            ## !!! exchange the coordinates to match the orignal simplified mesh !!!
            ## !!! need to check where the coordinate is exchanged !!!
            f.write('%f %f %f\n' % (vertices[nv, 1], vertices[nv, 2], vertices[nv, 0]))
        for nf in range(n_face):
            f.write('3 %d %d %d\n' % (faces[nf, 0], faces[nf, 1], faces[nf, 2]))
def gaussian_kernel(l, sig=1.):
    """ get the gaussian kernel
    Returns an *unnormalized* l x l x l 3D Gaussian (peak value 1 at the
    center). Adapted from:
    https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy
    """
    axis = np.arange(-l // 2 + 1., l // 2 + 1.)
    gx, gy, gz = np.meshgrid(axis, axis, axis)
    sq_radius = gx ** 2 + gy ** 2 + gz ** 2
    return np.exp(-sq_radius / (2. * sig ** 2))
def unique_rows(a):
    """ Return the matrix with unique rows

    Returns (unique rows of `a`, inverse index mapping each input row to
    its row in the result). Rows are compared byte-wise via a void view.
    """
    void_dtype = np.dtype((np.void, a.dtype.itemsize * a.shape[1]))
    as_void = np.ascontiguousarray(a).view(void_dtype)
    _, first_idx, inverse = np.unique(as_void, return_index=True, return_inverse=True)
    return a[first_idx], inverse
| 8,274 | 36.274775 | 115 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/utils/pred2mesh.py | import torch
import numpy as np
from im2mesh.dmc.ops.cpp_modules import pred2mesh
def unique_rows(a):
    """ Return the matrix with unique rows

    Each row is viewed as one opaque byte blob so np.unique can dedupe
    whole rows; also returns the inverse mapping for every input row.
    """
    row_bytes = a.dtype.itemsize * a.shape[1]
    blob_view = np.ascontiguousarray(a).view(np.dtype((np.void, row_bytes)))
    _, keep, inverse = np.unique(blob_view, return_index=True, return_inverse=True)
    return a[keep], inverse
def pred_to_mesh_max(offset, topology):
    """
    Converts the predicted offset variable and topology to a mesh by choosing the most likely topology
    Input
    ----------
    arg1 : tensor
        offset variables [3 x W+1 x H+1 x D+1]
    arg2 : tensor
        topology probabilities [W*H*D x T]
    Returns
    -------
    (vertices, faces) as numpy arrays (vertices may contain duplicates;
    the dedup step below is currently disabled)
    """
    # get the topology type with maximum probability in each cell
    num_cells = offset.size(1) - 1
    _, topology_max = torch.max(topology, dim=1)
    topology_max = topology_max.view(num_cells, num_cells, num_cells)
    # pre-allocate the memory, not safe: assumes at most 12 vertices/faces
    # per cell; the C extension fills these buffers in place
    vertices = torch.FloatTensor(num_cells**3 * 12, 3)
    faces = torch.FloatTensor(num_cells**3 * 12, 3)
    num_vertices = torch.LongTensor(1)
    num_faces = torch.LongTensor(1)
    topology_max = topology_max.int()
    # get the mesh from the estimated offest and topology (CPU C extension)
    pred2mesh.pred2mesh(offset.cpu(), topology_max.cpu(),
                        vertices, faces, num_vertices, num_faces)
    # cut the vertices and faces matrix according to the numbers
    vertices = vertices[0:num_vertices[0], :].numpy()
    faces = faces[0:num_faces[0], :].numpy()
    # convert the vertices and face to numpy, and remove the duplicated vertices
    # NOTE(review): despite the comment above, deduplication is commented
    # out below -- vertices/faces are returned as produced by the kernel.
    vertices_unique = np.asarray(vertices)
    faces_unique = np.asarray(faces)
    # if len(faces):
    #     vertices = np.asarray(vertices)
    #     vertices_unique, indices = unique_rows(vertices)
    #     faces = np.asarray(faces).flatten()
    #     faces_unique = faces[indices].reshape((-1, 3))
    # else:
    #     vertices_unique = []
    #     faces_unique = []
    # if len(vertices_unique):
    #     vertices_unique = vertices_unique[:, [2, 0, 1]]
    return vertices_unique, faces_unique
| 2,159 | 29.857143 | 103 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/utils/visualize.py | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import os
import numpy as np
from utils.util import write_to_off, unique_rows
from _ext import eval_util
def save_mesh_fig(pts_rnd_, offset, topology, x_grids, y_grids, z_grids, ind, args, phase):
    """ save the estimated mesh with maximum likelihood as image

    Extracts a mesh from the predicted offset/topology (most likely topology
    per cell) and either writes it to an .off file (validation with
    args.save_off == 1) or renders it together with the input point cloud
    into a .png under args.output_dir.
    """
    # get the topology type with maximum probability in each cell
    num_cells = len(x_grids)-1
    _, topology_max = torch.max(topology, dim=1)
    topology_max = topology_max.view(num_cells, num_cells, num_cells)
    # pre-allocate the memory, not safe (assumes <= 12 vertices/faces per cell)
    vertices = torch.FloatTensor(num_cells**3 * 12, 3)
    faces = torch.FloatTensor(num_cells**3 * 12, 3)
    num_vertices = torch.LongTensor(1)
    num_faces = torch.LongTensor(1)
    # get the mesh from the estimated offest and topology (C extension)
    eval_util.pred_to_mesh(offset.data.cpu(), topology_max.data.cpu(),
                           vertices, faces, num_vertices, num_faces)
    if num_vertices[0] == 0:
        return
    # cut the vertices and faces matrix according to the numbers
    vertices = vertices[0:num_vertices[0], :].numpy()
    faces = faces[0:num_faces[0], :].numpy()
    # convert the vertices and face to numpy, and remove the duplicated vertices
    if len(faces):
        vertices = np.asarray(vertices)
        vertices_unique, indices = unique_rows(vertices)
        faces = np.asarray(faces).flatten()
        faces_unique = faces[indices].reshape((-1, 3))
    else:
        vertices_unique = []
        faces_unique = []
    # if save_off then skip the png figure saving for efficiency
    if phase == 'val' and args.save_off == 1:
        # exchange the axes to match the off ground truth
        if len(vertices_unique):
            vertices_unique = vertices_unique[:, [2, 0, 1]]
        write_to_off(vertices_unique, faces_unique,
                     os.path.join(args.output_dir, 'mesh', '%04d.off'%ind))
    else:
        xv_cls, yv_cls, zv_cls = np.meshgrid(x_grids[:-1], y_grids[:-1], z_grids[:-1], indexing='ij')
        xv_cls = xv_cls.flatten()
        yv_cls = yv_cls.flatten()
        zv_cls = zv_cls.flatten()
        fig = plt.figure(0)
        fig.clear()
        ax = fig.add_subplot(111, projection='3d')
        # plot the scattered points
        ax.scatter(pts_rnd_[:, 0], pts_rnd_[:, 1], pts_rnd_[:, 2], '.',
                   color='#727272', zorder=1)
        # plot the mesh
        color = [0.8, 0.5, 0.5]
        ax.plot_trisurf(vertices_unique[:, 0],
                        vertices_unique[:, 1],
                        vertices_unique[:, 2],
                        triangles=faces_unique,
                        color=color,
                        edgecolor='none',
                        alpha=1.0)
        ax.set_xlim(x_grids.min(), x_grids.max())
        ax.set_ylim(y_grids.min(), y_grids.max())
        ax.set_zlim(z_grids.min(), z_grids.max())
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        if phase == 'train':
            fig_name = 'train_noise%.02f_epoch%d.png' % (args.noise, ind)
        else:
            fig_name = 'val_%s_noise%.02f_ind%d.png' % (
                os.path.splitext(os.path.basename(args.model))[0],
                args.noise,
                ind)
        plt.savefig(os.path.join(args.output_dir, fig_name))
def save_occupancy_fig(pts_rnd_, occupancy, x_grids, y_grids, z_grids, ind, args, phase):
    """ save the estimated occupancy as image

    Renders the input point cloud plus one semi-transparent red marker per
    grid vertex, with alpha equal to the occupancy probability, and saves
    the figure into args.output_dir.
    """
    # skip the occupancy figure saving for efficiency
    if phase == 'val' and args.save_off == 1:
        return
    xv_cls, yv_cls, zv_cls = np.meshgrid(
        range(len(x_grids)),
        range(len(y_grids)),
        range(len(z_grids)),
        indexing='ij')
    xv_cls = xv_cls.flatten()
    yv_cls = yv_cls.flatten()
    zv_cls = zv_cls.flatten()
    fig = plt.figure(0)
    fig.clear()
    ax = fig.add_subplot(111, projection='3d')
    # plot the scattered points
    ax.scatter(pts_rnd_[:, 0], pts_rnd_[:, 1], pts_rnd_[:, 2], '.',
               color='#727272', zorder=1)
    # assign the occupancy w.r.t. the probability (alpha channel)
    rgba_x = np.zeros((len(xv_cls), 4))
    rgba_x[:, 0] = 1.0
    rgba_x[:, 3] = occupancy.flatten()
    # plot the occupancy
    ax.scatter(xv_cls, yv_cls, zv_cls, '.', color=rgba_x, zorder=1)
    ax.set_xlim(x_grids.min(), x_grids.max())
    ax.set_ylim(y_grids.min(), y_grids.max())
    ax.set_zlim(z_grids.min(), z_grids.max())
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    if phase == 'train':
        fig = 'train_occ_noise%.02f_epoch_%d.png' % (args.noise, ind)
    else:
        fig = 'val_occ_%s_noise%.02f_ind_%d.png' % (
            os.path.splitext(os.path.basename(args.model))[0],
            args.noise,
            ind)
    plt.savefig(os.path.join(args.output_dir, fig))
| 4,919 | 33.893617 | 101 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/setup.py | from setuptools import setup
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Build the single fused CUDA extension (_cuda_ext) bundling all DMC
# kernels; BuildExtension handles the nvcc/C++ compilation details.
setup(
    name='_cuda_ext',
    ext_modules=[
        CUDAExtension('_cuda_ext', [
            'src/extension.cpp',
            'src/curvature_constraint_kernel.cu',
            'src/grid_pooling_kernel.cu',
            'src/occupancy_to_topology_kernel.cu',
            'src/occupancy_connectivity_kernel.cu',
            'src/point_triangle_distance_kernel.cu',
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 553 | 26.7 | 67 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/occupancy_connectivity.py | import torch
import math
from torch import nn
from torch.autograd import Function
from torch.autograd import Variable
from ._cuda_ext import occupancy_connectivity_forward, occupancy_connectivity_backward
class OccupancyConnectivityFunction(Function):
    """Autograd wrapper around the CUDA occupancy-connectivity kernels."""

    @staticmethod
    def forward(ctx, occ):
        # occ: occupancy probabilities [W+1 x H+1 x D+1]; the CUDA kernel
        # returns the connectivity loss
        loss = occupancy_connectivity_forward(occ)
        ctx.save_for_backward(occ)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        occ, = ctx.saved_tensors
        # CUDA-only: gradient buffer is allocated on the GPU
        grad_occupancy = torch.zeros(occ.size(), dtype=torch.float32, device='cuda')
        occupancy_connectivity_backward(
            grad_output,
            occ,
            grad_occupancy)
        # Multiply with incoming gradient
        # NOTE(review): grad_output is both passed into the kernel and
        # multiplied in again here -- verify the kernel does not already
        # scale by it, otherwise the gradient is scaled twice.
        grad_occupancy = grad_occupancy * grad_output
        return grad_occupancy
class OccupancyConnectivity(nn.Module):
    """Occupancy-connectivity loss module.

    Forward
    ----------
    arg1 : tensor
        occupancy probabilities [W+1 x H+1 x D+1]
    Returns
    -------
    tensor
        the occupancy connectivity loss (scalar)
    """

    def __init__(self):
        super(OccupancyConnectivity, self).__init__()

    def forward(self, occ):
        loss = OccupancyConnectivityFunction.apply(occ)
        return loss
| 1,268 | 23.882353 | 86 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/occupancy_to_topology.py | import math
from torch import nn
from torch.autograd import Function
import torch
from ._cuda_ext import occupancy_to_topology_forward, occupancy_to_topology_backward
class OccupancyToTopologyFunction(Function):
    """Autograd wrapper around the CUDA occupancy-to-topology kernels."""

    @staticmethod
    def forward(ctx, occupancy):
        # occupancy: [W+1 x H+1 x D+1] grid-vertex probabilities; the kernel
        # fills one row of T=256 topology probabilities per cell
        W = occupancy.size()[0] - 1
        H = occupancy.size()[1] - 1
        D = occupancy.size()[2] - 1
        T = 256
        topology = torch.zeros((W*H*D, T), dtype=torch.float32, device='cuda')
        occupancy_to_topology_forward(occupancy, topology)
        ctx.save_for_backward(occupancy, topology)
        return topology

    @staticmethod
    def backward(ctx, grad_output):
        occupancy, topology = ctx.saved_tensors
        grad_occupancy = torch.zeros(occupancy.size(), dtype=torch.float32, device='cuda')
        occupancy_to_topology_backward(grad_output, occupancy, topology, grad_occupancy)
        return grad_occupancy
class OccupancyToTopology(nn.Module):
    """Derive per-cell topology probabilities from occupancy probabilities.

    Forward
    ----------
    arg1 : tensor
        occupancy probability tensor [W+1 x H+1 x D+1]
    Returns
    -------
    tensor
        topology probability tensor [W*H*D x T]
    """

    def __init__(self):
        super(OccupancyToTopology, self).__init__()

    def forward(self, occupancy):
        topology = OccupancyToTopologyFunction.apply(occupancy)
        return topology
| 1,505 | 26.381818 | 97 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/grid_pooling.py | import torch
import math
from torch import nn
from torch.autograd import Function
from torch.autograd import Variable
from ._cuda_ext import grid_pooling_forward, grid_pooling_backward
class GridPoolingFunction(Function):
    """ Perform max-pooling in every cell over the point features
        see ../src/extension.cpp
            ../src/grid_pooling_kernel.cu
        for more details
    """

    @staticmethod
    def forward(ctx, feat_points, points, grid_shape):
        # feat_points: [N x C] per-point features; points: [N x 3] positions;
        # grid_shape: [3] = (W, H, D)
        feat_points = feat_points.contiguous()
        points = points.contiguous()
        W = grid_shape[0]
        H = grid_shape[1]
        D = grid_shape[2]
        C = feat_points.size()[1]
        grid_shape = grid_shape.cpu().contiguous()
        feat_cells = torch.zeros((W*H*D, C), dtype=torch.float32, device='cuda')
        # indices records, per cell/channel, which point won the max
        # (-1 = no point in the cell), used for backward scatter
        indices = -1 * torch.ones((W*H*D, C), dtype=torch.int32, device='cuda')
        grid_pooling_forward(points, feat_points, grid_shape, feat_cells, indices)
        # save for back-propagation
        ctx.save_for_backward(indices, grid_shape)
        # save number of points and feature dimension for back-propagation
        ctx.N = points.size()[0]
        ctx.C = C
        return feat_cells

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        indices, grid_shape = ctx.saved_tensors
        N, C = ctx.N, ctx.C
        grad_points = torch.zeros((N, C), dtype=torch.float32, device='cuda')
        grid_pooling_backward(grad_output, grid_shape, indices, grad_points)
        # we only need gradient on feat_points (points and grid_shape get None)
        return grad_points, None, None
class GridPooling(nn.Module):
    """Max-pool point features into the cells of a regular grid.

    Init
    ----------
    args1: gridshape [3]
    Forward
    ----------
    arg1 : tensor
        point features [N x F]
    arg2 : tensor
        point locations [N x 3]
    Returns
    -------
    tensor
        Feature grid [W*H*D x F]
    """

    def __init__(self, gridshape):
        super(GridPooling, self).__init__()
        self.gridshape = gridshape

    def forward(self, features, points):
        pooled = GridPoolingFunction.apply(features, points, self.gridshape)
        return pooled
| 2,244 | 27.782051 | 82 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/point_triangle_distance.py | import torch
import math
from torch import nn
from torch.autograd import Function
from torch.autograd import Variable
from im2mesh.dmc.ops.table import get_connected_pairs
from ._cuda_ext import point_topology_distance_forward, point_topology_distance_backward
class PointTriangleDistanceFunction(Function):
    """Autograd wrapper around the CUDA point-to-topology distance kernels."""

    @staticmethod
    def forward(ctx, offset, points):
        W = offset.size()[1]
        H = offset.size()[2]
        D = offset.size()[3]
        # we only considered topologies with up to 3 triangles for calculating
        # the distance loss function, the distance can be calculated in regardless
        # of the normal vectors, therefore there are only 48 topologies to be
        # considered
        T = 48
        distances_full = torch.zeros((W-1)*(H-1)*(D-1), T).cuda()
        # per-point/topology index filled by the kernel for the backward pass
        indices = -1 * torch.ones((points.size(0), T), dtype=torch.int32, device='cuda')
        point_topology_distance_forward(
            offset, points, distances_full, indices)
        ctx.save_for_backward(offset, points, indices)
        return distances_full

    @staticmethod
    def backward(ctx, grad_output):
        offset, points, indices = ctx.saved_tensors
        grad_offset = torch.zeros(offset.size(), device='cuda')
        point_topology_distance_backward(
            grad_output, offset, points, indices, grad_offset)
        # no gradient w.r.t. the points
        return grad_offset, None
class PointTriangleDistance(nn.Module):
    """Point-to-triangle distance per cell and topology
    (for each topology with up to 3 triangles).

    Forward
    ----------
    arg1 : tensor
        offset variable [3 x W+1 x H+1 x D+1]
    arg2 : tensor
        points [N x 3]
    Returns
    -------
    tensor
        distance [W*H*D x T]
    """

    def __init__(self):
        super(PointTriangleDistance, self).__init__()

    def forward(self, offset, points):
        distances = PointTriangleDistanceFunction.apply(offset, points)
        return distances
| 1,940 | 28.861538 | 88 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/curvature_constraint.py | import torch
import math
from torch import nn
from torch.autograd import Function
from torch.autograd import Variable
from im2mesh.dmc.ops.table import get_connected_pairs
from ._cuda_ext import curvature_constraint_forward, curvature_constraint_backward
######### TEST FAILS #########
# Module-level look-up tables shared by both the forward and backward pass:
# connected triangle pairs in the x, y, z directions, pairs within a single
# cell, and a topology -> triangles expansion table.
# NOTE(review): the short names x, y, z live at module scope here and are
# easy to shadow accidentally.
x, y, z, inner, topology_to_triangles = get_connected_pairs()
class CurvatureConstraintFunction(Function):
    """Custom autograd Function for the curvature (normal-consistency) loss.

    forward:  offset [3 x W+1 x H+1 x D+1], topology [W*H*D x T] -> scalar loss
    backward: gradient w.r.t. offset; topology is treated as a constant
              weight matrix (its gradient is all zeros).
    """
    @staticmethod
    def forward(ctx, offset, topology):
        # NOTE(review): this pre-allocated tensor is immediately overwritten
        # by the kernel's return value below.
        loss = torch.zeros(1, dtype=torch.float32, device='cuda')
        # The topology columns are re-indexed so each column corresponds to a
        # triangle rather than a whole topology type; the connectivity tables
        # (module-level x/y/z/inner) are converted and moved to the GPU on
        # every call.
        loss = curvature_constraint_forward(
            offset,
            topology[:, torch.LongTensor(topology_to_triangles).cuda()],
            torch.FloatTensor(x).cuda(),
            torch.FloatTensor(y).cuda(),
            torch.FloatTensor(z).cuda(),
            torch.FloatTensor(inner).cuda())
        ctx.save_for_backward(offset, topology)
        return loss
    @staticmethod
    def backward(ctx, grad_output):
        offset, topology = ctx.saved_tensors
        grad_offset = torch.zeros(offset.size()).cuda()
        # Kernel accumulates d(loss)/d(offset) into grad_offset in place.
        curvature_constraint_backward(
            grad_output,
            offset,
            topology[:, torch.LongTensor(topology_to_triangles).cuda()],
            torch.FloatTensor(x).cuda(),
            torch.FloatTensor(y).cuda(),
            torch.FloatTensor(z).cuda(),
            torch.FloatTensor(inner).cuda(),
            grad_offset)
        # Multiply with incoming gradient (chain rule applied here, not in the kernel)
        grad_offset = grad_offset * grad_output
        # Topology gets no gradient: it only acts as a constant weighting.
        grad_topology = torch.zeros(topology.size()).cuda()
        return grad_offset, grad_topology
class CurvatureConstraint(nn.Module):
    """Curvature loss of each cell, derived from the offset variables.

    ######### TEST FAILS ######### (marker kept from the original source:
    the gradient check for this op is known to fail)

    Forward
    -------
    off  : tensor, offset variables [3 x W+1 x H+1 x D+1]
    topo : tensor, topology probabilities [W*H*D x T]

    Returns
    -------
    tensor : scalar curvature loss
    """

    def __init__(self):
        super(CurvatureConstraint, self).__init__()

    def forward(self, off, topo):
        # Delegate both passes to the custom autograd Function.
        loss = CurvatureConstraintFunction.apply(off, topo)
        return loss
| 2,271 | 29.702703 | 104 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_distance.py |
import sys
sys.path.append('../../../..')
import torch
import torch.nn as nn
from torch.autograd import Variable
import time
import numpy as np
import resource
from im2mesh.dmc.ops.tests.loss_autograd import LossAutoGrad
from im2mesh.dmc.ops.point_triangle_distance import PointTriangleDistance
print("Testing CUDA extension...")
dtype = torch.cuda.FloatTensor
dtype_long = torch.cuda.LongTensor
num_cells = 4
# autograd loss
loss_autograd = LossAutoGrad(num_cells, 1.0)
multiGrids = PointTriangleDistance()
if __name__ == '__main__':
print("=========== Input =============")
point = Variable(torch.rand(10, 3).view(-1,3).type(dtype) * 0.9) * num_cells
offset = Variable(torch.zeros(3, num_cells+1, num_cells+1, num_cells+1).type(dtype)*0.5, requires_grad=True)
print(point.shape)
print(offset.shape)
print("============= cuda extension ============")
# forward
tf_c = time.time()
distance = multiGrids.forward(offset, point)
tf_c = time.time() - tf_c
distance_np = distance.data.cpu().numpy()
print("cffi distance:")
print(distance_np.shape)
weight_rnd = Variable(torch.rand(distance.size()).type(dtype), requires_grad=False)
distance_sum = torch.sum(torch.mul(distance, weight_rnd))
# backward
tb_c = time.time()
grad = distance_sum.backward()
tb_c = time.time() - tb_c
offset_np = np.copy(offset.grad.data.cpu().numpy())
print("cffi grad:")
print(offset_np.shape)
print("============= auto ============")
# forward
tf_py = time.time()
distance_auto = loss_autograd.loss_point_to_mesh_distance_autograd(offset, point)
tf_py = time.time()-tf_py
distance_auto_np = distance_auto.data.cpu().numpy()
print("auto distance:")
print(distance_auto_np.shape)
weight_rnd = Variable(weight_rnd.data)
distance_sum_auto = torch.sum(torch.mul(distance_auto, weight_rnd))
# backward
offset.grad.data.zero_()
tb_py = time.time()
distance_sum_auto.backward()
tb_py = time.time() - tb_py
print("auto grad: ")
offset_auto_np = np.copy(offset.grad.data.cpu().numpy())
print(offset_auto_np.shape)
print("========== summary ===========")
print("Forward difference between cffi and auto: "+str(np.sum(np.abs(distance_np[:,:-1]-distance_auto_np[:,:-1]))))
print("Backward difference between cffi and auto: "+str(np.sum(np.abs(offset_np-offset_auto_np)))) | 2,446 | 30.371795 | 119 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/loss_autograd.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
#import settings
from im2mesh.dmc.utils.util import (
offset_to_normal, offset_to_vertices, pts_in_cell, dis_to_meshs)
from im2mesh.dmc.ops.table import (
get_connected_pairs, get_accept_topology)
import scipy.ndimage
class LossAutoGrad(object):
    """Implement the loss functions using pytorch,
    used in cffi/test/ for gradient checking

    Pure-PyTorch (autograd) reference implementations of the three DMC losses
    (point-to-mesh distance, curvature, smoothness).  Slow by design; only
    used to validate the CUDA extensions.
    """
    def __init__(self, num_cells, len_cell):
        # num_cells: grid resolution per axis; len_cell: edge length of a cell.
        self.len_cell = len_cell
        self.num_cells = num_cells
        self.dtype = torch.cuda.FloatTensor
        self.dtype_long = torch.cuda.LongTensor
        # Grid-line coordinates along each axis (num_cells+1 samples).
        self.x_grids = np.arange(0, num_cells+1, len_cell)
        self.y_grids = np.arange(0, num_cells+1, len_cell)
        self.z_grids = np.arange(0, num_cells+1, len_cell)
        # Flattened coordinates of every cell's lower corner, in ij order.
        self.xv_value, self.yv_value, self.zv_value = np.meshgrid(self.x_grids[:-1], self.y_grids[:-1], self.z_grids[:-1], indexing='ij')
        self.xv_value = self.xv_value.flatten()
        self.yv_value = self.yv_value.flatten()
        self.zv_value = self.zv_value.flatten()
        # Connectivity tables between triangles of neighboring/same cells.
        connected_x, connected_y, connected_z, connected_inner, topology_to_triangle = get_connected_pairs()
        self.nonzero_connection = np.sum(connected_x) + np.sum(connected_y) + np.sum(connected_z)
        self.connected_x = connected_x
        self.connected_y = connected_y
        self.connected_z = connected_z
        self.connected_inner = connected_inner
        self.topology_to_triangle = topology_to_triangle
        self.acceptTopology = torch.LongTensor(get_accept_topology())
        self.acceptTopology = self.acceptTopology.cuda()
        # Append the inside/outside-flipped topologies (complement of the
        # 8-bit occupancy code, hence 255 - t).
        flip_indices = torch.arange(self.acceptTopology.size()[0]-1, -1, -1).type(self.dtype_long)
        self.acceptTopologyWithFlip = torch.cat([self.acceptTopology, 255-self.acceptTopology[flip_indices]], dim=0)
        # note we consider the topology with 4 triangles only for visualizing
        # will be fixed in the future
        self.visTopology = torch.LongTensor(get_accept_topology(4))
        self.visTopology = self.visTopology.cuda()
    def loss_point_to_mesh_distance_autograd(self, offset, point, phase='train'):
        """ Compute the point-to-mesh distance using pytorch,
        implemented for gradient check of the c/c++ extensions

        Returns a [W*H*D x 48] tensor of per-cell, per-topology distances.
        """
        # Constant distance used for cells containing no points.
        dis_empty = Variable(torch.ones(48).type(self.dtype) * 0.4)
        # NOTE(review): this assigns to the `.item` attribute of a temporary
        # indexing result; it does NOT set the last entry to 0.0 (likely
        # intended `dis_empty[-1] = 0.0`).  As written, all 48 entries stay 0.4.
        dis_empty[-1].item = 0.0
        distance_auto = []
        for i_,(x_,y_,z_) in enumerate(zip(self.xv_value, self.yv_value, self.zv_value)):
            # Indices of the points lying inside this cell's bounding box.
            pts_cell = pts_in_cell(torch.unsqueeze(point, 0), [x_,y_,z_,
                x_+self.len_cell,y_+self.len_cell,z_+self.len_cell])
            if len(pts_cell)==0:
                dis = dis_empty
                mdis, mind = torch.min(dis, 0)
                # NOTE(review): binds the bound method object, not its value
                # (likely intended `mind.item()`); `mind`/`mdis` are unused.
                mind = mind.item
            else:
                vertices = offset_to_vertices(offset,
                    np.where(self.x_grids == x_)[0][0],
                    np.where(self.y_grids == y_)[0][0],
                    np.where(self.z_grids == z_)[0][0])
                dis = dis_to_meshs(torch.unsqueeze(point, 0), pts_cell, vertices, x_, y_, z_)
            distance_auto.append(torch.unsqueeze(dis, 1))
        # Stack per-cell columns, then transpose to [num_cells^3 x 48].
        distance_auto = torch.t(torch.cat(distance_auto, dim=1))
        return distance_auto
    def loss_on_curvature_autograd(self, offset, topology):
        """ Compute the curvature loss using pytorch,
        implemented for gradient check of the c/c++ extensions

        For each pair of adjacent cells (x/y/z neighbors) and within each
        cell, penalizes misaligned triangle normals, weighted by the outer
        product of the cells' (detached) topology probabilities.
        """
        loss = 0
        connected_x = self.connected_x
        connected_y = self.connected_y
        connected_z = self.connected_z
        connected_inner = self.connected_inner
        topology_to_triangle = self.topology_to_triangle
        for i_,(x_,y_,z_) in enumerate(zip(self.xv_value, self.yv_value, self.zv_value)):
            # x direction (neighbor cell is num_cells^2 entries away in the
            # flattened ij-ordered cell list)
            if x_ != self.x_grids[-2]:
                # similarity constraint matrix
                # create new Variable from the data to avoid gradients on the topology
                # as the topology is only taken as a constant weight matrix
                p1 = Variable((F.softmax(topology[i_,:], dim=0).data).type(self.dtype), requires_grad=True)
                p2 = Variable((F.softmax(topology[i_+self.num_cells*self.num_cells,:], dim=0).data).type(self.dtype), requires_grad=True)
                # expand the topology probability to triangle probability
                p1 = p1[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p2 = p2[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p_outer = torch.ger(p1, p2)
                W = torch.mul(p_outer, Variable(self.dtype(connected_x)))
                # NOTE(review): D1/D2 are computed but never used (here and in
                # the y/z/inner branches below).
                D1 = torch.diag(torch.sum(W, dim=1).view(-1))
                D2 = torch.diag(torch.sum(W, dim=0).view(-1))
                # get normal vector of triangles
                norm1 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_)[0][0], 0)
                norm2 = offset_to_normal(offset, np.where(self.x_grids==x_+1)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_)[0][0], 1)
                # normalize normal vectors
                norm1 = torch.div(norm1, torch.norm(norm1, 2, 1).unsqueeze(1).expand_as(norm1))
                norm2 = torch.div(norm2, torch.norm(norm2, 2, 1).unsqueeze(1).expand_as(norm2))
                # loss from matrix
                tmp3 = torch.mm(torch.mm(norm1.transpose(0,1), W ), norm2)
                loss1 = torch.sum(W)*2 - torch.trace(tmp3)*2
                loss += loss1
            # y direction (neighbor cell is num_cells entries away)
            if y_ != self.y_grids[-2]:
                # similarity constraint matrix
                # create new Variable from the data to avoid gradients on the topology
                # as the topology is only taken as a constant weight matrix
                p1 = Variable((F.softmax(topology[i_,:], dim=0).data).type(self.dtype), requires_grad=True)
                p2 = Variable((F.softmax(topology[i_+self.num_cells,:], dim=0).data).type(self.dtype), requires_grad=True)
                # expand the topology probability to triangle probability
                p1 = p1[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p2 = p2[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p_outer = torch.ger(p1, p2)
                W = torch.mul(p_outer, Variable(self.dtype(connected_y)))
                D1 = torch.diag(torch.sum(W, dim=1).view(-1))
                D2 = torch.diag(torch.sum(W, dim=0).view(-1))
                # get normal vector of triangles
                norm1 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_)[0][0], 2)
                norm2 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_+1)[0][0], np.where(self.z_grids==z_)[0][0], 3)
                # normalize normal vectors
                norm1 = torch.div(norm1, torch.norm(norm1, 2, 1).unsqueeze(1).expand_as(norm1))
                norm2 = torch.div(norm2, torch.norm(norm2, 2, 1).unsqueeze(1).expand_as(norm2))
                # loss from matrix
                tmp3 = torch.mm(torch.mm(norm1.transpose(0,1), W ), norm2)
                loss1 = torch.sum(W)*2 - torch.trace(tmp3)*2
                loss += loss1
            # z direction (neighbor cell is the next entry)
            if z_ != self.z_grids[-2]:
                # similarity constraint matrix
                # create new Variable from the data to avoid gradients on the topology
                # as the topology is only taken as a constant weight matrix
                p1 = Variable((F.softmax(topology[i_,:], dim=0).data).type(self.dtype), requires_grad=True)
                p2 = Variable((F.softmax(topology[i_+1,:], dim=0).data).type(self.dtype), requires_grad=True)
                # expand the topology probability to triangle probability
                p1 = p1[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p2 = p2[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
                p_outer = torch.ger(p1, p2)
                W = torch.mul(p_outer, Variable(self.dtype(connected_z)))
                D1 = torch.diag(torch.sum(W, dim=1).view(-1))
                D2 = torch.diag(torch.sum(W, dim=0).view(-1))
                # get normal vector of triangles
                norm1 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_)[0][0], 4)
                norm2 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_+1)[0][0], 5)
                # normalize normal vectors
                norm1 = torch.div(norm1, torch.norm(norm1, 2, 1).unsqueeze(1).expand_as(norm1))
                norm2 = torch.div(norm2, torch.norm(norm2, 2, 1).unsqueeze(1).expand_as(norm2))
                # loss from matrix
                tmp3 = torch.mm(torch.mm(norm1.transpose(0,1), W ), norm2)
                loss1 = torch.sum(W)*2 - torch.trace(tmp3)*2
                loss += loss1
            # inner cell (triangle pairs within the same cell)
            # similarity constraint matrix
            # create new Variable from the data to avoid gradients on the topology
            # as the topology is only taken as a constant weight matrix
            p1 = Variable((F.softmax(topology[i_,:], dim=0).data).type(self.dtype), requires_grad=True)
            # expand the topology probability to triangle probability
            p1 = p1[torch.LongTensor(topology_to_triangle).type(self.dtype_long)]
            p_outer = torch.ger(p1, p1)
            W = torch.mul(p_outer, Variable(self.dtype(connected_inner)))
            D1 = torch.diag(torch.sum(W, dim=1).view(-1))
            D2 = torch.diag(torch.sum(W, dim=0).view(-1))
            # get normal vector of triangles
            norm1 = offset_to_normal(offset, np.where(self.x_grids==x_)[0][0], np.where(self.y_grids==y_)[0][0], np.where(self.z_grids==z_)[0][0], 6)
            # normalize normal vectors
            norm1 = torch.div(norm1, torch.norm(norm1, 2, 1).unsqueeze(1).expand_as(norm1))
            # loss from matrix
            tmp3 = torch.mm(torch.mm(norm1.transpose(0,1), W ), norm1)
            loss1 = torch.sum(W)*2 - torch.trace(tmp3)*2
            loss += loss1
        return loss
    def loss_on_smoothness_autograd(self, occupancy):
        """ Compute the smoothness loss using pytorch,
        implemented for gradient check of the c/c++ extensions

        Sum of absolute differences between all face-adjacent occupancy values.
        """
        W=occupancy.size()[0]
        H=occupancy.size()[1]
        D=occupancy.size()[2]
        loss = 0
        for x_ in range(W):
            for y_ in range(H):
                for z_ in range(D):
                    # horizontal direction
                    if x_<W-1:
                        # l1 loss
                        loss += torch.abs(occupancy[x_, y_, z_]-occupancy[x_+1,y_,z_])
                    # vertical direction
                    if y_<H-1:
                        # l1 loss
                        loss += torch.abs(occupancy[x_, y_, z_]-occupancy[x_,y_+1,z_])
                    if z_<D-1:
                        # l1 loss
                        loss += torch.abs(occupancy[x_, y_, z_]-occupancy[x_,y_,z_+1])
        return loss
| 11,473 | 47.210084 | 155 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_gridpooling.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.autograd import Function
import time
import numpy as np
import sys
sys.path.append('../../../..')
from im2mesh.dmc.utils.util import pts_in_cell
from torch.autograd import gradcheck
from im2mesh.dmc.ops.grid_pooling import GridPoolingFunction
# Print full arrays without summarization.
# BUG FIX: `threshold='nan'` was accepted by very old NumPy versions but
# raises ValueError on modern ones (threshold must be numeric); np.inf gives
# the intended "never summarize" behaviour.
np.set_printoptions(threshold=np.inf)

# check the cuda extension or c extension
print("Testing CUDA extension...")

dtype = torch.cuda.FloatTensor       # float tensors on GPU
dtype_long = torch.cuda.LongTensor   # integer (index) tensors on GPU

#W = 15
#H = 15
#D = 15
N = 100        # number of random sample points
C = 8          # feature channels per point
num_cells = 4  # grid resolution along each axis
len_cell = 1.0
W = H = D = num_cells
# Grid-line coordinates along each axis (num_cells+1 samples).
x_grids = np.arange(0, num_cells+1, len_cell)
y_grids = np.arange(0, num_cells+1, len_cell)
z_grids = np.arange(0, num_cells+1, len_cell)
# perform maxpool on points in every cell
# set zero vector if cell is empty
def grid_pooling_auto(pts, feat):
    """Pure-PyTorch reference for grid pooling.

    pts  : [N x 3] point coordinates
    feat : [N x C] per-point features
    Returns a [(W*H*D) x C] tensor: per-cell max over the features of the
    points falling inside that cell (zeros for empty cells).

    NOTE(review): relies on the module-level globals C, dtype, dtype_long,
    len_cell and x/y/z_grids, and on the project helper pts_in_cell.
    """
    # Lower corner of every cell, flattened in ij order.
    xv_value, yv_value, zv_value = np.meshgrid(x_grids[:-1], y_grids[:-1], z_grids[:-1], indexing='ij')
    xv_value = xv_value.flatten()
    yv_value = yv_value.flatten()
    zv_value = zv_value.flatten()
    feat_cell = Variable(torch.zeros((len(x_grids)-1) * (len(y_grids)-1) * (len(z_grids)-1), C).type(dtype))
    #for k in range(batchsize):
    for i_,(x_,y_,z_) in enumerate(zip(xv_value, yv_value, zv_value)):
        pts_index = pts_in_cell(pts.unsqueeze(0),[x_,y_,z_,
            x_+len_cell, y_+len_cell, z_+len_cell])
        if len(pts_index)>0:
            pts_index = torch.LongTensor(pts_index).type(dtype_long)
            #pts_feat = feat.index_select(0, pts_index)
            pts_feat = feat[pts_index,:]
            # max pooling over all points in the cell (via MaxPool1d so the
            # pooling participates in autograd like the CUDA version)
            #pts_feat,_ = torch.max(pts_feat, 0)
            m = nn.MaxPool1d(len(pts_index))
            pts_feat = m(pts_feat.t().unsqueeze(0))
            feat_cell[i_, :] = pts_feat.squeeze()
    return feat_cell
#class GridPooling(Function):
# def forward(self, points, feat_points):
# feat_cells = torch.zeros(W*H*D, C).type(dtype)
# indices = -1 * torch.ones(W*H*D, C).type(dtype_long)
# shape = torch.LongTensor([W, H, D]).type(dtype_long)
# forward_utils.grid_pooling_forward(points, feat_points, shape, feat_cells, indices)
# self.saved_indices = indices
# return feat_cells
#
# def backward(self, grad_output):
# grad_points = torch.zeros(N, C).type(torch.FloatTensor)
# forward_utils.grid_pooling_backward( grad_output, self.saved_indices, grad_points)
# return None, grad_points
if __name__ == '__main__':
    # Random points in [0, 5)^3 and random per-point features.
    points = Variable(torch.rand(N, 3).view(-1,3).type(dtype), requires_grad=False) * 5.0
    feat_points = Variable(torch.rand(N, C).type(dtype), requires_grad=True)
    # Random weights reduce the pooled features to a scalar for backward().
    rnd_weights = Variable(torch.rand(W*H*D, C).type(dtype))
    shape = Variable(torch.LongTensor([W, H, D]))
    print( "=========== Input =============")
    print( points)
    print (feat_points)
    print( "============= cffi ============")
    # forward (first call is an untimed warm-up; the second is timed)
    feat_cells = GridPoolingFunction.apply(feat_points, points, shape)
    tf_c = time.time()
    feat_cells = GridPoolingFunction.apply(feat_points, points, shape)
    tf_c = time.time() - tf_c
    print ("cffi forward time: ", tf_c
    )
    # backward
    tb_c = time.time()
    torch.sum( torch.mul(feat_cells, rnd_weights) ).backward()
    tb_c = time.time() - tb_c
    print ("cffi backward time: ", tb_c)
    grad_np = np.copy(feat_points.grad.data.cpu().numpy())
    # print (grad_np)
    print( "============= auto ============")
    # forward (pure-PyTorch reference)
    tf_py = time.time()
    feat_cells_auto = grid_pooling_auto(points, feat_points)
    tf_py = time.time()-tf_py
    print ("auto forward time: ", tf_py)
    # backward
    feat_points.grad.data.zero_()
    tb_py = time.time()
    torch.sum(torch.mul(feat_cells_auto, rnd_weights)).backward()
    tb_py = time.time()-tb_py
    # NOTE(review): prints tf_py (forward time) — copy-paste slip; tb_py is
    # the backward time.
    print( "auto backward time: ", tf_py)
    grad_auto_np = np.copy(feat_points.grad.data.cpu().numpy())
    # print (grad_auto_np)
    print ("========== summary ===========")
    print ("Forward difference between cffi and auto: ", np.sum(np.abs(feat_cells.data.cpu().numpy()-feat_cells_auto.data.cpu().numpy())))
    print ("Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np)))
    print ("cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c))
    print ("auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py))
    print ("ratio: ", (tf_py+tb_py)/(tf_c + tb_c))
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_curvature.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
sys.path.append('../../../..')
from im2mesh.dmc.ops.tests.loss_autograd import LossAutoGrad
from im2mesh.dmc.ops.curvature_constraint import CurvatureConstraint
import torch.nn.functional as F
import numpy as np
import time
# check the cuda extension or c extension
print ("Testing CUDA extension...")
dtype = torch.cuda.FloatTensor
# autograd loss (pure-PyTorch reference used for the comparison below)
num_cells = 4
len_cell = 1.0
W = H = D = num_cells
loss_autograd = LossAutoGrad(num_cells, len_cell)
# cffi loss
class SmoothLoss(nn.Module):
    """Thin wrapper exposing CurvatureConstraint as a loss module."""

    def __init__(self):
        super(SmoothLoss, self).__init__()
        self.smoothLoss = CurvatureConstraint()

    def forward(self, offset, topology):
        # Delegate straight to the wrapped CUDA-backed module.
        curvature = self.smoothLoss(offset, topology)
        return curvature
if __name__ == '__main__':
    # generate offset and topology with relatively low-dimension
    print ("=========== Input =============")
    T = 96
    W = num_cells
    H = num_cells
    D = num_cells
    offset = Variable((torch.rand(3, W+1, H+1, D+1)).type(dtype) * 0.1, requires_grad=True)
    topology = Variable(torch.rand(W*H*D, T).type(dtype), requires_grad=True)
    #print (offset)
    #print (topology)
    loss_cffi = SmoothLoss()
    # Untimed warm-up forward/backward pass.
    l = loss_cffi(offset, F.softmax(topology, dim=1))
    l.backward()
    offset.grad.data.zero_()
    # evaluating the running time of the cffi extension
    # NOTE(review): the timed sections include the print statements, so the
    # reported times are only indicative.
    print ("============= cffi ============")
    tf_c = time.time()
    l = loss_cffi(offset, F.softmax(topology, dim=1))
    print ("cffi loss:")
    print (l)
    tf_c = time.time()-tf_c
    tb_c = time.time()
    l.backward()
    print ("cffi gradient:")
    print( offset.grad)
    tb_c = time.time()-tb_c
    grad_np = np.copy(offset.grad.data.cpu().numpy())
    # evaluating the running time of the autograd version
    print ("============= auto ============")
    tf_py = time.time()
    l_auto = loss_autograd.loss_on_curvature_autograd(offset, topology)
    print ("auto loss:")
    print (l_auto)
    tf_py = time.time()-tf_py
    offset.grad.data.zero_()
    tb_py = time.time()
    l_auto.backward()
    print ("auto grad:")
    print (offset.grad)
    tb_py = time.time()-tb_py
    grad_auto_np = np.copy(offset.grad.data.cpu().numpy())
    # NOTE(review): by precedence this asserts `sum(|auto|)` is truthy AND
    # `sum(|cffi|) != 0`; presumably both sums were meant to be checked
    # against zero explicitly.
    assert np.sum(np.abs(grad_auto_np)) and np.sum(np.abs(grad_np)) != 0.0
    # print the loss and grad difference and the time comparison
    print ("========== summary ===========")
    print ("Forward difference between cffi and auto: ", (l-l_auto).data.cpu().numpy())
    print ("Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np)))
    print ("Backward difference between cffi and auto: ", np.mean(np.abs(grad_np-grad_auto_np)))
    print ("cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c))
    print ("auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py))
    print ("ratio: ", (tf_py+tb_py)/(tf_c + tb_c))
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_occupancy_connectivity_yiyi.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
sys.path.append('../../../..')
import time
import numpy as np
from .loss import Loss
from .loss_autograd import LossAutoGrad
# check the cuda extension or c extension
# NOTE(review): Python-2 print statements throughout this file -- it cannot
# run under Python 3 as written.
print "Testing CUDA extension..."
dtype = torch.cuda.FloatTensor
# auto loss
# NOTE(review): `args` is never defined anywhere in this file, so this line
# raises NameError; the script appears to be an abandoned/broken variant.
loss_auto = LossAutoGrad(args)
def loss_on_smoothness(occupancy):
    """Compute the smoothness loss defined between neighboring occupancy
    variables
    """
    # NOTE(review): `weight_smoothness` below is assigned but unused, and the
    # body references `self`, which does not exist in a module-level
    # function -- this was evidently lifted from a method and raises
    # NameError when called.
    weight_smoothness = 3.0
    loss = (
        self.occupancyConnectivity(occupancy) / (self.num_cells**3)
        * self.weight_smoothness
    )
    return loss
if __name__ == '__main__':
    W = H = D = args.num_cells
    occupancy = Variable(torch.rand(W+1, H+1, D+1).type(dtype), requires_grad=True)
    rnd_weights = Variable(torch.rand(W*H*D, 48).type(dtype))
    print "=========== Input ============="
    print occupancy
    print "============= cffi ============"
    # forward (first call is an untimed warm-up)
    loss = 0.1*loss_on_smoothness(occupancy)*args.num_cells**3
    tf_c = time.time()
    loss = 0.1*loss_on_smoothness(occupancy)*args.num_cells**3
    tf_c = time.time() - tf_c
    print "cffi forward time: ", tf_c
    print loss
    # backward
    tb_c = time.time()
    loss.backward()
    tb_c = time.time() - tb_c
    print "cffi backward time: ", tb_c
    grad_np = np.copy(occupancy.grad.data.cpu().numpy())
    print grad_np
    print "============= auto ============"
    occupancy = Variable(occupancy.data.cpu(), requires_grad=True)
    rnd_weights = Variable(rnd_weights.data.cpu())
    # forward
    # NOTE(review): rebinding `loss_auto` here shadows the LossAutoGrad
    # instance of the same name, so the script would break if rerun.
    tf_py = time.time()
    loss_auto = 0.1*loss_auto.loss_on_smoothness_autograd(occupancy)
    tf_py = time.time()-tf_py
    print "auto forward time: ", tf_py
    print loss_auto
    # backward
    #occupancy.grad.data.zero_()
    tb_py = time.time()
    loss_auto.backward()
    tb_py = time.time()-tb_py
    # NOTE(review): prints tf_py (forward time) instead of tb_py.
    print "auto backward time: ", tf_py
    grad_auto_np = np.copy(occupancy.grad.data.cpu().numpy())
    print grad_auto_np
    # NOTE(review): truth value of a multi-element ndarray is ambiguous --
    # this assert raises ValueError; it also compares only grad_np to 0.0.
    assert grad_auto_np and grad_np == 0.0
    print "========== summary ==========="
    print "Forward difference between cffi and auto: ", np.sum(np.abs(loss.data.cpu().numpy()-loss_auto.data.cpu().numpy()))
    print "Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np))
    print "cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c)
    print "auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py)
    print "ratio: ", (tf_py+tb_py)/(tf_c + tb_c)
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_occupancy_connectivity.py |
import sys
sys.path.append('../../../..')
import torch
import torch.nn as nn
from torch.autograd import Variable
import time
import numpy as np
from im2mesh.dmc.ops.occupancy_connectivity import OccupancyConnectivity
#from loss import Loss
#from loss_autograd import LossAutoGrad
#from parse_args import parse_args
# check the cuda extension or c extension
def loss_on_smoothness_autograd(occupancy):
    """Reference (pure-PyTorch) smoothness loss.

    Sums the absolute difference between every pair of face-adjacent entries
    of the 3-D `occupancy` tensor.  Used as ground truth when gradient
    checking the CUDA extension.
    """
    width = occupancy.size()[0]
    height = occupancy.size()[1]
    depth = occupancy.size()[2]
    total = 0
    for i in range(width):
        for j in range(height):
            for k in range(depth):
                here = occupancy[i, j, k]
                # neighbour along x (l1 difference)
                if i + 1 < width:
                    total += torch.abs(here - occupancy[i + 1, j, k])
                # neighbour along y
                if j + 1 < height:
                    total += torch.abs(here - occupancy[i, j + 1, k])
                # neighbour along z
                if k + 1 < depth:
                    total += torch.abs(here - occupancy[i, j, k + 1])
    return total
W = H = D = 4
# module under test (nn.Module wrapping the CUDA occupancy-connectivity op)
loss_mod = OccupancyConnectivity()
def loss_on_smoothness(occupancy):
    """Compute the smoothness loss defined between neighboring occupancy
    variables

    Normalized by the number of cells (module globals W, H, D).
    """
    return 1.0 *loss_mod.forward(occupancy)/ (W*H*D)
print("Testing CUDA extension...")
dtype = torch.cuda.FloatTensor
if __name__ == '__main__':
occupancy = Variable(torch.rand(W+1, H+1, D+1).type(dtype), requires_grad=True)
print("=========== Input =============")
print(occupancy.shape)
print("============= cffi ============")
# forward
tf_c = time.time()
loss = loss_on_smoothness(occupancy)*(W*H*D)
print(loss)
tf_c = (time.time() - tf_c)*1000
print("cffi forward time: {:.2} ms".format(tf_c))
# backward
tb_c = time.time()
loss.backward()
tb_c = (time.time() - tb_c)*1000
print("cffi backward time:{:.2} ms".format(tb_c))
grad_np = np.copy(occupancy.grad.data.cpu().numpy())
print("gra mean"+str(np.mean(np.abs(grad_np))))
print("============= auto ============")
occupancy.grad.data.zero_()
# forward
tf_py = time.time()
loss_auto = loss_on_smoothness_autograd(occupancy)
tf_py = (time.time()-tf_py)*1000
print("auto forward time:{:.2} ms".format(tf_py))
print(loss_auto)
# backward
tb_py = time.time()
loss_auto.backward()
tb_py = (time.time()-tb_py)*1000
print("auto backward time:{:.2} ms".format(tf_py))
grad_auto_np = np.copy(occupancy.grad.data.cpu().numpy())
assert np.sum(np.abs(grad_auto_np)) and np.sum(np.abs(grad_np)) != 0.0
print("gra mean"+str(np.mean(np.abs(grad_auto_np))))
print("Forward sum L1 pytroch vs cuda: {:.2} ".format(np.sum(np.abs(loss.detach().cpu().numpy()-loss_auto.detach().cpu().numpy()))))
print("Backward sum L1 pytroch vs cuda: {:.2} ".format(np.sum(np.abs(grad_np-grad_auto_np))))
| 3,106 | 27.245455 | 136 | py |
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/tests/test_occupancy_to_topology.py | import sys
sys.path.append('../../../..')
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import time
import torch.nn.functional as F
from im2mesh.dmc.ops.occupancy_to_topology import OccupancyToTopology
def get_occupancy_table():
    """Return binary occupancy status of 8 vertices for all 256 topology types"""
    table = np.zeros((256, 8))
    for topo in range(256):
        for vertex in range(8):
            # bit `vertex` of the topology index encodes that corner's occupancy
            table[topo, vertex] = (topo >> vertex) & 1
    return table
# look-up-tables
# every one of the 256 topology indices is accepted in this test
acceptTopology = np.arange(256)
# offsets of the 8 cell corners relative to the cell's base index
vertexTable=[ [0, 1, 0],
        [1, 1, 0],
        [1, 0, 0],
        [0, 0, 0],
        [0, 1, 1],
        [1, 1, 1],
        [1, 0, 1],
        [0, 0, 1] ]
# [256 x 8] table: occupancyTable[t][v] is 1 iff corner v is inside for topology t
occupancyTable=get_occupancy_table()
# check the cuda extension or c extension
dtype_gpu = torch.cuda.FloatTensor
dtype_cpu = torch.FloatTensor
# get (WxH)xT topology map from (W+1)x(Hx1) occupancy map
# note here T=14 because of the inside/outside distinction
def occupancy_to_topology(occ):
    """Reference implementation: per-cell topology probabilities from an
    occupancy grid.

    occ : (W+1) x (H+1) x (D+1) tensor of corner occupancy probabilities.
    Returns a (W*H*D) x T tensor; the probability of topology t in a cell is
    the product over its 8 corners of p(occupied) or p(free), depending on
    whether topology t expects that corner inside or outside.
    """
    Wc = occ.size()[0]-1
    Hc = occ.size()[1]-1
    Dc = occ.size()[2]-1
    T = len(acceptTopology)
    topology = Variable(torch.zeros(Wc*Hc*Dc, T)).type(torch.FloatTensor)
    #vertexTablee = torch.from_numpy(np.array(vertexTable)).cuda()
    xv, yv, zv = np.meshgrid(range(Wc), range(Hc), range(Dc), indexing='ij')
    xv = xv.flatten()
    yv = yv.flatten()
    zv = zv.flatten()
    for i,j,k in zip(xv, yv, zv):
        # p_occ holds [p(occupied), p(free)] interleaved for the 8 corners
        p_occ = []
        for v in range(8):
            p_occ.append( occ[i+vertexTable[v][0], j+vertexTable[v][1], k+vertexTable[v][2]] )
            p_occ.append( 1 - occ[i+vertexTable[v][0], j+vertexTable[v][1], k+vertexTable[v][2]] )
        for t in range(T):
            topology_ind = acceptTopology[t]
            p_accumu = 1
            for v in range(8):
                p_accumu = p_accumu*p_occ[ v*2 + int(occupancyTable[topology_ind][v]) ]
            # BUG FIX: the flat cell index must use the local grid extents
            # (Hc, Dc).  The original read module-level globals H and D,
            # which are only defined once the __main__ block below has run
            # (they happen to equal Hc/Dc there, masking the bug).
            topology[i*Hc*Dc+j*Dc+k, t] = p_accumu
    return topology
if __name__ == '__main__':
    W = H = D = 4
    T = 256
    print("=========== Input =============")
    # CPU tensors for the reference path; copied to GPU for the extension.
    occupancy = Variable(torch.rand(W+1, H+1, D+1).type(dtype_cpu), requires_grad=True)
    rnd_weights = Variable(torch.rand(W*H*D, T).type(dtype_cpu))
    print("Occupancy shape: "+str(occupancy.shape))
    print("============= Normal Pytorch ============")
    # forward
    tf_c = time.time()
    topo = occupancy_to_topology(occupancy)
    tf_c_ = (time.time() - tf_c)*1000
    print("normal forward time: {:.2} ms".format(tf_c_))
    # backward (random weights reduce the map to a scalar)
    tb_py = time.time()
    torch.sum(torch.mul(topo, rnd_weights)).backward()
    tb_py = (time.time()-tb_py)*1000
    print("auto backward time: {:.2} ms".format(tb_py))
    grad_auto_np = np.copy(occupancy.grad.data.cpu().numpy())
    print(grad_auto_np)
    print("============= Cuda Extension ============")
    #occupancy.grad.data.zero_()
    occupancy2 = Variable(occupancy.data.cuda(), requires_grad=True)
    #forward
    #occ2topo_modul = OccupancyToTopology()
    tf_c = time.time()
    topology = OccupancyToTopology()(occupancy2)# occ2topo_modul.forward(occupancy)
    tf_cf = (time.time() - tf_c)*1000
    print("Cuda forward time: {:3.2} ms".format(tf_cf))
    # backward
    tb_c = time.time()
    loss = torch.sum(torch.mul(topology, rnd_weights.cuda()))
    loss.backward()
    tb_cb = (time.time() - tb_c)*1000
    print("Cuda backward time: {:3.2} ms".format(tb_cb))
    grad_np = np.copy(occupancy2.grad.data.cpu().numpy())
    print(grad_np)
    print("============= Comparison Forward ============")
    print("Topolgy shape: "+str(topology.shape))
    print("Forward sum L1 pytroch vs cuda: {:.2} ".format(np.sum(np.abs(topology.data.cpu().numpy()-topo.data.cpu().numpy()))))
    print("Backward sum L1 pytroch vs cuda: {:.2} ".format(np.sum(np.abs(grad_auto_np - grad_np))))
    print("Forward cuda extension is {:.0} times faster".format((tf_c_/tf_cf)))
    print("Backward cuda extension is {:.0} times faster".format((tb_py/tb_cb)))
Im2Hands | Im2Hands-main/im2mesh/dmc/ops/cpp_modules/setup.py | from setuptools import setup
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension
# Build the `pred2mesh` C++ extension with PyTorch's BuildExtension helper
# (compiles pred_to_mesh_.cpp against the torch headers).
setup(
    name='pred2mesh',
    ext_modules=[
        CppExtension('pred2mesh', [
            'pred_to_mesh_.cpp',
            # 'commons.cpp'
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
Im2Hands | Im2Hands-main/im2mesh/encoder/r2n2.py | import torch.nn as nn
# import torch.nn.functional as F
from im2mesh.common import normalize_imagenet
class SimpleConv(nn.Module):
    ''' 3D Recurrent Reconstruction Neural Network (3D-R2-N2) encoder network.

    Six conv + padded-2x2-max-pool stages followed by a fully connected
    layer.  The final view() hard-codes a 3x3 spatial map, so the input
    resolution must shrink to 3x3 after six poolings (127x127 does;
    TODO confirm the expected input size against the data pipeline).

    Args:
        c_dim: output dimension
    '''

    def __init__(self, c_dim=1024):
        super().__init__()
        actvn = nn.LeakyReLU()
        # padding=1 keeps border pixels contributing at every pool stage
        pooling = nn.MaxPool2d(2, padding=1)
        self.convnet = nn.Sequential(
            nn.Conv2d(3, 96, 7, padding=3),
            pooling, actvn,
            nn.Conv2d(96, 128, 3, padding=1),
            pooling, actvn,
            nn.Conv2d(128, 256, 3, padding=1),
            pooling, actvn,
            nn.Conv2d(256, 256, 3, padding=1),
            pooling, actvn,
            nn.Conv2d(256, 256, 3, padding=1),
            pooling, actvn,
            nn.Conv2d(256, 256, 3, padding=1),
            pooling, actvn,
        )
        self.fc_out = nn.Linear(256*3*3, c_dim)

    def forward(self, x):
        batch_size = x.size(0)
        # normalize with ImageNet statistics before the conv stack
        net = normalize_imagenet(x)
        net = self.convnet(net)
        net = net.view(batch_size, 256*3*3)
        out = self.fc_out(net)
        return out
class Resnet(nn.Module):
    ''' 3D Recurrent Reconstruction Neural Network (3D-R2-N2) ResNet-based
    encoder network.

    It is the ResNet variant of the previous encoder: the plain conv
    stages are replaced by ResnetBlock units with shortcut connections.
    As with SimpleConv, the final view() expects a 3x3 spatial map.

    Args:
        c_dim: output dimension
    '''

    def __init__(self, c_dim=1024):
        super().__init__()
        actvn = nn.LeakyReLU()
        pooling = nn.MaxPool2d(2, padding=1)
        self.convnet = nn.Sequential(
            nn.Conv2d(3, 96, 7, padding=3),
            actvn,
            nn.Conv2d(96, 96, 3, padding=1),
            actvn, pooling,
            ResnetBlock(96, 128),
            pooling,
            ResnetBlock(128, 256),
            pooling,
            ResnetBlock(256, 256),
            pooling,
            ResnetBlock(256, 256),
            pooling,
            ResnetBlock(256, 256),
            pooling,
        )
        self.fc_out = nn.Linear(256*3*3, c_dim)

    def forward(self, x):
        batch_size = x.size(0)
        # normalize with ImageNet statistics before the conv stack
        net = normalize_imagenet(x)
        net = self.convnet(net)
        net = net.view(batch_size, 256*3*3)
        out = self.fc_out(net)
        return out
class ResnetBlock(nn.Module):
    ''' ResNet block class.

    Two 3x3 convolutions with LeakyReLU activations plus a 1x1 shortcut
    convolution that matches the channel counts for the residual sum.

    Args:
        f_in (int): input dimension
        f_out (int): output dimension
    '''

    def __init__(self, f_in, f_out):
        super().__init__()
        leaky = nn.LeakyReLU()
        layers = [
            nn.Conv2d(f_in, f_out, 3, padding=1),
            leaky,
            nn.Conv2d(f_out, f_out, 3, padding=1),
            leaky,
        ]
        self.convnet = nn.Sequential(*layers)
        self.shortcut = nn.Conv2d(f_in, f_out, 1)

    def forward(self, x):
        residual = self.shortcut(x)
        return self.convnet(x) + residual
| 2,857 | 25.220183 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/encoder/pointnet.py | import torch
import torch.nn as nn
from im2mesh.layers import ResnetBlockFC
def maxpool(x, dim=-1, keepdim=False):
    """Max-reduce ``x`` along ``dim``, discarding the argmax indices."""
    values, _indices = x.max(dim=dim, keepdim=keepdim)
    return values
class SimplePointnet(nn.Module):
    ''' PointNet-based encoder network.

    Alternates pointwise fully-connected layers with a global max-pool
    whose result is concatenated back onto every point feature, then
    reduces to a single latent vector per point cloud.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
    '''

    def __init__(self, c_dim=128, dim=3, hidden_dim=128):
        super().__init__()
        self.c_dim = c_dim

        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
        self.fc_0 = nn.Linear(2*hidden_dim, hidden_dim)
        self.fc_1 = nn.Linear(2*hidden_dim, hidden_dim)
        self.fc_2 = nn.Linear(2*hidden_dim, hidden_dim)
        self.fc_3 = nn.Linear(2*hidden_dim, hidden_dim)
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.pool = maxpool

    def _attach_global(self, net):
        # Broadcast the per-cloud max feature back onto every point.
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        return torch.cat([net, pooled], dim=2)

    def forward(self, p):
        # p: point clouds of shape (B, T, dim); output has shape (B, c_dim).
        net = self.fc_0(self.actvn(self.fc_pos(p)))
        net = self.fc_1(self.actvn(self._attach_global(net)))
        net = self.fc_2(self.actvn(self._attach_global(net)))
        net = self.fc_3(self.actvn(self._attach_global(net)))
        # Reduce over the point dimension to one feature per cloud.
        net = self.pool(net, dim=1)
        return self.fc_c(self.actvn(net))
class ResnetPointnet(nn.Module):
    ''' PointNet-based encoder network with ResNet blocks.

    Each stage concatenates the global max feature back onto every point
    feature before the next fully-connected ResNet block.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
    '''

    def __init__(self, c_dim=128, dim=3, hidden_dim=128):
        super().__init__()
        self.c_dim = c_dim

        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
        self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.pool = maxpool

    def _attach_global(self, net):
        # Concatenate the per-cloud max feature onto each point feature.
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        return torch.cat([net, pooled], dim=2)

    def forward(self, p):
        # p: point clouds of shape (B, T, dim); output has shape (B, c_dim).
        net = self.block_0(self.fc_pos(p))
        net = self.block_1(self._attach_global(net))
        net = self.block_2(self._attach_global(net))
        net = self.block_3(self._attach_global(net))
        net = self.block_4(self._attach_global(net))
        # Reduce over the point dimension to one feature per cloud.
        net = self.pool(net, dim=1)
        return self.fc_c(self.actvn(net))
| 3,420 | 29.008772 | 71 | py |
Im2Hands | Im2Hands-main/im2mesh/encoder/conv.py | import torch.nn as nn
# import torch.nn.functional as F
from torchvision import models
from im2mesh.common import normalize_imagenet
class ConvEncoder(nn.Module):
    r''' Simple convolutional encoder network.

    Five stride-2 convolutions halve the spatial resolution step by step;
    the resulting feature map is mean-pooled over space and projected to a
    c_dim-dimensional latent vector.

    Args:
        c_dim (int): output dimension of latent embedding
    '''

    def __init__(self, c_dim=128):
        super().__init__()
        self.conv0 = nn.Conv2d(3, 32, 3, stride=2)
        self.conv1 = nn.Conv2d(32, 64, 3, stride=2)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2)
        self.conv3 = nn.Conv2d(128, 256, 3, stride=2)
        self.conv4 = nn.Conv2d(256, 512, 3, stride=2)
        self.fc_out = nn.Linear(512, c_dim)
        self.actvn = nn.ReLU()

    def forward(self, x):
        relu = self.actvn
        net = self.conv1(relu(self.conv0(x)))
        net = self.conv2(relu(net))
        net = self.conv3(relu(net))
        net = self.conv4(relu(net))
        # Global average pool over the remaining spatial positions.
        net = net.view(x.size(0), 512, -1).mean(2)
        return self.fc_out(relu(net))
class Resnet18(nn.Module):
    r''' ResNet-18 encoder network for image input.

    Wraps a pretrained torchvision ResNet-18 whose classification head is
    removed; the 512-dimensional pooled features are optionally projected
    to c_dim by a linear layer.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''

    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        self.features = models.resnet18(pretrained=True)
        # Replace the ImageNet classifier with identity to expose the
        # 512-d pooled features.
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')

    def forward(self, x):
        inp = normalize_imagenet(x) if self.normalize else x
        return self.fc(self.features(inp))
class Resnet34(nn.Module):
    r''' ResNet-34 encoder network.

    Wraps a pretrained torchvision ResNet-34 whose classification head is
    removed; the 512-dimensional pooled features are optionally projected
    to c_dim by a linear layer.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''

    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        self.features = models.resnet34(pretrained=True)
        # Replace the ImageNet classifier with identity to expose features.
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(512, c_dim)
        elif c_dim == 512:
            # No projection needed: the backbone already outputs 512 dims.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 512 if use_linear is False')

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
class Resnet50(nn.Module):
    r''' ResNet-50 encoder network.

    Wraps a pretrained torchvision ResNet-50 whose classification head is
    removed; the 2048-dimensional pooled features are optionally projected
    to c_dim by a linear layer.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''

    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        self.features = models.resnet50(pretrained=True)
        # Replace the ImageNet classifier with identity to expose features.
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(2048, c_dim)
        elif c_dim == 2048:
            # No projection needed: the backbone already outputs 2048 dims.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 2048 if use_linear is False')

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
class Resnet101(nn.Module):
    r''' ResNet-101 encoder network.

    Wraps a pretrained torchvision ResNet-101 whose classification head is
    removed; the 2048-dimensional pooled features are optionally projected
    to c_dim by a linear layer.

    Args:
        c_dim (int): output dimension of the latent embedding
        normalize (bool): whether the input images should be normalized
        use_linear (bool): whether a final linear layer should be used
    '''

    def __init__(self, c_dim, normalize=True, use_linear=True):
        super().__init__()
        self.normalize = normalize
        self.use_linear = use_linear
        # Bug fix: this class previously instantiated models.resnet50, so it
        # never actually used a ResNet-101 backbone. Both backbones output
        # 2048-d features, so the projection layer is unchanged.
        self.features = models.resnet101(pretrained=True)
        # Replace the ImageNet classifier with identity to expose features.
        self.features.fc = nn.Sequential()
        if use_linear:
            self.fc = nn.Linear(2048, c_dim)
        elif c_dim == 2048:
            # No projection needed: the backbone already outputs 2048 dims.
            self.fc = nn.Sequential()
        else:
            raise ValueError('c_dim must be 2048 if use_linear is False')

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        net = self.features(x)
        out = self.fc(net)
        return out
| 5,021 | 30.78481 | 75 | py |
Im2Hands | Im2Hands-main/im2mesh/encoder/voxels.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class VoxelEncoder(nn.Module):
    ''' 3D-convolutional encoder network for voxel input.

    A stem convolution followed by four stride-2 3D convolutions reduces
    the grid to a 2x2x2 feature volume, which is flattened and projected
    to c_dim.

    Args:
        dim (int): input dimension
        c_dim (int): output dimension
    '''

    def __init__(self, dim=3, c_dim=128):
        super().__init__()
        self.actvn = F.relu

        self.conv_in = nn.Conv3d(1, 32, 3, padding=1)

        self.conv_0 = nn.Conv3d(32, 64, 3, padding=1, stride=2)
        self.conv_1 = nn.Conv3d(64, 128, 3, padding=1, stride=2)
        self.conv_2 = nn.Conv3d(128, 256, 3, padding=1, stride=2)
        self.conv_3 = nn.Conv3d(256, 512, 3, padding=1, stride=2)
        self.fc = nn.Linear(512 * 2 * 2 * 2, c_dim)

    def forward(self, x):
        # Add a singleton channel axis: (B, D, H, W) -> (B, 1, D, H, W).
        net = self.conv_in(x.unsqueeze(1))
        for conv in (self.conv_0, self.conv_1, self.conv_2, self.conv_3):
            net = conv(self.actvn(net))
        hidden = net.view(x.size(0), 512 * 2 * 2 * 2)
        return self.fc(self.actvn(hidden))
class CoordVoxelEncoder(nn.Module):
    ''' 3D-convolutional encoder network for voxel input.

    It additional concatenates the coordinate data.

    Args:
        dim (int): input dimension
        c_dim (int): output dimension
    '''

    def __init__(self, dim=3, c_dim=128):
        super().__init__()
        self.actvn = F.relu

        # 4 input channels: the occupancy grid plus one channel per axis of
        # normalized coordinates.
        self.conv_in = nn.Conv3d(4, 32, 3, padding=1)

        self.conv_0 = nn.Conv3d(32, 64, 3, padding=1, stride=2)
        self.conv_1 = nn.Conv3d(64, 128, 3, padding=1, stride=2)
        self.conv_2 = nn.Conv3d(128, 256, 3, padding=1, stride=2)
        self.conv_3 = nn.Conv3d(256, 512, 3, padding=1, stride=2)
        # Assumes the four stride-2 convolutions reduce the grid to 2x2x2
        # (i.e. a 32^3 input) -- TODO confirm expected input resolution.
        self.fc = nn.Linear(512 * 2 * 2 * 2, c_dim)

    def forward(self, x):
        batch_size = x.size(0)
        device = x.device

        # Per-axis coordinate values in [-0.5, 0.5], one per voxel slice.
        coord1 = torch.linspace(-0.5, 0.5, x.size(1)).to(device)
        coord2 = torch.linspace(-0.5, 0.5, x.size(2)).to(device)
        coord3 = torch.linspace(-0.5, 0.5, x.size(3)).to(device)

        # Broadcast each axis to the full grid shape, then stack into a
        # (B, 3, D, H, W) coordinate volume.
        coord1 = coord1.view(1, -1, 1, 1).expand_as(x)
        coord2 = coord2.view(1, 1, -1, 1).expand_as(x)
        coord3 = coord3.view(1, 1, 1, -1).expand_as(x)
        coords = torch.stack([coord1, coord2, coord3], dim=1)

        # Concatenate occupancies and coordinates along the channel axis.
        x = x.unsqueeze(1)
        net = torch.cat([x, coords], dim=1)

        net = self.conv_in(net)
        net = self.conv_0(self.actvn(net))
        net = self.conv_1(self.actvn(net))
        net = self.conv_2(self.actvn(net))
        net = self.conv_3(self.actvn(net))

        hidden = net.view(batch_size, 512 * 2 * 2 * 2)
        c = self.fc(self.actvn(hidden))
        return c
| 2,726 | 29.3 | 65 | py |
Im2Hands | Im2Hands-main/im2mesh/encoder/psgn_cond.py | import torch.nn as nn
class PCGN_Cond(nn.Module):
    r''' Point Set Generation Network encoding network.

    The PSGN conditioning network from the original publication consists of
    several 2D convolution layers. The intermediate outputs from some layers
    are used as additional input to the encoder network, similar to U-Net.

    Args:
        c_dim (int): output dimension of the latent embedding
    '''

    def __init__(self, c_dim=512):
        super().__init__()
        actvn = nn.ReLU()
        # Base number of feature maps; all channel widths below scale with
        # c_dim.
        num_fm = int(c_dim/32)

        self.conv_block1 = nn.Sequential(
            nn.Conv2d(3, num_fm, 3, 1, 1), actvn,
            nn.Conv2d(num_fm, num_fm, 3, 1, 1), actvn)
        # Each of blocks 2-7 starts with a stride-2 convolution that halves
        # the spatial resolution while widening the channels.
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(num_fm, num_fm*2, 3, 2, 1), actvn,
            nn.Conv2d(num_fm*2, num_fm*2, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*2, num_fm*2, 3, 1, 1), actvn)
        self.conv_block3 = nn.Sequential(
            nn.Conv2d(num_fm*2, num_fm*4, 3, 2, 1), actvn,
            nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn)
        self.conv_block4 = nn.Sequential(
            nn.Conv2d(num_fm*4, num_fm*8, 3, 2, 1), actvn,
            nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn)
        self.conv_block5 = nn.Sequential(
            nn.Conv2d(num_fm*8, num_fm*16, 3, 2, 1), actvn,
            nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn)
        self.conv_block6 = nn.Sequential(
            nn.Conv2d(num_fm*16, num_fm*32, 3, 2, 1), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn)
        self.conv_block7 = nn.Sequential(
            nn.Conv2d(num_fm*32, num_fm*32, 5, 2, 2), actvn)

        # 3x3 transition convolutions that halve the channel count of the
        # intermediate feature maps handed to the encoder (U-Net-like skips).
        self.trans_conv1 = nn.Conv2d(num_fm*8, num_fm*4, 3, 1, 1)
        self.trans_conv2 = nn.Conv2d(num_fm*16, num_fm*8, 3, 1, 1)
        self.trans_conv3 = nn.Conv2d(num_fm*32, num_fm*16, 3, 1, 1)

    def forward(self, x, return_feature_maps=True):
        r''' Performs a forward pass through the network.

        Args:
            x (tensor): input data
            return_feature_maps (bool): whether intermediate feature maps
                should be returned
        '''
        feature_maps = []
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = self.conv_block3(x)
        x = self.conv_block4(x)
        # Collect transition outputs after blocks 4, 5 and 6.
        feature_maps.append(self.trans_conv1(x))
        x = self.conv_block5(x)
        feature_maps.append(self.trans_conv2(x))
        x = self.conv_block6(x)
        feature_maps.append(self.trans_conv3(x))
        x = self.conv_block7(x)
        if return_feature_maps:
            return x, feature_maps
        return x
| 2,930 | 36.576923 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/encoder/pix2mesh_cond.py | import torch.nn as nn
class Pix2mesh_Cond(nn.Module):
    r''' Conditioning Network proposed in the authors' Pixel2Mesh implementation.

    The network consists of several 2D convolution layers, and several of the
    intermediate feature maps are returned to features for the image
    projection layer of the encoder network.
    '''

    def __init__(self, c_dim=512, return_feature_maps=True):
        r''' Initialisation.

        Args:
            c_dim (int): channels of the final output
            return_feature_maps (bool): whether intermediate feature maps
                should be returned
        '''
        super().__init__()
        actvn = nn.ReLU()
        self.return_feature_maps = return_feature_maps
        # Base channel width; the architecture hard-codes c_dim == 512.
        num_fm = int(c_dim/32)
        if num_fm != 16:
            raise ValueError('Pixel2Mesh requires a fixed c_dim of 512!')

        self.block_1 = nn.Sequential(
            nn.Conv2d(3, num_fm, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm, num_fm, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm, num_fm*2, 3, stride=2, padding=1), actvn,
            nn.Conv2d(num_fm*2, num_fm*2, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*2, num_fm*2, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*2, num_fm*4, 3, stride=2, padding=1), actvn,
            nn.Conv2d(num_fm*4, num_fm*4, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*4, num_fm*4, 3, stride=1, padding=1), actvn)
        self.block_2 = nn.Sequential(
            nn.Conv2d(num_fm*4, num_fm*8, 3, stride=2, padding=1), actvn,
            nn.Conv2d(num_fm*8, num_fm*8, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*8, num_fm*8, 3, stride=1, padding=1), actvn)
        self.block_3 = nn.Sequential(
            nn.Conv2d(num_fm*8, num_fm*16, 5, stride=2, padding=2), actvn,
            nn.Conv2d(num_fm*16, num_fm*16, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*16, num_fm*16, 3, stride=1, padding=1), actvn)
        self.block_4 = nn.Sequential(
            nn.Conv2d(num_fm*16, num_fm*32, 5, stride=2, padding=2), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn,
            nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn,
        )

    def forward(self, x):
        # Spatial sizes below assume a 224 x 224 input image.
        x_0 = self.block_1(x)    # 64 x 56 x 56
        x_1 = self.block_2(x_0)  # 128 x 28 x 28
        x_2 = self.block_3(x_1)  # 256 x 14 x 14
        x_3 = self.block_4(x_2)  # 512 x 7 x 7

        if self.return_feature_maps:
            return x_0, x_1, x_2, x_3
        return x_3
| 2,701 | 41.888889 | 81 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/training.py | import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from im2mesh.common import (
compute_iou, make_3d_grid
)
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
class Trainer(BaseTrainer):
    ''' Trainer object for the Occupancy Network.

    Args:
        model (nn.Module): Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        device (device): pytorch device
        input_type (str): input type
        vis_dir (str): visualization directory
        threshold (float): threshold value
        eval_sample (bool): whether to evaluate samples
    '''

    def __init__(self, model, optimizer, device=None, input_type='img',
                 vis_dir=None, threshold=0.5, eval_sample=False):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.input_type = input_type
        self.vis_dir = vis_dir
        self.threshold = threshold
        self.eval_sample = eval_sample

        # Create the visualization directory on demand.
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)

    def train_step(self, data):
        ''' Performs a training step.

        Args:
            data (dict): data dictionary

        Returns:
            float: scalar training loss for this batch
        '''
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.compute_loss(data)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def eval_step(self, data):
        ''' Performs an evaluation step.

        Computes the negative ELBO (with its reconstruction-error and KL
        components), the IoU on the dedicated IoU points and, if voxel
        ground truth is available, the IoU on a 32^3 voxel grid.

        Args:
            data (dict): data dictionary

        Returns:
            dict: evaluation metrics
        '''
        self.model.eval()

        device = self.device
        threshold = self.threshold
        eval_dict = {}

        # Compute elbo
        points = data.get('points').to(device)
        occ = data.get('points.occ').to(device)

        # Fall back to an empty input tensor for unconditional models.
        inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
        voxels_occ = data.get('voxels')

        points_iou = data.get('points_iou').to(device)
        occ_iou = data.get('points_iou.occ').to(device)

        kwargs = {}

        with torch.no_grad():
            elbo, rec_error, kl = self.model.compute_elbo(
                points, occ, inputs, **kwargs)

        # Negate the ELBO so the reported value behaves like a loss.
        eval_dict['loss'] = -elbo.mean().item()
        eval_dict['rec_error'] = rec_error.mean().item()
        eval_dict['kl'] = kl.mean().item()

        # Compute iou
        batch_size = points.size(0)

        with torch.no_grad():
            p_out = self.model(points_iou, inputs,
                               sample=self.eval_sample, **kwargs)

        # Binarize ground-truth and predicted occupancies before IoU.
        occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
        occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
        eval_dict['iou'] = iou

        # Estimate voxel iou
        if voxels_occ is not None:
            voxels_occ = voxels_occ.to(device)
            # Query the model at the voxel centers of a 32^3 grid.
            points_voxels = make_3d_grid(
                (-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 3)
            points_voxels = points_voxels.expand(
                batch_size, *points_voxels.size())
            points_voxels = points_voxels.to(device)
            with torch.no_grad():
                p_out = self.model(points_voxels, inputs,
                                   sample=self.eval_sample, **kwargs)

            voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
            occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
            iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()

            eval_dict['iou_voxels'] = iou_voxels

        return eval_dict

    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Evaluates the model on a 32^3 grid, thresholds the result into a
        voxel grid, and writes an input image and a voxel rendering per
        batch element into ``self.vis_dir``.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            p_r = self.model(p, inputs, sample=self.eval_sample, **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(
                inputs[i].cpu(), self.input_type, input_img_path)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))

    def compute_loss(self, data):
        ''' Computes the loss.

        The loss is the negative ELBO: KL divergence between the inferred
        posterior q(z|...) and the prior p0_z, plus the binary
        cross-entropy reconstruction term over the sampled points.

        Args:
            data (dict): data dictionary
        '''
        device = self.device
        p = data.get('points').to(device)
        occ = data.get('points.occ').to(device)
        inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)

        kwargs = {}

        c = self.model.encode_inputs(inputs)
        q_z = self.model.infer_z(p, occ, c, **kwargs)
        # rsample() keeps the sampling step differentiable
        # (reparameterization trick).
        z = q_z.rsample()

        # KL-divergence
        kl = dist.kl_divergence(q_z, self.model.p0_z).sum(dim=-1)
        loss = kl.mean()

        # General points
        logits = self.model.decode(p, z, c, **kwargs).logits
        loss_i = F.binary_cross_entropy_with_logits(
            logits, occ, reduction='none')
        loss = loss + loss_i.sum(-1).mean()

        return loss
| 5,479 | 30.494253 | 78 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/config.py | import torch
import torch.distributions as dist
from torch import nn
import os
from im2mesh.encoder import encoder_dict
from im2mesh.onet import models, training, generation
from im2mesh import data
from im2mesh import config
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the Occupancy Network model.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    model_cfg = cfg['model']
    dim = cfg['data']['dim']
    z_dim = model_cfg['z_dim']
    c_dim = model_cfg['c_dim']

    # Occupancy decoder.
    decoder = models.decoder_dict[model_cfg['decoder']](
        dim=dim, z_dim=z_dim, c_dim=c_dim,
        **model_cfg['decoder_kwargs']
    )

    # A latent encoder is only needed when a stochastic latent z is used.
    if z_dim != 0:
        encoder_latent = models.encoder_latent_dict[model_cfg['encoder_latent']](
            dim=dim, z_dim=z_dim, c_dim=c_dim,
            **model_cfg['encoder_latent_kwargs']
        )
    else:
        encoder_latent = None

    # Conditioning encoder: an index embedding, a network, or nothing.
    encoder_name = model_cfg['encoder']
    if encoder_name == 'idx':
        encoder = nn.Embedding(len(dataset), c_dim)
    elif encoder_name is not None:
        encoder = encoder_dict[encoder_name](
            c_dim=c_dim,
            **model_cfg['encoder_kwargs']
        )
    else:
        encoder = None

    p0_z = get_prior_z(cfg, device)
    return models.OccupancyNetwork(
        decoder, encoder, encoder_latent, p0_z, device=device
    )
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns the trainer object.

    Args:
        model (nn.Module): the Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    # Visualizations are written next to the training output directory.
    vis_dir = os.path.join(cfg['training']['out_dir'], 'vis')
    return training.Trainer(
        model, optimizer,
        device=device,
        input_type=cfg['data']['input_type'],
        vis_dir=vis_dir,
        threshold=cfg['test']['threshold'],
        eval_sample=cfg['training']['eval_sample'],
    )
def get_generator(model, cfg, device, **kwargs):
    ''' Returns the generator object.

    Args:
        model (nn.Module): Occupancy Network model
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    gen_cfg = cfg['generation']
    preprocessor = config.get_preprocessor(cfg, device=device)
    return generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=gen_cfg['resolution_0'],
        upsampling_steps=gen_cfg['upsampling_steps'],
        sample=gen_cfg['use_sampling'],
        refinement_step=gen_cfg['refinement_step'],
        simplify_nfaces=gen_cfg['simplify_nfaces'],
        preprocessor=preprocessor,
    )
def get_prior_z(cfg, device, **kwargs):
    ''' Returns prior distribution for latent code z.

    The prior is a standard (zero-mean, unit-variance) Gaussian of
    dimension ``cfg['model']['z_dim']``.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    z_dim = cfg['model']['z_dim']
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
def get_data_fields(mode, cfg):
    ''' Returns the data fields.

    Args:
        mode (str): the mode which is used
        cfg (dict): imported yaml config
    '''
    points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
    with_transforms = cfg['model']['use_camera']

    fields = {
        'points': data.PointsField(
            cfg['data']['points_file'], points_transform,
            with_transforms=with_transforms,
            unpackbits=cfg['data']['points_unpackbits'],
        )
    }

    # Evaluation splits additionally need IoU points and optional voxels.
    if mode in ('val', 'test'):
        points_iou_file = cfg['data']['points_iou_file']
        if points_iou_file is not None:
            fields['points_iou'] = data.PointsField(
                points_iou_file,
                with_transforms=with_transforms,
                unpackbits=cfg['data']['points_unpackbits'],
            )
        voxels_file = cfg['data']['voxels_file']
        if voxels_file is not None:
            fields['voxels'] = data.VoxelsField(voxels_file)

    return fields
| 4,466 | 28.006494 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/generation.py | import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange
import trimesh
from im2mesh.utils import libmcubes
from im2mesh.common import make_3d_grid
from im2mesh.utils.libsimplify import simplify_mesh
from im2mesh.utils.libmise import MISE
import time
class Generator3D(object):
    ''' Generator class for Occupancy Networks.

    It provides functions to generate the final mesh as well refining options.

    Args:
        model (nn.Module): trained Occupancy Network model
        points_batch_size (int): batch size for points evaluation
        threshold (float): threshold value
        refinement_step (int): number of refinement steps
        device (device): pytorch device
        resolution0 (int): start resolution for MISE
        upsampling steps (int): number of upsampling steps
        with_normals (bool): whether normals should be estimated
        padding (float): how much padding should be used for MISE
        sample (bool): whether z should be sampled
        simplify_nfaces (int): number of faces the mesh should be simplified to
        preprocessor (nn.Module): preprocessor for inputs
    '''

    def __init__(self, model, points_batch_size=100000,
                 threshold=0.5, refinement_step=0, device=None,
                 resolution0=16, upsampling_steps=3,
                 with_normals=False, padding=0.1, sample=False,
                 simplify_nfaces=None,
                 preprocessor=None):
        self.model = model.to(device)
        self.points_batch_size = points_batch_size
        self.refinement_step = refinement_step
        self.threshold = threshold
        self.device = device
        self.resolution0 = resolution0
        self.upsampling_steps = upsampling_steps
        self.with_normals = with_normals
        self.padding = padding
        self.sample = sample
        self.simplify_nfaces = simplify_nfaces
        self.preprocessor = preprocessor

    def generate_mesh(self, data, return_stats=True):
        ''' Generates the output mesh.

        Args:
            data (tensor): data tensor
            return_stats (bool): whether stats should be returned
        '''
        self.model.eval()
        device = self.device
        # Per-stage timings are collected here for profiling.
        stats_dict = {}

        inputs = data.get('inputs', torch.empty(1, 0)).to(device)
        kwargs = {}

        # Preprocess if requires
        if self.preprocessor is not None:
            t0 = time.time()
            with torch.no_grad():
                inputs = self.preprocessor(inputs)
            stats_dict['time (preprocess)'] = time.time() - t0

        # Encode inputs
        t0 = time.time()
        with torch.no_grad():
            c = self.model.encode_inputs(inputs)
        stats_dict['time (encode inputs)'] = time.time() - t0

        # Either sample z from the prior or take its mean (self.sample).
        z = self.model.get_z_from_prior((1,), sample=self.sample).to(device)
        mesh = self.generate_from_latent(z, c, stats_dict=stats_dict, **kwargs)

        if return_stats:
            return mesh, stats_dict
        else:
            return mesh

    def generate_from_latent(self, z, c=None, stats_dict={}, **kwargs):
        ''' Generates mesh from latent.

        Args:
            z (tensor): latent code z
            c (tensor): latent conditioned code c
            stats_dict (dict): stats dictionary
        '''
        # Convert the probability threshold to logit space, since
        # eval_points returns logits.
        threshold = np.log(self.threshold) - np.log(1. - self.threshold)

        t0 = time.time()
        # Compute bounding box size
        box_size = 1 + self.padding

        # Shortcut
        if self.upsampling_steps == 0:
            # Dense evaluation on a single fixed-resolution grid.
            nx = self.resolution0
            pointsf = box_size * make_3d_grid(
                (-0.5,)*3, (0.5,)*3, (nx,)*3
            )
            values = self.eval_points(pointsf, z, c, **kwargs).cpu().numpy()
            value_grid = values.reshape(nx, nx, nx)
        else:
            # Multiresolution ISo-Surface Extraction: iteratively refine
            # only the regions near the surface.
            mesh_extractor = MISE(
                self.resolution0, self.upsampling_steps, threshold)

            points = mesh_extractor.query()

            while points.shape[0] != 0:
                # Query points
                pointsf = torch.FloatTensor(points).to(self.device)
                # Normalize to bounding box
                pointsf = pointsf / mesh_extractor.resolution
                pointsf = box_size * (pointsf - 0.5)
                # Evaluate model and update
                values = self.eval_points(
                    pointsf, z, c, **kwargs).cpu().numpy()
                values = values.astype(np.float64)
                mesh_extractor.update(points, values)
                points = mesh_extractor.query()

            value_grid = mesh_extractor.to_dense()

        # Extract mesh
        stats_dict['time (eval points)'] = time.time() - t0

        mesh = self.extract_mesh(value_grid, z, c, stats_dict=stats_dict)
        return mesh

    def eval_points(self, p, z, c=None, **kwargs):
        ''' Evaluates the occupancy values for the points.

        Points are processed in chunks of ``self.points_batch_size`` to
        bound memory usage; occupancy logits are returned.

        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        p_split = torch.split(p, self.points_batch_size)
        occ_hats = []

        for pi in p_split:
            pi = pi.unsqueeze(0).to(self.device)
            with torch.no_grad():
                occ_hat = self.model.decode(pi, z, c, **kwargs).logits

            occ_hats.append(occ_hat.squeeze(0).detach().cpu())

        occ_hat = torch.cat(occ_hats, dim=0)

        return occ_hat

    def extract_mesh(self, occ_hat, z, c=None, stats_dict=dict()):
        ''' Extracts the mesh from the predicted occupancy grid.

        Args:
            occ_hat (tensor): value grid of occupancies
            z (tensor): latent code z
            c (tensor): latent conditioned code c
            stats_dict (dict): stats dictionary
        '''
        # Some short hands
        n_x, n_y, n_z = occ_hat.shape
        box_size = 1 + self.padding
        # Threshold in logit space, matching the logits in occ_hat.
        threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        # Make sure that mesh is watertight
        t0 = time.time()
        # Pad with a large negative value so the surface closes at the
        # grid boundary.
        occ_hat_padded = np.pad(
            occ_hat, 1, 'constant', constant_values=-1e6)
        vertices, triangles = libmcubes.marching_cubes(
            occ_hat_padded, threshold)
        stats_dict['time (marching cubes)'] = time.time() - t0
        # Strange behaviour in libmcubes: vertices are shifted by 0.5
        vertices -= 0.5
        # Undo padding
        vertices -= 1
        # Normalize to bounding box
        vertices /= np.array([n_x-1, n_y-1, n_z-1])
        vertices = box_size * (vertices - 0.5)

        # mesh_pymesh = pymesh.form_mesh(vertices, triangles)
        # mesh_pymesh = fix_pymesh(mesh_pymesh)

        # Estimate normals if needed
        if self.with_normals and not vertices.shape[0] == 0:
            t0 = time.time()
            normals = self.estimate_normals(vertices, z, c)
            stats_dict['time (normals)'] = time.time() - t0

        else:
            normals = None

        # Create mesh
        mesh = trimesh.Trimesh(vertices, triangles,
                               vertex_normals=normals,
                               process=False)

        # Directly return if mesh is empty
        if vertices.shape[0] == 0:
            return mesh

        # TODO: normals are lost here
        if self.simplify_nfaces is not None:
            t0 = time.time()
            mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
            stats_dict['time (simplify)'] = time.time() - t0

        # Refine mesh
        if self.refinement_step > 0:
            t0 = time.time()
            self.refine_mesh(mesh, occ_hat, z, c)
            stats_dict['time (refine)'] = time.time() - t0

        return mesh

    def estimate_normals(self, vertices, z, c=None):
        ''' Estimates the normals by computing the gradient of the objective.

        Args:
            vertices (numpy array): vertices of the mesh
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
        device = self.device
        vertices = torch.FloatTensor(vertices)
        vertices_split = torch.split(vertices, self.points_batch_size)

        normals = []
        z, c = z.unsqueeze(0), c.unsqueeze(0)
        for vi in vertices_split:
            vi = vi.unsqueeze(0).to(device)
            vi.requires_grad_()
            occ_hat = self.model.decode(vi, z, c).logits
            out = occ_hat.sum()
            out.backward()
            # The normal is the negative normalized occupancy gradient
            # (occupancy increases towards the inside).
            ni = -vi.grad
            ni = ni / torch.norm(ni, dim=-1, keepdim=True)
            ni = ni.squeeze(0).cpu().numpy()
            normals.append(ni)

        normals = np.concatenate(normals, axis=0)
        return normals

    def refine_mesh(self, mesh, occ_hat, z, c=None):
        ''' Refines the predicted mesh.

        Optimizes the vertex positions so that sampled face points lie on
        the decision boundary and face normals align with the (negative)
        occupancy gradient.

        Args:
            mesh (trimesh object): predicted mesh
            occ_hat (tensor): predicted occupancy grid
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''

        self.model.eval()

        # Some shorthands
        n_x, n_y, n_z = occ_hat.shape
        assert(n_x == n_y == n_z)
        # threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        threshold = self.threshold

        # Vertex parameter
        v0 = torch.FloatTensor(mesh.vertices).to(self.device)
        v = torch.nn.Parameter(v0.clone())

        # Faces of mesh
        faces = torch.LongTensor(mesh.faces).to(self.device)

        # Start optimization
        optimizer = optim.RMSprop([v], lr=1e-4)

        for it_r in trange(self.refinement_step):
            optimizer.zero_grad()

            # Loss
            face_vertex = v[faces]
            # Sample one random barycentric point per face.
            eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
            eps = torch.FloatTensor(eps).to(self.device)
            face_point = (face_vertex * eps[:, :, None]).sum(dim=1)

            face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
            face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
            face_normal = torch.cross(face_v1, face_v2)
            face_normal = face_normal / \
                (face_normal.norm(dim=1, keepdim=True) + 1e-10)
            face_value = torch.sigmoid(
                self.model.decode(face_point.unsqueeze(0), z, c).logits
            )
            normal_target = -autograd.grad(
                [face_value.sum()], [face_point], create_graph=True)[0]

            normal_target = \
                normal_target / \
                (normal_target.norm(dim=1, keepdim=True) + 1e-10)
            # Pull sampled points onto the iso-surface and align normals.
            loss_target = (face_value - threshold).pow(2).mean()
            loss_normal = \
                (face_normal - normal_target).pow(2).sum(dim=1).mean()

            loss = loss_target + 0.01 * loss_normal

            # Update
            loss.backward()
            optimizer.step()

        mesh.vertices = v.data.cpu().numpy()

        return mesh
| 10,942 | 33.850318 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/models/legacy.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from im2mesh.layers import ResnetBlockFC, AffineLayer
class VoxelDecoder(nn.Module):
    ''' Voxel-based occupancy decoder (legacy).

    The latent codes are first decoded into a 32^3 feature volume via
    transposed 3D convolutions; the query points are then evaluated by
    trilinearly sampling this volume and refining the result with a small
    fully-connected ResNet that outputs occupancy logits.

    Args:
        dim (int): input points dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of the fully-connected refinement net
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128, hidden_size=128):
        super().__init__()
        self.c_dim = c_dim
        self.z_dim = z_dim

        # Submodules
        self.actvn = F.relu

        # 3D decoder
        self.fc_in = nn.Linear(c_dim + z_dim, 256*4*4*4)
        self.convtrp_0 = nn.ConvTranspose3d(256, 128, 3, stride=2,
                                            padding=1, output_padding=1)
        self.convtrp_1 = nn.ConvTranspose3d(128, 64, 3, stride=2,
                                            padding=1, output_padding=1)
        self.convtrp_2 = nn.ConvTranspose3d(64, 32, 3, stride=2,
                                            padding=1, output_padding=1)

        # Fully connected decoder
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)

        self.fc_f = nn.Linear(32, hidden_size)
        self.fc_c = nn.Linear(c_dim, hidden_size)
        self.fc_p = nn.Linear(dim, hidden_size)
        self.block0 = ResnetBlockFC(hidden_size, hidden_size)
        self.block1 = ResnetBlockFC(hidden_size, hidden_size)
        self.fc_out = nn.Linear(hidden_size, 1)

    def forward(self, p, z, c, **kwargs):
        batch_size = c.size(0)

        if self.z_dim != 0:
            net = torch.cat([z, c], dim=1)
        else:
            net = c

        # Decode latents into a 3D feature volume: 4^3 -> 8^3 -> 16^3 -> 32^3.
        net = self.fc_in(net)
        net = net.view(batch_size, 256, 4, 4, 4)
        net = self.convtrp_0(self.actvn(net))
        net = self.convtrp_1(self.actvn(net))
        net = self.convtrp_2(self.actvn(net))

        # Sample the volume at the query points; the factor 2 presumably
        # maps points in [-0.5, 0.5] to grid_sample's [-1, 1] range --
        # TODO confirm point normalization.
        net = F.grid_sample(
            net, 2*p.unsqueeze(1).unsqueeze(1), padding_mode='border')
        net = net.squeeze(2).squeeze(2).transpose(1, 2)

        net = self.fc_f(self.actvn(net))

        # Combine the sampled features with point, z and c embeddings.
        net_p = self.fc_p(p)
        net = net + net_p

        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(1)
            net = net + net_z
        if self.c_dim != 0:
            net_c = self.fc_c(c).unsqueeze(1)
            net = net + net_c

        net = self.block0(net)
        net = self.block1(net)

        out = self.fc_out(self.actvn(net))
        out = out.squeeze(-1)

        return out
class FeatureDecoder(nn.Module):
    ''' Image-feature decoder (legacy).
    Points are projected with a learned affine camera (AffineLayer); local
    image features are sampled at the projected 2D locations and combined
    with a global feature before a fully-connected ResNet predicts
    occupancy logits.
    Args:
        dim (int): dimension of the input points (e.g. 3)
        z_dim (int): dimension of latent code z
        c_dim (int): number of channels of the feature map c
        hidden_size (int): hidden size of the ResNet blocks
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128, hidden_size=256):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.dim = dim
        self.actvn = nn.ReLU()
        self.affine = AffineLayer(c_dim, dim)
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        self.fc_p1 = nn.Linear(dim, hidden_size)
        self.fc_p2 = nn.Linear(dim, hidden_size)
        self.fc_c1 = nn.Linear(c_dim, hidden_size)
        self.fc_c2 = nn.Linear(c_dim, hidden_size)
        self.block0 = ResnetBlockFC(hidden_size, hidden_size)
        self.block1 = ResnetBlockFC(hidden_size, hidden_size)
        self.block2 = ResnetBlockFC(hidden_size, hidden_size)
        self.block3 = ResnetBlockFC(hidden_size, hidden_size)
        self.fc_out = nn.Linear(hidden_size, 1)
    def forward(self, p, z, c, **kwargs):
        ''' Predicts occupancy logits for points p given feature map c.
        Args:
            p (tensor): query points of shape (B, T, dim)
            z (tensor): latent code z
            c (tensor): image feature map, presumably (B, c_dim, H, W) --
                TODO confirm against the conditioning network
        '''
        batch_size, T, D = p.size()
        # Global feature: channel-wise max over all spatial locations
        c1 = c.view(batch_size, self.c_dim, -1).max(dim=2)[0]
        # Project points with the learned affine camera, then perform the
        # perspective divide (abs + eps guards against division by zero)
        Ap = self.affine(c1, p)
        Ap2 = Ap[:, :, :2] / (Ap[:, :, 2:].abs() + 1e-5)
        # Bilinear sampling of local features; the factor 2 maps the
        # coordinates into grid_sample's [-1, 1] range
        c2 = F.grid_sample(c, 2*Ap2.unsqueeze(1), padding_mode='border')
        c2 = c2.squeeze(2).transpose(1, 2)
        # Combine point, latent, local and global features
        net = self.fc_p1(p) + self.fc_p2(Ap)
        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(1)
            net = net + net_z
        net_c = self.fc_c2(c2) + self.fc_c1(c1).unsqueeze(1)
        net = net + net_c
        net = self.block0(net)
        net = self.block1(net)
        net = self.block2(net)
        net = self.block3(net)
        out = self.fc_out(self.actvn(net))
        out = out.squeeze(-1)
        return out | 4,001 | 31.016 | 72 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/models/encoder_latent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# Max Pooling operation
def maxpool(x, dim=-1, keepdim=False):
    ''' Returns the maximum of x along the given dimension (values only). '''
    return torch.max(x, dim=dim, keepdim=keepdim).values
class Encoder(nn.Module):
    ''' Latent encoder class.

    Maps a point set with occupancy values (and optionally a conditioning
    code) to the mean and log standard deviation of the approximate
    posterior Gaussian over the latent code z.

    Args:
        z_dim (int): dimension of output code z
        c_dim (int): dimension of latent conditioned code c
        dim (int): input point dimension
        leaky (bool): whether to use leaky ReLUs (and mean- instead of
            max-pooling)
    '''
    def __init__(self, z_dim=128, c_dim=128, dim=3, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        # Submodules (attribute names kept for checkpoint compatibility)
        self.fc_pos = nn.Linear(dim, 128)
        if c_dim != 0:
            self.fc_c = nn.Linear(c_dim, 128)
        self.fc_0 = nn.Linear(1, 128)
        self.fc_1 = nn.Linear(128, 128)
        self.fc_2 = nn.Linear(256, 128)
        self.fc_3 = nn.Linear(256, 128)
        self.fc_mean = nn.Linear(128, z_dim)
        self.fc_logstd = nn.Linear(128, z_dim)
        if leaky:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
            self.pool = torch.mean
        else:
            self.actvn = F.relu
            self.pool = maxpool
    def forward(self, p, x, c=None, **kwargs):
        ''' Returns (mean, logstd) of the posterior over z.

        Args:
            p (tensor): points of shape (B, T, dim)
            x (tensor): occupancy values of shape (B, T)
            c (tensor): optional conditioning code of shape (B, c_dim)
        '''
        # Per-point feature from occupancy + position (+ condition)
        h = self.fc_0(x.unsqueeze(-1)) + self.fc_pos(p)
        if self.c_dim != 0:
            h = h + self.fc_c(c).unsqueeze(1)
        h = self.fc_1(self.actvn(h))
        # Two rounds of pool + concat mix global context into each point
        for fc in (self.fc_2, self.fc_3):
            pooled = self.pool(h, dim=1, keepdim=True).expand(h.size())
            h = fc(self.actvn(torch.cat([h, pooled], dim=2)))
        # Final global pooling over the point axis: B x 128
        h = self.pool(h, dim=1)
        return self.fc_mean(h), self.fc_logstd(h)
| 2,112 | 26.802632 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/models/decoder.py |
import torch.nn as nn
import torch.nn.functional as F
from im2mesh.layers import (
ResnetBlockFC, CResnetBlockConv1d,
CBatchNorm1d, CBatchNorm1d_legacy,
ResnetBlockConv1d
)
class Decoder(nn.Module):
    ''' Decoder class.

    Plain occupancy decoder without any form of normalization: latent
    codes are broadcast-added to the point embedding, which is then
    processed by a stack of fully-connected ResNet blocks.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=128, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        # Submodules (attribute names kept for checkpoint compatibility)
        self.fc_p = nn.Linear(dim, hidden_size)
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        if c_dim != 0:
            self.fc_c = nn.Linear(c_dim, hidden_size)
        self.block0 = ResnetBlockFC(hidden_size)
        self.block1 = ResnetBlockFC(hidden_size)
        self.block2 = ResnetBlockFC(hidden_size)
        self.block3 = ResnetBlockFC(hidden_size)
        self.block4 = ResnetBlockFC(hidden_size)
        self.fc_out = nn.Linear(hidden_size, 1)
        if leaky:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
        else:
            self.actvn = F.relu
    def forward(self, p, z, c=None, **kwargs):
        ''' Returns occupancy logits of shape (B, T) for the points p. '''
        net = self.fc_p(p)
        # Broadcast-add the latent codes over the point dimension
        if self.z_dim != 0:
            net = net + self.fc_z(z).unsqueeze(1)
        if self.c_dim != 0:
            net = net + self.fc_c(c).unsqueeze(1)
        for block in (self.block0, self.block1, self.block2,
                      self.block3, self.block4):
            net = block(net)
        return self.fc_out(self.actvn(net)).squeeze(-1)
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.
    The conditioning code c enters the network exclusively through the
    conditional batch-norm layers inside the ResNet blocks.
    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.z_dim = z_dim
        if not z_dim == 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        # Legacy checkpoints use a differently parameterised CBN layer
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, z, c, **kwargs):
        ''' Returns occupancy logits for the points p.
        Args:
            p (tensor): points of shape (B, T, dim)
            z (tensor): latent code z
            c (tensor): latent conditioned code c (used by the CBN layers)
        '''
        # Conv1d layers expect channels first: (B, dim, T)
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)
        if self.z_dim != 0:
            net_z = self.fc_z(z).unsqueeze(2)
            net = net + net_z
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        out = self.fc_out(self.actvn(self.bn(net, c)))
        out = out.squeeze(1)
        return out
class DecoderCBatchNorm2(nn.Module):
    ''' Decoder with CBN class 2.

    Variant of DecoderCBatchNorm with a configurable number of
    conditional-batch-norm ResNet blocks; a latent code z, if used, is
    folded into the conditioning code c instead of the point features.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of ResNet blocks
    '''
    def __init__(self, dim=3, z_dim=0, c_dim=128,
                 hidden_size=256, n_blocks=5):
        super().__init__()
        self.z_dim = z_dim
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, c_dim)
        self.conv_p = nn.Conv1d(dim, hidden_size, 1)
        self.blocks = nn.ModuleList(
            CResnetBlockConv1d(c_dim, hidden_size) for _ in range(n_blocks))
        self.bn = CBatchNorm1d(c_dim, hidden_size)
        self.conv_out = nn.Conv1d(hidden_size, 1, 1)
        self.actvn = nn.ReLU()
    def forward(self, p, z, c, **kwargs):
        ''' Returns occupancy logits of shape (B, T) for the points p. '''
        # Conv1d expects channels first: (B, dim, T)
        net = self.conv_p(p.transpose(1, 2))
        # Fold z into the conditioning code
        if self.z_dim != 0:
            c = c + self.fc_z(z)
        for block in self.blocks:
            net = block(net, c)
        out = self.conv_out(self.actvn(self.bn(net, c)))
        return out.squeeze(1)
class DecoderCBatchNormNoResnet(nn.Module):
    ''' Decoder with conditional batch norm but without ResNet blocks.

    Five (CBN -> activation -> 1x1 conv) stages followed by a final CBN
    and the output convolution.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        # Submodules (attribute names kept for checkpoint compatibility)
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.fc_0 = nn.Conv1d(hidden_size, hidden_size, 1)
        self.fc_1 = nn.Conv1d(hidden_size, hidden_size, 1)
        self.fc_2 = nn.Conv1d(hidden_size, hidden_size, 1)
        self.fc_3 = nn.Conv1d(hidden_size, hidden_size, 1)
        self.fc_4 = nn.Conv1d(hidden_size, hidden_size, 1)
        self.bn_0 = CBatchNorm1d(c_dim, hidden_size)
        self.bn_1 = CBatchNorm1d(c_dim, hidden_size)
        self.bn_2 = CBatchNorm1d(c_dim, hidden_size)
        self.bn_3 = CBatchNorm1d(c_dim, hidden_size)
        self.bn_4 = CBatchNorm1d(c_dim, hidden_size)
        self.bn_5 = CBatchNorm1d(c_dim, hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if leaky:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
        else:
            self.actvn = F.relu
    def forward(self, p, z, c, **kwargs):
        ''' Returns occupancy logits of shape (B, T) for the points p. '''
        # Conv1d expects channels first: (B, dim, T)
        net = self.fc_p(p.transpose(1, 2))
        if self.z_dim != 0:
            net = net + self.fc_z(z).unsqueeze(2)
        # Five CBN -> activation -> conv stages
        stages = zip(
            (self.bn_0, self.bn_1, self.bn_2, self.bn_3, self.bn_4),
            (self.fc_0, self.fc_1, self.fc_2, self.fc_3, self.fc_4))
        for bn, fc in stages:
            net = fc(self.actvn(bn(net, c)))
        out = self.fc_out(self.actvn(self.bn_5(net, c)))
        return out.squeeze(1)
class DecoderBatchNorm(nn.Module):
    ''' Decoder with (unconditional) batch normalization.

    Latent codes are broadcast-added to the point embedding; a stack of
    ResnetBlockConv1d blocks followed by a final batch norm predicts the
    occupancy logits.

    Args:
        dim (int): input dimension
        z_dim (int): dimension of latent code z
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
    '''
    def __init__(self, dim=3, z_dim=128, c_dim=128,
                 hidden_size=256, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        # Submodules (attribute names kept for checkpoint compatibility)
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)
        if self.c_dim != 0:
            self.fc_c = nn.Linear(c_dim, hidden_size)
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.block0 = ResnetBlockConv1d(hidden_size)
        self.block1 = ResnetBlockConv1d(hidden_size)
        self.block2 = ResnetBlockConv1d(hidden_size)
        self.block3 = ResnetBlockConv1d(hidden_size)
        self.block4 = ResnetBlockConv1d(hidden_size)
        self.bn = nn.BatchNorm1d(hidden_size)
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if leaky:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
        else:
            self.actvn = F.relu
    def forward(self, p, z, c, **kwargs):
        ''' Returns occupancy logits of shape (B, T) for the points p. '''
        # Conv1d expects channels first: (B, dim, T)
        net = self.fc_p(p.transpose(1, 2))
        if self.z_dim != 0:
            net = net + self.fc_z(z).unsqueeze(2)
        if self.c_dim != 0:
            net = net + self.fc_c(c).unsqueeze(2)
        for block in (self.block0, self.block1, self.block2,
                      self.block3, self.block4):
            net = block(net)
        out = self.fc_out(self.actvn(self.bn(net)))
        return out.squeeze(1)
| 9,357 | 29.090032 | 75 | py |
Im2Hands | Im2Hands-main/im2mesh/onet/models/__init__.py | import torch
import torch.nn as nn
from torch import distributions as dist
from im2mesh.onet.models import encoder_latent, decoder
# Latent encoder dictionary: maps architecture names to classes
encoder_latent_dict = {
    'simple': encoder_latent.Encoder,
}
# Decoder dictionary: maps architecture names to classes
decoder_dict = {
    'simple': decoder.Decoder,
    'cbatchnorm': decoder.DecoderCBatchNorm,
    'cbatchnorm2': decoder.DecoderCBatchNorm2,
    'batchnorm': decoder.DecoderBatchNorm,
    'cbatchnorm_noresnet': decoder.DecoderCBatchNormNoResnet,
}
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network class.

    Args:
        decoder (nn.Module): decoder network; called as decoder(p, z, c)
            and expected to return occupancy logits
        encoder (nn.Module): encoder network mapping the conditioning
            input to a code c (optional)
        encoder_latent (nn.Module): latent encoder network used to infer
            the posterior over z (optional)
        p0_z (dist): prior distribution for latent code z
        device (device): torch device
    '''
    def __init__(self, decoder, encoder=None, encoder_latent=None, p0_z=None,
                 device=None):
        super().__init__()
        if p0_z is None:
            # Zero-dimensional prior: the model is effectively
            # deterministic (z has no components)
            p0_z = dist.Normal(torch.tensor([]), torch.tensor([]))
        self.decoder = decoder.to(device)
        if encoder_latent is not None:
            self.encoder_latent = encoder_latent.to(device)
        else:
            self.encoder_latent = None
        if encoder is not None:
            self.encoder = encoder.to(device)
        else:
            self.encoder = None
        self._device = device
        self.p0_z = p0_z
    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.

        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample z from the prior (otherwise
                the prior mean is used)

        Returns:
            dist.Bernoulli: occupancy distribution for the points p
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r
    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.

        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input

        Returns:
            tuple: (elbo, reconstruction error, KL divergence)
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)
        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl
        return elbo, rec_error, kl
    def encode_inputs(self, inputs):
        ''' Encodes the input.

        Args:
            input (tensor): the input
        '''
        if self.encoder is not None:
            c = self.encoder(inputs)
        else:
            # No encoder: use an empty conditioning code. Move it to the
            # model device for consistency with infer_z (the original
            # code created this tensor on the CPU regardless of device).
            c = torch.empty(inputs.size(0), 0).to(self._device)
        return c
    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.

        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c

        Returns:
            dist.Bernoulli: distribution parameterized by decoder logits
        '''
        logits = self.decoder(p, z, c, **kwargs)
        p_r = dist.Bernoulli(logits=logits)
        return p_r
    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers the approximate posterior distribution over z.

        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c
        '''
        if self.encoder_latent is not None:
            mean_z, logstd_z = self.encoder_latent(p, occ, c, **kwargs)
        else:
            # Zero-dimensional posterior for the deterministic model
            batch_size = p.size(0)
            mean_z = torch.empty(batch_size, 0).to(self._device)
            logstd_z = torch.empty(batch_size, 0).to(self._device)
        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z
    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from the prior distribution.

        Args:
            size (Size): size of z
            sample (bool): whether to sample (otherwise the prior mean is
                expanded to the requested size)
        '''
        if sample:
            z = self.p0_z.sample(size).to(self._device)
        else:
            z = self.p0_z.mean.to(self._device)
            z = z.expand(*size, *z.size())
        return z
    def to(self, device):
        ''' Puts the model to the device.

        Args:
            device (device): pytorch device
        '''
        model = super().to(device)
        model._device = device
        return model
| 4,453 | 27.551282 | 77 | py |
Im2Hands | Im2Hands-main/im2mesh/psgn/training.py | import os
from tqdm import trange
import torch
from im2mesh.common import chamfer_distance
from im2mesh.training import BaseTrainer
from im2mesh.utils import visualize as vis
class Trainer(BaseTrainer):
    r''' Trainer object for the Point Set Generation Network.
    The PSGN network is trained on Chamfer distance. The Trainer object
    obtains methods to perform a train and eval step as well as to visualize
    the current training state by plotting the respective point clouds.
    Args:
        model (nn.Module): PSGN model
        optimizer (PyTorch optimizer): The optimizer that should be used
        device (PyTorch device): the PyTorch device
        input_type (string): The input type (e.g. 'img')
        vis_dir (string): the visualisation directory
    '''
    def __init__(self, model, optimizer, device=None, input_type='img',
                 vis_dir=None):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.input_type = input_type
        self.vis_dir = vis_dir
        # Create the visualisation directory up front if requested
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)
    def train_step(self, data):
        r''' Performs a train step.
        The chamfer loss is calculated and an appropriate backward pass is
        performed.
        Args:
            data (tensor): training data
        '''
        self.model.train()
        points = data.get('pointcloud').to(self.device)
        inputs = data.get('inputs').to(self.device)
        loss = self.compute_loss(points, inputs)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()
    def eval_step(self, data):
        r''' Performs an evaluation step.
        The chamfer loss is calculated and returned in a dictionary.
        Args:
            data (tensor): input data
        '''
        self.model.eval()
        device = self.device
        # NOTE(review): evaluation uses the 'pointcloud_chamfer' ground
        # truth while training uses 'pointcloud' -- presumably a denser
        # evaluation point cloud; confirm against the dataset fields
        points = data.get('pointcloud_chamfer').to(device)
        inputs = data.get('inputs').to(device)
        with torch.no_grad():
            points_out = self.model(inputs)
        loss = chamfer_distance(points, points_out).mean()
        loss = loss.item()
        # Both keys report the same value: the loss *is* the Chamfer
        # distance for this model
        eval_dict = {
            'loss': loss,
            'chamfer': loss,
        }
        return eval_dict
    def visualize(self, data):
        r''' Visualizes the current output data of the model.
        The point clouds for respective input data is plotted.
        Args:
            data (tensor): input data
        '''
        device = self.device
        points_gt = data.get('pointcloud').to(device)
        inputs = data.get('inputs').to(device)
        with torch.no_grad():
            points_out = self.model(inputs)
        points_out = points_out.cpu().numpy()
        points_gt = points_gt.cpu().numpy()
        batch_size = inputs.size(0)
        # One image triple per batch element: input, prediction, GT
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(
                inputs[i].cpu(), self.input_type, input_img_path)
            out_file = os.path.join(self.vis_dir, '%03d.png' % i)
            out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
            vis.visualize_pointcloud(points_out[i], out_file=out_file)
            vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
    def compute_loss(self, points, inputs):
        r''' Computes the loss.
        The Point Set Generation Network is trained on the Chamfer distance.
        Args:
            points (tensor): GT point cloud data
            inputs (tensor): input data for the model
        '''
        points_out = self.model(inputs)
        loss = chamfer_distance(points, points_out).mean()
        return loss
| 3,775 | 30.466667 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/psgn/generation.py | import torch
from im2mesh.utils.io import export_pointcloud
import tempfile
import subprocess
import os
import trimesh
class Generator3D(object):
    r''' Generator Class for Point Set Generation Network.
    While for point cloud generation the output of the network is used
    directly, for mesh generation we perform surface reconstruction in the
    form of ball pivoting. In practice, this is done by using a respective
    meshlab script.
    Args:
        model (nn.Module): Point Set Generation Network model
        device (PyTorch Device): the PyTorch device
    '''
    def __init__(self, model, device=None,
                 knn_normals=5, poisson_depth=10):
        self.model = model.to(device)
        self.device = device
        # TODO Can we remove these variables?
        # NOTE(review): knn_normals and poisson_depth are stored but never
        # read anywhere in this class
        self.knn_normals = knn_normals
        self.poisson_depth = poisson_depth
    def generate_pointcloud(self, data):
        r''' Generates a point cloud by simply using the output of the network.
        Args:
            data (tensor): input data
        '''
        self.model.eval()
        device = self.device
        # Fall back to an empty input tensor when 'inputs' is missing
        inputs = data.get('inputs', torch.empty(1, 0)).to(device)
        with torch.no_grad():
            points = self.model(inputs).squeeze(0)
        points = points.cpu().numpy()
        return points
    def generate_mesh(self, data):
        r''' Generates meshes by performing ball pivoting on the output of the network.
        Args:
            data (tensor): input data
        '''
        self.model.eval()
        device = self.device
        inputs = data.get('inputs', torch.empty(1, 0)).to(device)
        with torch.no_grad():
            points = self.model(inputs).squeeze(0)
        points = points.cpu().numpy()
        # Surface reconstruction via an external meshlabserver invocation
        mesh = meshlab_poisson(points)
        return mesh
FILTER_SCRIPT_RECONSTRUCTION = '''
<!DOCTYPE FilterScript>
<FilterScript>
<filter name="Surface Reconstruction: Ball Pivoting">
<Param value="0" type="RichAbsPerc" max="1.4129" name="BallRadius" description="Pivoting Ball radius (0 autoguess)" min="0" tooltip="The radius of the ball pivoting (rolling) over the set of points. Gaps that are larger than the ball radius will not be filled; similarly the small pits that are smaller than the ball radius will be filled."/>
<Param value="20" type="RichFloat" name="Clustering" description="Clustering radius (% of ball radius)" tooltip="To avoid the creation of too small triangles, if a vertex is found too close to a previous one, it is clustered/merged with it."/>
<Param value="90" type="RichFloat" name="CreaseThr" description="Angle Threshold (degrees)" tooltip="If we encounter a crease angle that is too large we should stop the ball rolling"/>
<Param value="false" type="RichBool" name="DeleteFaces" description="Delete intial set of faces" tooltip="if true all the initial faces of the mesh are deleted and the whole surface is rebuilt from scratch, other wise the current faces are used as a starting point. Useful if you run multiple times the algorithm with an incrasing ball radius."/>
</filter>
</FilterScript>
'''
def meshlab_poisson(pointcloud):
    r''' Runs the meshlab ball pivoting algorithm on a point cloud.

    The point cloud and the filter script are written to a temporary
    directory, meshlabserver is invoked to perform the reconstruction,
    and the resulting mesh is loaded back with trimesh.

    Args:
        pointcloud (numpy tensor): input point cloud

    Returns:
        trimesh.Trimesh: the reconstructed mesh

    Raises:
        subprocess.CalledProcessError: if meshlabserver exits with a
            non-zero status
    '''
    with tempfile.TemporaryDirectory() as tmpdir:
        script_path = os.path.join(tmpdir, 'script.mlx')
        input_path = os.path.join(tmpdir, 'input.ply')
        output_path = os.path.join(tmpdir, 'out.off')
        # Write script
        with open(script_path, 'w') as f:
            f.write(FILTER_SCRIPT_RECONSTRUCTION)
        # Write pointcloud
        export_pointcloud(pointcloud, input_path, as_text=False)
        # Run meshlabserver. An argument list with shell=False avoids
        # shell quoting issues with the paths (the original built a shell
        # string with string concatenation); check=True makes a failed
        # reconstruction fail loudly instead of failing later at load.
        subprocess.run(
            ['meshlabserver', '-i', input_path, '-o', output_path,
             '-s', script_path],
            cwd=os.getcwd(), check=True,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
        )
        mesh = trimesh.load(output_path, process=False)
        return mesh
| 4,097 | 37.660377 | 348 | py |
Im2Hands | Im2Hands-main/im2mesh/psgn/models/psgn_2branch.py | import torch.nn as nn
import torch
class PCGN_2Branch(nn.Module):
    r''' The 2-Branch decoder of the Point Set Generation Network.
    The latent embedding of the image is passed through a fully-connected
    branch as well as a convolution-based branch which receives additional
    input from the conditioning network.
    '''
    def __init__(self, dim=3, c_dim=512, n_points=1024):
        r''' Initialisation.
        Args:
            dim (int): dimension of the output points (e.g. 3)
            c_dim (int): dimension of the output of the conditioning network
            n_points (int): number of points to predict
        '''
        super().__init__()
        # Attributes
        actvn = nn.ReLU()
        self.actvn = actvn
        self.dim = dim
        num_fm = int(c_dim/32)
        conv_c_in = 32 * num_fm
        fc_dim_in = 3*4*conv_c_in  # input image is downsampled to 3x4
        fc_pts = n_points - 768  # conv branch has a fixed output of 768 points
        # Submodules
        self.fc_branch = nn.Sequential(nn.Linear(fc_dim_in, fc_pts*dim), actvn)
        self.deconv_1 = nn.ConvTranspose2d(c_dim, num_fm*16, 5, 2, 2, 1)
        self.deconv_2 = nn.ConvTranspose2d(num_fm*16, num_fm*8, 5, 2, 2, 1)
        self.deconv_3 = nn.ConvTranspose2d(num_fm*8, num_fm*4, 5, 2, 2, 1)
        # TODO: unused, remove? (keep it for now to load old checkpoints)
        self.deconv_4 = nn.ConvTranspose2d(num_fm*4, 3, 5, 2, 2, 1)
        self.conv_1 = nn.Sequential(
            nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn)
        self.conv_2 = nn.Sequential(
            nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn)
        self.conv_3 = nn.Sequential(
            nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn)
        self.conv_4 = nn.Conv2d(num_fm*4, dim, 3, 1, 1)
    def forward(self, c):
        r''' Predicts the point set from the conditioning output.
        Args:
            c (tuple): pair (x, feature_maps) where x is the latent
                feature volume and feature_maps holds intermediate
                activations of the conditioning network, used here as
                skip connections (last three entries)
        '''
        x, feature_maps = c
        batch_size = x.shape[0]
        # Fully-connected branch: predicts (n_points - 768) points directly
        fc_branch = self.fc_branch(x.view(batch_size, -1))
        fc_branch = fc_branch.view(batch_size, -1, 3)
        # Convolutional branch with encoder skip connections added in
        # before each refinement convolution
        conv_branch = self.deconv_1(x)
        conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-1]))
        conv_branch = self.conv_1(conv_branch)
        conv_branch = self.deconv_2(conv_branch)
        conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-2]))
        conv_branch = self.conv_2(conv_branch)
        conv_branch = self.deconv_3(conv_branch)
        conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-3]))
        conv_branch = self.conv_3(conv_branch)
        conv_branch = self.conv_4(conv_branch)
        conv_branch = conv_branch.view(batch_size, -1, self.dim)
        # Concatenate both branches into the final point set
        output = torch.cat([fc_branch, conv_branch], dim=1)
        return output
| 2,700 | 37.042254 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/psgn/models/decoder.py | import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
    r''' Simple decoder for the Point Set Generation Network.

    Three fully-connected hidden layers followed by an output layer that
    regresses the coordinates of a fixed number of points.

    Args:
        dim (int): The output dimension of the points (e.g. 3)
        c_dim (int): dimension of the input vector
        n_points (int): number of output points
    '''
    def __init__(self, dim=3, c_dim=128, n_points=1024):
        super().__init__()
        # Attributes
        self.dim = dim
        self.c_dim = c_dim
        self.n_points = n_points
        # Submodules (attribute names kept for checkpoint compatibility)
        self.actvn = F.relu
        self.fc_0 = nn.Linear(c_dim, 512)
        self.fc_1 = nn.Linear(512, 512)
        self.fc_2 = nn.Linear(512, 512)
        self.fc_out = nn.Linear(512, dim*n_points)
    def forward(self, c):
        ''' Predicts a point set of shape (B, n_points, dim) from code c. '''
        hidden = self.fc_0(c)
        for layer in (self.fc_1, self.fc_2, self.fc_out):
            hidden = layer(self.actvn(hidden))
        # Reshape the flat coordinate vector into per-point coordinates
        return hidden.view(c.size(0), self.n_points, self.dim)
| 1,176 | 28.425 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/psgn/models/__init__.py | import torch.nn as nn
from im2mesh.psgn.models.decoder import Decoder
from im2mesh.psgn.models.psgn_2branch import PCGN_2Branch
# Decoder dictionary: maps architecture names to decoder classes
decoder_dict = {
    'simple': Decoder,
    'psgn_2branch': PCGN_2Branch
}
class PCGN(nn.Module):
    r''' The Point Set Generation Network.

    The input image is first mapped to a latent code by the encoder
    network (e.g. a ResNet-18 or the CNN proposed in the original
    publication); the decoder network (e.g. the 2-branch model from the
    PSGN paper) then regresses the point set from this code.

    Args:
        decoder (nn.Module): The decoder network
        encoder (nn.Module): The encoder network
    '''
    def __init__(self, decoder, encoder):
        super().__init__()
        self.decoder = decoder
        self.encoder = encoder
    def forward(self, x):
        ''' Predicts a point set for the input image x. '''
        latent = self.encoder(x)
        return self.decoder(latent)
| 900 | 26.30303 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/pix2mesh/training.py | import torch.nn.functional as F
import torch
from im2mesh.common import chamfer_distance
import os
from torchvision.utils import save_image
from im2mesh.training import BaseTrainer
from im2mesh.utils import visualize as vis
import im2mesh.common as common
class Trainer(BaseTrainer):
r''' Trainer object for the pixel2mesh model.
It provided methods to perform a training step, and evaluation step and
necessary loss calculation functions. We adhered to the official
Pixel2Mesh implementation where 4 different losses were used.
Args:
model (nn.Module): Pixel2Mesh module that should be trained
optimizer (Optimizer): optimizer that should be used
ellipsoid (numpy array): helper file with helper matrices for
respective losses
vis_dir (string): visualisation path
device (device): The device that should be used (GPU or CPU)
'''
    def __init__(
            self, model, optimizer, ellipsoid, vis_dir, device=None,
            adjust_losses=False):
        r''' Initialisation.
        Args:
            model (nn.Module): Pixel2Mesh module that should be trained
            optimizer (Optimizer): optimizer that should be used
            ellipsoid (numpy array): helper file; index 7 holds the
                Laplace indices, indices 1..3 hold per-block adjacency
                data from which the edge lists are taken
            vis_dir (string): visualisation path
            device (device): The device that should be used (GPU or CPU)
            adjust_losses (bool): whether to rescale the loss weights
        '''
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.vis_dir = vis_dir
        # hardcoded indices and weights for the Laplace transformation
        self.lape_idx = ellipsoid[7]
        self.edges = []  # hardcoded IDs for edges in the mesh
        for i in range(1, 4):
            adj = ellipsoid[i][1]
            self.edges.append(torch.from_numpy(adj[0]).to(device))
        # Hyperparameters from the authors' implementation
        self.param_chamfer_w = 3000
        self.param_chamfer_rel = 0.55
        self.param_edge = 300
        self.param_n = 0.5
        self.param_lap = 1500
        self.param_lap_rel = 0.3
        self.param_move = 100
        if adjust_losses:
            print('Adjusting loss hyperparameters.')
            # NOTE(review): the 0.57**2 factor presumably compensates for
            # a different data scale -- confirm against the preprocessing
            self.param_chamfer_w *= 0.57**2
            self.param_edge *= 0.57**2
            self.param_lap *= 0.57**2
            self.param_move *= 0.57**2
    def train_step(self, data):
        r''' Performs a training step of the model.
        Arguments:
            data (tensor): The input data
        '''
        self.model.train()
        points = data.get('pointcloud').to(self.device)
        normals = data.get('pointcloud.normals').to(self.device)
        img = data.get('inputs').to(self.device)
        camera_args = common.get_camera_args(
            data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
        # Transform GT data into camera coordinate system
        world_mat, camera_mat = camera_args['Rt'], camera_args['K']
        points_transformed = common.transform_points(points, world_mat)
        # Transform GT normals to camera coordinate system (rotation part
        # only, i.e. the first three columns of the world matrix)
        world_normal_mat = world_mat[:, :, :3]
        normals = common.transform_points(normals, world_normal_mat)
        outputs1, outputs2 = self.model(img, camera_mat)
        loss = self.compute_loss(
            outputs1, outputs2, points_transformed, normals, img)
        # Standard optimisation step
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()
    def give_edges(self, pred, block_id):
        r''' Returns the edge vectors (endpoint differences) for given block.
        Arguments:
            pred (tensor): vertex predictions of dim
                (batch_size, n_vertices, 3)
            block_id (int): deformation block id (1,2 or 3)
        '''
        batch_size = pred.shape[0]  # (batch_size, n_vertices, 3)
        num_edges = self.edges[block_id-1].shape[0]
        edges = self.edges[block_id-1]
        # Gather the two endpoint positions of every edge
        nod1 = torch.index_select(pred, 1, edges[:, 0].long())
        nod2 = torch.index_select(pred, 1, edges[:, 1].long())
        assert(
            nod1.shape == (batch_size, num_edges, 3) and
            nod2.shape == (batch_size, num_edges, 3))
        # Edge vector = difference of its endpoints
        final_edges = torch.sub(nod1, nod2)
        assert(final_edges.shape == (batch_size, num_edges, 3))
        return final_edges
def edge_length_loss(self, pred, block_id):
r''' Returns the edge length loss for given block.
Arguments:
pred (tensor): vertex predictions of dim
(batch_size, n_vertices, 3)
block_id (int): deformation block id (1,2 or 3)
'''
batch_size = pred.shape[0]
num_edges = self.edges[block_id-1].shape[0]
final_edges = self.give_edges(pred, block_id)
l_e = final_edges.pow(2).sum(dim=2)
assert(l_e.shape == (batch_size, num_edges))
l_e = l_e.mean()
return l_e
    def give_laplacian_coordinates(self, pred, block_id):
        r''' Returns the laplacian coordinates for the predictions and given block.
        The helper matrices are used to detect neighbouring vertices and
        the number of neighbours which are relevant for the weight matrix.
        The maximal number of neighbours is 8, and if a vertex has less,
        the index -1 is used which points to the added zero vertex.
        Arguments:
            pred (tensor): vertex predictions
            block_id (int): deformation block id (1,2 or 3)
        '''
        batch_size = pred.shape[0]
        num_vert = pred.shape[1]
        # Add "zero vertex" for vertices with less than 8 neighbours
        vertex = torch.cat(
            [pred, torch.zeros(batch_size, 1, 3).to(self.device)], 1)
        assert(vertex.shape == (batch_size, num_vert+1, 3))
        # Get 8 neighbours for each vertex; if a vertex has less, the
        # remaining indices are -1
        indices = torch.from_numpy(
            self.lape_idx[block_id-1][:, :8]).to(self.device)
        assert(indices.shape == (num_vert, 8))
        # Last column of lape_idx stores the neighbour count; its
        # reciprocal is the uniform averaging weight
        weights = torch.from_numpy(
            self.lape_idx[block_id-1][:, -1]).float().to(self.device)
        weights = torch.reciprocal(weights)
        weights = weights.view(-1, 1).expand(-1, 3)
        # Index -1 selects the appended zero vertex
        vertex_select = vertex[:, indices.long(), :]
        assert(vertex_select.shape == (batch_size, num_vert, 8, 3))
        laplace = vertex_select.sum(dim=2)  # Add neighbours
        laplace = torch.mul(laplace, weights)  # Multiply by weights
        laplace = torch.sub(pred, laplace)  # Subtract from prediction
        assert(laplace.shape == (batch_size, num_vert, 3))
        return laplace
def laplacian_loss(self, pred1, pred2, block_id):
r''' Returns the Laplacian loss and move loss for given block.
Arguments:
pred (tensor): vertex predictions from previous block
pred (tensor): vertex predictions from current block
block_id (int): deformation block id (1,2 or 3)
'''
lap1 = self.give_laplacian_coordinates(pred1, block_id)
lap2 = self.give_laplacian_coordinates(pred2, block_id)
l_l = torch.sub(lap1, lap2).pow(2).sum(dim=2).mean()
# move loss from the authors' implementation
move_loss = 0
if block_id != 1:
move_loss = torch.sub(pred1, pred2).pow(2).sum(dim=2).mean()
return l_l, move_loss
def normal_loss(self, pred, normals, id1, block_id):
    r''' Returns the normal loss.

    First, the GT normals are selected which are the nearest
    neighbours for each predicted vertex. Next, for each edge in the
    mesh, the first node is detected and the relevant normal as well
    as the respective edge is selected. Finally, the dot product
    between these two vectors (normalised) are calculated and the
    absolute value is taken.

    Arguments:
        pred (tensor): vertex predictions
        normals (tensor): normals of the ground truth point cloud of shape
            (batch_size, num_gt_points, 3)
        id1 (tensor): Chamfer distance IDs for predicted to GT pc of shape
            (batch_size, num_pred_pts) with values between (0,
            num_gt_points)
        block_id (int): deformation block id (1,2 or 3)

    Returns:
        tensor: scalar normal loss
    '''
    batch_size = pred.shape[0]
    n_verts = id1.shape[1]
    assert(pred.shape[1] == n_verts)
    # Row indices for advanced indexing: one row per batch element.
    help_ind = torch.arange(batch_size).view(-1, 1)
    # First node of every edge in this block's template mesh.
    nod1_ind = self.edges[block_id-1][:, 0]
    num_edges = nod1_ind.shape[0]
    # Edge vectors of the predicted mesh (helper defined on this class).
    edges = self.give_edges(pred, block_id)
    # Pick, for every predicted vertex, the GT normal of its nearest
    # GT point (given by the Chamfer IDs).
    normals = normals[help_ind, id1.long(), :]
    assert(normals.size() == (batch_size, n_verts, 3))
    # Normal of the first node of each edge.
    normals_nod1 = torch.index_select(normals, 1, nod1_ind.long())
    assert(normals_nod1.shape == (batch_size, num_edges, 3))
    normals_nod1 = F.normalize(normals_nod1, dim=2)
    edges = F.normalize(edges, dim=2)
    # |<n, e>| should be 0 when edges lie in the surface's tangent plane.
    res = torch.mul(normals_nod1, edges).sum(dim=2).abs().mean()
    return res
def compute_loss(self, outputs1, outputs2, gt_points, normals, img=None):
    r''' Returns the complete loss.

    The full loss is adopted from the authors' implementation and
    consists of
        a.) Chamfer distance loss
        b.) edge length loss
        c.) normal loss
        d.) Laplacian loss
        e.) move loss
    Each term is weighted by the corresponding self.param_* weight.

    Arguments:
        outputs1 (list): first outputs of model (vertex predictions of the
            three deformation blocks)
        outputs2 (list): second outputs of model (unpooled predictions of
            the previous block, used as reference for Laplacian/move loss)
        gt_points (tensor): ground truth point cloud locations
        normals (tensor): normals of the ground truth point cloud
        img (tensor): input images (unused here; kept for interface
            compatibility)

    Returns:
        tensor: scalar total loss
    '''
    pred_vertices_1, pred_vertices_2, pred_vertices_3 = outputs1
    # Chamfer Distance Loss (both directions + nearest-neighbour IDs,
    # reused below for the normal loss)
    lc11, lc12, id11, id12 = chamfer_distance(
        pred_vertices_1, gt_points, give_id=True)
    lc21, lc22, id21, id22 = chamfer_distance(
        pred_vertices_2, gt_points, give_id=True)
    lc31, lc32, id31, id32 = chamfer_distance(
        pred_vertices_3, gt_points, give_id=True)
    # l_c: pred -> GT direction; l_c2: GT -> pred direction.
    l_c = lc11.mean() + lc21.mean() + lc31.mean()
    l_c2 = lc12.mean() + lc22.mean() + lc32.mean()
    l_c = (l_c2 + self.param_chamfer_rel * l_c) * self.param_chamfer_w
    # Edge Length Loss (regularises edge lengths per block)
    l_e = (self.edge_length_loss(pred_vertices_1, 1) +
           self.edge_length_loss(pred_vertices_2, 2) +
           self.edge_length_loss(pred_vertices_3, 3)) * self.param_edge
    # Normal Loss (uses the pred->GT Chamfer IDs from above)
    l_n = (
        self.normal_loss(pred_vertices_1, normals, id11, 1) +
        self.normal_loss(pred_vertices_2, normals, id21, 2) +
        self.normal_loss(pred_vertices_3, normals, id31, 3)) * self.param_n
    # Laplacian Loss and move loss (move loss is 0 for block 1)
    l_l1, _ = self.laplacian_loss(pred_vertices_1, outputs2[0], block_id=1)
    l_l2, move_loss1 = self.laplacian_loss(
        pred_vertices_2, outputs2[1], block_id=2)
    l_l3, move_loss2 = self.laplacian_loss(
        pred_vertices_3, outputs2[2], block_id=3)
    l_l = (self.param_lap_rel*l_l1 + l_l2 + l_l3) * self.param_lap
    l_m = (move_loss1 + move_loss2) * self.param_move
    # Final loss
    loss = l_c + l_e + l_n + l_l + l_m
    return loss
def visualize(self, data):
    r''' Visualises the GT point cloud and predicted vertices (as a point cloud).

    Saves the input images plus one predicted and one ground-truth point
    cloud rendering per batch element into self.vis_dir.

    Arguments:
        data (dict): batch with 'pointcloud', 'inputs' and camera entries
    '''
    points_gt = data.get('pointcloud').to(self.device)
    img = data.get('inputs').to(self.device)
    # Camera matrices: Rt (world->camera) and K (projection).
    camera_args = common.get_camera_args(
        data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
    world_mat, camera_mat = camera_args['Rt'], camera_args['K']
    if not os.path.isdir(self.vis_dir):
        os.mkdir(self.vis_dir)
    with torch.no_grad():
        outputs1, outputs2 = self.model(img, camera_mat)
    pred_vertices_1, pred_vertices_2, pred_vertices_3 = outputs1
    # Predictions live in camera coordinates; map them back to world
    # coordinates so they are comparable with the GT point cloud.
    points_out = common.transform_points_back(pred_vertices_3, world_mat)
    points_out = points_out.cpu().numpy()
    input_img_path = os.path.join(self.vis_dir, 'input.png')
    save_image(img.cpu(), input_img_path, nrow=4)
    points_gt = points_gt.cpu().numpy()
    batch_size = img.size(0)
    for i in range(batch_size):
        out_file = os.path.join(self.vis_dir, '%03d.png' % i)
        out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
        vis.visualize_pointcloud(points_out[i], out_file=out_file)
        vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
def eval_step(self, data):
    r''' Performs an evaluation step.

    Runs the model without gradients and reports the total training loss
    plus the individual loss terms evaluated on the final (third) block's
    predictions.

    Arguments:
        data (dict): batch with 'pointcloud', 'inputs',
            'pointcloud.normals' and camera entries

    Returns:
        dict: scalar metrics ('loss', 'chamfer', 'edge', 'normal',
            'laplace', 'move')
    '''
    self.model.eval()
    points = data.get('pointcloud').to(self.device)
    img = data.get('inputs').to(self.device)
    normals = data.get('pointcloud.normals').to(self.device)
    # Transform GT points to camera coordinates
    camera_args = common.get_camera_args(
        data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
    world_mat, camera_mat = camera_args['Rt'], camera_args['K']
    points_transformed = common.transform_points(points, world_mat)
    # Transform GT normals to camera coordinates (rotation part only —
    # normals are directions, so the translation column is dropped)
    world_normal_mat = world_mat[:, :, :3]
    normals = common.transform_points(normals, world_normal_mat)
    with torch.no_grad():
        outputs1, outputs2 = self.model(img, camera_mat)
    pred_vertices_1, pred_vertices_2, pred_vertices_3 = outputs1
    loss = self.compute_loss(
        outputs1, outputs2, points_transformed, normals, img)
    # Per-term metrics for the final block only.
    lc1, lc2, id31, id32 = chamfer_distance(
        pred_vertices_3, points_transformed, give_id=True)
    l_c = (lc1+lc2).mean()
    l_e = self.edge_length_loss(pred_vertices_3, 3)
    l_n = self.normal_loss(pred_vertices_3, normals, id31, 3)
    l_l, move_loss = self.laplacian_loss(
        pred_vertices_3, outputs2[2], block_id=3)
    eval_dict = {
        'loss': loss.item(),
        'chamfer': l_c.item(),
        'edge': l_e.item(),
        'normal': l_n.item(),
        'laplace': l_l.item(),
        'move': move_loss.item()
    }
    return eval_dict
| 14,112 | 40.026163 | 85 | py |
Im2Hands | Im2Hands-main/im2mesh/pix2mesh/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import im2mesh.common as common
from matplotlib import pyplot as plt
class GraphUnpooling(nn.Module):
    ''' Graph Unpooling Layer.

    Adds one new vertex per row of the helper index array; each new
    vertex's feature vector is the mean of the two parent vertices
    named by that row. The new vertices are appended after the
    existing ones.
    '''

    def __init__(self, pool_idx_array):
        ''' Initialisation

        Args:
            pool_idx_array (tensor): pairs of vertex IDs that should be
                combined to new vertices, shape (num_new_vertices, 2)
        '''
        super(GraphUnpooling, self).__init__()
        self.pool_x1 = pool_idx_array[:, 0]
        self.pool_x2 = pool_idx_array[:, 1]

    def forward(self, x):
        # Gather both parents of every new vertex and average them.
        parents_a = x[:, self.pool_x1.long(), :]
        parents_b = x[:, self.pool_x2.long(), :]
        midpoints = (parents_a + parents_b) * 0.5
        expected_shape = (x.shape[0], len(self.pool_x1), x.shape[2])
        assert(midpoints.shape == expected_shape)
        # Keep the original vertices and append the new ones.
        return torch.cat([x, midpoints], dim=1)
class GraphConvolution(nn.Module):
    ''' Graph Convolution Layer.

    Performs a Graph Convolution on the input vertices: one linear map
    applied to each vertex itself (self.lin) plus one linear map whose
    result is aggregated over neighbours via the (weighted adjacency)
    support matrix (self.lin2). The neighbourhood structure comes from
    the precomputed helper file.
    '''

    def __init__(self, support_array, input_dim=963,
                 output_dim=192, bias=True, sparse=False):
        ''' Intialisation

        Args:
            support_array (tensor): sparse weighted adjacency matrix
                with non-zero entries on the diagonal
            input_dim (int): dimension of input feature vector
            output_dim (int): dimension of output feature vector
            bias (bool): whether a bias weight should be used
                NOTE(review): the weight init below always touches
                self.lin.bias, so bias=False would fail — confirm callers
                always use bias=True.
            sparse (bool): if sparse matmul
        '''
        super(GraphConvolution, self).__init__()
        self.support_array = support_array.float()
        self.sparse = sparse
        self.lin = nn.Linear(input_dim, output_dim, bias=bias)
        self.lin2 = nn.Linear(input_dim, output_dim, bias=False)
        # Assume batch_size = 12
        # HACK: the sparse path pre-builds a block-diagonal support matrix
        # for a fixed batch size of 12; any other batch size falls back to
        # the dense path in forward(). Uses the private sparse-tensor API
        # (_indices/_values).
        # NOTE(review): ind.get_device() raises for CPU tensors — the
        # sparse path presumably assumes CUDA; confirm.
        if sparse:
            dim_full = self.support_array.size()[0]
            ind = self.support_array._indices()
            # Offset the indices by i*dim_full for every batch element to
            # form the block-diagonal layout.
            ind_ex = torch.tensor(
                [ind.cpu().numpy()+(i*dim_full) for i in range(12)]).long().to(
                ind.get_device())
            ind_ex = ind_ex.transpose(0, 1)
            ind_ex = ind_ex.contiguous().view(2, -1)
            val = self.support_array._values()
            val_ex = val.repeat(12)
            dim_ex = torch.Size(torch.tensor(self.support_array.size())*12)
            self.exp_array = torch.sparse.FloatTensor(ind_ex, val_ex, dim_ex)
        # Dense fallback used whenever the sparse fast path does not apply.
        self.dense_support = self.support_array.to_dense().float()
        # Initialise Weights
        torch.nn.init.xavier_uniform_(self.lin.weight)
        torch.nn.init.constant_(self.lin.bias, 0)
        torch.nn.init.xavier_uniform_(self.lin2.weight)

    def forward(self, x):
        # Per-vertex transform (kept as-is, not aggregated).
        x_1 = self.lin(x)
        batch_size = x_1.shape[0]
        num_p = x_1.shape[1]
        f_dim = x_1.shape[2]
        # Neighbour transform, to be aggregated via the support matrix.
        x_2 = self.lin2(x)
        if self.sparse and batch_size == 12:
            # Flatten the batch and multiply by the precomputed
            # block-diagonal sparse support matrix in one go.
            x_2 = x_2.view(-1, x_2.shape[2])
            res = torch.matmul(self.exp_array, x_2)
            res = res.view(batch_size, num_p, f_dim)
        else:
            # Dense matmul broadcasts over the batch dimension.
            res = torch.matmul(self.dense_support, x_2)
        out = torch.add(x_1, res)
        return out
class GraphProjection(nn.Module):
    """Graph Projection layer.

    Projects the predicted point cloud to the respective 2D coordinates
    given the camera and world matrix, and returns the concatenated
    features from the respective locations for each point (the 3D
    coordinates themselves plus bilinearly sampled features from every
    supplied feature map).
    """

    def __init__(self):
        super(GraphProjection, self).__init__()

    def visualise_projection(self, points_img, img, output_file='./out.png'):
        ''' Visualises the vertex projection to the image plane.

        Args:
            points_img (numpy array): points projected to the image plane,
                in normalized [-1, 1] coordinates (rescaled to pixels here)
            img (numpy array): image (C, H, W)
            output_file (string): where the result should be saved
        '''
        plt.imshow(img.transpose(1, 2, 0))
        plt.plot(
            (points_img[:, 0] + 1)*img.shape[1]/2,
            (points_img[:, 1] + 1) * img.shape[2]/2, 'x')
        plt.savefig(output_file)

    def forward(self, x, fm, camera_mat, img=None, visualise=False):
        ''' Performs a forward pass through the GP layer.

        Args:
            x (tensor): coordinates of shape (batch_size, num_vertices, 3)
            fm (list): list of feature maps from where the image features
                should be pooled
            camera_mat (tensor): camera matrices for transformation to 2D
                image plane
            img (tensor): images (just fo visualisation purposes)
            visualise (bool): whether to save a projection visualisation

        Returns:
            tensor: per-vertex features — 3D coordinates concatenated with
                the sampled features of all feature maps
        '''
        points_img = common.project_to_camera(x, camera_mat)
        # Insert a singleton dimension so grid_sample treats the points as
        # a (N, 1, num_vertices, 2) sampling grid.
        points_img = points_img.unsqueeze(1)
        feats = []
        feats.append(x)
        for fmap in fm:
            # bilinearly interpolate to get the corresponding features
            # NOTE(review): relies on grid_sample's default align_corners
            # behaviour of the installed torch version.
            feat_pts = F.grid_sample(fmap, points_img)
            feat_pts = feat_pts.squeeze(2)
            feats.append(feat_pts.transpose(1, 2))
        # Just for visualisation purposes
        if visualise and (img is not None):
            self.visualise_projection(
                points_img.squeeze(1)[0].detach().cpu().numpy(),
                img[0].cpu().numpy())
        outputs = torch.cat([proj for proj in feats], dim=2)
        return outputs
| 5,588 | 35.529412 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/pix2mesh/generation.py | import torch
import trimesh
import im2mesh.common as common
class Generator3D(object):
    ''' Mesh Generator Class for the Pixel2Mesh model.

    A forward pass is made with the image and camera matrices to obtain the
    predicted vertex locations for the mesh. Subsequently, the faces of the
    base mesh of an ellipsoid are used together with the predicted vertices to
    obtain the final mesh.
    '''

    def __init__(self, model, base_mesh, device=None):
        ''' Initialisation

        Args:
            model (PyTorch model): the Pixel2Mesh model
            base_mesh (numpy array): the base ellipsoid face list provided
                by the authors (OBJ-style rows: 'f' marker + 1-based
                vertex indices)
            device (PyTorch device): the PyTorch device
        '''
        self.model = model.to(device)
        self.device = device
        self.base_mesh = base_mesh

    def generate_mesh(self, data, fix_normals=False):
        ''' Generates a mesh.

        Arguments:
            data (dict): input batch with 'inputs' image and camera entries
            fix_normals (boolean): if normals should be fixed

        Returns:
            trimesh.Trimesh: predicted mesh in world coordinates
        '''
        img = data.get('inputs').to(self.device)
        camera_args = common.get_camera_args(
            data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
        world_mat, camera_mat = camera_args['Rt'], camera_args['K']
        with torch.no_grad():
            outputs1, outputs2 = self.model(img, camera_mat)
        # Only the final (third) deformation block's vertices are used.
        out_1, out_2, out_3 = outputs1
        # Predictions are in camera coordinates; map back to world space.
        transformed_pred = common.transform_points_back(out_3, world_mat)
        vertices = transformed_pred.squeeze().cpu().numpy()
        faces = self.base_mesh[:, 1:]  # remove the f's in the first column
        faces = faces.astype(int) - 1  # To adjust indices to trimesh notation
        mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
        if fix_normals:
            # Fix normals due to wrong base ellipsoid
            trimesh.repair.fix_normals(mesh)
        return mesh

    def generate_pointcloud(self, data):
        ''' Generates a pointcloud by only returning the vertices.

        Arguments:
            data (dict): input batch with 'inputs' image and camera entries

        Returns:
            numpy array: predicted vertex positions in world coordinates
        '''
        img = data.get('inputs').to(self.device)
        camera_args = common.get_camera_args(
            data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
        world_mat, camera_mat = camera_args['Rt'], camera_args['K']
        with torch.no_grad():
            outputs1, _ = self.model(img, camera_mat)
        _, _, out_3 = outputs1
        transformed_pred = common.transform_points_back(out_3, world_mat)
        pc_out = transformed_pred.squeeze().cpu().numpy()
        return pc_out
| 2,604 | 35.180556 | 78 | py |
Im2Hands | Im2Hands-main/im2mesh/pix2mesh/models/decoder.py | import torch
import torch.nn as nn
from im2mesh.pix2mesh.layers import (
GraphConvolution, GraphProjection, GraphUnpooling)
class Decoder(nn.Module):
    r""" Decoder class for Pixel2Mesh Model.

    Three cascaded deformation blocks, each consisting of a graph
    projection (pooling image features at the projected vertex
    positions), a stack of graph convolutions with residual
    connections, and — between blocks — a graph unpooling step that
    subdivides the mesh (156 -> 618 -> 2466 vertices).

    Args:
        ellipsoid (list): list of helper matrices for the graph convolution
            and pooling layer
        device (PyTorch device): PyTorch device
        hidden_dim (int): The hidden dimension of the graph convolution layers
        feat_dim (int): The dimension of the feature vector obtained from the
            graph projection layer
        coor_dim (int): Output point dimension (usually 3)
        adjust_ellipsoid (bool): whether the ellipsoid should be adjusted by
            inverting the Pixel2Mesh authors' transformation
    """

    def __init__(self, ellipsoid, device=None, hidden_dim=192,
                 feat_dim=1280, coor_dim=3, adjust_ellipsoid=False):
        super(Decoder, self).__init__()
        # Save necessary helper matrices in respective variables
        self.initial_coordinates = torch.tensor(ellipsoid[0]).to(device)
        if adjust_ellipsoid:
            ''' This is the inverse of the operation the Pixel2mesh authors'
            performed to original CAT model; it ensures that the ellipsoid
            has the same size and scale in the not-transformed coordinate
            system we are using. '''
            print("Adjusting ellipsoid.")
            self.initial_coordinates = self.initial_coordinates / 0.57
            self.initial_coordinates[:, 1] = -self.initial_coordinates[:, 1]
            self.initial_coordinates[:, 2] = -self.initial_coordinates[:, 2]

        self.pool_idx_1 = torch.tensor(ellipsoid[4][0]).to(
            device)  # IDs for the first unpooling operation
        self.pool_idx_2 = torch.tensor(ellipsoid[4][1]).to(
            device)  # IDs for the second unpooling operation

        # sparse support matrices for graph convolution; the indices need to
        # be transposed to match pytorch standards
        # (each helper entry is a COO triple: indices, values, shape)
        ell_1 = ellipsoid[1][1]
        e1, e2, e3 = torch.tensor(ell_1[0]).transpose_(
            0, 1), torch.tensor(ell_1[1]), torch.tensor(ell_1[2])
        self.support_1 = torch.sparse.FloatTensor(
            e1.long(), e2, torch.Size(e3)).to(device)
        ell_2 = ellipsoid[2][1]
        e1, e2, e3 = torch.tensor(ell_2[0]).transpose_(
            0, 1), torch.tensor(ell_2[1]), torch.tensor(ell_2[2])
        self.support_2 = torch.sparse.FloatTensor(
            e1.long(), e2, torch.Size(e3)).to(device)
        ell_3 = ellipsoid[3][1]
        e1, e2, e3 = torch.tensor(ell_3[0]).transpose_(
            0, 1), torch.tensor(ell_3[1]), torch.tensor(ell_3[2])
        self.support_3 = torch.sparse.FloatTensor(
            e1.long(), e2, torch.Size(e3)).to(device)

        # The respective layers of the model; Note that some layers with NO
        # weights are reused to save memory
        actvn = nn.ReLU()
        self.gp = GraphProjection()
        # Block 1: 12 residual graph convolutions on the 156-vertex mesh.
        self.gc1 = nn.Sequential(GraphConvolution(
            self.support_1, input_dim=feat_dim, output_dim=hidden_dim), actvn)
        self.gc2 = []
        for _ in range(12):
            self.gc2.append(nn.Sequential(GraphConvolution(
                self.support_1, input_dim=hidden_dim, output_dim=hidden_dim),
                actvn))
        self.gc2 = nn.ModuleList(self.gc2)
        self.gc3 = GraphConvolution(
            self.support_1, input_dim=hidden_dim, output_dim=coor_dim)
        # Unpool 156 -> 618 vertices, then block 2.
        self.gup1 = GraphUnpooling(self.pool_idx_1.long())
        self.gc4 = nn.Sequential(GraphConvolution(
            self.support_2, input_dim=feat_dim+hidden_dim,
            output_dim=hidden_dim), actvn)
        self.gc5 = []
        for _ in range(12):
            self.gc5.append(nn.Sequential(GraphConvolution(
                self.support_2, input_dim=hidden_dim, output_dim=hidden_dim),
                actvn))
        self.gc5 = nn.ModuleList(self.gc5)
        self.gc6 = GraphConvolution(
            self.support_2, input_dim=hidden_dim, output_dim=coor_dim)
        # Unpool 618 -> 2466 vertices, then block 3.
        self.gup2 = GraphUnpooling(self.pool_idx_2.long())
        self.gc7 = nn.Sequential(GraphConvolution(
            self.support_3, input_dim=feat_dim+hidden_dim,
            output_dim=hidden_dim), actvn)
        self.gc8 = []
        for _ in range(13):
            self.gc8.append(nn.Sequential(GraphConvolution(
                self.support_3, input_dim=hidden_dim, output_dim=hidden_dim),
                actvn))
        self.gc8 = nn.ModuleList(self.gc8)
        self.gc9 = GraphConvolution(
            self.support_3, input_dim=hidden_dim, output_dim=coor_dim)

    def forward(self, x, fm, camera_mat):
        """ Makes a forward pass with the given input through the network.

        Arguments:
            x (tensor): input tensors (e.g. images)
            fm (tensor): feature maps from the conditioned network
            camera_mat (tensor): camera matrices for projection to image plane

        Returns:
            tuple: (outputs, outputs_2) where outputs are the vertex
                predictions of the three blocks and outputs_2 the
                corresponding reference meshes (initial ellipsoid and the
                unpooled predictions of the previous block)
        """
        batch_size = x.shape[0]
        # List of initial 3D coordinates (first item) and outputs of the layers
        out = list()
        initial_coordinates_expanded = self.initial_coordinates.expand(
            batch_size, -1, -1)
        out.append(initial_coordinates_expanded)
        # #######################
        # First Projection Block
        # Layer 0: 156 x feat_dim
        out.append(self.gp(initial_coordinates_expanded, fm, camera_mat))
        out.append(self.gc1(out[-1]))  # Layer 1: 156 x hidden_dim
        for i in range(0, 12):  # GraphConvs from and to 156 x hidden_dim
            val = self.gc2[i](out[-1])
            if (i % 2) == 1:
                # Add previous output (Restnet style)
                val = torch.add(val, out[-2]) * 0.5
            out.append(val)
        # Layer 14: Out of dim 156x3, will be used as outputs_2[1]
        out.append(self.gc3(out[-1]))
        # #######################
        # Second Projection Block
        # Layer 15: 156 x (hidden_dim + feat_dim)
        # (pooled image features concatenated with the last hidden state)
        v = self.gp(out[-1], fm, camera_mat)
        v = torch.cat([v, out[-2]], dim=2)
        out.append(v)
        # Layer 16: 618x (hidden_dim + feat_dim)
        out.append(self.gup1(out[-1]))
        out.append(self.gc4(out[-1]))  # Layer 17: 618 x hidden_dim
        for i in range(0, 12):  # GraphConvs from and to 618 x hidden_dim
            val = self.gc5[i](out[-1])
            if (i % 2) == 1:
                # Add previous output (Restnet style)
                val = torch.add(val, out[-2]) * 0.5
            out.append(val)
        # Layer 30: 618 x 3, will be used as outputs_2[2]
        out.append(self.gc6(out[-1]))
        # #######################
        # Third Projection Block
        # Layer 31: 618 x hidden_dim + feat_dim
        v = self.gp(out[-1], fm, camera_mat)  # 618 x feat_dim
        v = torch.cat([v, out[-2]], dim=2)
        out.append(v)
        # Layer 32: 2466 x hidden_dim + feat_dim
        out.append(self.gup2(out[-1]))
        out.append(self.gc7(out[-1]))  # Layer 33: 2466 x hidden_dim
        for i in range(0, 13):  # GraphConvs from and to 2466 x hidden_dim
            val = self.gc8[i](out[-1])
            if i % 2 == 1:
                # Add previous output (Restnet style)
                val = torch.add(val, out[-2]) * 0.5
            out.append(val)
        out.append(self.gc9(out[-1]))  # Layer 47: 2466 x 3
        # 156 x hidden_dim, 618 x hidden_dim, 2466 x hidden_dim
        outputs = (out[15], out[31], out[-1])
        outputs_2 = (initial_coordinates_expanded,
                     self.gup1(out[15]), self.gup2(out[31]))
        return outputs, outputs_2
| 7,640 | 43.684211 | 79 | py |
Im2Hands | Im2Hands-main/im2mesh/pix2mesh/models/__init__.py | import torch.nn as nn
from im2mesh.pix2mesh.models.decoder import Decoder
# Maps decoder names (as used in config files) to decoder classes.
decoder_dict = {
    'simple': Decoder,
}
class Pix2Mesh(nn.Module):
    ''' Pixel2Mesh model.

    The input image is first passed through the conditional network
    (encoder) to extract feature maps; the feature maps together with
    the camera matrices are then handed to the decoder, which predicts
    the vertex locations of the output mesh.
    '''

    def __init__(self, decoder, encoder):
        ''' Initialisation.

        Args:
            decoder (PyTorch model): The decoder network
            encoder (PyTorch model): The conditional network used to obtain
                feature maps
        '''
        super().__init__()
        self.decoder = decoder
        self.encoder = encoder

    def forward(self, x, camera_mat):
        # Extract image feature maps, then decode vertex positions.
        feature_maps = self.encoder(x)
        return self.decoder(x, feature_maps, camera_mat)
| 904 | 25.617647 | 77 | py |
Im2Hands | Im2Hands-main/im2mesh/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from torchvision.utils import save_image
import im2mesh.common as common
def visualize_data(data, data_type, out_file):
    r''' Visualizes the data with regard to its type.

    Dispatches to the image writer or the voxel/point-cloud plotting
    helpers; 'idx' and None are silently ignored, anything else raises.

    Args:
        data (tensor): batch of data
        data_type (string): data type (img, voxels or pointcloud)
        out_file (string): output file
    '''
    if data_type == 'img':
        # save_image expects a batch dimension.
        if data.dim() == 3:
            data = data.unsqueeze(0)
        save_image(data, out_file, nrow=4)
    elif data_type == 'voxels':
        visualize_voxels(data, out_file=out_file)
    elif data_type == 'pointcloud':
        visualize_pointcloud(data, out_file=out_file)
    elif data_type is None or data_type == 'idx':
        pass
    else:
        raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
    r''' Visualizes voxel data.

    Args:
        voxels (tensor): voxel data
        out_file (string): output file
        show (bool): whether the plot should be shown
    '''
    # Use numpy
    voxels = np.asarray(voxels)
    # Create plot
    fig = plt.figure()
    # fig.gca(projection=...) was deprecated and removed in matplotlib 3.6;
    # add_subplot(projection='3d') is the supported way to create a 3D axes
    # and also works on older matplotlib versions.
    ax = fig.add_subplot(projection='3d')
    # Reorder axes so the plot matches the dataset's axis convention
    # (the data's z is drawn on matplotlib's x-axis, etc. — see labels).
    voxels = voxels.transpose(2, 0, 1)
    ax.voxels(voxels, edgecolor='k')
    ax.set_xlabel('Z')
    ax.set_ylabel('X')
    ax.set_zlabel('Y')
    ax.view_init(elev=30, azim=45)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
    plt.close(fig)
def visualize_pointcloud(points, normals=None,
                         out_file=None, show=False):
    r''' Visualizes point cloud data.

    Args:
        points (tensor): point data
        normals (tensor): normal data (if existing)
        out_file (string): output file
        show (bool): whether the plot should be shown
    '''
    # Use numpy
    points = np.asarray(points)
    # Create plot
    fig = plt.figure()
    # fig.gca(projection=...) was deprecated and removed in matplotlib 3.6;
    # add_subplot(projection='3d') is the supported way to create a 3D axes
    # and also works on older matplotlib versions.
    ax = fig.add_subplot(projection='3d')
    # Axes reordered (z, x, y) to match the dataset's axis convention.
    ax.scatter(points[:, 2], points[:, 0], points[:, 1])
    if normals is not None:
        ax.quiver(
            points[:, 2], points[:, 0], points[:, 1],
            normals[:, 2], normals[:, 0], normals[:, 1],
            length=0.1, color='k'
        )
    ax.set_xlabel('Z')
    ax.set_ylabel('X')
    ax.set_zlabel('Y')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(-0.5, 0.5)
    ax.view_init(elev=30, azim=45)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
    plt.close(fig)
def visualise_projection(
        self, points, world_mat, camera_mat, img, output_file='out.png'):
    r''' Visualizes the transformation and projection to image plane.

    The first points of the batch are transformed and projected to the
    respective image. After performing the relevant transformations, the
    visualization is saved in the provided output_file path.

    NOTE(review): this is a module-level function that takes an unused
    ``self`` first argument — presumably copied out of a class; callers
    must pass a placeholder (e.g. None) for it.

    Arguments:
        points (tensor): batch of point cloud points
        world_mat (tensor): batch of matrices to rotate pc to camera-based
            coordinates
        camera_mat (tensor): batch of camera matrices to project to 2D image
            plane
        img (tensor): tensor of batch GT image files
        output_file (string): where the output should be saved
    '''
    points_transformed = common.transform_points(points, world_mat)
    points_img = common.project_to_camera(points_transformed, camera_mat)
    # Only the first element of the batch is visualised.
    pimg2 = points_img[0].detach().cpu().numpy()
    image = img[0].cpu().numpy()
    plt.imshow(image.transpose(1, 2, 0))
    # Projected coordinates are in [-1, 1]; rescale to pixel coordinates.
    plt.plot(
        (pimg2[:, 0] + 1)*image.shape[1]/2,
        (pimg2[:, 1] + 1) * image.shape[2]/2, 'x')
    plt.savefig(output_file)
| 3,747 | 30.762712 | 76 | py |
Im2Hands | Im2Hands-main/im2mesh/data/core.py | import os
import logging
from torch.utils import data
import numpy as np
import yaml
logger = logging.getLogger(__name__)
# Fields
class Field(object):
    ''' Data fields class.

    Abstract interface: concrete fields implement load() to read one data
    point from disk and check_complete() to verify that all files a field
    needs are present for a model.
    '''

    def load(self, data_path, idx, category):
        ''' Loads a data point.

        Args:
            data_path (str): path to data file
            idx (int): index of data point
            category (int): index of category

        Raises:
            NotImplementedError: always; subclasses must override.
        '''
        raise NotImplementedError

    def check_complete(self, files):
        ''' Checks if set is complete.

        Args:
            files: files

        Raises:
            NotImplementedError: always; subclasses must override.
        '''
        raise NotImplementedError
class Shapes3dDataset(data.Dataset):
    ''' 3D Shapes dataset class.

    Enumerates models listed in the per-category split files
    (``<category>/<split>.lst``) and delegates the actual data loading of
    each item to the provided fields.
    '''

    def __init__(self, dataset_folder, fields, split=None,
                 categories=None, no_except=True, transform=None):
        ''' Initialization of the the 3D shape dataset.

        Args:
            dataset_folder (str): dataset folder
            fields (dict): dictionary of fields
            split (str): which split is used
            categories (list): list of categories to use
            no_except (bool): no exception
            transform (callable): transformation applied to data points
        '''
        # Attributes
        self.dataset_folder = dataset_folder
        self.fields = fields
        self.no_except = no_except
        self.transform = transform

        # If categories is None, use all subfolders
        if categories is None:
            categories = os.listdir(dataset_folder)
            categories = [c for c in categories
                          if os.path.isdir(os.path.join(dataset_folder, c))]

        # Read metadata file
        metadata_file = os.path.join(dataset_folder, 'metadata.yaml')

        if os.path.exists(metadata_file):
            with open(metadata_file, 'r') as f:
                # safe_load: yaml.load() without an explicit Loader is
                # rejected by PyYAML >= 6 and unsafe on untrusted files.
                self.metadata = yaml.safe_load(f)
        else:
            self.metadata = {
                c: {'id': c, 'name': 'n/a'} for c in categories
            }

        # Set index
        for c_idx, c in enumerate(categories):
            self.metadata[c]['idx'] = c_idx

        # Get all models
        self.models = []
        for c_idx, c in enumerate(categories):
            subpath = os.path.join(dataset_folder, c)
            if not os.path.isdir(subpath):
                logger.warning('Category %s does not exist in dataset.' % c)
                # Skip the missing category instead of crashing when
                # opening its (non-existent) split file below.
                continue

            split_file = os.path.join(subpath, split + '.lst')
            with open(split_file, 'r') as f:
                models_c = f.read().split('\n')
            # Split files typically end with a newline; drop the resulting
            # empty entries so they do not appear as bogus models.
            models_c = [m for m in models_c if m != '']

            self.models += [
                {'category': c, 'model': m}
                for m in models_c
            ]

    def __len__(self):
        ''' Returns the length of the dataset.
        '''
        return len(self.models)

    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Args:
            idx (int): ID of data point

        Returns:
            dict: field name -> loaded data (sub-entries are flattened to
                'field.key'), or None if loading failed and no_except is set
        '''
        category = self.models[idx]['category']
        model = self.models[idx]['model']
        c_idx = self.metadata[category]['idx']

        model_path = os.path.join(self.dataset_folder, category, model)
        data = {}

        for field_name, field in self.fields.items():
            try:
                field_data = field.load(model_path, idx, c_idx)
            except Exception:
                if self.no_except:
                    # Best-effort loading: log and drop the broken item
                    # (collate_remove_none filters the None out later).
                    logger.warning(
                        'Error occured when loading field %s of model %s'
                        % (field_name, model)
                    )
                    return None
                else:
                    raise

            if isinstance(field_data, dict):
                for k, v in field_data.items():
                    if k is None:
                        data[field_name] = v
                    else:
                        data['%s.%s' % (field_name, k)] = v
            else:
                data[field_name] = field_data

        if self.transform is not None:
            data = self.transform(data)

        return data

    def get_model_dict(self, idx):
        ''' Returns the {'category', 'model'} record for the given index. '''
        return self.models[idx]

    def test_model_complete(self, category, model):
        ''' Tests if model is complete.

        Args:
            category (str): category name
            model (str): modelname

        Returns:
            bool: True if every field reports its files as present
        '''
        model_path = os.path.join(self.dataset_folder, category, model)
        files = os.listdir(model_path)
        for field_name, field in self.fields.items():
            if not field.check_complete(files):
                logger.warning('Field "%s" is incomplete: %s'
                               % (field_name, model_path))
                return False

        return True
def collate_remove_none(batch):
    ''' Collater that puts each data field into a tensor with outer dimension
    batch size, after dropping items that failed to load (None entries).

    Args:
        batch: batch
    '''
    valid_items = [item for item in batch if item is not None]
    return data.dataloader.default_collate(valid_items)
def worker_init_fn(worker_id):
    ''' Worker init function to ensure true randomness.

    Each dataloader worker re-seeds numpy from the OS entropy pool plus
    its worker id, so workers do not share an inherited RNG state.
    '''
    entropy = os.urandom(4)
    seed = int.from_bytes(entropy, byteorder="big")
    np.random.seed(seed + worker_id)
| 5,182 | 28.282486 | 77 | py |
Im2Hands | Im2Hands-main/im2mesh/data/real.py | import os
from PIL import Image
import numpy as np
import torch
from torch.utils import data
from torchvision import transforms
class KittiDataset(data.Dataset):
    r""" Kitti Instance dataset.

    Walks the ``cropped_images`` sub-folders once at construction time and
    serves (cropped image, index) pairs; the matching full frame from
    ``image_2`` is also loaded per item (its first 9 filename characters
    identify the frame).

    Args:
        dataset_folder (str): path to the KITTI dataset
        img_size (int): size of the cropped images
        transform (list): list of transformations applied to the images
            NOTE(review): stored but not used in __getitem__ — a fixed
            Resize(224)+ToTensor pipeline is applied instead.
        return_idx (bool): wether to return index
    """

    def __init__(self, dataset_folder, img_size=224, transform=None, return_idx=False):
        self.img_size = img_size
        self.img_path = os.path.join(dataset_folder, 'image_2')
        crop_path = os.path.join(dataset_folder, 'cropped_images')
        # Collect all crop file paths from every sub-folder.
        self.cropped_images = []
        for folder in os.listdir(crop_path):
            folder_path = os.path.join(crop_path, folder)
            for file_name in os.listdir(folder_path):
                current_file_path = os.path.join(folder_path, file_name)
                self.cropped_images.append(current_file_path)
        self.len = len(self.cropped_images)
        self.transform = transform

        self.return_idx = return_idx

    def get_model_dict(self, idx):
        # All items share the pseudo-category 'kitti'.
        model_dict = {
            'model': str(idx),
            'category': 'kitti',
        }
        return model_dict

    def get_model(self, idx):
        ''' Returns the model.

        Args:
            idx (int): ID of data point

        Returns:
            str: crop file name without extension
        '''
        f_name = os.path.basename(self.cropped_images[idx])[:-4]
        return f_name

    def __len__(self):
        ''' Returns the length of the dataset.
        '''
        return self.len

    def __getitem__(self, idx):
        ''' Returns the data point.

        Args:
            idx (int): ID of data point
        '''
        # The first 9 characters of the crop file name are the frame id
        # of the corresponding full KITTI image.
        ori_file_name = os.path.basename(self.cropped_images[idx])[:9] + '.png'
        original_img = Image.open(os.path.join(self.img_path, ori_file_name))
        cropped_img = Image.open(self.cropped_images[idx])
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor()
        ])
        original_img = transforms.ToTensor()(original_img)
        cropped_img = transform(cropped_img)

        idx = torch.tensor(idx)

        data = {
            'inputs': cropped_img,
            'idx': idx,
        }
        return data
class OnlineProductDataset(data.Dataset):
    r""" Stanford Online Product Dataset.

    Reads, for every requested class, the file list ``<class>_final.txt``
    from the dataset folder and serves resized image tensors.

    Args:
        dataset_folder (str): path to the dataset dataset
        img_size (int): size of the cropped images
        classes (list): list of classes
        max_number_imgs (int): maximum number of images per class
        return_idx (bool): wether to return index
        return_category (bool): wether to return category
    """

    def __init__(self, dataset_folder, img_size=224, classes=['chair'],
                 max_number_imgs=1000, return_idx=False, return_category=False):

        self.img_size = img_size
        self.dataset_folder = dataset_folder
        self.transform = transforms.Compose([
            transforms.Resize((img_size, img_size)),
            transforms.ToTensor()
        ])

        self.class_id = {}
        self.metadata = []
        for i, cl in enumerate(classes):
            self.metadata.append({'name': cl})
            self.class_id[cl] = i
            # dtype=str: np.str was only a deprecated alias of the builtin
            # str and was removed in NumPy 1.24 (AttributeError there).
            cl_names = np.loadtxt(
                os.path.join(dataset_folder, cl+'_final.txt'), dtype=str)
            cl_names = cl_names[:max_number_imgs]
            # Pair every file name with its class name (one row per image).
            # NOTE(review): np.full_like on a fixed-width string array
            # truncates `cl` to the itemsize of cl_names — presumably the
            # class names are shorter than every file name; verify.
            att = np.vstack(
                (cl_names, np.full_like(cl_names, cl))).transpose(1, 0)
            if i > 0:
                self.file_names = np.vstack((self.file_names, att))
            else:
                self.file_names = att

        self.len = self.file_names.shape[0]
        self.return_idx = return_idx
        self.return_category = return_category

    def get_model_dict(self, idx):
        category_id = self.class_id[self.file_names[idx, 1]]

        model_dict = {
            'model': str(idx),
            'category': category_id
        }
        return model_dict

    def get_model(self, idx):
        ''' Returns the model.

        Args:
            idx (int): ID of data point

        Returns:
            str: image file name without extension
        '''
        file_name = os.path.basename(self.file_names[idx, 0])[:-4]
        return file_name

    def __len__(self):
        ''' Returns the length of the dataset.
        '''
        return self.len

    def __getitem__(self, idx):
        ''' Returns the data point.

        Args:
            idx (int): ID of data point
        '''
        f = os.path.join(
            self.dataset_folder,
            self.file_names[idx, 1]+'_final',
            self.file_names[idx, 0])
        # Paste into a fresh RGB canvas to normalise palette/greyscale
        # images to 3 channels.
        img_in = Image.open(f)
        img = Image.new("RGB", img_in.size)
        img.paste(img_in)

        idx = torch.tensor(idx)
        cl_id = torch.tensor(self.class_id[self.file_names[idx, 1]])

        if self.transform:
            img = self.transform(img)

        data = {
            'inputs': img,
        }

        if self.return_idx:
            data['idx'] = idx

        if self.return_category:
            data['category'] = cl_id

        return data
# File extensions accepted by ImageDataset when scanning a folder.
IMAGE_EXTENSIONS = (
    '.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG'
)
class ImageDataset(data.Dataset):
    r""" Cars Dataset.

    Serves every image file (see IMAGE_EXTENSIONS) found directly in the
    given folder, resized to 224x224 and converted to a tensor.

    Args:
        dataset_folder (str): path to the dataset dataset
        img_size (int): size of the cropped images
            NOTE(review): stored but the transform below hard-codes 224.
        transform (list): list of transformations applied to the data points
            NOTE(review): parameter is ignored; a fixed pipeline is built.
    """

    def __init__(self, dataset_folder, img_size=224, transform=None, return_idx=False):
        """
        Arguments:
            dataset_folder (path): path to the KITTI dataset
            img_size (int): required size of the cropped images
            return_idx (bool): wether to return index
        """
        self.img_size = img_size
        self.img_path = dataset_folder
        # Keep only files with a known image extension.
        self.file_list = os.listdir(self.img_path)
        self.file_list = [
            f for f in self.file_list
            if os.path.splitext(f)[1] in IMAGE_EXTENSIONS
        ]
        self.len = len(self.file_list)
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor()
        ])

        self.return_idx = return_idx

    def get_model(self, idx):
        ''' Returns the model.

        Args:
            idx (int): ID of data point

        Returns:
            str: image file name without extension
        '''
        f_name = os.path.basename(self.file_list[idx])
        f_name = os.path.splitext(f_name)[0]
        return f_name

    def get_model_dict(self, idx):
        f_name = os.path.basename(self.file_list[idx])
        model_dict = {
            'model': f_name
        }
        return model_dict

    def __len__(self):
        ''' Returns the length of the dataset.'''
        return self.len

    def __getitem__(self, idx):
        ''' Returns the data point.

        Args:
            idx (int): ID of data point
        '''
        f = os.path.join(self.img_path, self.file_list[idx])
        # Paste into a fresh RGB canvas to normalise palette/greyscale
        # images to 3 channels.
        img_in = Image.open(f)
        img = Image.new("RGB", img_in.size)
        img.paste(img_in)

        if self.transform:
            img = self.transform(img)

        idx = torch.tensor(idx)

        data = {
            'inputs': img,
        }

        if self.return_idx:
            data['idx'] = idx

        return data
| 7,357 | 27.51938 | 87 | py |
Im2Hands | Im2Hands-main/manopth/tensutils.py | import torch
from manopth import rodrigues_layer
def th_posemap_axisang(pose_vectors):
    """Convert batched axis-angle joint vectors to flattened rotation matrices.

    Args:
        pose_vectors: (batch, 3 * n_joints) axis-angle parameters.

    Returns:
        pose_maps: (batch, 9 * n_joints) rotations with the identity
            subtracted (pose-blendshape features).
        rot_mats: (batch, 9 * n_joints) flattened rotation matrices.
    """
    batch_size = pose_vectors.shape[0]
    num_joints = pose_vectors.shape[1] // 3
    flat_axisang = pose_vectors.contiguous().view(-1, 3)
    rot_mats = rodrigues_layer.batch_rodrigues(flat_axisang)
    rot_mats = rot_mats.view(batch_size, num_joints * 9)
    pose_maps = subtract_flat_id(rot_mats)
    return pose_maps, rot_mats
def th_with_zeros(tensor):
    """Append the homogeneous row [0, 0, 0, 1] to a batch of 3x4 matrices.

    Args:
        tensor: (batch, 3, 4) affine transforms.

    Returns:
        (batch, 4, 4) homogeneous transforms.
    """
    n = tensor.shape[0]
    bottom_row = tensor.new([0.0, 0.0, 0.0, 1.0])
    bottom_row.requires_grad = False
    bottom_rows = bottom_row.view(1, 1, 4).repeat(n, 1, 1)
    return torch.cat([tensor, bottom_rows], dim=1)
def th_pack(tensor):
    """Left-pad a batch of (4, k) tensors with three zero columns.

    Args:
        tensor: (batch, 4, k) tensor (typically k == 1).

    Returns:
        (batch, 4, 3 + k) tensor whose first three columns are zero.
    """
    n = tensor.shape[0]
    zero_cols = tensor.new_zeros((n, 4, 3))
    zero_cols.requires_grad = False
    return torch.cat([zero_cols, tensor], dim=2)
def subtract_flat_id(rot_mats):
    """Subtract the identity from each flattened 3x3 rotation matrix.

    Args:
        rot_mats: (batch, 9 * n) row-major flattened rotation matrices.

    Returns:
        (batch, 9 * n) tensor where each 9-block had eye(3) subtracted.
    """
    batch_size, flat_dim = rot_mats.shape
    num_mats = flat_dim // 9
    identity = torch.eye(3, dtype=rot_mats.dtype, device=rot_mats.device)
    id_flat = identity.reshape(1, 9).repeat(batch_size, num_mats)
    return rot_mats - id_flat
def make_list(tensor):
    # type: (List[int]) -> List[int]
    # Identity helper; the `# type:` comment above is a TorchScript-style
    # signature annotation and must be preserved verbatim.
    return tensor
| 1,341 | 26.958333 | 75 | py |
Im2Hands | Im2Hands-main/manopth/manolayer_backup.py | '''
This manolayer.py is a modified version of Yana's Hasson pytorch implementation
of the MANO model (https://github.com/hassony2/manopth).
We made the following changes to the original file to get the transformation matrices
necessary for training the HALO model.
- No logical change has been made to the way the transformation matrices are computed.
- The forward pass also returns more values including
- trans_matrix: The transformation matrices for each joint
- rest_joints: The joint positions in rest pose after the blendshapes are applied
- rest_pose_vertices
- The returned joint order is according to the internal computation
(without reordering at the end). We use this ordering as it is easier to interface
with the internal computation of this code.
- The returned joints are in (m) instead of (mm)
- The no_root_rot flag is added to the forward pass to ignore the rotation parameters.
These rotations are ignored as we will compute the global transformations using
the canonicalization layer.
'''
import os
import numpy as np
import torch
from torch.nn import Module
from mano.webuser.smpl_handpca_wrapper_HAND_only import ready_arguments
from manopth import rodrigues_layer, rotproj, rot6d
from manopth.tensutils import (th_posemap_axisang, th_with_zeros, th_pack,
subtract_flat_id, make_list)
class ManoLayer(Module):
__constants__ = [
'use_pca', 'rot', 'ncomps', 'ncomps', 'kintree_parents', 'check',
'side', 'center_idx', 'joint_rot_mode'
]
def __init__(self,
center_idx=None,
flat_hand_mean=True,
ncomps=6,
side='right',
mano_root='mano/models',
use_pca=True,
root_rot_mode='axisang',
joint_rot_mode='axisang',
robust_rot=False):
"""
Args:
center_idx: index of center joint in our computations,
if -1 centers on estimate of palm as middle of base
of middle finger and wrist
flat_hand_mean: if True, (0, 0, 0, ...) pose coefficients match
flat hand, else match average hand pose
mano_root: path to MANO pkl files for left and right hand
ncomps: number of PCA components form pose space (<45)
side: 'right' or 'left'
use_pca: Use PCA decomposition for pose space.
joint_rot_mode: 'axisang' or 'rotmat', ignored if use_pca
"""
super().__init__()
self.center_idx = center_idx
self.robust_rot = robust_rot
if root_rot_mode == 'axisang':
self.rot = 3
else:
self.rot = 6
self.flat_hand_mean = flat_hand_mean
self.side = side
self.use_pca = use_pca
self.joint_rot_mode = joint_rot_mode
self.root_rot_mode = root_rot_mode
if use_pca:
self.ncomps = ncomps
else:
self.ncomps = 45
if side == 'right':
self.mano_path = os.path.join(mano_root, 'MANO_RIGHT.pkl')
elif side == 'left':
self.mano_path = os.path.join(mano_root, 'MANO_LEFT.pkl')
smpl_data = ready_arguments(self.mano_path)
hands_components = smpl_data['hands_components']
self.smpl_data = smpl_data
self.register_buffer('th_betas',
torch.Tensor(smpl_data['betas'].r.copy()).unsqueeze(0))
self.register_buffer('th_shapedirs',
torch.Tensor(smpl_data['shapedirs'].r.copy()))
self.register_buffer('th_posedirs',
torch.Tensor(smpl_data['posedirs'].r.copy()))
self.register_buffer(
'th_v_template',
torch.Tensor(smpl_data['v_template'].r.copy()).unsqueeze(0))
self.register_buffer(
'th_J_regressor',
torch.Tensor(np.array(smpl_data['J_regressor'].toarray())))
self.register_buffer('th_weights',
torch.Tensor(smpl_data['weights'].r.copy()))
self.register_buffer('th_faces',
torch.Tensor(smpl_data['f'].astype(np.int32)).long())
# Get hand mean
hands_mean = np.zeros(hands_components.shape[1]
) if flat_hand_mean else smpl_data['hands_mean']
hands_mean = hands_mean.copy()
th_hands_mean = torch.Tensor(hands_mean).unsqueeze(0)
if self.use_pca or self.joint_rot_mode == 'axisang':
# Save as axis-angle
self.register_buffer('th_hands_mean', th_hands_mean)
selected_components = hands_components[:ncomps]
self.register_buffer('th_selected_comps',
torch.Tensor(selected_components))
else:
th_hands_mean_rotmat = rodrigues_layer.batch_rodrigues(
th_hands_mean.view(15, 3)).reshape(15, 3, 3)
self.register_buffer('th_hands_mean_rotmat', th_hands_mean_rotmat)
# Kinematic chain params
self.kintree_table = smpl_data['kintree_table']
parents = list(self.kintree_table[0].tolist())
self.kintree_parents = parents
def forward(self,
th_pose_coeffs,
th_betas=torch.zeros(1),
th_trans=torch.zeros(1),
root_palm=torch.Tensor([0]),
share_betas=torch.Tensor([0]),
no_root_rot=False,
):
"""
Args:
th_trans (Tensor (batch_size x ncomps)): if provided, applies trans to joints and vertices
th_betas (Tensor (batch_size x 10)): if provided, uses given shape parameters for hand shape
else centers on root joint (9th joint)
root_palm: return palm as hand root instead of wrist
no_root_rot" if true, not apply root rotation to the hand.
"""
# if len(th_pose_coeffs) == 0:
# return th_pose_coeffs.new_empty(0), th_pose_coeffs.new_empty(0)
batch_size = th_pose_coeffs.shape[0]
# Get axis angle from PCA components and coefficients
if self.use_pca or self.joint_rot_mode == 'axisang':
# Remove global rot coeffs
th_hand_pose_coeffs = th_pose_coeffs[:, self.rot:self.rot +
self.ncomps]
if self.use_pca:
# PCA components --> axis angles
th_full_hand_pose = th_hand_pose_coeffs.mm(self.th_selected_comps)
else:
th_full_hand_pose = th_hand_pose_coeffs
# Concatenate back global rot
th_full_pose = torch.cat([
th_pose_coeffs[:, :self.rot],
self.th_hands_mean + th_full_hand_pose
], 1)
if self.root_rot_mode == 'axisang':
# compute rotation matrixes from axis-angle while skipping global rotation
th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose)
root_rot = th_rot_map[:, :9].view(batch_size, 3, 3)
th_rot_map = th_rot_map[:, 9:]
th_pose_map = th_pose_map[:, 9:]
else:
# th_posemap offsets by 3, so add offset or 3 to get to self.rot=6
th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose[:, 6:])
if self.robust_rot:
root_rot = rot6d.robust_compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6])
else:
root_rot = rot6d.compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6])
else:
assert th_pose_coeffs.dim() == 4, (
'When not self.use_pca, '
'th_pose_coeffs should have 4 dims, got {}'.format(
th_pose_coeffs.dim()))
assert th_pose_coeffs.shape[2:4] == (3, 3), (
'When not self.use_pca, th_pose_coeffs have 3x3 matrix for two'
'last dims, got {}'.format(th_pose_coeffs.shape[2:4]))
th_pose_rots = rotproj.batch_rotprojs(th_pose_coeffs)
th_rot_map = th_pose_rots[:, 1:].view(batch_size, -1)
th_pose_map = subtract_flat_id(th_rot_map)
root_rot = th_pose_rots[:, 0]
# Full axis angle representation with root joint
if th_betas is None or th_betas.numel() == 1:
th_v_shaped = torch.matmul(self.th_shapedirs,
self.th_betas.transpose(1, 0)).permute(
2, 0, 1) + self.th_v_template
th_j = torch.matmul(self.th_J_regressor, th_v_shaped).repeat(
batch_size, 1, 1)
else:
if share_betas:
th_betas = th_betas.mean(0, keepdim=True).expand(th_betas.shape[0], 10)
th_v_shaped = torch.matmul(self.th_shapedirs,
th_betas.transpose(1, 0)).permute(
2, 0, 1) + self.th_v_template
th_j = torch.matmul(self.th_J_regressor, th_v_shaped)
# th_pose_map should have shape 20x135
th_v_posed = th_v_shaped + torch.matmul(
self.th_posedirs, th_pose_map.transpose(0, 1)).permute(2, 0, 1)
# import pdb; pdb.set_trace()
# Final T pose with transformation done !
# Global rigid transformation
root_j = th_j[:, 0, :].contiguous().view(batch_size, 3, 1)
if no_root_rot:
no_rot = torch.eye(3).repeat(batch_size, 1, 1)
root_trans = th_with_zeros(torch.cat([no_rot, root_j], 2))
else:
root_trans = th_with_zeros(torch.cat([root_rot, root_j], 2))
all_rots = th_rot_map.view(th_rot_map.shape[0], 15, 3, 3)
lev1_idxs = [1, 4, 7, 10, 13]
lev2_idxs = [2, 5, 8, 11, 14]
lev3_idxs = [3, 6, 9, 12, 15]
lev1_rots = all_rots[:, [idx - 1 for idx in lev1_idxs]]
lev2_rots = all_rots[:, [idx - 1 for idx in lev2_idxs]]
lev3_rots = all_rots[:, [idx - 1 for idx in lev3_idxs]]
lev1_j = th_j[:, lev1_idxs]
lev2_j = th_j[:, lev2_idxs]
lev3_j = th_j[:, lev3_idxs]
# From base to tips
# Get lev1 results
all_transforms = [root_trans.unsqueeze(1)]
lev1_j_rel = lev1_j - root_j.transpose(1, 2)
lev1_rel_transform_flt = th_with_zeros(torch.cat([lev1_rots, lev1_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
root_trans_flt = root_trans.unsqueeze(1).repeat(1, 5, 1, 1).view(root_trans.shape[0] * 5, 4, 4)
lev1_flt = torch.matmul(root_trans_flt, lev1_rel_transform_flt)
all_transforms.append(lev1_flt.view(all_rots.shape[0], 5, 4, 4))
# Get lev2 results
lev2_j_rel = lev2_j - lev1_j
lev2_rel_transform_flt = th_with_zeros(torch.cat([lev2_rots, lev2_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
lev2_flt = torch.matmul(lev1_flt, lev2_rel_transform_flt)
all_transforms.append(lev2_flt.view(all_rots.shape[0], 5, 4, 4))
# Get lev3 results
lev3_j_rel = lev3_j - lev2_j
lev3_rel_transform_flt = th_with_zeros(torch.cat([lev3_rots, lev3_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
lev3_flt = torch.matmul(lev2_flt, lev3_rel_transform_flt)
all_transforms.append(lev3_flt.view(all_rots.shape[0], 5, 4, 4))
reorder_idxs = [0, 1, 6, 11, 2, 7, 12, 3, 8, 13, 4, 9, 14, 5, 10, 15]
th_results = torch.cat(all_transforms, 1)[:, reorder_idxs]
th_results_global = th_results
joint_js = torch.cat([th_j, th_j.new_zeros(th_j.shape[0], 16, 1)], 2)
tmp2 = torch.matmul(th_results, joint_js.unsqueeze(3))
th_results2 = (th_results - torch.cat([tmp2.new_zeros(*tmp2.shape[:2], 4, 3), tmp2], 3)).permute(0, 2, 3, 1)
th_T = torch.matmul(th_results2, self.th_weights.transpose(0, 1))
th_rest_shape_h = torch.cat([
th_v_posed.transpose(2, 1),
torch.ones((batch_size, 1, th_v_posed.shape[1]),
dtype=th_T.dtype,
device=th_T.device),
], 1)
th_verts = (th_T * th_rest_shape_h.unsqueeze(1)).sum(2).transpose(2, 1)
th_verts = th_verts[:, :, :3]
th_jtr = th_results_global[:, :, :3, 3]
# Get transformation matrix
trans_matrix = th_results_global
# In addition to MANO reference joints we sample vertices on each finger
# to serve as finger tips
# tips = th_verts[:, [744, 320, 443, 555, 672]] # Zimmermann
if self.side == 'right':
tips = th_verts[:, [745, 317, 444, 556, 673]]
rest_tips = th_v_posed[:, [745, 317, 444, 556, 673]]
else:
tips = th_verts[:, [745, 317, 445, 556, 673]]
rest_tips = th_v_posed[:, [745, 317, 445, 556, 673]]
if bool(root_palm):
palm = (th_verts[:, 95] + th_verts[:, 22]).unsqueeze(1) / 2
th_jtr = torch.cat([palm, th_jtr[:, 1:]], 1)
th_jtr = torch.cat([th_jtr, tips], 1)
rest_joints = torch.cat([th_j, rest_tips], 1)
# Reorder joints to match visualization utilities
# th_jtr = th_jtr[:, [0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20]]
if th_trans is None or bool(torch.norm(th_trans) == 0):
if self.center_idx is not None:
center_joint = th_jtr[:, self.center_idx].unsqueeze(1)
th_jtr = th_jtr - center_joint
th_verts = th_verts - center_joint
else:
th_jtr = th_jtr + th_trans.unsqueeze(1)
th_verts = th_verts + th_trans.unsqueeze(1)
# Scale to milimeters
# th_verts = th_verts * 1000
# th_jtr = th_jtr * 1000
# mesh vertices, joints, trans_matrix, rest_pose_vertices, rest_post_joints (th_j)
return th_verts, th_jtr, trans_matrix, th_v_posed, rest_joints, th_results2.permute(0, 3, 1, 2)
| 14,017 | 44.219355 | 116 | py |
Im2Hands | Im2Hands-main/manopth/rot6d.py | import torch
def compute_rotation_matrix_from_ortho6d(poses):
    """
    Code from
    https://github.com/papagina/RotationContinuity
    On the Continuity of Rotation Representations in Neural Networks
    Zhou et al. CVPR19
    https://zhouyisjtu.github.io/project_rotation/rotation.html

    Builds rotation matrices from a 6D representation by Gram-Schmidt:
    the first 3 values give the x axis, the second 3 an approximate y
    axis, and z/y are derived by cross products.

    Args:
        poses: (batch, 6) tensor.

    Returns:
        (batch, 3, 3) rotation matrices whose columns are x, y, z.
    """
    x_raw = poses[:, 0:3]  # batch*3
    y_raw = poses[:, 3:6]  # batch*3
    # F.normalize with eps=1e-8 clamps the norm exactly like the previous
    # hand-rolled normalize_vector helper; torch.cross replaces the
    # hand-expanded cross_product helper.
    x = torch.nn.functional.normalize(x_raw, dim=1, eps=1e-8)
    z = torch.nn.functional.normalize(torch.cross(x, y_raw, dim=1), dim=1, eps=1e-8)
    y = torch.cross(z, x, dim=1)
    # Stack the basis vectors as matrix columns.
    return torch.stack((x, y, z), dim=2)  # batch*3*3
def robust_compute_rotation_matrix_from_ortho6d(poses):
    """
    Instead of making the 2nd vector orthogonal to the first,
    create a base that takes the two predicted directions into
    account equally.

    Args:
        poses: (batch, 6) tensor; first/second halves are the two
            predicted direction vectors.

    Returns:
        (batch, 3, 3) rotation matrices.
    """
    def _unit(t):
        # Same semantics as the module's normalize_vector helper:
        # L2-normalize rows with the norm clamped at 1e-8.
        return torch.nn.functional.normalize(t, dim=1, eps=1e-8)

    x = _unit(poses[:, 0:3])  # batch*3
    y = _unit(poses[:, 3:6])  # batch*3
    # Symmetric orthogonalization: rotate both predictions equally toward
    # an orthogonal pair via their (normalized) sum and difference.
    middle = _unit(x + y)
    orthmid = _unit(x - y)
    x = _unit(middle + orthmid)
    y = _unit(middle - orthmid)
    # Their scalar product should be small !
    # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001
    z = _unit(torch.cross(x, y, dim=1))
    matrix = torch.stack((x, y, z), dim=2)  # batch*3*3
    # Check for reflection in matrix ! If found, flip last vector TODO
    # (batched determinant replaces the previous per-matrix Python loop)
    assert (torch.linalg.det(matrix) < 0).sum() == 0
    return matrix
def normalize_vector(v):
    """L2-normalize each row of v.

    The norm is clamped from below at 1e-8, so an all-zero row maps to an
    all-zero row instead of producing NaNs.

    Args:
        v: (batch, d) tensor.

    Returns:
        (batch, d) tensor of row-normalized vectors.
    """
    norms = v.pow(2).sum(1).sqrt()
    norms = torch.max(norms, v.new([1e-8]))
    return v / norms.view(-1, 1)
def cross_product(u, v):
    """Row-wise 3D cross product u x v.

    Args:
        u: (batch, 3) tensor.
        v: (batch, 3) tensor.

    Returns:
        (batch, 3) tensor of cross products.
    """
    # Delegate to the library implementation instead of expanding the
    # determinant by hand; results are identical.
    return torch.cross(u, v, dim=1)
| 2,212 | 29.736111 | 78 | py |
Im2Hands | Im2Hands-main/manopth/demo.py | from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import torch
from manopth.manolayer import ManoLayer
def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'):
    """Sample a batch of random hands from the MANO model.

    Args:
        batch_size (int): number of hands to sample.
        ncomps (int): number of PCA pose components.
        mano_root (str): path to the MANO pkl files.

    Returns:
        dict with 'verts', 'joints' and the layer's 'faces'.
    """
    nfull_comps = ncomps + 3  # Add global orientation dims to PCA
    random_pcapose = torch.rand(batch_size, nfull_comps)
    mano_layer = ManoLayer(mano_root=mano_root)
    # Fix: the modified ManoLayer.forward in this repo returns a 6-tuple
    # (verts, joints, trans_matrix, rest verts, rest joints, ...); the
    # previous 2-way unpacking raised a ValueError. Keep only the first
    # two values here.
    verts, joints = mano_layer(random_pcapose)[:2]
    return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces}
def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, show=True):
    """
    Displays hand batch_idx in batch of hand_info, hand_info as returned by
    generate_random_hand

    NOTE(review): this function also reads hand_info['rest_joints'] and
    hand_info['verts_assoc'], which generate_random_hand does not return
    ('verts', 'joints', 'faces' only) — calling it on that output would
    raise a KeyError. Confirm the intended producer of hand_info.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    verts, joints = hand_info['verts'], hand_info['joints']
    rest_joints = hand_info['rest_joints'] #
    verts_joints_assoc = hand_info['verts_assoc']
    # Bone index used by the commented-out per-bone visualization below.
    visualize_bone = 13
    # rest_verts = hand_info['rest_verts'] #
    if mano_faces is None:
        # No topology available: draw the vertices as a faint point cloud.
        ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1)
    else:
        # Render the triangulated surface with fixed face/edge colors.
        mesh = Poly3DCollection(verts[mano_faces], alpha=alpha)
        face_color = (141 / 255, 184 / 255, 226 / 255)
        edge_color = (50 / 255, 50 / 255, 50 / 255)
        mesh.set_edgecolor(edge_color)
        mesh.set_facecolor(face_color)
        ax.add_collection3d(mesh)
    # print("Joints", joints)
    print("joint shape", joints.shape)
    # Posed joints in red; rest-pose joints split green (first 4) / blue.
    ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r')
    # ax.scatter(joints[:16, 0], joints[:16, 1], joints[:16, 2], color='r')
    ax.scatter(rest_joints[:4, 0], rest_joints[:4, 1], rest_joints[:4, 2], color='g')
    ax.scatter(rest_joints[4:, 0], rest_joints[4:, 1], rest_joints[4:, 2], color='b')
    # visualize only some part
    # seleceted = verts_joints_assoc[:-1] == visualize_bone
    # ax.scatter(verts[seleceted, 0], verts[seleceted, 1], verts[seleceted, 2], color='black', alpha=0.5)
    # cam_equal_aspect_3d(ax, verts.numpy())
    cam_equal_aspect_3d(ax, verts)
    # cam_equal_aspect_3d(ax, rest_joints.numpy())
    if show:
        plt.show()
def cam_equal_aspect_3d(ax, verts, flip_x=False):
    """
    Centers the view on a cube containing the hand and flips the y and z
    axes so the azimuth stays fixed.

    Args:
        ax: matplotlib 3D axes.
        verts: (n, 3) array of vertex positions.
        flip_x: if True, also mirror the x axis.
    """
    mins = verts.min(0)
    maxs = verts.max(0)
    centers = (mins + maxs) / 2
    # Half-size of the bounding cube: largest extent over the three axes.
    radius = np.abs(maxs - mins).max() / 2
    if flip_x:
        ax.set_xlim(centers[0] + radius, centers[0] - radius)
    else:
        ax.set_xlim(centers[0] - radius, centers[0] + radius)
    # Invert y and z axis
    ax.set_ylim(centers[1] + radius, centers[1] - radius)
    ax.set_zlim(centers[2] + radius, centers[2] - radius)
| 2,832 | 35.320513 | 105 | py |
Im2Hands | Im2Hands-main/manopth/rodrigues_layer.py | """
This part reuses code from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py
which is part of a PyTorch port of SMPL.
Thanks to Zhang Xiong (MandyMo) for making this great code available on github !
"""
import argparse
from torch.autograd import gradcheck
import torch
from torch.autograd import Variable
from manopth import argutils
def quat2mat(quat):
    """Convert quaternion coefficients to rotation matrices.

    Args:
        quat: (batch_size, 4) quaternions ordered (w, x, y, z); they are
            normalized internally, so unit length is not required.

    Returns:
        (batch_size, 3, 3) rotation matrices.
    """
    unit = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit[:, 0], unit[:, 1], unit[:, 2], unit[:, 3]
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Standard quaternion-to-matrix expansion, one stacked row at a time.
    row0 = torch.stack([ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz], dim=1)
    row1 = torch.stack([2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx], dim=1)
    row2 = torch.stack([2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz], dim=1)
    return torch.stack([row0, row1, row2], dim=1)
def batch_rodrigues(axisang):
    """Convert axis-angle vectors to flattened rotation matrices.

    Args:
        axisang: (N, 3) axis-angle vectors (direction = axis, norm = angle).

    Returns:
        (N, 9) row-major flattened rotation matrices.
    """
    # The tiny 1e-8 offset keeps the norm non-zero for the null rotation.
    angle = torch.norm(axisang + 1e-8, p=2, dim=1).unsqueeze(-1)
    axis = torch.div(axisang, angle)
    half_angle = angle * 0.5
    # Build the quaternion (w, x, y, z) and reuse the quaternion path.
    quat = torch.cat([torch.cos(half_angle), torch.sin(half_angle) * axis], dim=1)
    rot_mat = quat2mat(quat)
    return rot_mat.view(rot_mat.shape[0], 9)
def th_get_axis_angle(vector):
    """Split axis-angle vectors into unit axes and rotation angles.

    Args:
        vector: (batch, 3) axis-angle vectors.

    Returns:
        axes: (batch, 3) unit axes.
        angle: (batch,) rotation angles (vector norms).
    """
    angle = vector.norm(p=2, dim=1)
    axes = vector / angle.view(-1, 1)
    return axes, angle
if __name__ == '__main__':
    # Gradient-check driver for the rotation layers.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--cuda', action='store_true')
    args = parser.parse_args()
    argutils.print_args(args)
    n_components = 6
    rot = 3
    inputs = torch.rand(args.batch_size, rot)
    # gradcheck needs double precision inputs with requires_grad set.
    inputs_var = Variable(inputs.double(), requires_grad=True)
    if args.cuda:
        # NOTE(review): only `inputs` is moved to GPU here; gradcheck below
        # runs on `inputs_var`, which stays on CPU — confirm intent.
        inputs = inputs.cuda()
    # outputs = batch_rodrigues(inputs)
    test_function = gradcheck(batch_rodrigues, (inputs_var, ))
    print('batch test passed !')
    inputs = torch.rand(rot)
    inputs_var = Variable(inputs.double(), requires_grad=True)
    # NOTE(review): th_cv2_rod_sub_id and th_cv2_rod are not defined in this
    # file — the two checks below raise NameError as written; they look like
    # leftovers from a removed OpenCV-based autograd Function.
    test_function = gradcheck(th_cv2_rod_sub_id.apply, (inputs_var, ))
    print('th_cv2_rod test passed')
    inputs = torch.rand(rot)
    inputs_var = Variable(inputs.double(), requires_grad=True)
    test_th = gradcheck(th_cv2_rod.apply, (inputs_var, ))
    print('th_cv2_rod_id test passed !')
Im2Hands | Im2Hands-main/manopth/manolayer.py | '''
This manolayer.py is a modified version of Yana's Hasson pytorch implementation
of the MANO model (https://github.com/hassony2/manopth).
We made the following changes to the original file to get the transformation matrices
necessary for training the HALO model.
- No logical change has been made to the way the transformation matrices are computed.
- The forward pass also returns more values including
- trans_matrix: The transformation matrices for each joint
- rest_joints: The joint positions in rest pose after the blendshapes are applied
- rest_pose_vertices
- The returned joint order is according to the internal computation
(without reordering at the end). We use this ordering as it is easier to interface
with the internal computation of this code.
- The returned joints are in (m) instead of (mm)
- The no_root_rot flag is added to the forward pass to ignore the rotation parameters.
These rotations are ignored as we will compute the global transformations using
the canonicalization layer.
'''
import os
import numpy as np
import torch
from torch.nn import Module
from mano.webuser.smpl_handpca_wrapper_HAND_only import ready_arguments
from manopth import rodrigues_layer, rotproj, rot6d
from manopth.tensutils import (th_posemap_axisang, th_with_zeros, th_pack,
subtract_flat_id, make_list)
class ManoLayer(Module):
    """Differentiable MANO layer producing posed vertices, joints and the
    per-joint transformation matrices needed by HALO (see module docstring).
    """
    # TorchScript constant attributes; 'ncomps' appears twice in the
    # original list (harmless duplication).
    __constants__ = [
        'use_pca', 'rot', 'ncomps', 'ncomps', 'kintree_parents', 'check',
        'side', 'center_idx', 'joint_rot_mode'
    ]
    def __init__(self,
                 center_idx=None,
                 flat_hand_mean=True,
                 ncomps=6,
                 side='right',
                 mano_root='mano/models',
                 use_pca=True,
                 root_rot_mode='axisang',
                 joint_rot_mode='axisang',
                 robust_rot=False):
        """
        Args:
            center_idx: index of center joint in our computations,
                if -1 centers on estimate of palm as middle of base
                of middle finger and wrist
            flat_hand_mean: if True, (0, 0, 0, ...) pose coefficients match
                flat hand, else match average hand pose
            mano_root: path to MANO pkl files for left and right hand
            ncomps: number of PCA components form pose space (<45)
            side: 'right' or 'left'
            use_pca: Use PCA decomposition for pose space.
            joint_rot_mode: 'axisang' or 'rotmat', ignored if use_pca
        """
        super().__init__()
        self.center_idx = center_idx
        self.robust_rot = robust_rot
        # Number of parameters consumed by the global (root) rotation.
        if root_rot_mode == 'axisang':
            self.rot = 3
        else:
            self.rot = 6
        self.flat_hand_mean = flat_hand_mean
        self.side = side
        self.use_pca = use_pca
        self.joint_rot_mode = joint_rot_mode
        self.root_rot_mode = root_rot_mode
        if use_pca:
            self.ncomps = ncomps
        else:
            self.ncomps = 45
        if side == 'right':
            self.mano_path = os.path.join(mano_root, 'MANO_RIGHT.pkl')
        elif side == 'left':
            self.mano_path = os.path.join(mano_root, 'MANO_LEFT.pkl')
        smpl_data = ready_arguments(self.mano_path)
        hands_components = smpl_data['hands_components']
        self.smpl_data = smpl_data
        # MANO model tensors registered as (non-trainable) buffers so they
        # follow the module across devices.
        self.register_buffer('th_betas',
                             torch.Tensor(smpl_data['betas'].r.copy()).unsqueeze(0))
        self.register_buffer('th_shapedirs',
                             torch.Tensor(smpl_data['shapedirs'].r.copy()))
        self.register_buffer('th_posedirs',
                             torch.Tensor(smpl_data['posedirs'].r.copy()))
        self.register_buffer(
            'th_v_template',
            torch.Tensor(smpl_data['v_template'].r.copy()).unsqueeze(0))
        self.register_buffer(
            'th_J_regressor',
            torch.Tensor(np.array(smpl_data['J_regressor'].toarray())))
        self.register_buffer('th_weights',
                             torch.Tensor(smpl_data['weights'].r.copy()))
        self.register_buffer('th_faces',
                             torch.Tensor(smpl_data['f'].astype(np.int32)).long())
        # Get hand mean
        hands_mean = np.zeros(hands_components.shape[1]
                              ) if flat_hand_mean else smpl_data['hands_mean']
        hands_mean = hands_mean.copy()
        th_hands_mean = torch.Tensor(hands_mean).unsqueeze(0)
        if self.use_pca or self.joint_rot_mode == 'axisang':
            # Save as axis-angle
            self.register_buffer('th_hands_mean', th_hands_mean)
            selected_components = hands_components[:ncomps]
            self.register_buffer('th_selected_comps',
                                 torch.Tensor(selected_components))
        else:
            th_hands_mean_rotmat = rodrigues_layer.batch_rodrigues(
                th_hands_mean.view(15, 3)).reshape(15, 3, 3)
            self.register_buffer('th_hands_mean_rotmat', th_hands_mean_rotmat)
        # Kinematic chain params
        self.kintree_table = smpl_data['kintree_table']
        parents = list(self.kintree_table[0].tolist())
        self.kintree_parents = parents
    def forward(self,
                th_pose_coeffs,
                th_betas=torch.zeros(1),
                th_trans=torch.zeros(1),
                root_palm=torch.Tensor([0]),
                share_betas=torch.Tensor([0]),
                no_root_rot=False,
                ):
        """
        Args:
            th_trans (Tensor (batch_size x ncomps)): if provided, applies trans to joints and vertices
            th_betas (Tensor (batch_size x 10)): if provided, uses given shape parameters for hand shape
                else centers on root joint (9th joint)
            root_palm: return palm as hand root instead of wrist
            no_root_rot: if true, do not apply root rotation to the hand.

        Returns (see the return statement at the bottom): posed vertices,
        joints (wrist first, fingertips appended), per-joint global
        transforms, rest-pose vertices, rest-pose joints, and the skinning
        transforms. Joints are in meters, following MANO's internal order.
        """
        # if len(th_pose_coeffs) == 0:
        #     return th_pose_coeffs.new_empty(0), th_pose_coeffs.new_empty(0)
        batch_size = th_pose_coeffs.shape[0]
        # Get axis angle from PCA components and coefficients
        if self.use_pca or self.joint_rot_mode == 'axisang':
            # Remove global rot coeffs
            th_hand_pose_coeffs = th_pose_coeffs[:, self.rot:self.rot +
                                                 self.ncomps]
            if self.use_pca:
                # PCA components --> axis angles
                th_full_hand_pose = th_hand_pose_coeffs.mm(self.th_selected_comps)
            else:
                th_full_hand_pose = th_hand_pose_coeffs
            # Concatenate back global rot
            th_full_pose = torch.cat([
                th_pose_coeffs[:, :self.rot],
                self.th_hands_mean + th_full_hand_pose
            ], 1)
            if self.root_rot_mode == 'axisang':
                # compute rotation matrixes from axis-angle while skipping global rotation
                th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose)
                root_rot = th_rot_map[:, :9].view(batch_size, 3, 3)
                th_rot_map = th_rot_map[:, 9:]
                th_pose_map = th_pose_map[:, 9:]
            else:
                # th_posemap offsets by 3, so add offset or 3 to get to self.rot=6
                th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose[:, 6:])
                if self.robust_rot:
                    root_rot = rot6d.robust_compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6])
                else:
                    root_rot = rot6d.compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6])
        else:
            assert th_pose_coeffs.dim() == 4, (
                'When not self.use_pca, '
                'th_pose_coeffs should have 4 dims, got {}'.format(
                    th_pose_coeffs.dim()))
            assert th_pose_coeffs.shape[2:4] == (3, 3), (
                'When not self.use_pca, th_pose_coeffs have 3x3 matrix for two'
                'last dims, got {}'.format(th_pose_coeffs.shape[2:4]))
            th_pose_rots = rotproj.batch_rotprojs(th_pose_coeffs)
            th_rot_map = th_pose_rots[:, 1:].view(batch_size, -1)
            th_pose_map = subtract_flat_id(th_rot_map)
            root_rot = th_pose_rots[:, 0]
        # Full axis angle representation with root joint
        # Apply shape blendshapes, then regress rest-pose joints th_j.
        if th_betas is None or th_betas.numel() == 1:
            th_v_shaped = torch.matmul(self.th_shapedirs,
                                       self.th_betas.transpose(1, 0)).permute(
                                           2, 0, 1) + self.th_v_template
            th_j = torch.matmul(self.th_J_regressor, th_v_shaped).repeat(
                batch_size, 1, 1)
        else:
            if share_betas:
                th_betas = th_betas.mean(0, keepdim=True).expand(th_betas.shape[0], 10)
            th_v_shaped = torch.matmul(self.th_shapedirs,
                                       th_betas.transpose(1, 0)).permute(
                                           2, 0, 1) + self.th_v_template
            th_j = torch.matmul(self.th_J_regressor, th_v_shaped)
            # th_pose_map should have shape 20x135
        # Pose blendshapes applied in the rest pose.
        th_v_posed = th_v_shaped + torch.matmul(
            self.th_posedirs, th_pose_map.transpose(0, 1)).permute(2, 0, 1)
        # import pdb; pdb.set_trace()
        # Final T pose with transformation done !
        # Global rigid transformation
        root_j = th_j[:, 0, :].contiguous().view(batch_size, 3, 1)
        if no_root_rot:
            # Replace the predicted root rotation with identity; global
            # orientation is handled by the canonicalization layer instead.
            no_rot = torch.eye(3).repeat(batch_size, 1, 1)
            root_trans = th_with_zeros(torch.cat([no_rot, root_j], 2))
        else:
            root_trans = th_with_zeros(torch.cat([root_rot, root_j], 2))
        all_rots = th_rot_map.view(th_rot_map.shape[0], 15, 3, 3)
        # Kinematic levels: 5 proximal, 5 middle, 5 distal joints.
        lev1_idxs = [1, 4, 7, 10, 13]
        lev2_idxs = [2, 5, 8, 11, 14]
        lev3_idxs = [3, 6, 9, 12, 15]
        lev1_rots = all_rots[:, [idx - 1 for idx in lev1_idxs]]
        lev2_rots = all_rots[:, [idx - 1 for idx in lev2_idxs]]
        lev3_rots = all_rots[:, [idx - 1 for idx in lev3_idxs]]
        lev1_j = th_j[:, lev1_idxs]
        lev2_j = th_j[:, lev2_idxs]
        lev3_j = th_j[:, lev3_idxs]
        # From base to tips
        # Get lev1 results
        all_transforms = [root_trans.unsqueeze(1)]
        lev1_j_rel = lev1_j - root_j.transpose(1, 2)
        lev1_rel_transform_flt = th_with_zeros(torch.cat([lev1_rots, lev1_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
        root_trans_flt = root_trans.unsqueeze(1).repeat(1, 5, 1, 1).view(root_trans.shape[0] * 5, 4, 4)
        lev1_flt = torch.matmul(root_trans_flt, lev1_rel_transform_flt)
        all_transforms.append(lev1_flt.view(all_rots.shape[0], 5, 4, 4))
        # Get lev2 results
        lev2_j_rel = lev2_j - lev1_j
        lev2_rel_transform_flt = th_with_zeros(torch.cat([lev2_rots, lev2_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
        lev2_flt = torch.matmul(lev1_flt, lev2_rel_transform_flt)
        all_transforms.append(lev2_flt.view(all_rots.shape[0], 5, 4, 4))
        # Get lev3 results
        lev3_j_rel = lev3_j - lev2_j
        lev3_rel_transform_flt = th_with_zeros(torch.cat([lev3_rots, lev3_j_rel.unsqueeze(3)], 3).view(-1, 3, 4))
        lev3_flt = torch.matmul(lev2_flt, lev3_rel_transform_flt)
        all_transforms.append(lev3_flt.view(all_rots.shape[0], 5, 4, 4))
        # Back to MANO's joint ordering (wrist, then per-finger chains).
        reorder_idxs = [0, 1, 6, 11, 2, 7, 12, 3, 8, 13, 4, 9, 14, 5, 10, 15]
        th_results = torch.cat(all_transforms, 1)[:, reorder_idxs]
        th_results_global = th_results
        # Linear blend skinning: subtract each joint's rest position from
        # its global transform, then blend with the skinning weights.
        joint_js = torch.cat([th_j, th_j.new_zeros(th_j.shape[0], 16, 1)], 2)
        tmp2 = torch.matmul(th_results, joint_js.unsqueeze(3))
        th_results2 = (th_results - torch.cat([tmp2.new_zeros(*tmp2.shape[:2], 4, 3), tmp2], 3)).permute(0, 2, 3, 1)
        th_T = torch.matmul(th_results2, self.th_weights.transpose(0, 1))
        th_rest_shape_h = torch.cat([
            th_v_posed.transpose(2, 1),
            torch.ones((batch_size, 1, th_v_posed.shape[1]),
                       dtype=th_T.dtype,
                       device=th_T.device),
        ], 1)
        th_verts = (th_T * th_rest_shape_h.unsqueeze(1)).sum(2).transpose(2, 1)
        th_verts = th_verts[:, :, :3]
        th_jtr = th_results_global[:, :, :3, 3]
        # Get transformation matrix
        trans_matrix = th_results_global
        # In addition to MANO reference joints we sample vertices on each finger
        # to serve as finger tips
        # tips = th_verts[:, [744, 320, 443, 555, 672]]  # Zimmermann
        # NOTE(review): these are right-hand tip indices; the backup layer
        # distinguishes left/right here — confirm left-hand support.
        tips = th_verts[:, [745, 317, 444, 556, 673]]
        rest_tips = th_v_posed[:, [745, 317, 444, 556, 673]]
        if bool(root_palm):
            print('root_palm')
            palm = (th_verts[:, 95] + th_verts[:, 22]).unsqueeze(1) / 2
            th_jtr = torch.cat([palm, th_jtr[:, 1:]], 1)
        th_jtr = torch.cat([th_jtr, tips], 1)
        rest_joints = torch.cat([th_j, rest_tips], 1)
        # Reorder joints to match visualization utilities
        # th_jtr = th_jtr[:, [0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20]]
        if th_trans is None or bool(torch.norm(th_trans) == 0):
            if self.center_idx is not None:
                center_joint = th_jtr[:, self.center_idx].unsqueeze(1)
                th_jtr = th_jtr - center_joint
                th_verts = th_verts - center_joint
        else:
            th_jtr = th_jtr + th_trans.unsqueeze(1)
            th_verts = th_verts + th_trans.unsqueeze(1)
        # Scale to milimeters
        # th_verts = th_verts * 1000
        # th_jtr = th_jtr * 1000
        # mesh vertices, joints, trans_matrix, rest_pose_vertices, rest_post_joints (th_j)
        return th_verts, th_jtr, trans_matrix, th_v_posed, rest_joints, th_results2.permute(0, 3, 1, 2)
| 13,870 | 44.18241 | 116 | py |
Im2Hands | Im2Hands-main/manopth/rotproj.py | import torch
def batch_rotprojs(batches_rotmats):
    """Project batches of 3x3 matrices onto SO(3).

    Each matrix is replaced by the closest rotation obtained from its SVD
    as U @ V^T; when that product is a reflection (negative determinant),
    the last column is flipped to recover a proper rotation.

    Args:
        batches_rotmats: (batch, n, 3, 3) tensor of matrices.

    Returns:
        Tensor of the same shape containing rotation matrices, on the
        same device as the input.
    """
    device = batches_rotmats.device
    proj_rotmats = []
    for batch_rotmats in batches_rotmats:
        proj_batch_rotmats = []
        for rotmat in batch_rotmats:
            # GPU implementation of svd is VERY slow
            # ~ 2 10^-3 per hit vs 5 10^-5 on cpu
            U, S, V = rotmat.cpu().svd()
            rotmat = torch.matmul(U, V.transpose(0, 1))
            orth_det = rotmat.det()
            # Remove reflection
            if orth_det < 0:
                rotmat[:, 2] = -1 * rotmat[:, 2]
            # Fix: move back to the input's device instead of an
            # unconditional .cuda(), which crashed on CPU-only machines
            # and silently migrated CPU inputs to the GPU.
            rotmat = rotmat.to(device)
            proj_batch_rotmats.append(rotmat)
        proj_rotmats.append(torch.stack(proj_batch_rotmats))
    return torch.stack(proj_rotmats)
| 753 | 33.272727 | 63 | py |
GANterfactual | GANterfactual-main/GANterfactual/discriminator.py | from keras.layers import Input
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D
from keras.models import Model
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
def build_discriminator(img_shape, df):
    """Build a PatchGAN discriminator.

    Args:
        img_shape (tuple): shape of the input image, e.g. (H, W, C).
        df (int): number of filters in the first discriminator layer.

    Returns:
        keras Model mapping an image to a patch map of validity scores.
    """
    def conv_block(layer_input, filters, f_size=4, normalization=True):
        """Stride-2 Conv -> LeakyReLU -> optional InstanceNorm."""
        out = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        out = LeakyReLU(alpha=0.2)(out)
        if normalization:
            out = InstanceNormalization()(out)
        return out

    img = Input(shape=img_shape)
    features = conv_block(img, df, normalization=False)
    # Each block halves the spatial resolution and doubles the filters.
    for mult in (2, 4, 8):
        features = conv_block(features, df * mult)
    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)
    return Model(img, validity)
GANterfactual | GANterfactual-main/GANterfactual/cyclegan.py | from __future__ import print_function, division
import datetime
import os
import keras
import matplotlib.pyplot as plt
import numpy as np
from skimage.transform import resize
from keras.layers import Input, Dropout, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from classifier import load_classifier
from dataloader import DataLoader
from discriminator import build_discriminator
from generator import build_generator
class CycleGAN():
    """CycleGAN for counterfactual image generation (NEGATIVE <-> POSITIVE).

    Holds two generators (g_NP: negative->positive, g_PN: positive->negative)
    and two PatchGAN discriminators, optionally guided by a frozen pretrained
    classifier whose predictions on translated images enter the loss.
    """

    def __init__(self):
        # Input shape
        self.img_rows = 512
        self.img_cols = 512
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2 ** 4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64

        # Loss weights
        self.lambda_cycle = 10.0  # Cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle  # Identity loss

        self.d_N = None
        self.d_P = None
        self.g_NP = None
        self.g_PN = None
        self.combined = None
        self.classifier = None

    def construct(self, classifier_path=None, classifier_weight=None):
        """Build fresh generators and discriminators, then compile the combined model."""
        # Build the discriminators
        self.d_N = build_discriminator(self.img_shape, self.df)
        self.d_P = build_discriminator(self.img_shape, self.df)

        # Build the generators
        self.g_NP = build_generator(self.img_shape, self.gf, self.channels)
        self.g_PN = build_generator(self.img_shape, self.gf, self.channels)

        self.build_combined(classifier_path, classifier_weight)

    def load_existing(self, cyclegan_folder, classifier_path=None, classifier_weight=None):
        """Load generators/discriminators from disk and recompile the combined model."""
        custom_objects = {"InstanceNormalization": InstanceNormalization}

        # Load discriminators from disk
        self.d_N = keras.models.load_model(os.path.join(cyclegan_folder, 'discriminator_n.h5'),
                                           custom_objects=custom_objects)
        self.d_N._name = "d_N"
        self.d_P = keras.models.load_model(os.path.join(cyclegan_folder, 'discriminator_p.h5'),
                                           custom_objects=custom_objects)
        self.d_P._name = "d_P"

        # Load generators from disk
        self.g_NP = keras.models.load_model(os.path.join(cyclegan_folder, 'generator_np.h5'),
                                            custom_objects=custom_objects)
        self.g_NP._name = "g_NP"
        self.g_PN = keras.models.load_model(os.path.join(cyclegan_folder, 'generator_pn.h5'),
                                            custom_objects=custom_objects)
        self.g_PN._name = "g_PN"

        self.build_combined(classifier_path, classifier_weight)

    def save(self, cyclegan_folder):
        """Save all four sub-models into cyclegan_folder (created if needed)."""
        os.makedirs(cyclegan_folder, exist_ok=True)

        # Save discriminators to disk
        self.d_N.save(os.path.join(cyclegan_folder, 'discriminator_n.h5'))
        self.d_P.save(os.path.join(cyclegan_folder, 'discriminator_p.h5'))

        # Save generators to disk
        self.g_NP.save(os.path.join(cyclegan_folder, 'generator_np.h5'))
        self.g_PN.save(os.path.join(cyclegan_folder, 'generator_pn.h5'))

    def build_combined(self, classifier_path=None, classifier_weight=None):
        """Compile discriminators and the combined generator-training model.

        When a classifier file exists at classifier_path, its (frozen)
        predictions on translated images are added as two extra loss terms
        weighted by classifier_weight.
        """
        optimizer = Adam(0.0002, 0.5)

        self.d_N.compile(loss='mse',
                         optimizer=optimizer,
                         metrics=['accuracy'])
        self.d_P.compile(loss='mse',
                         optimizer=optimizer,
                         metrics=['accuracy'])

        # Input images from both domains
        img_N = Input(shape=self.img_shape)
        img_P = Input(shape=self.img_shape)

        # Translate images to the other domain
        fake_P = self.g_NP(img_N)
        fake_N = self.g_PN(img_P)
        # Translate images back to original domain
        reconstr_N = self.g_PN(fake_P)
        reconstr_P = self.g_NP(fake_N)
        # Identity mapping of images
        img_N_id = self.g_PN(img_N)
        img_P_id = self.g_NP(img_P)

        # For the combined model we will only train the generators
        self.d_N.trainable = False
        self.d_P.trainable = False

        # Discriminators determines validity of translated images
        valid_N = self.d_N(fake_N)
        valid_P = self.d_P(fake_P)

        if classifier_path is not None and os.path.isfile(classifier_path):
            self.classifier = load_classifier(classifier_path, self.img_shape)
            self.classifier._name = "classifier"
            self.classifier.trainable = False

            class_N_loss = self.classifier(fake_N)
            class_P_loss = self.classifier(fake_P)

            # Combined model trains generators to fool discriminators
            self.combined = Model(inputs=[img_N, img_P],
                                  outputs=[valid_N, valid_P,
                                           class_N_loss, class_P_loss,
                                           reconstr_N, reconstr_P,
                                           img_N_id, img_P_id])
            self.combined.compile(loss=['mse', 'mse',
                                        'mse', 'mse',
                                        'mae', 'mae',
                                        'mae', 'mae'],
                                  loss_weights=[1, 1,
                                                classifier_weight, classifier_weight,
                                                self.lambda_cycle, self.lambda_cycle,
                                                self.lambda_id, self.lambda_id],
                                  optimizer=optimizer)
        else:
            # Combined model trains generators to fool discriminators
            self.combined = Model(inputs=[img_N, img_P],
                                  outputs=[valid_N, valid_P,
                                           reconstr_N, reconstr_P,
                                           img_N_id, img_P_id])
            self.combined.compile(loss=['mse', 'mse',
                                        'mae', 'mae',
                                        'mae', 'mae'],
                                  loss_weights=[1, 1,
                                                self.lambda_cycle, self.lambda_cycle,
                                                self.lambda_id, self.lambda_id],
                                  optimizer=optimizer)

    def train(self, dataset_name, epochs, batch_size=1, train_N="NEGATIVE", train_P="POSITIVE", print_interval=100,
              sample_interval=1000):
        """Alternate discriminator and generator updates over the paired flows."""
        # Configure data loader
        data_loader = DataLoader(dataset_name=dataset_name, img_res=(self.img_rows, self.img_cols))

        start_time = datetime.datetime.now()

        # Adversarial loss ground truths
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)
        # One-hot targets the classifier should produce for each domain.
        class_N = np.stack([np.ones(batch_size), np.zeros(batch_size)]).T
        class_P = np.stack([np.zeros(batch_size), np.ones(batch_size)]).T

        for epoch in range(epochs):
            for batch_i, (imgs_N, imgs_P) in enumerate(data_loader.load_batch(train_N, train_P, batch_size)):
                # ----------------------
                #  Train Discriminators
                # ----------------------

                # Translate images to opposite domain
                fake_P = self.g_NP.predict(imgs_N)
                fake_N = self.g_PN.predict(imgs_P)

                # Train the discriminators (original images = real / translated = Fake)
                dN_loss_real = self.d_N.train_on_batch(imgs_N, valid)
                dN_loss_fake = self.d_N.train_on_batch(fake_N, fake)
                dN_loss = 0.5 * np.add(dN_loss_real, dN_loss_fake)

                dP_loss_real = self.d_P.train_on_batch(imgs_P, valid)
                dP_loss_fake = self.d_P.train_on_batch(fake_P, fake)
                dP_loss = 0.5 * np.add(dP_loss_real, dP_loss_fake)

                # Total disciminator loss
                d_loss = 0.5 * np.add(dN_loss, dP_loss)

                # ------------------
                #  Train Generators
                # ------------------

                if self.classifier is not None:
                    # Train the generators
                    g_loss = self.combined.train_on_batch([imgs_N, imgs_P],
                                                          [valid, valid,
                                                           class_N, class_P,
                                                           imgs_N, imgs_P,
                                                           imgs_N, imgs_P])
                else:
                    g_loss = self.combined.train_on_batch([imgs_N, imgs_P],
                                                          [valid, valid,
                                                           imgs_N, imgs_P,
                                                           imgs_N, imgs_P])

                elapsed_time = datetime.datetime.now() - start_time

                if self.classifier is not None:
                    progress_str = f"[Epoch: {epoch}/{epochs}] [Batch: {batch_i}] [D_loss: {d_loss[0]:.5f}, acc: {100 * d_loss[1]:.5f}] " \
                        f"[G_loss: {g_loss[0]:.5f}, adv: {np.mean(g_loss[1:3]):.5f}, classifier_N: {g_loss[3]:.5f}, classifier_P: {g_loss[4]:.5f}, " \
                        f"recon: {np.mean(g_loss[5:7]):.5f}, id: {np.mean(g_loss[7:9]):.5f}] " \
                        f"time: {elapsed_time}"
                else:
                    progress_str = f"[Epoch: {epoch}/{epochs}] [Batch: {batch_i}] [D_loss: {d_loss[0]:.5f}, acc: {100 * d_loss[1]:.5f}] " \
                        f"[G_loss: {g_loss[0]:.5f}, adv: {np.mean(g_loss[1:3]):.5f}, recon: {np.mean(g_loss[3:5]):.5f}, id: {np.mean(g_loss[5:7]):.5f}] " \
                        f"time: {elapsed_time}"

                # Plot the progress
                if batch_i % print_interval == 0:
                    print(progress_str)

                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i, imgs_N[0], imgs_P[0])

            # Comment this in if you want to save checkpoints:
            #self.save(os.path.join('..','models','GANterfactual','ep_' + str(epoch)))

    def sample_images(self, epoch, batch_i, testN, testP):
        """Save a 2x3 grid (original / translated / reconstructed per domain)
        to images/<epoch>_<batch>.png."""
        os.makedirs('images', exist_ok=True)
        r, c = 2, 3

        img_N = testN[np.newaxis, :, :, :]
        img_P = testP[np.newaxis, :, :, :]

        # Translate images to the other domain
        fake_P = self.g_NP.predict(img_N)
        fake_N = self.g_PN.predict(img_P)
        # Translate back to original domain
        reconstr_N = self.g_PN.predict(fake_P)
        reconstr_P = self.g_NP.predict(fake_N)

        imgs = [img_N, fake_P, reconstr_N, img_P, fake_N, reconstr_P]
        # Bug fix: the classifier is optional, but the original called it
        # unconditionally and crashed with AttributeError when it was None.
        if self.classifier is not None:
            classification = [['NEGATIVE', 'POSITIVE'][int(np.argmax(self.classifier.predict(x)))] for x in imgs]
        else:
            classification = ['n/a'] * len(imgs)
        gen_imgs = np.concatenate(imgs)

        correct_classification = ['NEGATIVE', 'POSITIVE', 'NEGATIVE', 'POSITIVE', 'NEGATIVE', 'POSITIVE']

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Original', 'Translated', 'Reconstructed']
        fig, axs = plt.subplots(r, c, figsize=(15, 10))
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt][:, :, 0], cmap='gray')
                # Bug fix: a second set_title call immediately overwrote this
                # title, dropping the classifier's prediction from the figure.
                axs[i, j].set_title(f'{titles[j]} ({correct_classification[cnt]} | {classification[cnt]})')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%d_%d.png" % (epoch, batch_i))
        plt.close()

    def predict(self, original_in_path, translated_out_path, reconstructed_out_path, force_original_aspect_ratio=False):
        """Classify an image, translate it to the opposite class, reconstruct it,
        and save both outputs. Returns the three classifier predictions."""
        assert (self.classifier is not None)
        data_loader = DataLoader(img_res=(self.img_rows, self.img_cols))
        original = data_loader.load_single(original_in_path)
        original = original.reshape(1, original.shape[0], original.shape[1], original.shape[2])
        pred_original = self.classifier.predict(original)

        if int(np.argmax(pred_original)) == 0:
            print("PREDICTION -- NEGATIVE")
            translated = self.g_NP.predict(original)
            reconstructed = self.g_PN.predict(translated)
        else:
            print("PREDICTION -- POSITIVE")
            translated = self.g_PN.predict(original)
            reconstructed = self.g_NP.predict(translated)

        pred_translated = self.classifier.predict(translated)
        pred_reconstructed = self.classifier.predict(reconstructed)

        if force_original_aspect_ratio:
            orig_no_res = keras.preprocessing.image.load_img(original_in_path)
            translated = resize(translated[0], (orig_no_res.height, orig_no_res.width))
            reconstructed = resize(reconstructed[0], (orig_no_res.height, orig_no_res.width))
        else:
            translated = translated[0]
            reconstructed = reconstructed[0]

        data_loader.save_single(translated, translated_out_path)
        data_loader.save_single(reconstructed, reconstructed_out_path)

        return [pred_original, pred_translated, pred_reconstructed]
if __name__ == '__main__':
    # Train a counterfactual CycleGAN guided by the pretrained classifier;
    # classifier_weight=1 weights the two classifier-consistency loss terms.
    gan = CycleGAN()
    gan.construct(classifier_path=os.path.join('..', 'models', 'classifier', 'model.h5'), classifier_weight=1)
    gan.train(dataset_name=os.path.join("..","data"), epochs=20, batch_size=1, print_interval=10,
              sample_interval=100)
    # Persist all four sub-models for later reuse via load_existing().
    gan.save(os.path.join('..', 'models', 'GANterfactual'))
| 14,019 | 42.949843 | 166 | py |
GANterfactual | GANterfactual-main/GANterfactual/dataloader.py | from __future__ import print_function, division
import os
import keras
import numpy as np
class DataLoader():
    """Thin wrapper around Keras ImageDataGenerator flows for paired
    NEGATIVE/POSITIVE grayscale image batches, plus single-image I/O.

    All pixel values are scaled to [-1, 1] on load and back to [0, 1] on save.
    """

    def __init__(self, dataset_name=None, img_res=(128, 128)):
        self.dataset_name = dataset_name
        self.img_res = img_res
        # Shared generator settings: no augmentation, rescale pixels to [-1, 1].
        self.image_gen_config = {
            "horizontal_flip": False,
            "preprocessing_function": (lambda x: x / 127.5 - 1.),
            "rescale": None,
        }

    def load_batch(self, train_N="NEGATIVE", train_P="POSITIVE", batch_size=16, is_testing=False):
        """Yield aligned (negative_batch, positive_batch) image pairs for one epoch."""
        generator = keras.preprocessing.image.ImageDataGenerator(**self.image_gen_config)

        flow_args = dict(
            class_mode="categorical",
            batch_size=batch_size,
            color_mode="grayscale",
            shuffle=True,
            target_size=self.img_res,
        )

        subdir = "validation" if is_testing else "train"
        flows = [
            generator.flow_from_directory(os.path.join(self.dataset_name, subdir, domain), **flow_args)
            for domain in (train_N, train_P)
        ]

        # Directory flows cycle forever; capping the zip with a range limits
        # one epoch to the size of the larger class.
        n_batches = max(len(flows[0]), len(flows[1]))
        for (neg_imgs, _), (pos_imgs, _), _ in zip(flows[0], flows[1], range(n_batches)):
            yield neg_imgs, pos_imgs

    def load_single(self, path):
        """Load one grayscale image resized to img_res, scaled to [-1, 1]."""
        img = keras.preprocessing.image.load_img(path, color_mode="grayscale", target_size=self.img_res)
        return keras.preprocessing.image.img_to_array(img) / 127.5 - 1

    def save_single(self, x, path):
        """Save one image, rescaling from [-1, 1] back to [0, 1] first."""
        keras.preprocessing.image.save_img(path, 0.5 * x + 0.5)
| 1,872 | 31.859649 | 104 | py |
GANterfactual | GANterfactual-main/GANterfactual/classifier.py | from __future__ import print_function, division
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Lambda
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
import keras
# The trained classifier is loaded.
# Rewrite this function if you want to use another model architecture than our modified AlexNET.
# A model, which provides a 'predict' function, has to be returned.
def load_classifier(path, img_shape):
    """Load a trained classifier and transfer its weights layer-by-layer into
    a freshly built modified-AlexNet graph for the given input shape.

    The copy is positional: layer i of the saved model maps onto layer i of
    the rebuilt model, so build_classifier must create layers in the same
    order as the training script.

    Args:
        path: filesystem path to the saved Keras model (.h5).
        img_shape: (rows, cols, channels) input shape for the rebuilt model.

    Returns:
        The rebuilt model with the trained weights loaded.
    """
    # Bug fix: keras.models.load does not exist; the correct API is load_model.
    original = keras.models.load_model(path)
    classifier = build_classifier(img_shape)
    counter = 0
    for layer in original.layers:
        assert (counter < len(classifier.layers))
        classifier.layers[counter].set_weights(layer.get_weights())
        counter += 1
    classifier.summary()
    return classifier
def build_classifier(img_shape):
    """Build the modified-AlexNet classifier graph (untrained).

    Five conv blocks followed by three dense blocks and a 2-way softmax.
    NOTE(review): layer creation order is load-bearing — load_classifier
    copies weights into this model by layer index, so do not reorder layers.
    All BatchNormalization/Dropout calls pass training=False so the layers
    run in inference mode even inside a trainable combined graph.
    """
    input = Input(shape=img_shape)

    # 1st Convolutional Layer
    x = Conv2D(filters=96,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='valid')(input)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation before passing it to the next layer
    x = BatchNormalization()(x, training=False)

    # 2nd Convolutional Layer
    x = Conv2D(filters=256,
               kernel_size=(11, 11),
               strides=(1, 1),
               padding='valid')(x)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # 3rd Convolutional Layer (no pooling here)
    x = Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid')(x)
    x = Activation('relu')(x)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # 4th Convolutional Layer
    x = Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid')(x)
    x = Activation('relu')(x)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # 5th Convolutional Layer
    x = Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid')(x)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # Passing it to a dense layer
    x = Flatten()(x)
    # 1st Dense Layer
    x = Dense(4096, input_shape=img_shape)(x)
    x = Activation('relu')(x)
    # Add Dropout to prevent overfitting
    x = Dropout(0.4)(x, training=False)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # 2nd Dense Layer
    x = Dense(4096)(x)
    x = Activation('relu')(x)
    # Add Dropout
    x = Dropout(0.4)(x, training=False)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # 3rd Dense Layer
    x = Dense(1000)(x)
    x = Activation('relu')(x)
    # Add Dropout
    x = Dropout(0.4)(x, training=False)
    # Batch Normalisation
    x = BatchNormalization()(x, training=False)

    # Binary NEGATIVE/POSITIVE output head
    x = Dense(2)(x)
    x = Activation('softmax')(x)

    return Model(input, x)
GANterfactual | GANterfactual-main/GANterfactual/train_alexNet.py | import keras
from keras import Input, Model
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
import numpy as np
from keras.regularizers import l2
import os
# TensorBoard logging for the training run configured below.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir="log")
np.random.seed(1000)  # fixed seed for reproducibility
dimension = 512  # images are dimension x dimension grayscale
def get_adapted_alexNet():
    """Build and compile the modified AlexNet used as the X-ray classifier.

    Same layer sequence as build_classifier in classifier.py (that function
    copies this model's weights by layer index, so the order must match).
    NOTE(review): conv layers 3 and 5 have no l2 regularizers while the
    others do — looks unintentional, but changing it would alter training;
    confirm before touching.
    """
    input = Input(shape=(dimension, dimension, 1))

    # 1st Convolutional Layer
    x = Conv2D(filters=96,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='valid',
               kernel_regularizer=l2(0.001),
               bias_regularizer=l2(0.001))(input)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation before passing it to the next layer
    x = BatchNormalization()(x)

    # 2nd Convolutional Layer
    x = Conv2D(filters=256,
               kernel_size=(11, 11),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l2(0.001),
               bias_regularizer=l2(0.001))(x)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # 3rd Convolutional Layer (no regularizer, no pooling)
    x = Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid') (x)
    x = Activation('relu')(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # 4th Convolutional Layer
    x = Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l2(0.001),
               bias_regularizer=l2(0.001))(x)
    x = Activation('relu')(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # 5th Convolutional Layer (no regularizer)
    x = Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid') (x)
    x = Activation('relu')(x)
    # Pooling
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # Passing it to a dense layer
    x = Flatten()(x)
    # 1st Dense Layer
    x = Dense(4096,
              input_shape=(dimension * dimension * 1, ),
              kernel_regularizer=l2(0.001),
              bias_regularizer=l2(0.001))(x)
    x = Activation('relu')(x)
    # Add Dropout to prevent overfitting
    x = Dropout(0.4)(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # 2nd Dense Layer
    x = Dense(4096, kernel_regularizer=l2(0.001), bias_regularizer=l2(0.001))(x)
    x = Activation('relu')(x)
    # Add Dropout
    x = Dropout(0.4)(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # 3rd Dense Layer
    x = Dense(1000, kernel_regularizer=l2(0.001), bias_regularizer=l2(0.001))(x)
    x = Activation('relu')(x)
    # Add Dropout
    x = Dropout(0.4)(x)
    # Batch Normalisation
    x = BatchNormalization()(x)

    # Binary NEGATIVE/POSITIVE output head
    x = Dense(2)(x)
    x = Activation('softmax')(x)

    # NOTE(review): MSE on softmax outputs (not cross-entropy) — intentional
    # choice in this project; keep as-is.
    opt = keras.optimizers.SGD(0.0001, 0.9)
    model = Model(input, x)
    model.compile(loss='mse',
                  metrics=['accuracy'],
                  optimizer=opt)
    return model
def get_data():
    """Build train/validation directory flows with pixels rescaled to [-1, 1].

    Returns:
        (train_flow, validation_flow) Keras DirectoryIterators, grayscale,
        dimension x dimension, categorical labels, batches of 32, shuffled.
    """
    image_size = dimension
    batch_size = 32

    gen = ImageDataGenerator(preprocessing_function=(lambda x: x / 127.5 - 1.))
    flow_args = dict(
        target_size=(image_size, image_size),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True,
        color_mode='grayscale')

    # Load data for training
    train_data = gen.flow_from_directory(directory="../data/train", **flow_args)
    validation_data = gen.flow_from_directory(directory="../data/validation", **flow_args)

    return train_data, validation_data
# Build the classifier and data flows at import time; training itself only
# runs when this file is executed as a script.
model = get_adapted_alexNet()
model.summary()

train, test = get_data()

# Keep the best checkpoint by validation accuracy; stop once val improvement
# stalls for 10 epochs (min_delta=0.001) and restore the best weights.
check_point = keras.callbacks.ModelCheckpoint("classifier.h5", save_best_only=True, monitor='val_accuracy', mode='max')
early_stopping = keras.callbacks.EarlyStopping(min_delta=0.001, patience=10, restore_best_weights=True)

if __name__ == "__main__":
    hist = model.fit_generator(train,
                               epochs=1000,
                               validation_data=test,
                               callbacks=[check_point, early_stopping,tensorboard_callback],
                               steps_per_epoch=len(train),
                               validation_steps=len(test))
    # Saved without optimizer state — the model is only used for inference.
    model.save(os.path.join('..','models','classifier','model.h5'), include_optimizer=False)
| 4,910 | 30.88961 | 119 | py |
GANterfactual | GANterfactual-main/GANterfactual/generator.py | from keras.layers import Dropout
from keras.layers import Input, Concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Model
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
def build_generator(img_shape, gf, channels):
    """U-Net Generator"""

    def down(x_in, filters, f_size=4):
        """Downsampling block: strided conv -> LeakyReLU -> InstanceNorm."""
        y = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(x_in)
        y = LeakyReLU(alpha=0.2)(y)
        return InstanceNormalization()(y)

    def up(x_in, skip, filters, f_size=4, dropout_rate=0):
        """Upsampling block: 2x upsample -> conv+relu -> (dropout) ->
        InstanceNorm -> concat with the matching encoder skip tensor."""
        y = UpSampling2D(size=2)(x_in)
        y = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(y)
        if dropout_rate:
            y = Dropout(dropout_rate)(y)
        y = InstanceNormalization()(y)
        return Concatenate()([y, skip])

    # Image input
    d0 = Input(shape=img_shape)

    # Encoder: four downsampling stages with doubling filter counts.
    encoder = [d0]
    for mult in (1, 2, 4, 8):
        encoder.append(down(encoder[-1], gf * mult))

    # Decoder: mirror the encoder, concatenating the matching skip tensors.
    u = encoder[4]
    for skip, mult in ((encoder[3], 4), (encoder[2], 2), (encoder[1], 1)):
        u = up(u, skip, gf * mult)

    u = UpSampling2D(size=2)(u)
    output_img = Conv2D(channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u)

    return Model(d0, output_img)
compsensing_dip | compsensing_dip-master/cs_dip.py | import numpy as np
import parser
import torch
from torch.autograd import Variable
import baselines
import utils
import time
# Experiment configuration loaded from configs.json.
args = parser.parse_args('configs.json')

CUDA = torch.cuda.is_available()
dtype = utils.set_dtype(CUDA)  # cuda vs cpu FloatTensor type
se = torch.nn.MSELoss(reduction='none').type(dtype)  # per-element squared error

BATCH_SIZE = 1
EXIT_WINDOW = 51  # trailing window of iterates from which the output is chosen

# Per-restart loss / reconstruction buffers, reused across estimator calls.
loss_re, recons_re = utils.init_output_arrays(args)
def dip_estimator(args):
    """Return a CS-DIP estimator closure.

    The returned estimator(A_val, y_batch_val, args) fits an untrained DCGAN
    generator so that A * G(z) matches the measurements y, over
    args.NUM_RESTARTS random restarts, and returns the reconstruction with
    the lowest measurement loss.
    """
    def estimator(A_val, y_batch_val, args):
        y = torch.FloatTensor(y_batch_val).type(dtype) # init measurements y
        A = torch.FloatTensor(A_val).type(dtype) # init measurement matrix A
        # learned-regularization stats and per-dataset loss weights
        mu, sig_inv, tvc, lrc = utils.get_constants(args, dtype)
        for j in range(args.NUM_RESTARTS):
            net = utils.init_dcgan(args)
            z = torch.zeros(BATCH_SIZE*args.Z_DIM).type(dtype).view(BATCH_SIZE,args.Z_DIM,1,1)
            z.data.normal_().type(dtype) #init random input seed
            if CUDA:
                net.cuda() # cast network to GPU if available
            optim = torch.optim.RMSprop(net.parameters(),lr=0.001, momentum=0.9, weight_decay=0)
            loss_iter = []
            recons_iter = []

            for i in range(args.NUM_ITER):
                optim.zero_grad()

                # calculate measurement loss || y - A*G(z) ||
                G = net(z)
                AG = torch.matmul(G.view(BATCH_SIZE,-1),A) # A*G(z)
                y_loss = torch.mean(torch.sum(se(AG,y),dim=1))

                # calculate total variation loss
                tv_loss = (torch.sum(torch.abs(G[:,:,:,:-1] - G[:,:,:,1:]))\
                        + torch.sum(torch.abs(G[:,:,:-1,:] - G[:,:,1:,:])))

                # calculate learned regularization loss
                layers = net.parameters()
                layer_means = torch.cat([layer.mean().view(1) for layer in layers])
                lr_loss = torch.matmul(layer_means-mu,torch.matmul(sig_inv,layer_means-mu))

                total_loss = y_loss + lrc*lr_loss + tvc*tv_loss # total loss for iteration i

                # stopping condition to account for optimizer convergence:
                # keep the last EXIT_WINDOW iterates and pick the one with the
                # smallest total loss for this restart
                if i >= args.NUM_ITER - EXIT_WINDOW:
                    recons_iter.append(G.data.cpu().numpy())
                    loss_iter.append(total_loss.data.cpu().numpy())
                    if i == args.NUM_ITER - 1:
                        idx_iter = np.argmin(loss_iter)

                total_loss.backward() # backprop
                optim.step()

            recons_re[j] = recons_iter[idx_iter]
            # NOTE(review): records the FINAL iteration's measurement loss,
            # not the loss of the selected iterate — confirm intent.
            loss_re[j] = y_loss.data.cpu().numpy()

        # select the restart with the smallest measurement loss
        idx_re = np.argmin(loss_re,axis=0)
        x_hat = recons_re[idx_re]
        return x_hat

    return estimator
| 2,757 | 33.475 | 96 | py |
compsensing_dip | compsensing_dip-master/utils.py | import numpy as np
import os
import errno
import parser
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets,transforms
BATCH_SIZE = 1  # all pipelines in this project process one image at a time
class DCGAN_XRAY(nn.Module):
    """DCGAN-style generator for X-ray images.

    Expands a (B, nz, 1, 1) seed to a (B, nc, output_size, output_size)
    image in [-1, 1]: one 4x4 stage to reach 4x4, five stride-2 stages of
    kernel 6 / padding 2, then a final stride-2 conv to the image.
    `num_measurements` is accepted for signature parity with the other
    DCGAN_* nets.
    """

    def __init__(self, nz, ngf=64, output_size=256, nc=3, num_measurements=1000):
        super(DCGAN_XRAY, self).__init__()
        self.nc = nc
        self.output_size = output_size
        # First stage: 1x1 latent seed -> 4x4 feature map.
        self.conv1 = nn.ConvTranspose2d(nz, ngf, 4, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(ngf)
        # Five doubling stages, created in index order so parameter names
        # (conv2/bn2 ... conv6/bn6) and state_dict keys are unchanged.
        for idx in range(2, 7):
            setattr(self, 'conv%d' % idx,
                    nn.ConvTranspose2d(ngf, ngf, 6, 2, 2, bias=False))
            setattr(self, 'bn%d' % idx, nn.BatchNorm2d(ngf))
        self.conv7 = nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False)  # output is image

    def forward(self, z):
        x = z
        for idx in range(1, 7):
            conv = getattr(self, 'conv%d' % idx)
            bn = getattr(self, 'bn%d' % idx)
            x = F.relu(bn(conv(x)))
        return torch.tanh(self.conv7(
            x, output_size=(-1, self.nc, self.output_size, self.output_size)))
class DCGAN_MNIST(nn.Module):
    """DCGAN-style generator for 28x28 MNIST images.

    Expands a (B, nz, 1, 1) latent seed to a (B, nc, output_size, output_size)
    image in [-1, 1] via transposed convolutions interleaved with 2x
    nearest-neighbor interpolation (spatial path: 1 -> 2 -> 4 -> 7 -> 14 -> 28).
    `num_measurements` is accepted for signature parity with the other
    DCGAN_* nets but is unused here.
    """

    def __init__(self, nz, ngf=64, output_size=28, nc=1, num_measurements=10):
        super(DCGAN_MNIST, self).__init__()
        self.nc = nc
        self.output_size = output_size
        self.conv1 = nn.ConvTranspose2d(nz, ngf*8, 2, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(ngf*8)
        self.conv2 = nn.ConvTranspose2d(ngf*8, ngf*4, 4, 1, 0, bias=False)
        self.bn2 = nn.BatchNorm2d(ngf*4)
        self.conv3 = nn.ConvTranspose2d(ngf*4, ngf*2, 3, 1, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(ngf*2)
        self.conv4 = nn.ConvTranspose2d(ngf*2, ngf, 3, 1, 1, bias=False)
        self.bn4 = nn.BatchNorm2d(ngf)
        self.conv5 = nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False)

    def forward(self, x):
        # (Removed dead commented-out F.upsample code kept from an old
        # PyTorch version; F.interpolate below is the modern equivalent.)
        x = F.interpolate(F.relu(self.bn1(self.conv1(x))), scale_factor=2)
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.interpolate(F.relu(self.bn3(self.conv3(x))), scale_factor=2)
        x = F.interpolate(F.relu(self.bn4(self.conv4(x))), scale_factor=2)
        x = torch.tanh(self.conv5(x, output_size=(-1, self.nc, self.output_size, self.output_size)))
        return x
class DCGAN_RETINO(nn.Module):
    """DCGAN-style generator for retinopathy images.

    Same architecture as DCGAN_XRAY minus one doubling stage: a 4x4 first
    stage, four stride-2 kernel-6 stages, then a final stride-2 conv forced
    to (output_size, output_size); output is tanh-bounded in [-1, 1].
    `num_measurements` is accepted for signature parity but unused here.
    """
    def __init__(self, nz, ngf=64, output_size=256, nc=3, num_measurements=1000):
        super(DCGAN_RETINO, self).__init__()
        self.nc = nc
        self.output_size = output_size

        self.conv1 = nn.ConvTranspose2d(nz, ngf, 4, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(ngf)
        self.conv2 = nn.ConvTranspose2d(ngf, ngf, 6, 2, 2, bias=False)
        self.bn2 = nn.BatchNorm2d(ngf)
        self.conv3 = nn.ConvTranspose2d(ngf, ngf, 6, 2, 2, bias=False)
        self.bn3 = nn.BatchNorm2d(ngf)
        self.conv4 = nn.ConvTranspose2d(ngf, ngf, 6, 2, 2, bias=False)
        self.bn4 = nn.BatchNorm2d(ngf)
        self.conv5 = nn.ConvTranspose2d(ngf, ngf, 6, 2, 2, bias=False)
        self.bn5 = nn.BatchNorm2d(ngf)
        self.conv6 = nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False)
        #self.fc = nn.Linear((output_size)*(output_size)*nc,num_measurements, bias=False) #fc layer - old version

    def forward(self, x):
        input_size = x.size()
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        # output_size pins the transposed conv's output to the target shape
        x = torch.tanh(self.conv6(x,output_size=(-1,self.nc,self.output_size,self.output_size)))
        return x
NGF = 64  # base filter count shared by all DCGAN generators


def init_dcgan(args):
    """Instantiate the DCGAN generator matching args.DATASET.

    Args:
        args: config namespace with DATASET, Z_DIM, IMG_SIZE, NUM_CHANNELS
            and NUM_MEASUREMENTS.

    Returns:
        An un-trained generator network for the selected dataset.

    Raises:
        ValueError: if args.DATASET is not 'xray', 'mnist' or 'retino'
            (the original silently fell through and crashed with
            UnboundLocalError on `net`).
    """
    if args.DATASET == 'xray':
        net = DCGAN_XRAY(args.Z_DIM, NGF, args.IMG_SIZE,\
                args.NUM_CHANNELS, args.NUM_MEASUREMENTS)
    elif args.DATASET == 'mnist':
        net = DCGAN_MNIST(args.Z_DIM, NGF, args.IMG_SIZE,\
                args.NUM_CHANNELS, args.NUM_MEASUREMENTS)
    elif args.DATASET == 'retino':
        net = DCGAN_RETINO(args.Z_DIM, NGF, args.IMG_SIZE,\
                args.NUM_CHANNELS, args.NUM_MEASUREMENTS)
    else:
        raise ValueError('unsupported DATASET: ' + str(args.DATASET))
    return net
def init_output_arrays(args):
    """Allocate zeroed per-restart buffers for losses and reconstructions.

    Returns:
        (loss_re, recons_re): numpy arrays of shapes
        (NUM_RESTARTS, BATCH_SIZE) and
        (NUM_RESTARTS, BATCH_SIZE, NUM_CHANNELS, IMG_SIZE, IMG_SIZE).
    """
    loss_shape = (args.NUM_RESTARTS, BATCH_SIZE)
    recons_shape = (args.NUM_RESTARTS, BATCH_SIZE, args.NUM_CHANNELS,
                    args.IMG_SIZE, args.IMG_SIZE)
    return np.zeros(loss_shape), np.zeros(recons_shape)
# Per-dataset weights for total-variation and learned-regularization losses.
lambdas_tv = {'mnist': 1e-2, 'xray': 5e-2, 'retino': 2e-2}
lambdas_lr = {'mnist': 0, 'xray': 100, 'retino': 1000}


def get_constants(args, dtype):
    """Load learned-regularization statistics and pick per-dataset loss weights.

    Reads mu_<M>.npy and sig_<M>.npy from args.LR_FOLDER (M =
    args.NUM_MEASUREMENTS) and inverts the covariance.

    Args:
        args: config namespace with NUM_MEASUREMENTS, LR_FOLDER and
            (optionally) DATASET.
        dtype: torch tensor type to cast mu / sig_inv to.

    Returns:
        (mu, sig_inv, tvc, lrc): statistics as torch tensors plus the TV and
        learned-regularization weights for args.DATASET (defaults 1e-2 / 0).
    """
    MU_FN = 'mu_{0}.npy'.format(args.NUM_MEASUREMENTS)
    MU_PATH = os.path.join(args.LR_FOLDER, MU_FN)
    SIG_FN = "sig_{0}.npy".format(args.NUM_MEASUREMENTS)
    SIG_PATH = os.path.join(args.LR_FOLDER, SIG_FN)

    mu_ = np.load(MU_PATH)
    sig_ = np.load(SIG_PATH)

    mu = torch.FloatTensor(mu_).type(dtype)
    sig_inv = torch.FloatTensor(np.linalg.inv(sig_)).type(dtype)

    # Bug fix: the original try/except only caught AttributeError (missing
    # args.DATASET); an unknown dataset name raised an uncaught KeyError.
    # getattr + dict.get handles both cases with the same defaults.
    dataset = getattr(args, 'DATASET', None)
    tvc = lambdas_tv.get(dataset, 1e-2)
    lrc = lambdas_lr.get(dataset, 0)

    return mu, sig_inv, tvc, lrc
def renorm(x):
    """Rescale values from the generator's [-1, 1] range to [0, 1]."""
    halved = x * 0.5
    return halved + 0.5
def plot(x, renormalize=True):
    """Show the first image of batch tensor x in grayscale via matplotlib.

    Args:
        x: torch tensor whose first batch element is displayed.
        renormalize: when True, map values from [-1, 1] to [0, 1] first.
    """
    # Bug fix: plt was never imported at module level, so this function
    # raised NameError. Imported locally to avoid a hard module-level
    # dependency on matplotlib.
    import matplotlib.pyplot as plt
    if renormalize:
        plt.imshow(renorm(x).data[0].cpu().numpy(), cmap='gray')
    else:
        plt.imshow(x.data[0].cpu().numpy(), cmap='gray')
exit_window = 50 # number of consecutive MSE values upon which we compare
thresh_ratio = 45 # number of MSE values that must be larger for us to exit


def exit_check(window, i): # if converged, then exit current experiment
    """Convergence test over a trailing window of MSE values.

    Returns (True, window[0]) when at least thresh_ratio of the window's
    entries exceed its first value (loss stopped improving), otherwise
    (False, window[exit_window - 1]).
    """
    mse_base = window[0]  # reference: first mse value in window
    num_higher = len(np.where(window > mse_base)[0])
    if num_higher >= thresh_ratio:
        return True, mse_base
    return False, window[exit_window - 1]
def define_compose(NC, IMG_SIZE): # define compose based on NUM_CHANNELS, IMG_SIZE
    """Build the torchvision transform pipeline for loading images.

    Args:
        NC: number of channels (1 = grayscale, 3 = rgb).
        IMG_SIZE: target square edge length.

    Returns:
        A transforms.Compose resizing, (optionally) grayscaling, and
        normalizing pixel values to [-1, 1].
    """
    if NC == 1: #grayscale
        compose = transforms.Compose([
            transforms.Resize((IMG_SIZE,IMG_SIZE)),
            transforms.Grayscale(),
            transforms.ToTensor(),
            # Bug fix: after Grayscale() the tensor has one channel, so
            # Normalize needs single-element mean/std (a 3-tuple errors on
            # modern torchvision; the math is unchanged).
            transforms.Normalize((.5,),(.5,))
            ])
    elif NC == 3: #rgb
        compose = transforms.Compose([
            transforms.Resize((IMG_SIZE,IMG_SIZE)),
            transforms.ToTensor(),
            transforms.Normalize((.5,.5,.5),(.5,.5,.5))
            ])
    return compose
def set_dtype(CUDA):
    """Return the float tensor type for the available device (cuda or cpu)."""
    return torch.cuda.FloatTensor if CUDA else torch.FloatTensor
def get_path_out(args, path_in):
    """Build the absolute output path for a reconstruction of path_in[0].

    Matlab-based algorithms (bm3d, tval3) write .mat files; the python ones
    write .npy. Layout: reconstructions/<dataset>/<alg>/meas<M>/im<name>.<ext>
    """
    fn = path_leaf(path_in[0]) # format filename from path
    file_ext = 'mat' if args.ALG in ('bm3d', 'tval3') else 'npy'
    path_out = 'reconstructions/{0}/{1}/meas{2}/im{3}.{4}'.format( \
            args.DATASET, args.ALG, args.NUM_MEASUREMENTS, fn, file_ext)
    return os.getcwd() + '/' + path_out
def recons_exists(args, path_in):
    """Return True when the reconstruction for path_in already exists on disk.

    Used to skip redundant runs. (Removed a leftover debug print of the
    output path that polluted stdout on every check.)
    """
    path_out = get_path_out(args, path_in)
    return os.path.isfile(path_out)
def save_reconstruction(x_hat, args, path_in):
    """Save reconstruction x_hat as .npy under the path from get_path_out.

    Creates intermediate directories as needed; exist_ok=True replaces the
    original race-guarded try/except around os.makedirs (same semantics,
    idiomatic Python 3).
    """
    path_out = get_path_out(args, path_in)
    os.makedirs(os.path.dirname(path_out), exist_ok=True)
    np.save(path_out, x_hat)
def check_args(args): # check args for correctness
    """Validate the experiment configuration.

    Raises:
        ValueError: if any NUM_MEASUREMENTS exceeds the image dimension, or
            if DEMO is not the string 'True' or 'False'.
    """
    im_dimn = args.IMG_SIZE * args.IMG_SIZE * args.NUM_CHANNELS

    # Accept either a single int or a list of measurement counts.
    if isinstance(args.NUM_MEASUREMENTS, int):
        meas_list = [args.NUM_MEASUREMENTS]
    else:
        meas_list = args.NUM_MEASUREMENTS
    for num_measurements in meas_list:
        if num_measurements > im_dimn:
            raise ValueError('NUM_MEASUREMENTS must be less than image dimension ' \
                + str(im_dimn))

    if args.DEMO not in ('False', 'True'):
        raise ValueError('DEMO must be either True or False.')
def convert_to_list(args): # returns list for NUM_MEAS, BATCH
    """Normalize args.NUM_MEASUREMENTS and args.ALG to lists.

    Scalars are wrapped in a one-element list; lists pass through unchanged.
    """
    nm = args.NUM_MEASUREMENTS
    alg = args.ALG
    num_meas_list = nm if isinstance(nm, list) else [nm]
    alg_list = alg if isinstance(alg, list) else [alg]
    return num_meas_list, alg_list
def path_leaf(path):
    """Return the filename of `path` without directory or extension.

    Raises:
        ValueError: if `path` contains no '.' (i.e. no file extension).
    """
    if '.' not in path:
        raise ValueError('Filename does not contain extension')
    stem = os.path.splitext(path)[0]  # strip file extension
    head, tail = os.path.split(stem)
    # tail is empty for paths ending in a separator; fall back to the last
    # component of head in that case.
    return tail or os.path.basename(head)
def get_data(args):
    """Build a DataLoader over the image directory selected by args.

    Uses data/<DATASET>_demo/ when args.DEMO == 'True', else data/<DATASET>/.
    Returns a torch DataLoader yielding (image, label, path) triples.
    """
    # preprocessing pipeline; define_compose is defined elsewhere in this module
    compose = define_compose(args.NUM_CHANNELS, args.IMG_SIZE)
    if args.DEMO == 'True':
        image_direc = 'data/{0}_demo/'.format(args.DATASET)
    else:
        image_direc = 'data/{0}/'.format(args.DATASET)
    dataset = ImageFolderWithPaths(image_direc, transform = compose)
    # NOTE(review): BATCH_SIZE is presumably a module-level constant defined
    # elsewhere in this file -- confirm it is in scope here
    dataloader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE)
    return dataloader
class ImageFolderWithPaths(datasets.ImageFolder):
    """ImageFolder variant whose items also carry the image's file path.

    Extends torchvision.datasets.ImageFolder.
    """

    def __getitem__(self, index):
        """Return (image, label, path) instead of the usual (image, label)."""
        # the standard ImageFolder result for this index
        sample = super(ImageFolderWithPaths, self).__getitem__(index)
        # self.imgs holds (path, class_index) pairs
        img_path = self.imgs[index][0]
        return sample + (img_path,)
| 11,104 | 36.265101 | 113 | py |
compsensing_dip | compsensing_dip-master/comp_sensing.py | import numpy as np
import pickle as pkl
import os
import parser
import numpy as np
import torch
from torchvision import datasets
import utils
import cs_dip
import baselines as baselines
import time
NEW_RECONS = False  # flips to True once any new reconstruction is computed
# parse experiment configuration (project-local parser module reads configs.json)
args = parser.parse_args('configs.json')
print(args)
# normalize scalar settings into lists so they can be swept over
NUM_MEASUREMENTS_LIST, ALG_LIST = utils.convert_to_list(args)
dataloader = utils.get_data(args) # get dataset of images
for num_meas in NUM_MEASUREMENTS_LIST:
    args.NUM_MEASUREMENTS = num_meas
    # init measurement matrix
    A = baselines.get_A(args.IMG_SIZE*args.IMG_SIZE*args.NUM_CHANNELS, args.NUM_MEASUREMENTS)
    for _, (batch, _, im_path) in enumerate(dataloader):
        eta_sig = 0 # set value to induce noise
        # additive Gaussian measurement noise (all zeros when eta_sig == 0)
        eta = np.random.normal(0, eta_sig * (1.0 / args.NUM_MEASUREMENTS) ,args.NUM_MEASUREMENTS)
        x = batch.view(1,-1).cpu().numpy() # define image
        # compressed measurements: y = x A + eta
        y = np.dot(x,A) + eta
        for alg in ALG_LIST:
            args.ALG = alg
            if utils.recons_exists(args, im_path): # to avoid redundant reconstructions
                continue
            NEW_RECONS = True
            # select the reconstruction algorithm requested by the configuration
            if alg == 'csdip':
                estimator = cs_dip.dip_estimator(args)
            elif alg == 'dct':
                estimator = baselines.lasso_dct_estimator(args)
            elif alg == 'wavelet':
                estimator = baselines.lasso_wavelet_estimator(args)
            elif alg == 'bm3d' or alg == 'tval3':
                raise NotImplementedError('BM3D-AMP and TVAL3 are implemented in Matlab. \
                    Please see GitHub repository for details.')
            else:
                raise NotImplementedError
            x_hat = estimator(A, y, args)
            utils.save_reconstruction(x_hat, args, im_path)
if NEW_RECONS == False:
    print('Duplicate experiment configurations. No new data generated.')
else:
    print('Reconstructions generated!')
| 1,944 | 28.029851 | 97 | py |
PrivateCovariance | PrivateCovariance-main/functions.py | import torch
import os
import gzip
import argparse
import numpy as np
from exponential.algos import EMCov
from adaptive.algos import GaussCov, LapCov, SeparateCov, SeparateLapCov, AdaptiveCov, AdaptiveLapCov
from coinpress.algos import cov_est
from urllib.request import urlretrieve
from sklearn.feature_extraction.text import HashingVectorizer
from glob import glob
import re
import random
def parse_args(argv=None):
    """Parse experiment hyperparameters from the command line.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] (unchanged behavior); passing an
            explicit list makes the function testable without touching the
            process arguments.

    Returns:
        argparse.Namespace with the parsed settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--total_budget', default=.1, type=float, help='total privacy budget (rho)')
    parser.add_argument('--d', default=200, type=int, help='data dimension/number of features')
    parser.add_argument('--n', default=50000, type=int, help='sample size')
    parser.add_argument('--r', default=1.0, type=float, help='l2 norm upperbound')
    parser.add_argument('--u', default=1.0, type=float, help='eigenvalue upperbound for coinpress')
    parser.add_argument('--beta', default=0.1, type=float, help='prob. bound')
    parser.add_argument('--s', default=3, type=float, help='steepness in Zipf law')
    parser.add_argument('--N', default=4, type=float, help='number of buckets in Zipf law')
    args = parser.parse_args(argv)
    return args
def write_output(x, y, folder, filename):
    """Save paired results to folder+filename via np.savetxt.

    When y is one-dimensional the file holds columns [x, y]; otherwise each
    row i of the intermediate table is [x[i], *y[i]] and the transpose is
    written. The folder is created if missing.
    """
    if not os.path.isdir(folder):
        os.makedirs(folder)
    if np.array(y).ndim == 1:
        table = [x, y]
    else:
        # prepend the x value to each row of y
        table = [[x[i], *y[i]] for i in range(len(x))]
    np.savetxt(folder + filename, np.transpose(table))
def write_text(x, folder, filename):
    """Write each element of x as a comma-joined line to folder+filename.

    Args:
        x: iterable of iterables of strings (one line per element).
        folder: output directory, expected to end with '/'; created if missing.
        filename: name of the output text file.
    """
    if not os.path.isdir(folder):
        os.makedirs(folder)
    # context manager guarantees the handle is closed even if a join fails
    with open(folder + filename, 'w') as f_out:
        for xi in x:
            f_out.write(','.join(xi) + '\n')
def extract_news_info(filename, num_news=1000, dtype=str):
    """Read a gzipped text file and count word frequencies.

    The file is split into articles on blank lines; words are tokenized on
    newline/;/,/:/space/- and counted only when purely alphabetic (lowercased).

    Returns:
        (word -> count dict, list of article strings).
    """
    with gzip.open(filename) as stream:
        text = stream.read().decode()
    articles = text.split('\n\n')
    counts = {}
    for article in articles:
        for token in re.split('\n|;|,|:| |-', article):
            token = token.lower()
            if token.isalpha():
                counts[token] = counts.get(token, 0) + 1
    return counts, articles
def get_news_data(d,norm="l2",b_alt=False):
    """Download the news-commentary v16 corpus and hash-vectorize it.

    Args:
        d: number of hashed features per article.
        norm: normalization passed to HashingVectorizer.
        b_alt: whether hashing alternates signs.

    Returns:
        dense torch tensor with one row per kept article and d columns.
    """
    folder = './data/'
    if not os.path.exists(folder):
        os.mkdir('data')
    filenames = ['news-commentary-v16.en.gz']
    # NOTE(review): downloads on every call even when the file is already
    # cached locally -- consider an existence check as in get_mnist_labels
    for name in filenames:
        urlretrieve('https://data.statmt.org/news-commentary/v16/training-monolingual/' + name, folder+name)
    articles = extract_data_news(folder,filenames)
    vectorizer = HashingVectorizer(n_features=d,norm=norm,alternate_sign=b_alt,dtype=np.float32)
    X = vectorizer.fit_transform(articles)
    return torch.from_numpy(X.todense())
def extract_data_news(folder, filenames):
    """Load gzipped corpora, split into articles on blank lines, and drop
    articles with fewer than 20 tokens.

    Returns:
        list of kept article strings, in original order.
    """
    articles = []
    for fname in filenames:
        with gzip.open(folder + fname) as stream:
            articles.extend(stream.read().decode().split('\n\n'))
    delim = r'[ ,;:!\n"?!-]'
    kept = []
    for art in articles:
        # token count decides whether the article is substantial enough
        if len(re.split(delim, art)) >= 20:
            kept.append(art)
    return kept
def extract_data(filename, num_images, dtype=np.float32):
    """Parse a gzipped MNIST image file into a (num_images, 784) array.

    Pixel bytes are scaled by 1/255 and then 1/28, bounding each value
    in [0, 1/28].
    """
    d = 28 * 28
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the 16-byte IDX image header
        raw = stream.read(num_images * d)
    pixels = np.frombuffer(raw, dtype=np.uint8).astype(dtype)
    pixels = (pixels / 255) / 28
    return pixels.reshape(num_images, d)
def get_mnist_data():
    """Download (if not already cached) and parse the MNIST image sets.

    Returns:
        (train_data, test_data): float32 arrays of shapes (60000, 784) and
        (10000, 784), scaled as in extract_data.
    """
    if not os.path.exists('data'):
        os.mkdir('data')
    filenames = ["train-images-idx3-ubyte.gz", "t10k-images-idx3-ubyte.gz"]
    for name in filenames:
        # skip the download when the archive is already cached, mirroring
        # get_mnist_labels (also avoids re-hitting the server on every call)
        if not os.path.exists("data/"+name):
            urlretrieve('http://yann.lecun.com/exdb/mnist/' + name, "data/"+name)
    train_data = extract_data("data/train-images-idx3-ubyte.gz", 60000)
    test_data = extract_data("data/t10k-images-idx3-ubyte.gz", 10000)
    return train_data, test_data
def extract_labels(filename, num_images):
    """Parse a gzipped MNIST label file into a uint8 array of num_images labels."""
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the 8-byte IDX label header
        raw = stream.read(num_images)
    return np.frombuffer(raw, dtype=np.uint8)
def get_mnist_labels():
    """Download (if not already cached) and parse the MNIST label sets.

    Returns:
        (train_labels, test_labels): uint8 arrays of lengths 60000 and 10000.
    """
    if not os.path.exists('data'):
        os.mkdir('data')
    filenames = ["train-labels-idx1-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]
    for name in filenames:
        # only download archives that are not cached locally
        if not os.path.exists("data/"+name):
            urlretrieve('http://yann.lecun.com/exdb/mnist/' + name, "data/"+name)
    train_labels = extract_labels("data/train-labels-idx1-ubyte.gz", 60000)
    test_labels = extract_labels("data/t10k-labels-idx1-ubyte.gz", 10000)
    return train_labels, test_labels
def gen_synthetic_data_fix(d, n, s, N, seed=0):
    """Generate n centered synthetic samples in R^d (seeded for reproducibility).

    Draws n standard Gaussian vectors, mixes them through a random (d, d)
    Uniform(0, 1) matrix, subtracts the column mean, and -- when N > 0 --
    rescales row norms into Zipf-distributed buckets via adjust_weight_fix.
    """
    torch.manual_seed(seed)
    X = torch.distributions.multivariate_normal.MultivariateNormal(torch.zeros(d),torch.eye(d)).sample((n,))
    U = torch.distributions.uniform.Uniform(torch.tensor([0.0 for i in range(d)]), torch.tensor([1.0 for i in range(d)])).sample((d,))
    X = torch.mm(X,U)
    # center the data so the second-moment matrix is a covariance
    mu = torch.mean(X, dim=0)
    X = X - mu
    if (N>0):
        probs, buckets = get_zipf_buckets(s, N)
        X = adjust_weight_fix(X,probs,buckets,N,n)
    return X
def adjust_weight_fix(X, probs, buckets, N, n):
    """Rescale rows of X so each Zipf bucket's rows get a fixed l2 norm.

    Rows are partitioned by the cumulative proportions in probs; the rows of
    bucket k are rescaled to have norm buckets[k].
    """
    row_norms = torch.norm(X, dim=1)
    factors = torch.zeros(n)
    # cumulative row-index boundaries of the N buckets
    boundaries = [0] + [int(n * probs[k]) for k in range(N - 1)] + [n]
    for k in range(N):
        lo, hi = boundaries[k], boundaries[k + 1]
        factors[lo:hi] = row_norms[lo:hi] / buckets[k]
    # dividing each row by its factor leaves it with norm buckets[k]
    return torch.div(X.t(), factors).t()
def get_zipf_buckets(s, N):
    """Return cumulative Zipf(s) probabilities and dyadic norm buckets.

    Returns:
        probs: N cumulative probabilities proportional to 1/(k+1)^s.
        buckets: N norms 2^(k+1-N), ending at 1.
    """
    weights = [1./((k+1)**s) for k in range(N)]
    total = sum(weights)
    probs = []
    running = 0
    for w in weights:
        running = running + w / total
        probs.append(running)
    buckets = [2**(k+1-N) for k in range(N)]
    return probs, buckets
def test_news_rho(args,rhos,strfolder,params,norm="l1",b_alt=True,scale=1):
    """Sweep zCDP budget rho on the hashed news corpus.

    For each rho, computes the Frobenius error of the EM, Gauss, Separate,
    Adaptive and CoinPress (t = 1..5 steps) covariance estimators against
    the empirical covariance, plus the zero-estimator baseline, and writes
    one output file per estimator into strfolder.
    """
    d = args.d
    n = args.n
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    for j in range(len(rhos)):
        rho = rhos[j]
        args.total_budget = rho
        # CoinPress per-step budget splits: the final step gets 3/4 of rho
        Ps1 = [args.total_budget]
        Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        X = get_news_data(d,norm=norm,b_alt=b_alt)
        # rescale rows either by the max row norm or by a fixed constant
        if (scale=='max'):
            x_norm = torch.norm(X,dim=1,p=2)
            adj = max(x_norm)
        else:
            adj = scale
        X = X/adj
        n1,d1 = X.shape
        assert(n1==n)
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(rhos,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(rhos,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(rhos,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(rhos,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(rhos,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(rhos,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(rhos,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(rhos,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(rhos,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(rhos,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_news_eps(args,epss,strfolder,params,norm="l1",b_alt=True,scale=1):
    """Sweep pure-DP epsilon on the hashed news corpus.

    Each eps is converted to a zCDP budget rho = eps^2 / 2 for the EM
    estimator; Frobenius errors of EM, Laplace, Separate and Adaptive
    estimators (and the zero baseline) are written to strfolder.
    """
    d = args.d
    n = args.n
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    for j in range(len(epss)):
        eps = epss[j]
        # zCDP budget equivalent of pure-DP epsilon
        rho = eps*eps/2.
        args.total_budget = rho
        X = get_news_data(d,norm=norm,b_alt=b_alt)
        # rescale rows either by the max row norm or by a fixed constant
        if (scale=='max'):
            x_norm = torch.norm(X,dim=1,p=2)
            adj = max(x_norm)
        else:
            adj = scale
        X = X/adj
        n1,d1 = X.shape
        assert(n1==n)
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ', float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(epss,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(epss,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(epss,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(epss,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(epss,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_minist_fix_digit(args,rhos,strfolder,params,digit):
    """Sweep zCDP budget rho on the MNIST training images of one digit.

    Filters the training set to the given digit, then records Frobenius
    errors of EM/Gauss/Separate/Adaptive/CoinPress(t=1..5) estimators and
    the zero baseline for each rho, writing results to strfolder.
    """
    train_data, test_data = get_mnist_data()
    train_labels, test_labels = get_mnist_labels()
    # boolean mask selecting the training images of the requested digit
    ind = (train_labels==digit)
    d = args.d
    n = sum(ind)
    args.n = n
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    Y = torch.from_numpy(train_data)
    X = Y[ind]
    n1,d1 = X.shape
    assert(n1==n and d1==d)
    for j in range(len(rhos)):
        rho = rhos[j]
        args.total_budget = rho
        # CoinPress per-step budget splits: the final step gets 3/4 of rho
        Ps1 = [args.total_budget]
        Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)),'; digit: ',digit)
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(rhos,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(rhos,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(rhos,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(rhos,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(rhos,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(rhos,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(rhos,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(rhos,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(rhos,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(rhos,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_minist_fix_n(args,rhos,strfolder,params,seeds):
    """Sweep zCDP budget rho on an n-sample subset of MNIST training images.

    Subsamples n rows once (when n < 60000), then records Frobenius errors
    of EM/Gauss/Separate/Adaptive/CoinPress(t=1..5) estimators and the
    zero baseline for each rho, writing results to strfolder.
    """
    train_data, test_data = get_mnist_data()
    d = args.d
    n = args.n
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    Y = torch.from_numpy(train_data)
    n0,d0 = Y.shape
    assert(d==d0)
    if (n < n0):
        # NOTE(review): random.sample is NOT seeded by torch.manual_seed, so
        # this subsampling may not be reproducible -- confirm intent
        torch.manual_seed(seeds[0])
        ind = random.sample(range(0,n0),n)
        X = Y[ind]
    else:
        X = Y.clone()
    for j in range(len(rhos)):
        rho = rhos[j]
        args.total_budget = rho
        # CoinPress per-step budget splits: the final step gets 3/4 of rho
        Ps1 = [args.total_budget]
        Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(rhos,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(rhos,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(rhos,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(rhos,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(rhos,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(rhos,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(rhos,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(rhos,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(rhos,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(rhos,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_minist_fix_n_pure(args,epss,strfolder,params,seeds):
    """Sweep pure-DP epsilon on an n-sample subset of MNIST training images.

    Each eps is converted to rho = eps^2 / 2 for the EM estimator; errors
    of EM/Laplace/Separate/Adaptive and the zero baseline are written.
    """
    train_data, test_data = get_mnist_data()
    d = args.d
    n = args.n
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    Y = torch.from_numpy(train_data)
    n0,d0 = Y.shape
    assert(d==d0)
    if (n < n0):
        # NOTE(review): random.sample is NOT seeded by torch.manual_seed, so
        # this subsampling may not be reproducible -- confirm intent
        torch.manual_seed(seeds[0])
        ind = random.sample(range(0,n0),n)
        X = Y[ind]
    else:
        X = Y.clone()
    for j in range(len(epss)):
        eps = epss[j]
        # zCDP budget equivalent of pure-DP epsilon
        rho = eps*eps/2.
        args.total_budget = rho
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(epss,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(epss,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(epss,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(epss,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(epss,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_mnist_fix_pure(args,rhos,strfolder,params):
    """Sweep zCDP budget rho on the first n MNIST training images, running
    the pure-DP estimators at the equivalent eps = sqrt(2*rho).

    Results are indexed by eps (not rho) in the output files.
    """
    train_data, test_data = get_mnist_data()
    d = args.d
    n = args.n
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    epss = []
    X = torch.from_numpy(train_data[:n])
    for j in range(len(rhos)):
        rho = rhos[j]
        args.total_budget = rho
        # pure-DP epsilon equivalent of zCDP budget rho
        eps = np.sqrt(2*rho)
        epss.append(eps)
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(epss,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(epss,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(epss,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(epss,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(epss,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_n_pure(args,ns,strfolder,params,seeds):
    """Sweep sample size n on synthetic Zipf data under pure DP.

    The fixed zCDP budget is converted to eps = sqrt(2*rho); Frobenius
    errors of EM/Laplace/Separate/Adaptive and the zero baseline are
    written to strfolder, indexed by n.
    """
    d = args.d
    s = args.s
    N = args.N
    rho = args.total_budget
    # pure-DP epsilon equivalent of zCDP budget rho
    eps = np.sqrt(2*rho)
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    for j in range(len(ns)):
        n = int(ns[j])
        args.n = n
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ', float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(ns,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(ns,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(ns,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(ns,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(ns,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_d_pure(args,ds,strfolder,params,seeds):
    """Sweep dimension d on synthetic Zipf data under pure DP.

    The EM estimator is currently disabled (commented out); errors of
    Laplace/Separate/Adaptive and the zero baseline are written, indexed
    by d.
    """
    n = args.n
    s = args.s
    N = args.N
    rho = args.total_budget
    # pure-DP epsilon equivalent of zCDP budget rho
    eps = np.sqrt(2*rho)
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    for j in range(len(ds)):
        d = int(ds[j])
        args.d = d
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        #cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        #err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    #write_output(ds,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(ds,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(ds,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(ds,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(ds,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_eps_pure(args,epss,strfolder,params,seeds):
    """Sweep pure-DP epsilon on synthetic Zipf data.

    Only the EM estimator currently runs and only its errors are written;
    the other estimators and outputs (including params.txt) are commented
    out.
    """
    d = args.d
    n = args.n
    s = args.s
    N = args.N
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    for j in range(len(epss)):
        eps = epss[j]
        # zCDP budget equivalent of pure-DP epsilon
        rho = eps*eps/2.
        args.total_budget = rho
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        # cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        # cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        # cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        # err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        # err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        # err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        # err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(epss,err_em_paths,strfolder,'err_em_paths.txt')
    # write_output(epss,err_lap_paths,strfolder,'err_lap_paths.txt')
    # write_output(epss,err_sep_paths,strfolder,'err_sep_paths.txt')
    # write_output(epss,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    # write_output(epss,err_zero_paths,strfolder,'err_zero_paths.txt')
    # write_text(params,strfolder,'params.txt')
def test_Ns_pure(args,Ns,strfolder,params,seeds):
    """Sweep the number of Zipf buckets N on synthetic data under pure DP.

    The fixed zCDP budget is converted to eps = sqrt(2*rho); Frobenius
    errors of EM/Laplace/Separate/Adaptive and the zero baseline are
    written to strfolder, indexed by N.
    """
    n = args.n
    d = args.d
    s = args.s
    rho = args.total_budget
    # pure-DP epsilon equivalent of zCDP budget rho
    eps = np.sqrt(2*rho)
    err_em_paths = []
    err_lap_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    for j in range(len(Ns)):
        N = int(Ns[j])
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=True,b_fleig=True)
        cov_lap = LapCov(X.clone(),n,d,eps,b_fleig=True)
        cov_sep = SeparateLapCov(X.clone(),n,d,eps,b_fleig=True)
        # bug fix: AdaptiveLapCov was invoked twice back-to-back with
        # identical arguments; the duplicate only wasted computation
        cov_adapt = AdaptiveLapCov(X.clone(),args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_lap_paths.append(torch.norm(cov-cov_lap,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
    write_output(Ns,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(Ns,err_lap_paths,strfolder,'err_lap_paths.txt')
    write_output(Ns,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(Ns,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(Ns,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_n(args,ns,strfolder,params,seeds):
    """Sweep sample size n on synthetic Zipf data under a fixed zCDP budget.

    For each n, records Frobenius errors of EM/Gauss/Separate/Adaptive/
    CoinPress(t=1..5) estimators and the zero baseline, writing one file
    per estimator to strfolder.
    """
    d = args.d
    s = args.s
    N = args.N
    rho = args.total_budget
    # CoinPress per-step budget splits: the final step gets 3/4 of rho
    Ps1 = [args.total_budget]
    Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    for j in range(len(ns)):
        n = int(ns[j])
        args.n = n
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ', float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(ns,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(ns,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(ns,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(ns,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(ns,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(ns,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(ns,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(ns,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(ns,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(ns,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_d(args,ds,strfolder,params,seeds):
    """Sweep dimension d on synthetic Zipf data under a fixed zCDP budget.

    For each d, records Frobenius errors of EM/Gauss/Separate/Adaptive/
    CoinPress(t=1..5) estimators and the zero baseline, writing one file
    per estimator to strfolder.
    """
    n = args.n
    s = args.s
    N = args.N
    rho = args.total_budget
    # CoinPress per-step budget splits: the final step gets 3/4 of rho
    Ps1 = [args.total_budget]
    Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    for j in range(len(ds)):
        d = int(ds[j])
        args.d = d
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ', float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(ds,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(ds,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(ds,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(ds,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(ds,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(ds,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(ds,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(ds,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(ds,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(ds,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_rho(args,rhos,strfolder,params,seeds):
    """Sweep zCDP budget rho on synthetic Zipf data.

    For each rho, records Frobenius errors of EM/Gauss/Separate/Adaptive/
    CoinPress(t=1..5) estimators and the zero baseline, writing one file
    per estimator to strfolder.
    """
    d = args.d
    n = args.n
    s = args.s
    N = args.N
    err_em_paths = []
    err_gauss_paths = []
    err_sep_paths = []
    err_adapt_paths = []
    err_zero_paths = []
    err_cpt1_paths = []
    err_cpt2_paths = []
    err_cpt3_paths = []
    err_cpt4_paths = []
    err_cpt5_paths = []
    for j in range(len(rhos)):
        rho = rhos[j]
        args.total_budget = rho
        # CoinPress per-step budget splits: the final step gets 3/4 of rho
        Ps1 = [args.total_budget]
        Ps2 = [(1.0/4.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps3 = [(1.0/8.0)*args.total_budget, (1.0/8.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps4 = [(1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (1.0/12.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        Ps5 = [(1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (1.0/16.0)*args.total_budget, (3.0/4.0)*args.total_budget]
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        # non-private empirical second-moment matrix used as ground truth
        cov = torch.mm(X.t(),X)/n
        print('trace: ',float(torch.trace(cov)))
        cov_em = EMCov(X.clone(),args,b_budget=False,b_fleig=True)
        cov_gauss = GaussCov(X.clone(),n,d,rho,b_fleig=True)
        cov_sep = SeparateCov(X.clone(),n,d,rho,b_fleig=True)
        cov_adapt = AdaptiveCov(X.clone(),args)
        args.t = 1
        args.rho = Ps1
        cov_cpt1 = cov_est(X.clone(), args)
        args.t = 2
        args.rho = Ps2
        cov_cpt2 = cov_est(X.clone(), args)
        args.t = 3
        args.rho = Ps3
        cov_cpt3 = cov_est(X.clone(), args)
        args.t = 4
        args.rho = Ps4
        cov_cpt4 = cov_est(X.clone(), args)
        args.t = 5
        args.rho = Ps5
        cov_cpt5 = cov_est(X.clone(), args)
        err_em_paths.append(torch.norm(cov-cov_em,'fro'))
        err_gauss_paths.append(torch.norm(cov-cov_gauss,'fro'))
        err_sep_paths.append(torch.norm(cov-cov_sep,'fro'))
        err_adapt_paths.append(torch.norm(cov-cov_adapt,'fro'))
        err_zero_paths.append(torch.norm(cov,'fro'))
        err_cpt1_paths.append(torch.norm(cov-cov_cpt1,'fro'))
        err_cpt2_paths.append(torch.norm(cov-cov_cpt2,'fro'))
        err_cpt3_paths.append(torch.norm(cov-cov_cpt3,'fro'))
        err_cpt4_paths.append(torch.norm(cov-cov_cpt4,'fro'))
        err_cpt5_paths.append(torch.norm(cov-cov_cpt5,'fro'))
    write_output(rhos,err_em_paths,strfolder,'err_em_paths.txt')
    write_output(rhos,err_gauss_paths,strfolder,'err_gauss_paths.txt')
    write_output(rhos,err_sep_paths,strfolder,'err_sep_paths.txt')
    write_output(rhos,err_adapt_paths,strfolder,'err_adapt_paths.txt')
    write_output(rhos,err_zero_paths,strfolder,'err_zero_paths.txt')
    write_output(rhos,err_cpt1_paths,strfolder,'err_cpt1_paths.txt')
    write_output(rhos,err_cpt2_paths,strfolder,'err_cpt2_paths.txt')
    write_output(rhos,err_cpt3_paths,strfolder,'err_cpt3_paths.txt')
    write_output(rhos,err_cpt4_paths,strfolder,'err_cpt4_paths.txt')
    write_output(rhos,err_cpt5_paths,strfolder,'err_cpt5_paths.txt')
    write_text(params,strfolder,'params.txt')
def test_Ns(args,Ns,strfolder,params,seeds):
    """Sweep the data parameter N and record Frobenius errors per mechanism.

    For each N in `Ns`, synthesizes data with seed `seeds[j]`, estimates the
    covariance with every competing mechanism (EM, Gauss, Separate, Adaptive,
    and CoinPress with t = 1..5 iterations) and writes one
    `err_<name>_paths.txt` output file per mechanism plus `params.txt`.

    Note: mutates `args.t` and `args.rho` (required by `cov_est`); they are
    left at the t = 5 settings on return, matching the previous behavior.
    """
    n = args.n
    d = args.d
    s = args.s
    rho = args.total_budget
    # CoinPress budget splits for t = 1..5: the last iteration always gets
    # 3/4 of the budget and the remaining 1/4 is spread evenly over the
    # first t-1 iterations (for t = 1 the whole budget goes to the single
    # iteration). This replaces five hand-written Ps1..Ps5 lists.
    cpt_budgets = []
    for t in range(1, 6):
        if t == 1:
            cpt_budgets.append([args.total_budget])
        else:
            head = [(1.0 / (4.0 * (t - 1))) * args.total_budget] * (t - 1)
            cpt_budgets.append(head + [(3.0 / 4.0) * args.total_budget])
    mech_names = ("em", "gauss", "sep", "adapt", "zero", "cpt1", "cpt2", "cpt3", "cpt4", "cpt5")
    err_paths = {name: [] for name in mech_names}
    for j in range(len(Ns)):
        N = int(Ns[j])
        X = gen_synthetic_data_fix(d,int(n),s,N,seed=seeds[j])
        cov = torch.mm(X.t(),X)/n
        print('trace: ', float(torch.trace(cov)))
        # Non-CoinPress estimators first (same order as before), then the
        # five CoinPress variants, each with its own (t, rho-split) setting.
        estimates = {
            "em": EMCov(X.clone(),args,b_budget=False,b_fleig=True),
            "gauss": GaussCov(X.clone(),n,d,rho,b_fleig=True),
            "sep": SeparateCov(X.clone(),n,d,rho,b_fleig=True),
            "adapt": AdaptiveCov(X.clone(),args),
        }
        for t, Ps in enumerate(cpt_budgets, start=1):
            args.t = t
            args.rho = Ps
            estimates["cpt%d" % t] = cov_est(X.clone(), args)
        err_paths["zero"].append(torch.norm(cov,'fro'))  # trivial all-zero estimator baseline
        for name, est in estimates.items():
            err_paths[name].append(torch.norm(cov-est,'fro'))
    for name in mech_names:
        write_output(Ns,err_paths[name],strfolder,'err_%s_paths.txt' % name)
    write_text(params,strfolder,'params.txt')
def make_summary(strfolder,names,num=6):
    """Average per-run result files and write one summary file per metric.

    Reads `<name>_paths.txt` from every `strfolder/test_run*` directory,
    averages the second column across runs, and writes
    `strfolder/summary/<name>_summary.txt` with the first column as header.
    `num` is the expected number of rows per result file.
    """
    run_dirs = glob(strfolder + '/test_run*')
    n_runs = len(run_dirs)
    results = {name: np.zeros((num, n_runs)) for name in names}
    headers = {}
    for i, run_dir in enumerate(run_dirs):
        for name in names:
            data = np.genfromtxt(run_dir + '/' + name + '_paths.txt')
            results[name][:, i] = data[:, 1]
            if name not in headers:
                headers[name] = data[:, 0]
    folderout = strfolder + '/summary/'
    if not os.path.isdir(folderout):
        os.makedirs(folderout)
    for name in names:
        summary = np.mean(results[name], axis=1)
        np.savetxt(folderout + '/' + name + '_summary.txt', np.transpose([headers[name], summary]))
| 39,810 | 39.873717 | 163 | py |
PrivateCovariance | PrivateCovariance-main/coinpress/utils.py | # coding: utf-8
'''
Utilities functions
'''
import torch
import argparse
import os.path as osp
import numpy as np
import math
import pdb
def parse_args():
    """Build and parse the command-line options for the CoinPress experiments."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--total_budget', default=.5, type=float, help='Total privacy budget')
    add('--d', default=10, type=int, help='Feature dimension (dimension of synthetic data)')
    add('--n', default=3000, type=int, help='Number of samples to synthesize (for synthetic data)')
    add('--u', default=33, type=float, help='Initial upper bound for covariance')
    add('--fig_title', default=None, type=str, help='figure title')
    add('-f', default=None, type=str, help='needed for ipython starting')
    return parser.parse_args()
def cov_nocenter(X):
    """Second-moment matrix X^T X / n, without centering the data.

    Args:
        X: (n, d) tensor of samples (rows).
    Returns:
        (d, d) tensor X^T X / n.
    """
    # The previous version had a dead no-op assignment (`X = X`) before this line.
    return torch.mm(X.t(), X) / X.size(0)
def cov(X):
    """Empirical covariance of X (rows are samples), centered at the column mean."""
    centered = X - X.mean(0)
    return torch.mm(centered.t(), centered) / centered.size(0)
'''
PSD projection
'''
def psd_proj_symm(S):
    """Project a symmetric matrix S onto the PSD cone (Frobenius-nearest).

    Uses an eigendecomposition and clamps negative eigenvalues to zero.
    The previous SVD-based version was a no-op clamp: singular values are
    already non-negative, so an indefinite input was mapped to |S| (the
    matrix absolute value) instead of its PSD projection.

    Args:
        S: symmetric (d, d) tensor.
    Returns:
        the closest PSD matrix to S in Frobenius norm.
    """
    eigvals, eigvecs = torch.linalg.eigh(S)
    eigvals = torch.clamp(eigvals, min=0)
    return torch.mm(eigvecs, torch.mm(eigvals.diag_embed(), eigvecs.t()))
'''
Mean Estimation Methods --------------------------------------------------------
'''
'''
Fine mean estimation algorithm
- list params are purely for graphing purposes and can be ignored if not needed
returns: fine DP estimate for mean
'''
def fineMeanEst(x, sigma, R, epsilon, epsilons=None, sensList=None, rounding_outliers=False):
    """Laplace-mechanism fine estimate of the mean of x.

    Args:
        x: sequence of scalar samples.
        sigma: known standard deviation of the data.
        R: radius of the coarse range the mean is known to lie in.
        epsilon: privacy budget for this release.
        epsilons, sensList: optional logging lists (appended to) for graphing.
            The previous version used mutable default arguments, so the same
            lists were silently shared and accumulated across calls (B006).
        rounding_outliers: clip samples into [-B, B] before averaging. The
            previous version rebound the loop variable (`for i in x: i = B`)
            and therefore never actually clipped anything.
    Returns:
        the noisy mean estimate (float).
    """
    B = R + sigma * 3
    sens = 2 * B / (len(x) * epsilon)  # Laplace sensitivity of the clipped mean
    if epsilons is not None:
        epsilons.append([epsilon])
    if sensList is not None:
        sensList.append([sens])
    if rounding_outliers:
        # Work on a clipped copy; do not mutate the caller's data.
        x = [min(max(v, -1 * B), B) for v in x]
    noise = np.random.laplace(loc=0.0, scale=sens)
    return sum(x) / len(x) + noise
'''
Coarse mean estimation algorithm with Private Histogram
returns: [start of intrvl, end of intrvl, freq or probability], bin number
- the coarse mean estimation would just be the midpoint of the intrvl (in case this is needed)
'''
def privateRangeEst(x, epsilon, delta, alpha, R, sd):
    """Coarse private range estimation via a private histogram.

    Builds dyadic bins ((j-0.5)*sd, (j+0.5)*sd] covering [-R, R], runs the
    private histogram learner, and returns
    ([interval start, interval end, noisy probability], bin index).
    The midpoint of the returned interval serves as a coarse mean estimate.
    Note: alpha in (0, 1/2); epsilon, delta in (0, 1/n) per
    https://arxiv.org/pdf/1711.03908.pdf Lemma 2.3.
    """
    r = int(math.ceil(R / sd))
    # [interval start, interval end, frequency] per bin index
    bins = {j: [(j - 0.5) * sd, (j + 0.5) * sd, 0] for j in range(-1 * r, r + 1)}
    L = privateHistLearner(x, bins, epsilon, delta, r, sd)
    return bins[L], L
# helper function
# returns: max probability bin number
def privateHistLearner(x, bins, epsilon, delta, r, sd): # r, sd kept to transmit info to the caller
    """Private histogram learner: noise each bin's frequency with Laplace
    noise and return the index of the bin with the largest noisy probability.

    Mutates `bins` in place (frequencies become noisy probabilities).
    delta == 0 selects the pure-epsilon branch; otherwise the (eps, delta)
    branch thresholds small noisy counts to zero.
    """
    max_prob = 0
    max_r = 0
    # Fill the empirical probability of each bin (samples outside all bins are dropped).
    for i in x:
        r_temp = int(round(i/sd))
        if r_temp in bins:
            bins[r_temp][2] += 1/len(x)
    for r_temp in bins:
        noise = np.random.laplace(loc=0.0, scale=2/(epsilon*len(x)))
        if delta == 0 or r_temp < 2/delta:
            # epsilon DP case
            bins[r_temp][2] += noise
        else:
            # epsilon-delta DP case: only noise non-empty bins, then zero out
            # anything below the stability threshold t
            if bins[r_temp][2] > 0:
                bins[r_temp][2] += noise
                t = 2*math.log(2/delta)/(epsilon*len(x)) + (1/len(x))
                if bins[r_temp][2] < t:
                    bins[r_temp][2] = 0
        if bins[r_temp][2] > max_prob:
            max_prob = bins[r_temp][2]
            max_r = r_temp
    return max_r
'''
Two shot algorithm
- may want to optimize distribution ratio between fine & coarse estimation
eps1 = epsilon for private histogram algorithm
eps2 = epsilon for fine mean estimation algorithm
returns: DP estimate for mean
'''
def twoShot(x, eps1, eps2, delta, R, sd):
    """Two-shot private mean estimation: coarse range, then fine estimate.

    eps1 funds the private histogram (coarse range); eps2 funds the fine
    Laplace mean estimate on the reduced range. Note: clips x in place.
    """
    alpha = 0.5
    # coarse estimation
    [start, end, prob], r = privateRangeEst(x, eps1, delta, alpha, R, sd)
    lo = start - 3 * sd
    hi = end + 3 * sd
    for i in range(len(x)):
        x[i] = min(max(x[i], lo), hi)
    # fine estimation with smaller range (less sensitivity)
    return fineMeanEst(x, sd, 3.5 * sd, eps2)
| 4,308 | 29.34507 | 115 | py |
PrivateCovariance | PrivateCovariance-main/coinpress/algos.py |
'''
Privately estimating covariance.
'''
import torch
import coinpress.utils as utils
import numpy as np
import math
def cov_est_step(X, A, rho, cur_iter, args):
    """
    One zCDP step of iterative covariance estimation.

    Projects X through the current map A, clips row norms to a Gaussian tail
    bound, adds a symmetric Gaussian noise matrix to the empirical second
    moment, PSD-projects it, and refines A by the inverse square root.

    Parameters
    ----------
    X : (n, d) tensor of samples.
    A : (d, d) current transformation.
    rho : zCDP budget for this step.
    cur_iter : step index (unused in the body; kept for interface parity).
    args : namespace providing n and d.

    Returns
    -------
    (A, Z) : updated transformation and the noisy second-moment estimate.
    """
    W = torch.mm(X, A)
    n = args.n
    d = args.d
    #Hyperparameters
    gamma = gaussian_tailbound(d, 0.1)  # clipping threshold for projected rows
    eta = 0.5*(2*(np.sqrt(d/n)) + (np.sqrt(d/n))**2)  # regularizer before inversion
    #truncate points: rescale rows whose norm exceeds gamma
    W_norm = np.sqrt((W**2).sum(-1, keepdim=True))
    norm_ratio = gamma / W_norm
    large_norm_mask = (norm_ratio < 1).squeeze()
    W[large_norm_mask] = W[large_norm_mask] * norm_ratio[large_norm_mask]
    #noise: symmetric Gaussian matrix calibrated to the clipped sensitivity
    Y = torch.randn(d, d)
    noise_var = (gamma**4/(rho*n**2))
    Y *= np.sqrt(noise_var)
    #can also do Y = torch.triu(Y, diagonal=1) + torch.triu(Y).t()
    Y = torch.triu(Y)
    Y = Y + Y.t() - Y.diagonal().diag_embed() #Don't duplicate diagonal entries
    Z = (torch.mm(W.t(), W))/n
    #add noise
    Z = Z + Y
    #ensure psd of Z
    Z = utils.psd_proj_symm(Z)
    U = Z + eta*torch.eye(d)
    inv = torch.inverse(U)
    invU, invD, invV = inv.svd()
    inv_sqrt = torch.mm(invU, torch.mm(invD.sqrt().diag_embed(), invV.t()))
    A = torch.mm(inv_sqrt, A)
    return A, Z
def cov_est(X, args ):
    """
    Multivariate covariance estimation (CoinPress).

    Runs args.t iterative steps, spending args.rho[i] zCDP budget on step i,
    then maps the final noisy estimate back through the accumulated
    transformation. args.u is the initial upper bound on the covariance.

    Returns: zCDP estimate of cov.
    """
    A = torch.eye(args.d) / np.sqrt(args.u)
    assert len(args.rho) == args.t
    for i in range(args.t-1):
        A, Z = cov_est_step(X, A, args.rho[i], i, args)
    # final step: only its noisy estimate Z_t is used for the output
    A_t, Z_t = cov_est_step(X, A, args.rho[-1], args.t-1, args)
    cov = torch.mm(torch.mm(A.inverse(), Z_t), A.inverse())
    return cov
def gaussian_tailbound(d,b):
    """Norm bound holding with probability 1-b for a d-dim standard Gaussian."""
    log_term = math.log(1 / b)
    return math.sqrt(d + 2 * math.sqrt(d * log_term) + 2 * log_term)
def mahalanobis_dist(M, Sigma):
    """Frobenius distance between Sigma^{-1/2} M Sigma^{-1/2} and the identity."""
    U, D, V = torch.inverse(Sigma).svd()
    inv_root = torch.mm(U, torch.mm(D.sqrt().diag_embed(), V.t()))
    normalized = torch.mm(inv_root, torch.mm(M, inv_root))
    return torch.norm(normalized - torch.eye(M.size()[0]), 'fro')
'''
Functions for mean estimation
'''
## X = dataset
## c,r = prior knowledge that mean is in B2(c,r)
## t = number of iterations
## Ps =
def multivariate_mean_iterative(X, c, r, t, Ps):
    """Run t rounds of private mean estimation and return the final center.

    X: dataset; (c, r): prior knowledge that the mean lies in B2(c, r);
    Ps: per-round privacy budgets (the first t entries are used).
    """
    center, radius = c, r
    for budget in Ps[: t - 1]:
        center, radius = multivariate_mean_step(X, center, radius, budget)
    center, radius = multivariate_mean_step(X, center, radius, Ps[t - 1])
    return center
def multivariate_mean_step(X, c, r, p):
    """One private mean estimation round: clip to a ball around the current
    center, add Gaussian noise calibrated to the clipped sensitivity, and
    shrink the confidence radius.

    Note: mutates X in place (out-of-ball rows are projected onto the ball).
    Returns the new (center, radius) pair.
    """
    n, d = X.shape
    ## Determine a good clipping threshold
    gamma = gaussian_tailbound(d,0.01)
    clip_thresh = min((r**2 + 2*r*3 + gamma**2)**0.5,r + gamma) #3 in place of sqrt(log(2/beta))
    ## Round each of X1,...,Xn to the nearest point in the ball B2(c,clip_thresh)
    x = X - c
    mag_x = np.linalg.norm(x, axis=1)
    outside_ball = (mag_x > clip_thresh)
    x_hat = (x.T / mag_x).T
    X[outside_ball] = c + (x_hat[outside_ball] * clip_thresh)
    ## Compute sensitivity
    delta = 2*clip_thresh/float(n)
    sd = delta/(2*p)**0.5
    ## Add noise calibrated to sensitivity
    Y = np.random.normal(0, sd, size=d)
    c = np.sum(X, axis=0)/float(n) + Y
    r = ( 1/float(n) + sd**2 )**0.5 * gaussian_tailbound(d,0.01)
    return c, r
def L1(est): # assuming 0 vector is gt
    """L1 norm of the estimate (ground truth assumed to be the zero vector)."""
    return np.abs(est).sum()
def L2(est): # assuming 0 vector is gt
    """L2 norm of the estimate (ground truth assumed to be the zero vector)."""
    return np.linalg.norm(np.asarray(est))
| 3,435 | 28.118644 | 96 | py |
PrivateCovariance | PrivateCovariance-main/adaptive/utils.py | import torch
import numpy as np
from scipy.optimize import root_scalar
def SVT(T,eps,D,func,args):
    """Sparse Vector Technique: return the index of the first query
    func(D, args[i]) whose Laplace-noised value reaches the noised
    threshold T. Returns len(args) when no query passes.

    Half of eps funds the threshold noise (scale 2/eps), half the per-query
    noise (scale 4/eps), the standard AboveThreshold calibration.
    """
    T_tilde = T + np.random.laplace(scale=2.0/eps)
    i = 0
    m = len(args)
    while i < m:
        Qi = func(D,args[i]) + np.random.laplace(scale=4.0/eps)
        if Qi >= T_tilde:
            break
        i = i + 1
    return i
def convert_symm_mat(ZZ,d):
    """Unpack a length d(d+1)/2 row vector of upper-triangular entries
    (row-major, diagonal included) into a symmetric d x d matrix."""
    S = torch.empty([d, d])
    idx = 0
    for row in range(d):
        for col in range(row, d):
            S[row, col] = ZZ[0, idx]
            S[col, row] = ZZ[0, idx]
            idx += 1
    return S
def get_gauss_wigner_matrix(d):
    """Symmetric d x d noise matrix with i.i.d. standard Gaussian
    upper-triangular (incl. diagonal) entries."""
    Z = torch.normal(0,1,size=(1,int(d*(d+1)/2)))
    W = convert_symm_mat(Z,d)
    return W
def get_lap_wigner_matrix(d):
    """Symmetric d x d noise matrix with i.i.d. standard Laplace
    upper-triangular (incl. diagonal) entries."""
    Z = torch.distributions.laplace.Laplace(0,1).sample((1,int(d*(d+1)/2)))
    W = convert_symm_mat(Z,d)
    return W
def get_gauss_noise_vector(d):
    """Length-d vector of i.i.d. standard Gaussian noise (torch tensor)."""
    Z = torch.normal(0,1,size=(1,d)).squeeze()
    return Z
def get_lap_noise_vector(d):
    """Length-d vector of i.i.d. standard Laplace noise (torch tensor)."""
    Z = torch.distributions.laplace.Laplace(0,1).sample((d,))
    return Z
def inv_sqrt(S):
    """Return S^{-1/2}, computed from the SVD of the inverse of S."""
    U, D, V = torch.inverse(S).svd()
    return torch.mm(U, torch.mm(D.sqrt().diag_embed(), V.t()))
# def get_bincounts(x_norm,n,t,k1=0):
# k3 = k1+1-t
# counts = np.zeros(t)
# counts_sum = [0]
# counts_sum.extend([int(sum(x_norm>2**k)) for k in range(k1-1,k3-1,-1)])
# counts[0] = 0
# for k in range(k1-1,k3-1,-1):
# j = -k-k1
# counts[j] = counts_sum[j] - counts_sum[j-1]
# return counts
def get_bincounts(x_norm,n,t,k1=0):
    """Histogram of the first n point norms over dyadic bins.

    Bin l (for l = 1..t-1) holds norms in (2^-l, 2^-(l-1)]; bin 0 stays
    empty and norms falling below the last bin are dropped. k1 is unused
    and kept only for interface compatibility with callers.
    """
    counts = np.zeros(t)
    for i in range(n):
        norm_i = float(x_norm[i])
        level = -int(np.floor(np.log2(norm_i)))
        # push a norm sitting exactly on (or below) its bin's lower edge up one bin
        if not norm_i > 2 ** (-level):
            level = level + 1
        level = max(level, 1)
        if level < t:
            counts[level] += 1
    return counts
def gaussian_tailbound(d,b):
    """Norm bound holding with probability 1-b for a d-dim standard Gaussian."""
    log_term = np.log(1 / b)
    return np.sqrt(d + 2 * np.sqrt(d * log_term) + 2 * log_term)
def laplace_tailbound(d,b):
    """High-probability (1-b) norm bound for a d-dim standard Laplace vector."""
    ratio = (np.sqrt(2) + 1) / (np.sqrt(2) - 1)
    dim_term = 2 * np.log(d) / np.log(2)
    K = 2 * max(ratio, dim_term)
    return 1.5 * np.sqrt(d) + np.log(2. / b) * K
def wigner_gauss_tailbound(d,b):
    """Spectral-norm tail bound (prob. 1-b) for the d x d Gaussian Wigner matrix."""
    theta = min((np.log(d) / d) ** 0.3, 0.5)
    spectral = (1 + theta) * (2 * np.sqrt(d) + 6 * np.sqrt(np.log(d)) / np.sqrt(np.log(1 + theta)))
    return spectral + np.sqrt(2 * np.log(2 / b))
def wigner_lap_tailbound(d,b):
    """Spectral-norm tail bound (prob. 1-b) for the d x d Laplace Wigner matrix."""
    ratio = (np.sqrt(2) + 1) / (np.sqrt(2) - 1)
    entries_term = 2 * np.log(0.5 * (d * d + d)) / np.log(2)
    K = 2 * max(ratio, entries_term)
    theta = min((np.log(d) / d) ** 0.3, 0.5)
    spectral = (1 + theta) * (2 * np.sqrt(d) + 6 * np.log(d) / np.sqrt(np.log(1 + theta)))
    return spectral + np.sqrt(2) * np.log(2 / b) * K
def wigner_gauss_fnormbound(d,b):
    """Frobenius-norm tail bound (prob. 1-b) for the d x d Gaussian Wigner matrix."""
    log_term = np.log(2. / b)
    return np.sqrt(d * d + 2. * np.sqrt(d * log_term) * (1 + np.sqrt(2 * (d - 1))) + 6 * log_term)
def wigner_lap_fnormbound(d,b):
    """Frobenius-norm tail bound (prob. 1-b) for the d x d Laplace Wigner matrix."""
    ratio = (np.sqrt(2) + 1) / (np.sqrt(2) - 1)
    entries_term = 2 * np.log(0.5 * (d * d + d)) / np.log(2)
    K = 2 * max(ratio, entries_term)
    return 1.5 * d + np.log(2. / b) * K
def clip(X, x_norm, r):
    """Return a copy of X where every row with norm > r is rescaled to norm r.

    x_norm: (n, 1) tensor of precomputed row norms; X itself is not modified.
    """
    ratio = r / x_norm
    needs_clip = (ratio < 1).squeeze()
    out = X.detach().clone()
    out[needs_clip] = out[needs_clip] * ratio[needs_clip]
    return out
def rho_eps_eq(x,eps0,delta):
    """Residual of the zCDP-to-(eps, delta) conversion; zero when x maps to eps0."""
    converted = x + 2 * np.sqrt(x * np.log(1. / delta))
    return converted - eps0
def get_rho(eps,delta):
    """Invert the zCDP conversion: find rho in [0, eps] with
    rho + 2*sqrt(rho*log(1/delta)) == eps."""
    rho = root_scalar(rho_eps_eq, args=(eps,delta),bracket=[0,eps]).root
    return rho
PrivateCovariance | PrivateCovariance-main/adaptive/algos.py | import numpy as np
import torch
from adaptive.utils import get_gauss_wigner_matrix, get_lap_wigner_matrix, get_gauss_noise_vector, get_lap_noise_vector, SVT, get_bincounts, gaussian_tailbound, laplace_tailbound, wigner_gauss_fnormbound, wigner_lap_fnormbound, wigner_gauss_tailbound, wigner_lap_tailbound, clip
def GaussCov(X, n, d, rho, delta=0.0,r=1.0,b_fleig=False):
    """Gaussian-mechanism covariance estimate.

    X: (n, d) data with row norms assumed <= r. rho is the privacy budget:
    interpreted as zCDP when delta == 0, otherwise converted to an
    (eps, delta)-DP calibration. b_fleig clamps eigenvalues into [0, r^2].
    """
    cov = torch.mm(X.t(),X)/n
    W = get_gauss_wigner_matrix(d)
    sens = np.sqrt(2)*r*r/n  # sensitivity of the empirical second moment
    if delta > 0.0:
        # (eps, delta)-DP branch: eps derived from rho via the zCDP conversion
        sens = sens*np.sqrt(2*np.log(1.25/delta))
        eps = rho+2.*np.sqrt(rho*np.log(1/delta))
        cov_tilde = cov + sens/eps*W
    else:
        # pure zCDP branch
        cov_tilde = cov + sens/np.sqrt(2*rho)*W
    if b_fleig:
        D, U = torch.linalg.eigh(cov_tilde)
        for i in range(d):
            D[i] = max(min(D[i],r*r),0)
        cov_tilde = torch.mm(U,torch.mm(D.diag_embed(),U.t()))
    return cov_tilde
def GaussApproxCov(X, n, d, eps, delta,r=1.0,b_fleig=False):
    """Gaussian-mechanism covariance estimate under (eps, delta)-DP,
    with eps given directly (no zCDP conversion).

    b_fleig clamps eigenvalues of the estimate into [0, r^2].
    """
    cov = torch.mm(X.t(),X)/n
    W = get_gauss_wigner_matrix(d)
    sens = np.sqrt(2)*r*r/n
    sens = sens*np.sqrt(2*np.log(1.25/delta))
    cov_tilde = cov + sens/eps*W
    if b_fleig:
        D, U = torch.linalg.eigh(cov_tilde)
        for i in range(d):
            D[i] = max(min(D[i],r*r),0)
        cov_tilde = torch.mm(U,torch.mm(D.diag_embed(),U.t()))
    return cov_tilde
def LapCov(X, n, d, eps,r=1.0,b_fleig=False):
    """Laplace-mechanism covariance estimate under pure eps-DP.

    b_fleig clamps eigenvalues of the estimate into [0, r^2].
    """
    cov = torch.mm(X.t(),X)/n
    W = get_lap_wigner_matrix(d)
    sens = np.sqrt(2)*d*r*r/n  # L1 sensitivity scales with d
    cov_tilde = cov + sens/eps*W
    if b_fleig:
        D, U = torch.linalg.eigh(cov_tilde)
        for i in range(d):
            D[i] = max(min(D[i],r*r),0)
        cov_tilde = torch.mm(U,torch.mm(D.diag_embed(),U.t()))
    return cov_tilde
def SeparateCov(X, n, d, rho, r=1.0,b_fleig=False):
    """Separate covariance estimate under zCDP: eigenvectors come from a
    Gaussian-mechanism estimate (half the budget), eigenvalues are the true
    singular values perturbed with Gaussian noise (remaining budget).

    b_fleig clamps the noisy eigenvalues into [0, r^2].
    """
    cov = torch.mm(X.t(),X)/n
    cov_gauss = GaussCov(X, n, d, 0.5*rho, r=r)
    Ug, Dg, Vg = cov_gauss.svd()
    U, D, V = cov.svd()
    Z = get_gauss_noise_vector(d)
    sens = r*r*np.sqrt(2)/n
    D_tilde = torch.diag(D) + torch.diag(sens/np.sqrt(rho)*Z)
    if b_fleig:
        for i in range(d):
            D_tilde[i,i] = max(min(D_tilde[i,i],r*r),0)
    # recombine: private eigenvectors with privately perturbed eigenvalues
    cov_tilde = torch.mm(Ug,torch.mm(D_tilde,Vg.t()))
    return cov_tilde
def SeparateLapCov(X, n, d, eps, r=1.0,b_fleig=False):
    """Laplace analogue of SeparateCov under pure eps-DP: eigenvectors from a
    Laplace-mechanism estimate (half of eps), eigenvalues perturbed with
    Laplace noise (other half).

    b_fleig clamps the noisy eigenvalues into [0, r^2].
    """
    cov = torch.mm(X.t(),X)/n
    eps0 = 0.5*eps
    cov_lap = LapCov(X, n, d, eps0, r=r)
    Ug, Dg, Vg = cov_lap.svd()
    U, D, V = cov.svd()
    Z = get_lap_noise_vector(d)
    sens = r*r*2./n
    D_tilde = torch.diag(D) + torch.diag(sens/eps0*Z)
    if b_fleig:
        for i in range(d):
            D_tilde[i,i] = max(min(D_tilde[i,i],r*r),0)
    cov_tilde = torch.mm(Ug,torch.mm(D_tilde,Vg.t()))
    return cov_tilde
def get_bias(counts, tup):
    """Clipping-bias estimate at threshold 2^k from dyadic bin counts.

    tup = (parts, n, k, k1): parts are per-bin squared-norm weights, n the
    sample count, k the candidate clipping exponent, k1 the top exponent.
    """
    parts, n, k, k1 = tup
    upper = k1 - k + 1
    weighted = sum(c * p for c, p in zip(counts[:upper], parts[:upper]))
    clipped_mass = sum(counts[:upper]) * 2 ** (2 * k)
    return (weighted - clipped_mass) / n
def get_diff(counts, tup):
    """Rescaled bias-minus-noise score used by SVT to pick a clipping level.

    tup = (parts, n, k, noise1, noise2, k1, r): noise terms scale as 2^k and
    2^(2k) respectively; the score is positive when bias exceeds noise.
    """
    parts, n, k, noise1, noise2, k1, r = tup
    upper = k1 - k + 1
    weighted = sum(c * p for c, p in zip(counts[:upper], parts[:upper]))
    bias = (weighted - sum(counts[:upper]) * 2 ** (2 * k)) / n
    noise = 2 ** k * noise1 + 2 ** (2 * k) * noise2
    return n * (bias - noise) / r / r
def get_diff2(counts, tup):
    """Like get_diff, but the noise term is the better (smaller) of the
    Gaussian mechanism and the separate mechanism at clipping level 2^k.

    tup = (parts, n, k, gaussnoise, sepnoise1, sepnoise2, k1, r).
    """
    parts, n, k, gaussnoise, sepnoise1, sepnoise2, k1, r = tup
    upper = k1 - k + 1
    weighted = sum(c * p for c, p in zip(counts[:upper], parts[:upper]))
    bias = (weighted - sum(counts[:upper]) * 2 ** (2 * k)) / n
    noise = min(2 ** k * gaussnoise, 2 ** k * sepnoise1 + 2 ** (2 * k) * sepnoise2)
    return n * (bias - noise) / r / r
def ClippedCov(X, n, d, rho, beta, tr_tilde, r=1.0):
    """Covariance estimation with a privately chosen clipping radius (zCDP).

    Uses SVT over dyadic norm-histogram queries to pick a clipping exponent
    that balances clipping bias against mechanism noise, then runs
    SeparateCov on the clipped data with 3/4 of the budget.
    tr_tilde is a (private) estimate of the trace; beta a failure probability.
    """
    eta = gaussian_tailbound(d,0.5*beta)
    nu = wigner_gauss_tailbound(d,0.5*beta)
    rho1 = 0.75*rho  # budget for the final release; the rest funds SVT
    noise1 = 2**(1.25)*np.sqrt(tr_tilde)/np.sqrt(n)/(rho1**0.25)*np.sqrt(nu)
    noise2 = np.sqrt(2)*eta/np.sqrt(rho1)/n
    k1 = int(np.log2(r))
    # smallest clipping exponent worth probing
    k3 = -max(int(np.ceil(np.log2(noise1+noise2)-np.log2(r*r/np.sqrt(rho1)/n*np.log(1./beta))))+1,k1)
    t = k1-k3+1
    x_norm = torch.linalg.norm(X, dim=1, keepdim=True)
    parts = [2**(2*(k1-l)+2) for l in range(t)]
    counts = get_bincounts(x_norm, n, t)
    args = [(parts,n,k1-l,noise1,noise2,k1,r) for l in range(t)]
    j = SVT(0,np.sqrt(rho/2.),counts,get_diff,args)
    k2_tilde = k1-j  # SVT stop index -> chosen clipping exponent
    if k2_tilde < k1:
        r_tilde =2**(k2_tilde+1)
    else:
        r_tilde = 2**k2_tilde
    X_tilde = clip(X,x_norm,r_tilde)
    Sigma = SeparateCov(X_tilde,n,d,rho1,r=r_tilde,b_fleig=True)
    return Sigma
def ClippedLapCov(X, n, d, eps, beta, tr_tilde, r=1.0):
    """Pure eps-DP analogue of ClippedCov: SVT-chosen clipping radius,
    then SeparateLapCov on the clipped data with 3/4 of eps.
    """
    eta = laplace_tailbound(d,0.5*beta)
    nu = wigner_lap_tailbound(d,0.5*beta)
    eps1 = 0.75*eps  # budget for the final release
    eps2 = 0.25*eps  # budget for SVT
    noise1 = np.sqrt(8*np.sqrt(2)*d*tr_tilde)/np.sqrt(eps1*n)*np.sqrt(nu)
    noise2 = 4*eta/eps1/n
    k1 = int(np.log2(r))
    # NOTE(review): uses -k1 in the max() here, while ClippedCov uses k1.
    # The two agree when r == 1 (k1 == 0); confirm which is intended for r != 1.
    k3 = -max(int(np.ceil(np.log2(noise1+noise2)-np.log2(r*r/eps1/n*np.log(1./beta))))+1,-k1)
    t = k1-k3+1
    x_norm = torch.linalg.norm(X, dim=1, keepdim=True)
    parts = [2**(2*(k1-l)+2) for l in range(t)]
    counts = get_bincounts(x_norm, n, t)
    args = [(parts,n,k1-l,noise1,noise2,k1,r) for l in range(t)]
    j = SVT(0,eps2,counts,get_diff,args)
    k2_tilde = k1-j
    if k2_tilde < k1:
        r_tilde = 2**(k2_tilde+1)
    else:
        r_tilde = 2**k2_tilde
    X_tilde = clip(X,x_norm,r_tilde)
    Sigma = SeparateLapCov(X_tilde,n,d,eps1,r=r_tilde,b_fleig=True)
    return Sigma
def AdaptiveCov(X, args,r=1.0):
    """Adaptive zCDP covariance estimation.

    Privately estimates the trace, uses SVT to pick a clipping radius that
    minimizes the better of the Gaussian and Separate mechanism noise, then
    runs whichever mechanism is predicted to be less noisy at that radius.
    args must provide total_budget, n, d, beta.
    """
    rho = args.total_budget
    n = args.n
    d = args.d
    beta = args.beta
    cov = torch.mm(X.t(),X)/n
    tr = torch.trace(cov)
    factor = np.sqrt(8./rho*np.log(8./beta))
    # private trace estimate, clamped into (0, r]
    tr_tilde = tr + r*r*(2./np.sqrt(rho)/n*np.random.normal(0,1) + factor/n)
    tr_tilde = min(tr_tilde, r)
    tr_tilde = max(tr_tilde, 1e-16)
    rho1 = 0.75*rho
    beta1 = 0.75*beta
    eta = gaussian_tailbound(d,0.5*beta1)
    nu = wigner_gauss_tailbound(d,0.5*beta1)
    omega = wigner_gauss_fnormbound(d,beta1)
    sepnoise1 = 2**(1.25)*np.sqrt(tr_tilde)/np.sqrt(n)/(rho1**0.25)*np.sqrt(nu)/6.
    sepnoise2 = np.sqrt(2)*eta/np.sqrt(rho1)/n
    gaussnoise1 = 1./np.sqrt(rho1)/n*omega
    k1 = int(np.log2(r))
    k3 = -min(int(d*n),-int(np.log2(1e-24)))
    t = k1-k3+1
    x_norm = torch.linalg.norm(X, dim=1, keepdim=True)
    parts = [2**(2*(k1-l)+2) for l in range(t)]
    counts = get_bincounts(x_norm, n, t)
    # NOTE: rebinds the `args` parameter; its fields are not used below this point.
    args = [(parts,n,k1-l,gaussnoise1,sepnoise1,sepnoise2,k1,r) for l in range(t)]
    j = SVT(0,np.sqrt(rho)/2.,counts,get_diff2,args)
    k2_tilde = k1-j
    r_tilde = min(2**(k2_tilde+1),r)
    X_tilde = clip(X,x_norm,r_tilde)
    # predicted noise of each mechanism at the chosen clipping level
    sepnoise = (2**k2_tilde)*sepnoise1+(2**(2*k2_tilde))*sepnoise2
    gaussnoise = (2**(2*k2_tilde))*gaussnoise1
    if sepnoise>=gaussnoise:
        Sigma = GaussCov(X_tilde,n,d,rho1,r=r_tilde,b_fleig=True)
    else:
        Sigma = SeparateCov(X_tilde,n,d,rho1,r=r_tilde,b_fleig=True)
    return Sigma
def AdaptiveLapCov(X, args,r=1.0):
    """Pure-DP analogue of AdaptiveCov using Laplace mechanisms.

    Converts args.total_budget (rho) to eps = sqrt(2*rho), privately
    estimates the trace, SVT-selects a clipping radius, and runs the less
    noisy of LapCov and SeparateLapCov on the clipped data.
    """
    rho = args.total_budget
    eps = np.sqrt(2*rho)
    n = args.n
    d = args.d
    beta = args.beta
    cov = torch.mm(X.t(),X)/n
    tr = torch.trace(cov)
    factor = 8.*r*r/eps*np.log(4./beta)
    # private trace estimate, clamped into (0, r]
    tr_tilde = tr + 8.*r*r/eps/n*np.random.laplace(0,1) + factor/n
    tr_tilde = min(tr_tilde, r)
    tr_tilde = max(tr_tilde, 1e-16)
    eps1 = 0.75*eps
    beta1 = 0.75*beta
    eta = laplace_tailbound(d,0.5*beta1)
    nu = wigner_lap_tailbound(d,0.5*beta1)
    omega = wigner_lap_fnormbound(d,beta1)
    sepnoise1 = np.sqrt(8*np.sqrt(2)*d*tr_tilde)/np.sqrt(eps1*n)*np.sqrt(nu)/14.
    sepnoise2 = 4*eta/eps1/n
    lapnoise1 = np.sqrt(2)*d*r*r/n*omega
    k1 = int(np.log2(r))
    k3 = -min(int(d*n),-int(np.log2(1e-24)))
    t = k1-k3+1
    x_norm = torch.linalg.norm(X, dim=1, keepdim=True)
    parts = [2**(2*(k1-l)+2) for l in range(t)]
    counts = get_bincounts(x_norm, n, t)
    # NOTE: rebinds the `args` parameter; its fields are not used below this point.
    args = [(parts,n,k1-l,lapnoise1,sepnoise1,sepnoise2,k1,r) for l in range(t)]
    j = SVT(0,eps/8.,counts,get_diff2,args)
    k2_tilde = k1-j
    r_tilde = min(2**(k2_tilde+1),r)
    X_tilde = clip(X,x_norm,r_tilde)
    # predicted noise of each mechanism at the chosen clipping level
    sepnoise = (2**k2_tilde)*sepnoise1+(2**(2*k2_tilde))*sepnoise2
    lapnoise = (2**(2*k2_tilde))*lapnoise1
    if sepnoise>=lapnoise:
        Sigma = LapCov(X_tilde,n,d,eps1,r=r_tilde,b_fleig=True)
    else:
        Sigma = SeparateLapCov(X_tilde,n,d,eps1,r=r_tilde,b_fleig=True)
    return Sigma
PrivateCovariance | PrivateCovariance-main/exponential/utils.py | import torch
import numpy as np
from scipy.optimize import root_scalar
import time
def root_bisect_dec(x0,x1,func,args,T=10,thres=1e-8):
    """Bisection root finder for a decreasing func(x, args) on [x0, x1].

    Runs at most T halving steps, stopping early once |func(mid, args)|
    drops to thres or below. Returns the final midpoint.
    """
    lo, hi = x0, x1
    mid = 0.5 * (lo + hi)
    val = func(mid, args)
    for _ in range(T):
        if abs(val) <= thres:
            break
        if val > 0:
            lo = mid  # root lies to the right of mid for a decreasing func
        else:
            hi = mid
        mid = 0.5 * (lo + hi)
        val = func(mid, args)
    return mid
def constr_bingham(x, Da):
    """Constraint residual for the Bingham sampler's parameter b:
    sum_j 1/(x + 2*Da[j]) - 1, which is zero at the desired b."""
    total = sum(1.0 / (x + 2 * Da[j]) for j in range(len(Da)))
    return total - 1
def find_bingham(cov, eps, d, batch=2):
    """Sample a unit direction from the exponential-mechanism density
    proportional to exp(eps/4 * v^T cov v) on the d-sphere.

    Uses rejection sampling: `batch` proposals per round are drawn from a
    zero-mean Gaussian (normalized to the sphere) and accepted against the
    target density. Loops until at least one proposal is accepted.
    """
    Uc, Dc, Vc = cov.svd()
    lamb_1 = max(Dc)
    Da = -eps/4.0*(Dc-lamb_1)
    # shifted exponent matrix so the target density is bounded
    A = -eps/4.0*cov + eps/4.0*(lamb_1)*torch.eye(d)
    b = root_scalar(constr_bingham,args=Da,bracket=[1,d+1]).root
    ohm = torch.eye(d) + 2./b*A
    ohm_inv = torch.linalg.inv(ohm)
    logM = -0.5*(d-b)+d/2.*np.log(d/b)  # log of the envelope constant
    zero_mean = torch.zeros(d)
    Z = torch.distributions.multivariate_normal.MultivariateNormal(zero_mean, ohm_inv)
    while True:
        z = Z.sample((batch,)).t()
        v = torch.divide(z,torch.norm(z, dim=0))  # project proposals onto the sphere
        u = torch.rand(batch)
        pr1 = torch.diag(torch.matmul(torch.matmul(v.t(),A),v))
        pr = torch.diag(torch.matmul(torch.matmul(v.t(),ohm),v))
        pr = torch.exp(-pr1+d/2.*np.log(pr)-logM)  # acceptance probability per proposal
        success = (u < pr).squeeze()
        if (sum(success) > 0):
            ind = np.argmax(success>0)  # first accepted proposal
            v_out = v[:,ind]
            return v_out
def advanced_comp(x, ep0,k,delta):
    """Advanced-composition residual: zero when per-step budget x composes
    to ep0 over k adaptive steps at failure probability delta."""
    composed = np.sqrt(2. * k * np.log(1. / delta)) * x + k * x * (np.exp(x) - 1)
    return composed - ep0
def convert_eps(ep0, k, delta):
    """Per-step epsilon whose k-fold (simplified) advanced composition at
    failure probability delta equals ep0: the positive root of
    k*x^2 + sqrt(2k log(1/delta))*x - ep0 = 0."""
    a = k
    b = np.sqrt(2. * k * np.log(1. / delta))
    c = -ep0
    return (-b + np.sqrt(b * b - 4 * a * c)) / 2 / a
| 1,821 | 26.19403 | 86 | py |
PrivateCovariance | PrivateCovariance-main/exponential/algos.py | import torch
import numpy as np
from exponential.utils import find_bingham, convert_eps
from scipy.linalg import null_space
def EMCov(X, args, b_budget=False, b_fleig=True):
    """Exponential-mechanism covariance estimation.

    Privately releases eigenvalues (Laplace noise) and eigenvectors (one
    exponential-mechanism direction per dimension via EMStep), then
    reassembles the covariance. args provides total_budget (rho), delta,
    n, d, and (when b_budget) beta.

    b_budget: split the eigenvector budget proportionally to the noisy
    eigenvalues instead of evenly. b_fleig: clamp noisy eigenvalues to [0, n].
    """
    rho = args.total_budget
    delta = args.delta
    n = args.n
    d = args.d
    cov = torch.mm(X.t(),X)
    # convert the zCDP budget rho to a pure/approx-DP epsilon
    if not(delta > 0.0):
        eps_total = np.sqrt(2*rho)
    else:
        eps_total = rho+2.*np.sqrt(rho*np.log(1/delta))
    eps0 = 0.5*eps_total  # half for eigenvalues, half for eigenvectors
    Uc, Dc, Vc = cov.svd()
    lap = torch.distributions.laplace.Laplace(0,2./eps0).sample((d,))
    Lamb_hat = torch.diag(Dc) + torch.diag(lap)
    Lamb_round = torch.zeros(d)
    if b_fleig:
        for i in range(d):
            lamb = max(min(Lamb_hat[i,i],n),0)
            Lamb_round[i] = lamb
    else:
        Lamb_round = torch.diag(Lamb_hat)
    P1 = torch.eye(d)
    if not(b_budget):
        # even per-direction budget (via advanced composition when delta > 0)
        if (delta > 0):
            ep = convert_eps(eps0,d,delta)
        else:
            ep = eps0/d
        eps = [ep for j in range(d)]
    else:
        # eigenvalue-proportional budget split
        tau = 2./eps0*np.log(2.*d/args.beta)
        numer = [np.sqrt(Lamb_round[j]+tau) for j in range(d)]
        denom = sum(numer)
        eps = [eps0*numer[j]/denom for j in range(d)]
    Ci = cov
    Pi = torch.eye(d)
    theta = torch.zeros(d,d)  # rows are the released eigenvector estimates (filled by EMStep)
    for i in range(d):
        Ci, Pi = EMStep(cov, Ci, Pi, eps[i], d, i, theta)
    C_hat = torch.zeros(d,d)
    for i in range(d):
        C_hat = C_hat + Lamb_round[i]*torch.outer(theta[i],theta[i])
    return C_hat/n
def EMStep(C, Ci, Pi, epi, d, i, theta):
    """Release the i-th private eigenvector direction and deflate.

    Samples a direction of the projected matrix Ci via the exponential
    mechanism (budget epi), maps it back through Pi, stores it as theta[i]
    (theta is mutated in place), and — unless this is the last direction —
    re-projects C onto the null space of the directions released so far.
    Returns the updated (Ci, Pi).
    """
    u_hat = find_bingham(Ci, epi, (d-i), int(np.sqrt(d)))
    theta_hat = torch.matmul(Pi,u_hat)
    theta[i] = theta_hat
    if not(i==d-1):
        Pi = torch.from_numpy(null_space(theta))
        Ci = torch.matmul(torch.matmul(Pi.t(),C),Pi)
    return Ci, Pi
| 1,802 | 27.619048 | 69 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/experiment_run.py | """This library contains all utility functions to help you run your
experiments.
Methods
-------
run_experiment:
Run a single experiment with a specific solver and ExperimentParameters
run_experiment_agenda:
Run a number of experiments defined in the ExperimentAgenda
run_specific_experiments_from_research_agenda
Run only a few experiments fro a defined ExperimentAgenda
create_experiment_agenda
Create an experiment agenda given ranges for paramters to be probed
span_n_grid
Helper function to span the n-dimensional grid from parameter ranges
create_env_pair_for_experiment
Create a pair of environments for the desired research. One environment has no malfunciton, the other one has
exactly one malfunciton
save_experiment_results_to_file
Save the results of an experiment or a full experiment agenda
load_experiment_results_to_file
Load the results form an experiment result file
"""
import datetime
import itertools
import logging
import multiprocessing
import os
import platform
import pprint
import shutil
import threading
import time
from functools import partial
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
import pandas as pd
import tqdm as tqdm
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from pandas import DataFrame
from rsp.global_data_configuration import BASELINE_DATA_FOLDER
from rsp.global_data_configuration import EXPERIMENT_DATA_SUBDIRECTORY_NAME
from rsp.global_data_configuration import INFRAS_AND_SCHEDULES_FOLDER
from rsp.scheduling.asp_wrapper import asp_reschedule_wrapper
from rsp.scheduling.schedule import exists_schedule
from rsp.scheduling.schedule import load_schedule
from rsp.scheduling.schedule import Schedule
from rsp.scheduling.scheduling_problem import get_paths_in_route_dag
from rsp.scheduling.scheduling_problem import path_stats
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_01_agenda_expansion.agenda_expansion import expand_infrastructure_parameter_range
from rsp.step_01_agenda_expansion.agenda_expansion import expand_schedule_parameter_range
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import ExperimentAgenda
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import ExperimentParameters
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import InfrastructureParameters
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import InfrastructureParametersRange
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import ScheduleParameters
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import ScheduleParametersRange
from rsp.step_01_agenda_expansion.experiment_parameters_and_ranges import SpeedData
from rsp.step_01_agenda_expansion.global_constants import GLOBAL_CONSTANTS
from rsp.step_01_agenda_expansion.global_constants import GlobalConstants
from rsp.step_02_infrastructure_generation.infrastructure import exists_infrastructure
from rsp.step_02_infrastructure_generation.infrastructure import gen_infrastructure
from rsp.step_02_infrastructure_generation.infrastructure import load_infrastructure
from rsp.step_02_infrastructure_generation.infrastructure import save_infrastructure
from rsp.step_03_schedule_generation.schedule_generation import gen_and_save_schedule
from rsp.step_05_experiment_run.experiment_malfunction import gen_malfunction
from rsp.step_05_experiment_run.experiment_results import ExperimentResults
from rsp.step_05_experiment_run.experiment_results import load_experiments_results
from rsp.step_05_experiment_run.experiment_results import plausibility_check_experiment_results
from rsp.step_05_experiment_run.experiment_results_analysis import convert_list_of_experiment_results_analysis_to_data_frame
from rsp.step_05_experiment_run.experiment_results_analysis import expand_experiment_results_for_analysis
from rsp.step_05_experiment_run.experiment_results_analysis import plausibility_check_experiment_results_analysis
from rsp.step_05_experiment_run.experiment_results_analysis_load_and_save import load_and_expand_experiment_results_from_data_folder
from rsp.step_05_experiment_run.experiment_results_analysis_load_and_save import load_data_from_individual_csv_in_data_folder
from rsp.step_05_experiment_run.experiment_results_analysis_load_and_save import save_experiment_results_to_file
from rsp.step_05_experiment_run.scopers.scoper_offline_delta import scoper_offline_delta_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_offline_delta_weak import scoper_offline_delta_weak_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_offline_fully_restricted import scoper_offline_fully_restricted_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_online_random import scoper_online_random_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_online_route_restricted import scoper_online_route_restricted_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_online_transmission_chains import scoper_online_transmission_chains_for_all_agents
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import scoper_online_unrestricted_for_all_agents
from rsp.step_06_analysis.detailed_experiment_analysis.route_dag_analysis import visualize_route_dag_constraints_simple_wrapper
from rsp.utils.file_utils import check_create_folder
from rsp.utils.file_utils import newline_and_flush_stdout_and_stderr
from rsp.utils.pickle_helper import _pickle_dump
from rsp.utils.pickle_helper import _pickle_load
from rsp.utils.psutil_helpers import current_process_stats_human_readable
from rsp.utils.psutil_helpers import virtual_memory_human_readable
from rsp.utils.rsp_logger import add_file_handler_to_rsp_logger
from rsp.utils.rsp_logger import remove_file_handler_from_rsp_logger
from rsp.utils.rsp_logger import rsp_logger
# B008 Do not perform function calls in argument defaults.
# The call is performed only once at function definition time.
# All calls to your function will reuse the result of that definition-time function call.
# If this is intended, ass ign the function call to a module-level variable and use that variable as a default value.
AVAILABLE_CPUS = os.cpu_count()  # upper bound for worker-pool sizing in experiment runs
_pp = pprint.PrettyPrinter(indent=4)  # shared pretty-printer for structured log output
def run_experiment_in_memory(
    schedule: Schedule,
    experiment_parameters: ExperimentParameters,
    infrastructure_topo_dict: TopoDict,
    # TODO we should use logging debug levels instead
    debug: bool = False,
    online_unrestricted_only: bool = False,
) -> ExperimentResults:
    """A.2 + B Runs the main part of the experiment: re-scheduling full and
    delta perfect/naive.

    Parameters
    ----------
    schedule
        operational schedule that where malfunction happened
    experiment_parameters
        hierarchical experiment parameters
    infrastructure_topo_dict
        the "full" topology for each agent
    debug
        debug logging
    online_unrestricted_only
        run only scope `online_unrestricted`.
        Used for "calibration runs" where we are only interested in the speed-up between `online_unrestricted` with different `GlobalConstants`.

    Returns
    -------
    ExperimentResults
    """
    rsp_logger.info(f"run_experiment_in_memory for {experiment_parameters.experiment_id} with GLOBAL_CONSTANTS={GLOBAL_CONSTANTS._constants}")
    rsp_logger.info(f"1. gen malfunction for {experiment_parameters.experiment_id}")
    schedule_problem, schedule_result = schedule
    schedule_trainruns: TrainrunDict = schedule_result.trainruns_dict
    # --------------------------------------------------------------------------------------
    # A.2 Determine malfunction (deterministically from experiment parameters)
    # --------------------------------------------------------------------------------------
    experiment_malfunction = gen_malfunction(
        earliest_malfunction=experiment_parameters.re_schedule_parameters.earliest_malfunction,
        malfunction_duration=experiment_parameters.re_schedule_parameters.malfunction_duration,
        malfunction_agent_id=experiment_parameters.re_schedule_parameters.malfunction_agent_id,
        # NOTE(review): presumably identical to `schedule_trainruns` unpacked above
        # (same `Schedule` tuple) — confirm and unify for consistency.
        schedule_trainruns=schedule.schedule_experiment_result.trainruns_dict,
    )
    malfunction_agent_trainrun = schedule_trainruns[experiment_malfunction.agent_id]
    rsp_logger.info(f"{experiment_malfunction} for scheduled start {malfunction_agent_trainrun[0]} and arrival {malfunction_agent_trainrun[-1]}")
    # Restrict the infrastructure topology to the first k shortest paths per agent
    # before any re-scheduling scope is derived from it.
    rescheduling_topo_dict = _make_restricted_topo(
        infrastructure_topo_dict=infrastructure_topo_dict,
        number_of_shortest_paths=experiment_parameters.re_schedule_parameters.number_of_shortest_paths_per_agent,
    )
    # TODO SIM-774 streamline 5 stages according to overleaf; introduce planning stage; split experiment_run.py!!!
    # --------------------------------------------------------------------------------------
    # B.1. Re-schedule Full
    # --------------------------------------------------------------------------------------
    rsp_logger.info("2. reschedule full")
    # clone topos since propagation will modify them
    online_unrestricted_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    problem_online_unrestricted: ScheduleProblemDescription = scoper_online_unrestricted_for_all_agents(
        malfunction=experiment_malfunction,
        schedule_trainruns=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        # malfunction duration extends the time horizon beyond the schedule's
        latest_arrival=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        topo_dict_=online_unrestricted_topo_dict,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
    )
    results_online_unrestricted = asp_reschedule_wrapper(
        reschedule_problem_description=problem_online_unrestricted,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    online_unrestricted_trainruns = results_online_unrestricted.trainruns_dict
    costs_ = results_online_unrestricted.solver_statistics["summary"]["costs"][0]
    rsp_logger.info(f" results_online_unrestricted has costs {costs_}, took {results_online_unrestricted.solver_statistics['summary']['times']['total']}")
    # Calibration runs only need the unrestricted scope: return early with all
    # other scopes set to None (ExperimentResults tolerates None fields here).
    if online_unrestricted_only:
        return ExperimentResults(
            experiment_parameters=experiment_parameters,
            malfunction=experiment_malfunction,
            problem_schedule=schedule_problem,
            problem_online_unrestricted=problem_online_unrestricted,
            problem_offline_delta=None,
            problem_offline_delta_weak=None,
            problem_offline_fully_restricted=None,
            problem_online_route_restricted=None,
            problem_online_transmission_chains_fully_restricted=None,
            problem_online_transmission_chains_route_restricted=None,
            results_schedule=schedule_result,
            results_online_unrestricted=results_online_unrestricted,
            results_offline_delta=None,
            results_offline_delta_weak=None,
            results_offline_fully_restricted=None,
            results_online_route_restricted=None,
            results_online_transmission_chains_fully_restricted=None,
            results_online_transmission_chains_route_restricted=None,
            predicted_changed_agents_online_transmission_chains_fully_restricted=None,
            predicted_changed_agents_online_transmission_chains_route_restricted=None,
            **{f"problem_online_random_{i}": None for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
            **{f"results_online_random_{i}": None for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
            **{f"predicted_changed_agents_online_random_{i}": None for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
        )
    # --------------------------------------------------------------------------------------
    # B.2.a Lower bound: Re-Schedule Delta Perfect
    # --------------------------------------------------------------------------------------
    rsp_logger.info("3a. reschedule delta perfect (lower bound)")
    # clone topos since propagation will modify them
    offline_delta_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    problem_offline_delta = scoper_offline_delta_for_all_agents(
        online_unrestricted_trainrun_dict=online_unrestricted_trainruns,
        malfunction=experiment_malfunction,
        max_episode_steps=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        offline_delta_topo_dict_=offline_delta_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
    )
    results_offline_delta = asp_reschedule_wrapper(
        reschedule_problem_description=problem_offline_delta,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(
        f" results_offline_delta has costs {results_offline_delta.solver_statistics['summary']['costs'][0]}, "
        f"took {results_offline_delta.solver_statistics['summary']['times']['total']}"
    )
    # --------------------------------------------------------------------------------------
    # B.2.a Above Lower bound: Re-Schedule Delta Weak
    # --------------------------------------------------------------------------------------
    rsp_logger.info("3a. reschedule delta Weak (above lower bound)")
    # clone topos since propagation will modify them
    offline_delta_weak_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    problem_offline_delta_weak = scoper_offline_delta_weak_for_all_agents(
        online_unrestricted_trainrun_dict=online_unrestricted_trainruns,
        online_unrestricted_problem=problem_online_unrestricted,
        malfunction=experiment_malfunction,
        latest_arrival=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        topo_dict_=offline_delta_weak_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
    )
    results_offline_delta_weak = asp_reschedule_wrapper(
        reschedule_problem_description=problem_offline_delta_weak,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(f" results_offline_delta_weak has costs {results_offline_delta_weak.solver_statistics['summary']['costs'][0]}")
    # --------------------------------------------------------------------------------------
    # B.2.b Lower bound: Re-Schedule Delta trivially_perfect
    # --------------------------------------------------------------------------------------
    rsp_logger.info("3b. reschedule delta trivially_perfect (lower bound)")
    # clone topos since propagation will modify them
    delta_trivially_perfect_reschedule_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    problem_offline_fully_restricted = scoper_offline_fully_restricted_for_all_agents(
        online_unrestricted_trainrun_dict=online_unrestricted_trainruns,
        malfunction=experiment_malfunction,
        max_episode_steps=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        offline_fully_restricted_topo_dict_=delta_trivially_perfect_reschedule_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
    )
    results_offline_fully_restricted = asp_reschedule_wrapper(
        reschedule_problem_description=problem_offline_fully_restricted,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(
        f" results_offline_fully_restricted has costs "
        f"{results_offline_fully_restricted.solver_statistics['summary']['costs'][0]}, "
        f"took {results_offline_fully_restricted.solver_statistics['summary']['times']['total']}"
    )
    # --------------------------------------------------------------------------------------
    # B.2.c Some restriction
    # --------------------------------------------------------------------------------------
    rsp_logger.info("4. reschedule no rerouting")
    # clone topos since propagation will modify them
    delta_no_rerouting_reschedule_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    problem_online_route_restricted = scoper_online_route_restricted_for_all_agents(
        online_unrestricted_trainrun_dict=online_unrestricted_trainruns,
        online_unrestricted_problem=problem_online_unrestricted,
        malfunction=experiment_malfunction,
        max_episode_steps=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        # pytorch convention for in-place operations: postfixed with underscore.
        topo_dict_=delta_no_rerouting_reschedule_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
    )
    results_online_route_restricted = asp_reschedule_wrapper(
        reschedule_problem_description=problem_online_route_restricted,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(
        f" results_online_route_restricted has costs {results_online_route_restricted.solver_statistics['summary']['costs'][0]}, "
        f"took {results_online_route_restricted.solver_statistics['summary']['times']['total']}"
    )
    # --------------------------------------------------------------------------------------
    # B.2.d Upper bound: online predictor
    # --------------------------------------------------------------------------------------
    rsp_logger.info("5a. reschedule delta online transmission chains: upper bound")
    # clone topos since propagation will modify them
    online_transmission_chains_fully_restricted_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    # Scoper returns both the restricted problem and the set of agents the
    # transmission-chain predictor expects to change.
    (
        problem_online_transmission_chains_fully_restricted,
        predicted_changed_agents_online_transmission_chains_fully_restricted_predicted,
    ) = scoper_online_transmission_chains_for_all_agents(
        online_unrestricted_problem=problem_online_unrestricted,
        malfunction=experiment_malfunction,
        latest_arrival=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        # pytorch convention for in-place operations: postfixed with underscore.
        delta_online_topo_dict_to_=online_transmission_chains_fully_restricted_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
        time_flexibility=False,
    )
    results_online_transmission_chains_fully_restricted = asp_reschedule_wrapper(
        reschedule_problem_description=problem_online_transmission_chains_fully_restricted,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(
        f" results_online_transmission_chains_fully_restricted has costs "
        f"{results_online_transmission_chains_fully_restricted.solver_statistics['summary']['costs'][0]}, "
        f"took {results_online_transmission_chains_fully_restricted.solver_statistics['summary']['times']['total']}"
    )
    # --------------------------------------------------------------------------------------
    # B.2.d Upper bound: online_no_time_flexibility predictor
    # --------------------------------------------------------------------------------------
    rsp_logger.info("5b. reschedule delta online_no_time_flexibility transmission chains: upper bound")
    # clone topos since propagation will modify them
    online_transmission_chains_route_restricted_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
    # Same scoper as 5a but with time_flexibility=True (route-restricted variant).
    (
        problem_online_transmission_chains_route_restricted,
        predicted_changed_agents_online_transmission_chains_route_restricted_predicted,
    ) = scoper_online_transmission_chains_for_all_agents(
        online_unrestricted_problem=problem_online_unrestricted,
        malfunction=experiment_malfunction,
        latest_arrival=schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration,
        # pytorch convention for in-place operations: postfixed with underscore.
        delta_online_topo_dict_to_=online_transmission_chains_route_restricted_topo_dict,
        schedule_trainrun_dict=schedule_trainruns,
        minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
        max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
        weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
        weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
        time_flexibility=True,
    )
    results_online_transmission_chains_route_restricted = asp_reschedule_wrapper(
        reschedule_problem_description=problem_online_transmission_chains_route_restricted,
        schedule=schedule_trainruns,
        debug=debug,
        asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
    )
    rsp_logger.info(
        f" results_online_transmission_chains_route_restricted has costs "
        f"{results_online_transmission_chains_route_restricted.solver_statistics['summary']['costs'][0]}, "
        f"took {results_online_transmission_chains_route_restricted.solver_statistics['summary']['times']['total']}"
    )
    # --------------------------------------------------------------------------------------
    # B.2.e Sanity check: random predictor
    # if that also reduces solution time, our problem is not hard enough, showing the problem is not trivial
    # --------------------------------------------------------------------------------------
    rsp_logger.info("6. reschedule delta random naive: upper bound")
    # One (problem, result, predicted-changed-agents) triple per random run.
    randoms = []
    for random_i in range(GLOBAL_CONSTANTS.NB_RANDOM):
        # clone topos since propagation will modify them
        online_random_topo_dict = {agent_id: topo.copy() for agent_id, topo in rescheduling_topo_dict.items()}
        problem_online_random, predicted_changed_agents_online_random = scoper_online_random_for_all_agents(
            online_unrestricted_problem=problem_online_unrestricted,
            malfunction=experiment_malfunction,
            # TODO document? will it be visible in ground times?
            latest_arrival=(schedule_problem.max_episode_steps + experiment_malfunction.malfunction_duration),
            # pytorch convention for in-place operations: postfixed with underscore.
            delta_random_topo_dict_to_=online_random_topo_dict,
            schedule_trainrun_dict=schedule_trainruns,
            minimum_travel_time_dict=schedule_problem.minimum_travel_time_dict,
            # TODO document? will it be visible in ground times?
            max_window_size_from_earliest=experiment_parameters.re_schedule_parameters.max_window_size_from_earliest,
            weight_route_change=experiment_parameters.re_schedule_parameters.weight_route_change,
            weight_lateness_seconds=experiment_parameters.re_schedule_parameters.weight_lateness_seconds,
            # the random scope is sized to match the transmission-chain prediction (5a)
            nb_changed_running_agents_online=len(predicted_changed_agents_online_transmission_chains_fully_restricted_predicted),
        )
        results_online_random = asp_reschedule_wrapper(
            reschedule_problem_description=problem_online_random,
            schedule=schedule_trainruns,
            debug=debug,
            asp_seed_value=experiment_parameters.schedule_parameters.asp_seed_value,
        )
        rsp_logger.info(
            f" results_online_random[{random_i}] has costs {results_online_random.solver_statistics['summary']['costs'][0]}, "
            f"took {results_online_random.solver_statistics['summary']['times']['total']}"
        )
        randoms.append((problem_online_random, results_online_random, predicted_changed_agents_online_random))
    # --------------------------------------------------------------------------------------
    # B.3. Result
    # --------------------------------------------------------------------------------------
    rsp_logger.info("7. gathering results")
    current_results = ExperimentResults(
        experiment_parameters=experiment_parameters,
        malfunction=experiment_malfunction,
        problem_schedule=schedule_problem,
        problem_online_unrestricted=problem_online_unrestricted,
        problem_offline_delta=problem_offline_delta,
        problem_offline_delta_weak=problem_offline_delta_weak,
        problem_offline_fully_restricted=problem_offline_fully_restricted,
        problem_online_route_restricted=problem_online_route_restricted,
        problem_online_transmission_chains_fully_restricted=problem_online_transmission_chains_fully_restricted,
        problem_online_transmission_chains_route_restricted=problem_online_transmission_chains_route_restricted,
        results_schedule=schedule_result,
        results_online_unrestricted=results_online_unrestricted,
        results_offline_delta=results_offline_delta,
        results_offline_delta_weak=results_offline_delta_weak,
        results_offline_fully_restricted=results_offline_fully_restricted,
        results_online_route_restricted=results_online_route_restricted,
        results_online_transmission_chains_fully_restricted=results_online_transmission_chains_fully_restricted,
        results_online_transmission_chains_route_restricted=results_online_transmission_chains_route_restricted,
        predicted_changed_agents_online_transmission_chains_fully_restricted=predicted_changed_agents_online_transmission_chains_fully_restricted_predicted,
        predicted_changed_agents_online_transmission_chains_route_restricted=predicted_changed_agents_online_transmission_chains_route_restricted_predicted,
        **{f"problem_online_random_{i}": randoms[i][0] for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
        **{f"results_online_random_{i}": randoms[i][1] for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
        **{f"predicted_changed_agents_online_random_{i}": randoms[i][2] for i in range(GLOBAL_CONSTANTS.NB_RANDOM)},
    )
    rsp_logger.info(f"done re-schedule full and delta naive/perfect for experiment {experiment_parameters.experiment_id}")
    return current_results
def _make_restricted_topo(infrastructure_topo_dict: TopoDict, number_of_shortest_paths: int):
    """Restrict each agent's topology to the nodes of its first `number_of_shortest_paths` paths.

    Works on copies: the input `infrastructure_topo_dict` is left untouched.

    Parameters
    ----------
    infrastructure_topo_dict
        "full" topology per agent (agent_id -> route DAG)
    number_of_shortest_paths
        how many of the (pre-ordered) paths per agent to keep

    Returns
    -------
    TopoDict
        per-agent copies with all nodes outside the kept paths removed
    """
    topo_dict = {agent_id: topo.copy() for agent_id, topo in infrastructure_topo_dict.items()}
    nb_paths_before = []
    nb_paths_after = []
    for topo in topo_dict.values():
        paths = get_paths_in_route_dag(topo)
        # Use a set for O(1) membership tests; the original list made the
        # per-node filtering below quadratic in the number of nodes.
        nodes_to_keep = {node for path in paths[:number_of_shortest_paths] for node in path}
        nodes_to_remove = {node for node in topo.nodes if node not in nodes_to_keep}
        topo.remove_nodes_from(nodes_to_remove)
        nb_paths_before.append(len(paths))
        nb_paths_after.append(len(get_paths_in_route_dag(topo)))
    rsp_logger.info(
        f"make restricted topo for re-scheduling with number_of_shortest_paths{number_of_shortest_paths}: "
        f"{path_stats(nb_paths_before)} -> {path_stats(nb_paths_after)}"
    )
    return topo_dict
def _render_route_dags_from_data(experiment_base_directory: str, experiment_id: int):
    """Load one experiment's results from disk and render a per-agent route-DAG PDF
    for the online-unrestricted problem (files `reschedule_alt_agent_<id>.pdf`)."""
    results_before, _ = load_and_expand_experiment_results_from_data_folder(
        experiment_data_folder_name=experiment_base_directory + "/data", experiment_ids=[experiment_id]
    )[0]
    problem_online_unrestricted: ScheduleProblemDescription = results_before.problem_online_unrestricted
    for agent_id in problem_online_unrestricted.route_dag_constraints_dict:
        # NOTE(review): accessing `.schedule_problem_description` and
        # `.experiment_malfunction` on a `ScheduleProblemDescription` looks
        # suspicious — presumably the intent is the problem itself and
        # `results_before.malfunction`; confirm against the wrapper's signature.
        visualize_route_dag_constraints_simple_wrapper(
            schedule_problem_description=problem_online_unrestricted.schedule_problem_description,
            trainrun_dict=None,
            experiment_malfunction=problem_online_unrestricted.experiment_malfunction,
            agent_id=agent_id,
            file_name=f"reschedule_alt_agent_{agent_id}.pdf",
        )
def _get_asp_solver_details_from_statistics(elapsed_time: float, statistics: Dict):
return "{:5.3f}s = {:5.2f}% ({:5.3f}s (Solving: {}s 1st Model: {}s Unsat: {}s)".format(
statistics["summary"]["times"]["total"],
statistics["summary"]["times"]["total"] / elapsed_time * 100,
statistics["summary"]["times"]["total"],
statistics["summary"]["times"]["solve"],
statistics["summary"]["times"]["sat"],
statistics["summary"]["times"]["unsat"],
)
def _write_sha_txt(folder_name: str):
    """Persist the current git commit hash to a file ``sha.txt`` in *folder_name*.

    Parameters
    ----------
    folder_name
        directory in which ``sha.txt`` is created/overwritten
    """
    # imported lazily so the module does not require gitpython unless used
    import git

    sha = git.Repo(search_parent_directories=True).head.object.hexsha
    out_file = os.path.join(folder_name, "sha.txt")
    rsp_logger.info(f"writing {sha} to {out_file}")
    with open(out_file, "w") as out:
        out.write(sha)
def run_experiment_from_to_file(
    experiment_parameters: ExperimentParameters,
    experiment_base_directory: str,
    experiment_output_directory: str,
    global_constants: GlobalConstants,
    csv_only: bool = False,
    debug: bool = False,
    online_unrestricted_only: bool = False,
    raise_exceptions: bool = False,
):
    """A.2 + B. Run and save one experiment from experiment parameters.

    Loads infrastructure and schedule from disk, runs `run_experiment_in_memory`,
    plausibility-checks the results (unless `online_unrestricted_only`), and
    saves them under the experiment data directory. Exceptions are logged and
    swallowed unless `raise_exceptions` is set.

    Parameters
    ----------
    experiment_base_directory
        base for infrastructure and schedules
    experiment_parameters
        contains reference to infrastructure and schedules
    experiment_output_directory
    global_constants
        constants for this run; set process-wide below
    csv_only
        passed through to `save_experiment_results_to_file`
    debug
    online_unrestricted_only
        skip all restricted scopes and plausibility checks (calibration runs)
    raise_exceptions
        re-raise instead of only logging failures

    Returns
    -------
    the worker pid on success/failure, `[]`/None on missing data
    """
    rsp_logger.info(f"run_experiment_from_to_file with {global_constants}")
    # N.B. this works since we ensure that every experiment runs in its own process!
    GLOBAL_CONSTANTS.set_defaults(constants=global_constants)
    experiment_data_directory = f"{experiment_output_directory}/{EXPERIMENT_DATA_SUBDIRECTORY_NAME}"
    # add logging file handler in this thread
    # NOTE(review): f-strings without placeholders (f"log.txt"/f"err.txt") — plain strings would do.
    stdout_log_file = os.path.join(experiment_data_directory, f"log.txt")
    stderr_log_file = os.path.join(experiment_data_directory, f"err.txt")
    stdout_log_fh = add_file_handler_to_rsp_logger(stdout_log_file, logging.INFO)
    stderr_log_fh = add_file_handler_to_rsp_logger(stderr_log_file, logging.ERROR)
    rsp_logger.info(f"start experiment {experiment_parameters.experiment_id}")
    try:
        check_create_folder(experiment_data_directory)
        start_datetime_str = datetime.datetime.now().strftime("%H:%M:%S")
        rsp_logger.info("Running experiment {} under pid {} at {}".format(experiment_parameters.experiment_id, os.getpid(), start_datetime_str))
        start_time = time.time()
        rsp_logger.info("*** experiment parameters for experiment {}. {}".format(experiment_parameters.experiment_id, _pp.pformat(experiment_parameters)))
        # Bail out early if the referenced schedule is not on disk.
        if experiment_base_directory is None or not exists_schedule(
            base_directory=experiment_base_directory,
            infra_id=experiment_parameters.infra_parameters.infra_id,
            schedule_id=experiment_parameters.schedule_parameters.schedule_id,
        ):
            # NOTE(review): `Logger.warn` is deprecated in favour of `Logger.warning`.
            rsp_logger.warn(f"Could not find schedule for {experiment_parameters.experiment_id} in {experiment_base_directory}")
            return
        rsp_logger.info(f"load_schedule for {experiment_parameters.experiment_id}")
        schedule, schedule_parameters = load_schedule(
            base_directory=f"{experiment_base_directory}",
            infra_id=experiment_parameters.infra_parameters.infra_id,
            schedule_id=experiment_parameters.schedule_parameters.schedule_id,
        )
        infrastructure, _ = load_infrastructure(base_directory=f"{experiment_base_directory}", infra_id=experiment_parameters.infra_parameters.infra_id)
        if debug:
            _render_route_dags_from_data(experiment_base_directory=experiment_output_directory, experiment_id=experiment_parameters.experiment_id)
        # B2: full and delta perfect re-scheduling
        experiment_results: ExperimentResults = run_experiment_in_memory(
            schedule=schedule,
            experiment_parameters=experiment_parameters,
            infrastructure_topo_dict=infrastructure.topo_dict,
            debug=debug,
            online_unrestricted_only=online_unrestricted_only,
        )
        if experiment_results is None:
            print(f"No malfunction for experiment {experiment_parameters.experiment_id}")
            return []
        elapsed_time = time.time() - start_time
        end_datetime_str = datetime.datetime.now().strftime("%H:%M:%S")
        # Log a wall-clock breakdown of the three main solver runs vs overhead.
        if not online_unrestricted_only:
            s = ("Running experiment {}: took {:5.3f}s ({}--{}) (sched: {} / re-sched full: {} / re-sched delta perfect: {} / ").format(
                experiment_parameters.experiment_id,
                elapsed_time,
                start_datetime_str,
                end_datetime_str,
                _get_asp_solver_details_from_statistics(elapsed_time=elapsed_time, statistics=experiment_results.results_schedule.solver_statistics),
                _get_asp_solver_details_from_statistics(elapsed_time=elapsed_time, statistics=experiment_results.results_online_unrestricted.solver_statistics),
                _get_asp_solver_details_from_statistics(elapsed_time=elapsed_time, statistics=experiment_results.results_offline_delta.solver_statistics),
            )
            solver_time_schedule = experiment_results.results_schedule.solver_statistics["summary"]["times"]["total"]
            solver_statistics_times_total_online_unrestricted = experiment_results.results_online_unrestricted.solver_statistics["summary"]["times"]["total"]
            solver_time_offline_delta = experiment_results.results_offline_delta.solver_statistics["summary"]["times"]["total"]
            elapsed_overhead_time = elapsed_time - solver_time_schedule - solver_statistics_times_total_online_unrestricted - solver_time_offline_delta
            s += "remaining: {:5.3f}s = {:5.2f}%) in thread {}".format(
                elapsed_overhead_time, elapsed_overhead_time / elapsed_time * 100, threading.get_ident()
            )
            rsp_logger.info(s)
        rsp_logger.info(virtual_memory_human_readable())
        rsp_logger.info(current_process_stats_human_readable())
        # fail fast!
        if not online_unrestricted_only:
            plausibility_check_experiment_results(experiment_results=experiment_results)
            plausibility_check_experiment_results_analysis(
                experiment_results_analysis=expand_experiment_results_for_analysis(experiment_results=experiment_results)
            )
        filename = create_experiment_filename(experiment_data_directory, experiment_parameters.experiment_id)
        save_experiment_results_to_file(
            experiment_results=experiment_results, file_name=filename, csv_only=csv_only, online_unrestricted_only=online_unrestricted_only
        )
        return os.getpid()
    except Exception as e:
        rsp_logger.error(e, exc_info=True)
        rsp_logger.error(
            f"XXX failed experiment_id={experiment_parameters.experiment_id} in {experiment_data_directory}, "
            f"infra_id={experiment_parameters.infra_parameters.infra_id}, "
            f"schedule_id={experiment_parameters.schedule_parameters.schedule_id}"
        )
        if raise_exceptions:
            raise e
        return os.getpid()
    finally:
        # always detach the per-experiment log handlers added above
        remove_file_handler_from_rsp_logger(stdout_log_fh)
        remove_file_handler_from_rsp_logger(stderr_log_fh)
        rsp_logger.info(f"end experiment {experiment_parameters.experiment_id}")
def load_and_filter_experiment_results_analysis(
    experiment_base_directory: str = BASELINE_DATA_FOLDER,
    experiments_of_interest: List[int] = None,
    from_cache: bool = False,
    from_individual_csv: bool = True,
    local_filter_experiment_results_analysis_data_frame: Callable[[DataFrame], DataFrame] = None,
) -> DataFrame:
    """Load experiment-results-analysis data, optionally filter it, and cache it as CSV.

    With `from_cache`, reads `<experiment_base_directory>.csv` directly and
    applies no filter. Otherwise loads from the experiment data folder (either
    the individual per-experiment CSVs or the full pickled results), applies
    the optional filter callable, and writes the result back to
    `<experiment_base_directory>.csv`.
    """
    if from_cache:
        return pd.read_csv(f"{experiment_base_directory}.csv")

    data_folder = f"{experiment_base_directory}/{EXPERIMENT_DATA_SUBDIRECTORY_NAME}"
    if from_individual_csv:
        experiment_data: pd.DataFrame = load_data_from_individual_csv_in_data_folder(
            experiment_data_folder_name=data_folder, experiment_ids=experiments_of_interest
        )
    else:
        _, experiment_results_analysis_list = load_and_expand_experiment_results_from_data_folder(
            experiment_data_folder_name=data_folder, experiment_ids=experiments_of_interest,
        )
        experiment_data = convert_list_of_experiment_results_analysis_to_data_frame(experiment_results_analysis_list)

    if local_filter_experiment_results_analysis_data_frame is None:
        experiment_data_filtered = experiment_data
    else:
        experiment_data_filtered = local_filter_experiment_results_analysis_data_frame(experiment_data)
        print(f"removed {len(experiment_data) - len(experiment_data_filtered)}/{len(experiment_data)} rows")

    experiment_data_filtered.to_csv(f"{experiment_base_directory}.csv")
    return experiment_data_filtered
def run_experiment_agenda(
    experiment_base_directory: str,
    experiment_agenda: ExperimentAgenda = None,
    experiment_output_directory: str = None,
    filter_experiment_agenda: Callable[[ExperimentParameters], bool] = None,
    # take only half of available cpus so the machine stays responsive
    run_experiments_parallel: int = AVAILABLE_CPUS // 2,
    csv_only: bool = False,
    online_unrestricted_only: bool = False,
) -> str:
    """Run A.2 + B. Presupposes infras and schedules.

    Parameters
    ----------
    experiment_base_directory: str
        where are schedules etc.?
    experiment_agenda: ExperimentAgenda
        full list of experiments
    experiment_output_directory
        if passed, the agenda in this directory must be the same as the one passed
    filter_experiment_agenda
        filter which experiments to run
    run_experiments_parallel: int
        number of worker processes to run experiments in parallel
    csv_only
        forwarded to the per-experiment runner
    online_unrestricted_only
        forwarded to the per-experiment runner

    Returns
    -------
    str
        the experiment output directory
    """
    sep = "============================================================================================================"
    assert (
        experiment_agenda is not None or experiment_output_directory is not None
    ), "Either experiment_agenda or experiment_output_directory must be specified."
    if experiment_output_directory is None:
        experiment_output_directory = f"{experiment_base_directory}/" + create_experiment_folder_name(experiment_agenda.experiment_name)
        check_create_folder(experiment_output_directory)
    experiment_data_directory = f"{experiment_output_directory}/{EXPERIMENT_DATA_SUBDIRECTORY_NAME}"
    # reconcile the agenda passed in with the one persisted in the output directory (if any)
    if exists_experiment_agenda(experiment_output_directory):
        rsp_logger.info(sep)
        rsp_logger.info(f"loading agenda <- {experiment_output_directory}")
        rsp_logger.info(sep)
        experiment_agenda_from_file = load_experiment_agenda_from_file(experiment_folder_name=experiment_output_directory)
        if experiment_agenda is not None:
            assert experiment_agenda_from_file == experiment_agenda
        experiment_agenda = experiment_agenda_from_file
    elif experiment_agenda is not None:
        save_experiment_agenda_and_hash_to_file(output_base_folder=experiment_output_directory, experiment_agenda=experiment_agenda)
    else:
        raise Exception("Either experiment_agenda or experiment_output_directory with experiment_agenda.pkl must be passed.")
    assert experiment_agenda is not None
    check_create_folder(experiment_data_directory)
    if run_experiments_parallel <= 1:
        rsp_logger.warning("Using only one process in pool might cause pool to stall sometimes. Use more than one process in pool?")
    # tee stdout to log file
    stdout_log_file = os.path.join(experiment_data_directory, "log.txt")
    stderr_log_file = os.path.join(experiment_data_directory, "err.txt")
    stdout_log_fh = add_file_handler_to_rsp_logger(stdout_log_file, logging.INFO)
    stderr_log_fh = add_file_handler_to_rsp_logger(stderr_log_file, logging.ERROR)
    try:
        if filter_experiment_agenda is not None:
            len_before_filtering = len(experiment_agenda.experiments)
            rsp_logger.info(sep)
            rsp_logger.info(f"filtering agenda by passed filter {filter_experiment_agenda} <- {experiment_output_directory}")
            rsp_logger.info(sep)
            experiments_filtered = filter(filter_experiment_agenda, experiment_agenda.experiments)
            experiment_agenda = ExperimentAgenda(
                experiment_name=experiment_agenda.experiment_name, global_constants=experiment_agenda.global_constants, experiments=list(experiments_filtered)
            )
            rsp_logger.info(
                f"after applying filter, there are {len(experiment_agenda.experiments)} experiments out of {len_before_filtering}: \n" + str(experiment_agenda)
            )
        rsp_logger.info(sep)
        rsp_logger.info(f"filtering agenda by experiments not run yet <- {experiment_output_directory}")
        rsp_logger.info(sep)
        len_before_filtering = len(experiment_agenda.experiments)
        experiment_agenda = ExperimentAgenda(
            # bug fix: pass the agenda's name (previously `experiment_agenda.experiments` was passed here)
            experiment_name=experiment_agenda.experiment_name,
            experiments=[
                experiment
                for experiment in experiment_agenda.experiments
                if load_experiments_results(experiment_data_folder_name=experiment_data_directory, experiment_id=experiment.experiment_id) is None
            ],
            global_constants=experiment_agenda.global_constants,
        )
        rsp_logger.info(
            f"after filtering out experiments already run from {experiment_output_directory}, "
            f"there are {len(experiment_agenda.experiments)} experiments out of {len_before_filtering}: \n" + str(experiment_agenda)
        )
        rsp_logger.info(sep)
        rsp_logger.info(f"RUNNING agenda -> {experiment_data_directory} ({len(experiment_agenda.experiments)} experiments)")
        rsp_logger.info(sep)
        rsp_logger.info(f"experiment_agenda.global_constants={experiment_agenda.global_constants}")
        rsp_logger.info(sep)
        # use processes in pool only once because of https://github.com/potassco/clingo/issues/203
        # https://stackoverflow.com/questions/38294608/python-multiprocessing-pool-new-process-for-each-variable
        # N.B. even with parallelization degree 1, we want to run each experiment in a new process
        # in order to get around https://github.com/potassco/clingo/issues/203
        pool = multiprocessing.Pool(processes=run_experiments_parallel, maxtasksperchild=1)
        rsp_logger.info(f"pool size {pool._processes} / {multiprocessing.cpu_count()} ({os.cpu_count()}) cpus on {platform.node()}")
        # nicer printing when tdqm print to stderr and we have logging to stdout shown in to the same console (IDE, separated in files)
        newline_and_flush_stdout_and_stderr()
        run_and_save_one_experiment_partial = partial(
            run_experiment_from_to_file,
            experiment_base_directory=experiment_base_directory,
            experiment_output_directory=experiment_output_directory,
            csv_only=csv_only,
            global_constants=experiment_agenda.global_constants,
            online_unrestricted_only=online_unrestricted_only,
        )
        for pid_done in tqdm.tqdm(
            pool.imap_unordered(run_and_save_one_experiment_partial, experiment_agenda.experiments), total=len(experiment_agenda.experiments)
        ):
            # unsafe use of inner API
            procs = [f"{str(proc)}={proc.pid}" for proc in pool._pool]
            rsp_logger.info(f"pid {pid_done} done. Pool: {procs}")
        # all results consumed: shut the pool down cleanly so no worker processes leak
        pool.close()
        pool.join()
        # nicer printing when tdqm print to stderr and we have logging to stdout shown in to the same console (IDE)
        newline_and_flush_stdout_and_stderr()
        _print_error_summary(experiment_data_directory)
    finally:
        remove_file_handler_from_rsp_logger(stdout_log_fh)
        remove_file_handler_from_rsp_logger(stderr_log_fh)
    return experiment_output_directory
def _print_error_summary(experiment_data_directory):
    """Echo the collected `err.txt` of a run to stdout, framed by separators."""
    rsp_logger.info(f"loading and expanding experiment results from {experiment_data_directory}")
    separator = "========================================================="
    print(separator)
    print("ERROR SUMMARY")
    print(separator)
    err_file = os.path.join(experiment_data_directory, "err.txt")
    with open(err_file, "r") as file_in:
        print(file_in.read())
    print(separator)
    print("END OF ERROR SUMMARY")
    print(separator)
    print("\n\n\n\n")
def create_infrastructure_and_schedule_from_ranges(
    infrastructure_parameters_range: InfrastructureParametersRange,
    schedule_parameters_range: ScheduleParametersRange,
    base_directory: str,
    speed_data: SpeedData,
    grid_mode: bool = False,
    run_experiments_parallel: int = 5,
) -> List[ScheduleParameters]:
    """Create infrastructures and schedules for the given ranges. Skips
    infrastructures and schedules already existing. For existing
    infrastructures, checks that parameters match.

    Parameters
    ----------
    infrastructure_parameters_range
        range to expand into concrete infrastructure parameters
    schedule_parameters_range
        range to expand into concrete schedule parameters (per infrastructure)
    base_directory
        folder where infrastructures and schedules are persisted
    speed_data
        forwarded to infrastructure generation
    grid_mode
        forwarded to infrastructure parameter expansion
    run_experiments_parallel
        number of worker processes used to generate the missing schedules

    Returns
    -------
    List[ScheduleParameters]
        the full expansion of the schedule parameter range over all infrastructures
    """
    # expand infrastructure parameters and generate infrastructure
    list_of_infrastructure_parameters = expand_infrastructure_parameter_range_and_generate_infrastructure(
        infrastructure_parameter_range=infrastructure_parameters_range, base_directory=base_directory, speed_data=speed_data, grid_mode=grid_mode
    )
    # expand schedule parameters and get list of those missing
    list_of_schedule_parameters_to_generate: List[ScheduleParameters] = list(
        itertools.chain.from_iterable(
            [
                expand_schedule_parameter_range_and_get_those_not_existing_yet(
                    schedule_parameters_range=schedule_parameters_range, base_directory=base_directory, infra_id=infrastructure_parameters.infra_id
                )
                for infrastructure_parameters in list_of_infrastructure_parameters
            ]
        )
    )
    # generate schedules in parallel
    pool = multiprocessing.Pool(processes=run_experiments_parallel, maxtasksperchild=1)
    gen_and_save_schedule_partial = partial(gen_and_save_schedule, base_directory=base_directory)
    for done in tqdm.tqdm(
        pool.imap_unordered(gen_and_save_schedule_partial, list_of_schedule_parameters_to_generate), total=len(list_of_schedule_parameters_to_generate)
    ):
        rsp_logger.info(f"done: {done}")
    # bug fix: shut the pool down so worker processes do not leak
    pool.close()
    pool.join()
    # expand schedule parameters and get full list (existing and newly generated)
    list_of_schedule_parameters: List[ScheduleParameters] = list(
        itertools.chain.from_iterable(
            [
                expand_schedule_parameter_range(schedule_parameter_range=schedule_parameters_range, infra_id=infrastructure_parameters.infra_id)
                for infrastructure_parameters in list_of_infrastructure_parameters
            ]
        )
    )
    return list_of_schedule_parameters
def list_infrastructure_and_schedule_params_from_base_directory(
    base_directory: str, filter_experiment_agenda: Callable[[int, int], bool] = None, debug: bool = False
) -> Tuple[List[InfrastructureParameters], Dict[int, List[Tuple[ScheduleParameters, Schedule]]]]:
    """Load every persisted infrastructure under `<base_directory>/infra/` together
    with its schedules, optionally filtered by `(infra_id, schedule_id)`."""
    infra_parameters_list = []
    infra_schedule_dict = {}
    nb_infras = len(os.listdir(f"{base_directory}/infra/"))
    for infra_id in range(nb_infras):
        infra, infra_parameters = load_infrastructure(base_directory=base_directory, infra_id=infra_id)
        if debug:
            # inspect route-DAG sizes per agent
            for agent_id, topo in infra.topo_dict.items():
                print(f" {agent_id} has {len(get_paths_in_route_dag(topo))} paths in infra {infra_id}")
        infra_parameters_list.append(infra_parameters)
        schedule_dir = f"{base_directory}/infra/{infra_id:03d}/schedule"
        if not os.path.isdir(schedule_dir):
            # infrastructure without any schedules yet
            continue
        for schedule_id in [int(s) for s in os.listdir(schedule_dir)]:
            if filter_experiment_agenda is not None and not filter_experiment_agenda(infra_id, schedule_id):
                continue
            schedule, schedule_parameters = load_schedule(base_directory=base_directory, infra_id=infra_id, schedule_id=schedule_id)
            infra_schedule_dict.setdefault(infra_parameters.infra_id, []).append((schedule_parameters, schedule))
    return infra_parameters_list, infra_schedule_dict
def expand_infrastructure_parameter_range_and_generate_infrastructure(
    infrastructure_parameter_range: InfrastructureParametersRange, base_directory: str, speed_data: SpeedData, grid_mode: bool = True
) -> List[InfrastructureParameters]:
    """Expand the infrastructure parameter range and ensure every resulting
    infrastructure exists on disk.

    Infrastructures already present are not regenerated; instead, their stored
    parameters are checked against the expanded ones.

    Returns
    -------
    List[InfrastructureParameters]
        the full expansion of the range
    """
    list_of_infra_parameters = expand_infrastructure_parameter_range(
        infrastructure_parameter_range=infrastructure_parameter_range, grid_mode=grid_mode, speed_data=speed_data
    )
    for infra_parameters in list_of_infra_parameters:
        if not exists_infrastructure(base_directory=base_directory, infra_id=infra_parameters.infra_id):
            # not on disk yet: generate and persist
            infra = gen_infrastructure(infra_parameters=infra_parameters)
            save_infrastructure(infrastructure=infra, infrastructure_parameters=infra_parameters, base_directory=base_directory)
            continue
        rsp_logger.info(f"skipping gen infrastructure for [{infra_parameters.infra_id}] {infra_parameters} -> infrastructure already exists")
        # existing infrastructure must have been generated from the same parameters
        _, infra_parameters_from_file = load_infrastructure(base_directory=base_directory, infra_id=infra_parameters.infra_id)
        assert (
            infra_parameters == infra_parameters_from_file
        ), f"infra parameters not the same for [{infra_parameters.infra_id}]: expected {infra_parameters}, found {infra_parameters_from_file} in file"
    return list_of_infra_parameters
def expand_schedule_parameter_range_and_get_those_not_existing_yet(
    schedule_parameters_range: ScheduleParametersRange, base_directory: str, infra_id: int
) -> List[ScheduleParameters]:
    """Expand the schedule parameter range for one infrastructure and return the
    subset for which no schedule is persisted yet.

    For schedules already on disk, the stored parameters are checked against the
    expanded ones.
    """
    list_of_schedule_parameters = expand_schedule_parameter_range(schedule_parameter_range=schedule_parameters_range, infra_id=infra_id)
    infra, infra_parameters = load_infrastructure(base_directory=base_directory, infra_id=infra_id)
    missing: List[ScheduleParameters] = []
    for schedule_parameters in list_of_schedule_parameters:
        if not exists_schedule(base_directory=base_directory, infra_id=infra_id, schedule_id=schedule_parameters.schedule_id):
            missing.append(schedule_parameters)
            continue
        rsp_logger.info(
            f"skipping gen schedule for [infra {infra_id}/schedule {schedule_parameters.schedule_id}] {infra_parameters} {schedule_parameters} "
            f"-> schedule already exists"
        )
        # existing schedule must have been generated from the same parameters
        _, schedule_parameters_from_file = load_schedule(base_directory=base_directory, infra_id=infra_id, schedule_id=schedule_parameters.schedule_id)
        assert schedule_parameters_from_file == schedule_parameters, (
            f"schedule parameters [infra {infra_id}/schedule {schedule_parameters.schedule_id}] not the same: "
            f"expected {schedule_parameters}, found {schedule_parameters_from_file} in file"
        )
    return missing
def save_experiment_agenda_and_hash_to_file(output_base_folder: str, experiment_agenda: ExperimentAgenda):
    """Persist the agenda as `experiment_agenda.pkl` and record the current git
    hash alongside it.

    Parameters
    ----------
    output_base_folder: str
        folder where all experiment files and the agenda are stored
    experiment_agenda: ExperimentAgenda
        the experiment agenda to save
    """
    _pickle_dump(obj=experiment_agenda, file_name="experiment_agenda.pkl", folder=output_base_folder)
    # keep track of the exact code version used to produce the data
    _write_sha_txt(output_base_folder)
def load_experiment_agenda_from_file(experiment_folder_name: str) -> ExperimentAgenda:
    """Load the experiment agenda (`experiment_agenda.pkl`) from the folder with the experiments.
    Parameters
    ----------
    experiment_folder_name: str
        Folder name of experiment where all experiment files and agenda are stored
    Returns
    -------
    ExperimentAgenda
        the unpickled agenda
    """
    return _pickle_load(file_name="experiment_agenda.pkl", folder=experiment_folder_name)
def exists_experiment_agenda(experiment_folder_name: str) -> bool:
    """Return whether an `experiment_agenda.pkl` file exists in the given folder."""
    return os.path.isfile(os.path.join(experiment_folder_name, "experiment_agenda.pkl"))
def create_experiment_folder_name(experiment_name: str) -> str:
    """Build a folder name `<experiment_name>_<timestamp>` from the current local time."""
    timestamp = datetime.datetime.now().strftime("%Y_%m_%dT%H_%M_%S")
    return f"{experiment_name}_{timestamp}"
def create_experiment_filename(experiment_data_folder_name: str, experiment_id: int) -> str:
    """Build a timestamped pickle file name `experiment_<id 4 digits>_<timestamp>.pkl` inside the data folder."""
    timestamp = datetime.datetime.now().strftime("%Y_%m_%dT%H_%M_%S")
    return os.path.join(experiment_data_folder_name, f"experiment_{experiment_id:04d}_{timestamp}.pkl")
def delete_experiment_folder(experiment_folder_name: str):
    """Recursively delete an experiment folder and everything in it.

    Parameters
    ----------
    experiment_folder_name: str
        Folder name of experiment where all experiment files are stored
    """
    shutil.rmtree(experiment_folder_name)
if __name__ == "__main__":
    # Ad-hoc entry point: re-run a single experiment from the persisted baseline agenda.
    experiment_id_to_rerun = 3254
    run_experiment_agenda(
        experiment_base_directory=INFRAS_AND_SCHEDULES_FOLDER,
        # reuse the agenda that was pickled alongside the baseline data
        experiment_agenda=_pickle_load(f"{BASELINE_DATA_FOLDER}/experiment_agenda.pkl"),
        # run only the one experiment of interest
        filter_experiment_agenda=lambda params: params.experiment_id == experiment_id_to_rerun,
    )
| 58,195 | 54.372027 | 160 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_online_random.py | import pprint
from typing import Dict
import numpy as np
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.scheduling_problem import RouteDAGConstraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import AgentWiseChange
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import scoper_agent_wise
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
_pp = pprint.PrettyPrinter(indent=4)
def scoper_online_random_for_all_agents(
    online_unrestricted_problem: ScheduleProblemDescription,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    latest_arrival: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    delta_random_topo_dict_to_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    max_window_size_from_earliest: int,
) -> "Tuple[ScheduleProblemDescription, Set[int]]":
    """The scoper random opens up `nb_changed_running_agents_online` randomly
    chosen agents (among those still running after the malfunction); all other
    agents are restricted to their scheduled route (but may shift in time).

    Parameters
    ----------
    online_unrestricted_problem
        full re-scheduling problem; constraints/topos are copied for opened agents
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimumum travel times for the agents
    latest_arrival:
        latest arrival
    delta_random_topo_dict_to_:
        the topologies used for scheduling; mutated in place (nodes removed for restricted agents)
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    weight_route_change
    weight_lateness_seconds
    max_window_size_from_earliest: int
        maximum window size as offset from earliest. => "Cuts off latest at earliest + earliest_time_windows when doing
        back propagation of latest"
    nb_changed_running_agents_online: int
        how many running agents to open up (sampled without replacement)

    Returns
    -------
    Tuple[ScheduleProblemDescription, Set[int]]
        the scoped re-scheduling problem and the set of agent ids that were opened up
    """
    freeze_dict: RouteDAGConstraintsDict = {}
    topo_dict: TopoDict = {}
    # agents whose last waypoint is scheduled at or after the malfunction, i.e. still running
    agents_running_after_malfunction = [
        agent_id for agent_id, schedule_trainrun in schedule_trainrun_dict.items() if schedule_trainrun[-1].scheduled_at >= malfunction.time_step
    ]
    assert malfunction.agent_id in agents_running_after_malfunction
    # sample without replacement which running agents get full re-scheduling freedom
    changed_agents = np.random.choice(agents_running_after_malfunction, nb_changed_running_agents_online, replace=False)
    for agent_id in schedule_trainrun_dict.keys():
        earliest_dict, latest_dict, topo = scoper_agent_wise(
            agent_id=agent_id,
            topo_=delta_random_topo_dict_to_[agent_id],
            schedule_trainrun=schedule_trainrun_dict[agent_id],
            online_unrestricted_problem=online_unrestricted_problem,
            # N.B. we do not require malfunction agent to have re-routing flexibility!
            agent_wise_change=AgentWiseChange.unrestricted if agent_id in changed_agents else AgentWiseChange.route_restricted,
            malfunction=malfunction,
            latest_arrival=latest_arrival,
            max_window_size_from_earliest=max_window_size_from_earliest,
            minimum_travel_time=minimum_travel_time_dict[agent_id],
        )
        freeze_dict[agent_id] = RouteDAGConstraints(earliest=earliest_dict, latest=latest_dict)
        topo_dict[agent_id] = topo
    # TODO SIM-324 pull out verification
    for agent_id, _ in freeze_dict.items():
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=freeze_dict[agent_id],
            topo=topo_dict[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
    # N.B. re-schedule train run must not necessarily be open in route dag constraints!
    return (
        ScheduleProblemDescription(
            route_dag_constraints_dict=freeze_dict,
            minimum_travel_time_dict=minimum_travel_time_dict,
            topo_dict=topo_dict,
            max_episode_steps=latest_arrival,
            route_section_penalties=_extract_route_section_penalties(
                schedule_trainruns=schedule_trainrun_dict, topo_dict=topo_dict, weight_route_change=weight_route_change
            ),
            weight_lateness_seconds=weight_lateness_seconds,
        ),
        set(changed_agents),
    )
| 4,898 | 42.353982 | 145 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_offline_fully_restricted.py | import pprint
from typing import Dict
import numpy as np
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.scheduling_problem import RouteDAGConstraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
_pp = pprint.PrettyPrinter(indent=4)
def scoper_offline_fully_restricted_for_all_agents(
    online_unrestricted_trainrun_dict: TrainrunDict,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    max_episode_steps: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    offline_fully_restricted_topo_dict_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    max_window_size_from_earliest: int = np.inf,
) -> ScheduleProblemDescription:
    """Pin every agent exactly to the full (online unrestricted) re-schedule:
    for each agent, earliest == latest == the re-scheduled time, and the
    topology is reduced in place to exactly the re-scheduled route.

    Parameters
    ----------
    online_unrestricted_trainrun_dict: TrainrunDict
        the magic information of the full re-schedule
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimumum travel times for the agents
    max_episode_steps:
        latest arrival
    offline_fully_restricted_topo_dict_:
        the topologies used for scheduling; mutated in place (nodes off the re-scheduled route are removed)
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    max_window_size_from_earliest: int
        maximum window size as offset from earliest. => "Cuts off latest at earliest + earliest_time_windows when doing
        back propagation of latest"
    weight_lateness_seconds
        how much
    weight_route_change
    Returns
    -------
    ScheduleProblemDesccription
    """
    freeze_dict: RouteDAGConstraintsDict = {}
    for agent_id, online_unrestricted_trainrun in online_unrestricted_trainrun_dict.items():
        topo_ = offline_fully_restricted_topo_dict_[agent_id]
        # map each waypoint of the re-schedule to its re-scheduled time
        reschedule = {trainrun_waypoint.waypoint: trainrun_waypoint.scheduled_at for trainrun_waypoint in set(online_unrestricted_trainrun)}
        nodes_to_keep = {trainrun_waypoint.waypoint for trainrun_waypoint in online_unrestricted_trainrun}
        nodes_to_remove = {node for node in topo_.nodes if node not in nodes_to_keep}
        topo_.remove_nodes_from(nodes_to_remove)
        # earliest == latest: the agent is fully frozen to the re-schedule
        freeze_dict[agent_id] = RouteDAGConstraints(earliest=reschedule, latest=reschedule)
    # TODO SIM-324 pull out verification
    for agent_id, _ in freeze_dict.items():
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=freeze_dict[agent_id],
            topo=offline_fully_restricted_topo_dict_[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
    # N.B. re-schedule train run must not necessarily be open in route dag constraints!
    return ScheduleProblemDescription(
        route_dag_constraints_dict=freeze_dict,
        minimum_travel_time_dict=minimum_travel_time_dict,
        topo_dict=offline_fully_restricted_topo_dict_,
        max_episode_steps=max_episode_steps,
        route_section_penalties=_extract_route_section_penalties(
            schedule_trainruns=schedule_trainrun_dict, topo_dict=offline_fully_restricted_topo_dict_, weight_route_change=weight_route_change
        ),
        weight_lateness_seconds=weight_lateness_seconds,
    )
| 4,060 | 44.122222 | 141 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_offline_delta_weak.py | import pprint
from typing import Dict
import numpy as np
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.propagate import verify_trainrun_satisfies_route_dag_constraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import AgentWiseChange
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import scoper_agent_wise
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
_pp = pprint.PrettyPrinter(indent=4)
def scoper_offline_delta_weak_for_all_agents(
    online_unrestricted_trainrun_dict: TrainrunDict,
    online_unrestricted_problem: ScheduleProblemDescription,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    latest_arrival: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    topo_dict_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    max_window_size_from_earliest: int = np.inf,
) -> ScheduleProblemDescription:
    """The scoper offline delta weak only opens up all changed agents (those
    whose full re-schedule differs from the initial schedule) and freezes all
    unchanged agents to the initial schedule.
    Parameters
    ----------
    online_unrestricted_problem
        full re-scheduling problem; constraints/topos are copied for opened agents
    online_unrestricted_trainrun_dict: TrainrunDict
        the magic information of the full re-schedule
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimumum travel times for the agents
    latest_arrival:
        latest arrival
    topo_dict_:
        the topologies used for scheduling; mutated in place for frozen agents
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    max_window_size_from_earliest: int
        maximum window size as offset from earliest. => "Cuts off latest at earliest + earliest_time_windows when doing
        back propagation of latest"
    weight_lateness_seconds
        how much
    weight_route_change
    Returns
    -------
    ScheduleProblemDesccription
    """
    freeze_dict: RouteDAGConstraintsDict = {}
    topo_dict: TopoDict = {}
    # per agent: did the full re-schedule deviate from the initial schedule?
    changed_dict = {agent_id: online_unrestricted_trainrun_dict[agent_id] != schedule_trainrun_dict[agent_id] for agent_id in schedule_trainrun_dict}
    # TODO SIM-324 pull out verification
    assert malfunction.agent_id in changed_dict
    for agent_id in schedule_trainrun_dict.keys():
        earliest_dict, latest_dict, topo = scoper_agent_wise(
            agent_id=agent_id,
            topo_=topo_dict_[agent_id],
            schedule_trainrun=schedule_trainrun_dict[agent_id],
            online_unrestricted_problem=online_unrestricted_problem,
            malfunction=malfunction,
            latest_arrival=latest_arrival,
            max_window_size_from_earliest=max_window_size_from_earliest,
            minimum_travel_time=minimum_travel_time_dict[agent_id],
            # freeze all unchanged agents - we know this is feasible!
            agent_wise_change=AgentWiseChange.unrestricted if changed_dict[agent_id] else AgentWiseChange.fully_restricted,
        )
        freeze_dict[agent_id] = RouteDAGConstraints(earliest=earliest_dict, latest=latest_dict)
        topo_dict[agent_id] = topo
    # TODO SIM-324 pull out verification
    for agent_id, _ in freeze_dict.items():
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=freeze_dict[agent_id],
            topo=topo_dict[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
        # re-schedule train run must be open in route dag constraints
        verify_trainrun_satisfies_route_dag_constraints(
            agent_id=agent_id, route_dag_constraints=freeze_dict[agent_id], scheduled_trainrun=online_unrestricted_trainrun_dict[agent_id]
        )
    return ScheduleProblemDescription(
        route_dag_constraints_dict=freeze_dict,
        minimum_travel_time_dict=minimum_travel_time_dict,
        topo_dict=topo_dict,
        max_episode_steps=latest_arrival,
        route_section_penalties=_extract_route_section_penalties(
            schedule_trainruns=schedule_trainrun_dict, topo_dict=topo_dict, weight_route_change=weight_route_change
        ),
        weight_lateness_seconds=weight_lateness_seconds,
    )
| 4,868 | 43.263636 | 149 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_agent_wise.py | from enum import Enum
import networkx as nx
from flatland.envs.rail_trainrun_data_structures import Trainrun
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import scoper_online_unrestricted
class AgentWiseChange(Enum):
    """How much re-scheduling freedom a single agent is given by a scoper
    (dispatched on in `scoper_agent_wise`)."""

    # full re-scheduling freedom: constraints/topology taken from the full re-scheduling problem
    unrestricted = "unrestricted"
    # keep only the scheduled route; time windows are re-propagated
    route_restricted = "route_restricted"
    # pin the agent exactly to its schedule (times and route)
    fully_restricted = "fully_restricted"
def scoper_agent_wise(
    agent_id: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    topo_: nx.DiGraph,
    schedule_trainrun: Trainrun,
    online_unrestricted_problem: ScheduleProblemDescription,
    malfunction: ExperimentMalfunction,
    minimum_travel_time: int,
    latest_arrival: int,
    agent_wise_change: AgentWiseChange,
    max_window_size_from_earliest: int,
):
    """Derive `(earliest, latest, topo)` for one agent according to `agent_wise_change`:

    - `unrestricted`: copies of constraints and topology from the full re-scheduling problem
      (i.e. everything opened up as in full re-scheduling)
    - `fully_restricted`: agent pinned exactly to its schedule (times and route)
    - `route_restricted`: only the scheduled route kept, time windows re-propagated
    """
    if agent_wise_change == AgentWiseChange.unrestricted:
        # hand out copies so callers may mutate them freely
        constraints = online_unrestricted_problem.route_dag_constraints_dict[agent_id]
        return constraints.earliest.copy(), constraints.latest.copy(), online_unrestricted_problem.topo_dict[agent_id].copy()
    if agent_wise_change == AgentWiseChange.fully_restricted:
        # earliest == latest == scheduled time; topology reduced in place to the scheduled route
        scheduled_times = {trainrun_waypoint.waypoint: trainrun_waypoint.scheduled_at for trainrun_waypoint in set(schedule_trainrun)}
        scheduled_waypoints = {trainrun_waypoint.waypoint for trainrun_waypoint in schedule_trainrun}
        topo_.remove_nodes_from({node for node in topo_.nodes if node not in scheduled_waypoints})
        return scheduled_times, scheduled_times, topo_
    if agent_wise_change == AgentWiseChange.route_restricted:
        # restrict topology in place to the scheduled route, then recompute the time windows
        scheduled_waypoints = {trainrun_waypoint.waypoint for trainrun_waypoint in schedule_trainrun}
        topo_.remove_nodes_from({node for node in topo_.nodes if node not in scheduled_waypoints})
        earliest, latest = scoper_online_unrestricted(
            agent_id=agent_id,
            topo_=topo_,
            schedule_trainrun=schedule_trainrun,
            minimum_travel_time=minimum_travel_time,
            malfunction=malfunction,
            latest_arrival=latest_arrival,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
        return earliest, latest, topo_
    raise RuntimeError(f"Unhandled case agent_wise_change={agent_wise_change}")
| 2,810 | 43.619048 | 145 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_offline_delta.py | import logging
import pprint
from typing import Dict
import networkx as nx
import numpy as np
from flatland.envs.rail_trainrun_data_structures import Trainrun
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.scheduling.propagate import _get_delayed_trainrun_waypoint_after_malfunction
from rsp.scheduling.propagate import propagate
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.propagate import verify_trainrun_satisfies_route_dag_constraints
from rsp.scheduling.scheduling_problem import get_sinks_for_topo
from rsp.scheduling.scheduling_problem import RouteDAGConstraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
from rsp.utils.rsp_logger import rsp_logger
_pp = pprint.PrettyPrinter(indent=4)
def scoper_offline_delta(
    agent_id: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    topo_: nx.DiGraph,
    schedule_trainrun: Trainrun,
    online_unrestricted_trainrun: Trainrun,
    malfunction: ExperimentMalfunction,
    minimum_travel_time: int,
    latest_arrival: int,
    max_window_size_from_earliest: int = np.inf,
):
    """ "Scoper perfect" for a single agent:

    - allow only edges either in schedule or re-schedule
    - if the same in location and time in schedule and re-schedule -> stay
      (implicitly includes everything up to malfunction)

    Caveat: In contrast to other methods, the topo is not modified in place;
    a restricted copy is returned instead.

    Parameters
    ----------
    agent_id
        agent the constraints are derived for
    topo_
        the agent's route DAG; despite the underscore convention, this one is
        NOT mutated here (see caveat above)
    schedule_trainrun
        the agent's train run in the schedule S0
    online_unrestricted_trainrun
        the agent's train run in the unrestricted re-schedule
    malfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time
        the agent's minimum travel time
    latest_arrival
        latest arrival, used as latest for all sinks
    max_window_size_from_earliest
        maximum window size as offset from earliest. => "Cuts off latest at
        earliest + earliest_time_windows when doing back propagation of latest"

    Returns
    -------
    (earliest_dict, latest_dict, topo_out) — constraints plus the restricted topo copy.
    """
    # Waypoints identical in BOTH location and time in schedule and re-schedule:
    # these get frozen to their scheduled time below.
    waypoints_same_location_and_time = {
        trainrun_waypoint.waypoint for trainrun_waypoint in set(online_unrestricted_trainrun).intersection(set(schedule_trainrun))
    }
    if rsp_logger.isEnabledFor(logging.DEBUG):
        rsp_logger.debug(f"waypoints_same_location_and_time={waypoints_same_location_and_time}")
    schedule_waypoints = {trainrun_waypoint.waypoint for trainrun_waypoint in schedule_trainrun}
    reschedule_waypoints = {trainrun_waypoint.waypoint for trainrun_waypoint in online_unrestricted_trainrun}
    assert schedule_waypoints.issubset(topo_.nodes), f"{schedule_waypoints} {topo_.nodes} {schedule_waypoints.difference(topo_.nodes)}"
    assert reschedule_waypoints.issubset(topo_.nodes), f"{reschedule_waypoints} {topo_.nodes} {reschedule_waypoints.difference(topo_.nodes)}"
    # Waypoints visited in both runs, though possibly at different times.
    waypoints_same_location = list(schedule_waypoints.intersection(reschedule_waypoints))
    if rsp_logger.isEnabledFor(logging.DEBUG):
        rsp_logger.debug(f"waypoints_same_location={waypoints_same_location}")
    # Work on a copy of the topo: keep only nodes used by schedule or re-schedule.
    topo_out = topo_.copy()
    to_remove = set(topo_out.nodes).difference(schedule_waypoints.union(reschedule_waypoints))
    topo_out.remove_nodes_from(to_remove)
    earliest_dict = {}
    latest_dict = {}
    schedule = {trainrun_waypoint.waypoint: trainrun_waypoint.scheduled_at for trainrun_waypoint in schedule_trainrun}
    # Freeze waypoints that agree in location AND time to their scheduled time.
    for v in waypoints_same_location_and_time:
        earliest_dict[v] = schedule[v]
        latest_dict[v] = schedule[v]
    sinks = list(get_sinks_for_topo(topo_out))
    # All non-frozen sinks may arrive as late as latest_arrival.
    for sink in sinks:
        if sink in waypoints_same_location_and_time:
            continue
        latest_dict[sink] = latest_arrival
    # this is v_2 in paper
    delayed_trainrun_waypoint_after_malfunction = _get_delayed_trainrun_waypoint_after_malfunction(
        agent_id=agent_id, trainrun=schedule_trainrun, malfunction=malfunction, minimum_travel_time=minimum_travel_time
    )
    earliest_dict[delayed_trainrun_waypoint_after_malfunction.waypoint] = delayed_trainrun_waypoint_after_malfunction.scheduled_at
    force_earliest = waypoints_same_location_and_time.union({delayed_trainrun_waypoint_after_malfunction.waypoint})
    assert set(force_earliest).issubset(topo_out.nodes), (
        f"{force_earliest.difference(topo_out.nodes)} - {set(topo_out.nodes).difference(force_earliest)} // "
        f"{set(topo_out.nodes).intersection(force_earliest)} // {delayed_trainrun_waypoint_after_malfunction}"
    )
    # Fill in earliest/latest for the remaining nodes by propagation over the
    # restricted topo; earliest_dict/latest_dict are mutated in place.
    propagate(
        earliest_dict=earliest_dict,
        latest_dict=latest_dict,
        topo=topo_out,
        force_earliest=force_earliest,
        force_latest=waypoints_same_location_and_time.union(sinks),
        must_be_visited=waypoints_same_location,
        minimum_travel_time=minimum_travel_time,
        latest_arrival=latest_arrival,
        max_window_size_from_earliest=max_window_size_from_earliest,
    )
    return earliest_dict, latest_dict, topo_out
def scoper_offline_delta_for_all_agents(
    online_unrestricted_trainrun_dict: TrainrunDict,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    max_episode_steps: int,
    offline_delta_topo_dict_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    max_window_size_from_earliest: int = np.inf,
) -> ScheduleProblemDescription:
    """Build the "offline delta" (scoper perfect) problem for all agents.

    The scoper perfect only opens up the differences between the schedule
    and the imaginary re-schedule. It gives no additional routing flexibility!

    Parameters
    ----------
    online_unrestricted_trainrun_dict: TrainrunDict
        the magic information of the full re-schedule
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimum travel times for the agents
    max_episode_steps:
        latest arrival
    offline_delta_topo_dict_:
        the topologies used for scheduling
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    weight_route_change
    weight_lateness_seconds
    max_window_size_from_earliest: int
        maximum window size as offset from earliest; caps latest at
        earliest + window during back propagation of latest

    Returns
    -------
    ScheduleProblemDescription
    """
    constraints_per_agent: RouteDAGConstraintsDict = {}
    restricted_topos: TopoDict = {}
    # Derive per-agent constraints and restricted topologies.
    for agent_id in schedule_trainrun_dict:
        earliest, latest, restricted_topo = scoper_offline_delta(
            agent_id=agent_id,
            topo_=offline_delta_topo_dict_[agent_id],
            schedule_trainrun=schedule_trainrun_dict[agent_id],
            online_unrestricted_trainrun=online_unrestricted_trainrun_dict[agent_id],
            malfunction=malfunction,
            minimum_travel_time=minimum_travel_time_dict[agent_id],
            latest_arrival=max_episode_steps,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
        constraints_per_agent[agent_id] = RouteDAGConstraints(earliest=earliest, latest=latest)
        restricted_topos[agent_id] = restricted_topo
    # TODO SIM-324 pull out verification
    for agent_id in constraints_per_agent:
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=constraints_per_agent[agent_id],
            topo=restricted_topos[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
        # re-schedule train run must be open in route dag constraints
        verify_trainrun_satisfies_route_dag_constraints(
            agent_id=agent_id, route_dag_constraints=constraints_per_agent[agent_id], scheduled_trainrun=online_unrestricted_trainrun_dict[agent_id]
        )
    return ScheduleProblemDescription(
        route_dag_constraints_dict=constraints_per_agent,
        minimum_travel_time_dict=minimum_travel_time_dict,
        topo_dict=restricted_topos,
        max_episode_steps=max_episode_steps,
        route_section_penalties=_extract_route_section_penalties(
            schedule_trainruns=schedule_trainrun_dict, topo_dict=restricted_topos, weight_route_change=weight_route_change
        ),
        weight_lateness_seconds=weight_lateness_seconds,
    )
| 8,348 | 42.036082 | 141 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_online_transmission_chains.py | import pprint
from typing import Dict
from typing import Set
from typing import Tuple
import numpy as np
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.resource_occupation.resource_occupation import extract_resource_occupations
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.scheduling_problem import RouteDAGConstraints
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import AgentWiseChange
from rsp.step_05_experiment_run.scopers.scoper_agent_wise import scoper_agent_wise
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
from rsp.transmission_chains.transmission_chains import extract_transmission_chains_from_schedule
from rsp.transmission_chains.transmission_chains import validate_transmission_chains
_pp = pprint.PrettyPrinter(indent=4)
def scoper_online_transmission_chains_for_all_agents(
    online_unrestricted_problem: ScheduleProblemDescription,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    latest_arrival: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    delta_online_topo_dict_to_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    time_flexibility: bool,
    max_window_size_from_earliest: int = np.inf,
) -> Tuple[ScheduleProblemDescription, Set[int]]:
    """The scoper online only opens up the agents reached by the malfunction's
    transmission chains; all other agents stay restricted to their schedule
    (fully or route-only, depending on `time_flexibility`).

    Parameters
    ----------
    online_unrestricted_problem
        the unrestricted re-scheduling problem, passed through to the agent-wise scoper
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimum travel times for the agents
    latest_arrival:
        latest arrival
    delta_online_topo_dict_to_:
        the topologies used for scheduling
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    time_flexibility
        if True, unreached agents keep their route but not their times
        (route_restricted); if False, they are fully frozen (fully_restricted)
    max_window_size_from_earliest: int
        maximum window size as offset from earliest. => "Cuts off latest at
        earliest + earliest_time_windows when doing back propagation of latest"
    weight_lateness_seconds
        how much
    weight_route_change

    Returns
    -------
    Tuple of the ScheduleProblemDescription and the set of agent ids reached
    by the transmission chains.
    """
    # 1. compute the forward-only wave of the malfunction
    schedule_occupations = extract_resource_occupations(schedule=schedule_trainrun_dict)
    transmission_chains = extract_transmission_chains_from_schedule(malfunction=malfunction, occupations=schedule_occupations)
    validate_transmission_chains(transmission_chains=transmission_chains)
    # 2. compute reached agents: the last hop-off of each chain identifies an affected agent
    online_reached_agents = {transmission_chain[-1].hop_off.agent_id for transmission_chain in transmission_chains}
    # scope applied to agents NOT reached by the wave
    agent_wise_change_if_unchanged = AgentWiseChange.route_restricted if time_flexibility else AgentWiseChange.fully_restricted
    freeze_dict: RouteDAGConstraintsDict = {}
    topo_dict: TopoDict = {}
    # TODO SIM-324 pull out verification
    # the malfunctioning agent is always reached by its own wave
    assert malfunction.agent_id in online_reached_agents
    for agent_id in schedule_trainrun_dict.keys():
        earliest_dict, latest_dict, topo = scoper_agent_wise(
            agent_id=agent_id,
            topo_=delta_online_topo_dict_to_[agent_id],
            schedule_trainrun=schedule_trainrun_dict[agent_id],
            online_unrestricted_problem=online_unrestricted_problem,
            malfunction=malfunction,
            latest_arrival=latest_arrival,
            max_window_size_from_earliest=max_window_size_from_earliest,
            minimum_travel_time=minimum_travel_time_dict[agent_id],
            # N.B. we do not require malfunction agent to have re-routing flexibility!
            agent_wise_change=AgentWiseChange.unrestricted if agent_id in online_reached_agents else agent_wise_change_if_unchanged,
        )
        freeze_dict[agent_id] = RouteDAGConstraints(earliest=earliest_dict, latest=latest_dict)
        topo_dict[agent_id] = topo
    # TODO SIM-324 pull out verification
    for agent_id, _ in freeze_dict.items():
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=freeze_dict[agent_id],
            topo=topo_dict[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
    # N.B. re-schedule train run must not necessarily be open in route dag constraints!
    return (
        ScheduleProblemDescription(
            route_dag_constraints_dict=freeze_dict,
            minimum_travel_time_dict=minimum_travel_time_dict,
            topo_dict=topo_dict,
            max_episode_steps=latest_arrival,
            route_section_penalties=_extract_route_section_penalties(
                schedule_trainruns=schedule_trainrun_dict, topo_dict=topo_dict, weight_route_change=weight_route_change
            ),
            weight_lateness_seconds=weight_lateness_seconds,
        ),
        online_reached_agents,
    )
| 5,579 | 45.115702 | 132 | py |
rsp | rsp-master/src/python/rsp/step_05_experiment_run/scopers/scoper_online_route_restricted.py | import pprint
from typing import Dict
import numpy as np
from flatland.envs.rail_trainrun_data_structures import TrainrunDict
from rsp.scheduling.propagate import verify_consistency_of_route_dag_constraints_for_agent
from rsp.scheduling.scheduling_problem import RouteDAGConstraintsDict
from rsp.scheduling.scheduling_problem import ScheduleProblemDescription
from rsp.scheduling.scheduling_problem import TopoDict
from rsp.step_05_experiment_run.experiment_malfunction import ExperimentMalfunction
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import _extract_route_section_penalties
from rsp.step_05_experiment_run.scopers.scoper_online_unrestricted import scoper_online_unrestricted
_pp = pprint.PrettyPrinter(indent=4)
def scoper_online_route_restricted_for_all_agents(
    online_unrestricted_trainrun_dict: TrainrunDict,
    online_unrestricted_problem: ScheduleProblemDescription,
    malfunction: ExperimentMalfunction,
    minimum_travel_time_dict: Dict[int, int],
    max_episode_steps: int,
    # pytorch convention for in-place operations: postfixed with underscore.
    topo_dict_: TopoDict,
    schedule_trainrun_dict: TrainrunDict,
    weight_route_change: int,
    weight_lateness_seconds: int,
    max_window_size_from_earliest: int = np.inf,
) -> ScheduleProblemDescription:
    """Build the "online route restricted" re-scheduling problem.

    The scoper naive only opens up the differences between the schedule and
    the imaginary re-schedule. It gives no additional routing flexibility!

    Parameters
    ----------
    online_unrestricted_problem
    online_unrestricted_trainrun_dict: TrainrunDict
        the magic information of the full re-schedule
    malfunction: ExperimentMalfunction
        the malfunction; used to determine the waypoint after the malfunction
    minimum_travel_time_dict: Dict[int,int]
        the minimum travel times for the agents
    max_episode_steps:
        latest arrival
    topo_dict_:
        the topologies used for scheduling; restricted IN PLACE to the schedule routes
    schedule_trainrun_dict: TrainrunDict
        the schedule S0
    weight_route_change
    weight_lateness_seconds
    max_window_size_from_earliest: int
        maximum window size as offset from earliest; caps latest at
        earliest + window during back propagation of latest

    Returns
    -------
    ScheduleProblemDescription
    """
    constraints_dict: RouteDAGConstraintsDict = {}
    for agent_id, schedule_trainrun in schedule_trainrun_dict.items():
        agent_topo = topo_dict_[agent_id]
        # Restrict the agent's topology (in place) to the waypoints of its schedule route.
        scheduled_nodes = {trainrun_waypoint.waypoint for trainrun_waypoint in schedule_trainrun}
        agent_topo.remove_nodes_from([node for node in agent_topo.nodes if node not in scheduled_nodes])
        constraints_dict[agent_id] = scoper_online_unrestricted(
            agent_id=agent_id,
            topo_=agent_topo,
            schedule_trainrun=schedule_trainrun_dict[agent_id],
            minimum_travel_time=minimum_travel_time_dict[agent_id],
            malfunction=malfunction,
            latest_arrival=max_episode_steps,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
    # TODO SIM-324 pull out verification
    for agent_id in constraints_dict:
        verify_consistency_of_route_dag_constraints_for_agent(
            agent_id=agent_id,
            route_dag_constraints=constraints_dict[agent_id],
            topo=topo_dict_[agent_id],
            malfunction=malfunction,
            max_window_size_from_earliest=max_window_size_from_earliest,
        )
    # N.B. re-schedule train run must not necessarily be open in route dag constraints
    return ScheduleProblemDescription(
        route_dag_constraints_dict=constraints_dict,
        minimum_travel_time_dict=minimum_travel_time_dict,
        topo_dict=topo_dict_,
        max_episode_steps=max_episode_steps,
        route_section_penalties=_extract_route_section_penalties(
            schedule_trainruns=schedule_trainrun_dict, topo_dict=topo_dict_, weight_route_change=weight_route_change
        ),
        weight_lateness_seconds=weight_lateness_seconds,
    )
| 4,118 | 41.463918 | 119 | py |
Alpha-IoU | Alpha-IoU-main/test.py | import argparse, csv
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.5,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''),  # for saving images
         save_txt=False,  # for auto-labelling
         save_hybrid=False,  # for hybrid auto-labelling
         save_conf=False,  # save auto-label confidences
         plots=True,  # only here is True as pre-defined
         log_imgs=0,  # number of logged images
         compute_loss=None):
    """Evaluate a YOLO model on a detection dataset.

    Extended from stock YOLOv5 test.py: reports per-threshold mAP at every IoU
    step 0.5:0.95 (map50..map95) and dumps all matched IoUs >= 0.5 to
    save_dir/'ious.csv'.

    NOTE(review): when called directly (not from train.py), this reads the
    module-level `opt` global for device/project/task settings.

    Returns a tuple ((mp, mr, map50, map, *losses), maps, t) where `maps` is a
    per-class mAP array and `t` holds timing info.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        gs = max(int(model.stride.max()), 32)  # grid size (max stride)
        imgsz = check_img_size(imgsz, s=gs)  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # ceil
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True,
                                       prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5:.95', 'mAP')
    # per-threshold mAP accumulators (map50..map95) plus overall stats and timers
    p, r, f1, mp, mr, map50, map55, map60, map65, map70, map75, map80, map85, map90, map95, map, t0, t1 = \
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    # all matched IoUs (>= 0.5) over the whole dataset, written to ious.csv
    iou_dataset = []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out], targets)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            # out: list of detections, on (n,6) tensor per image [xyxy, conf, cls]
            out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
            t1 += time_synchronized() - t

        iou_batch = []
        # Statistics per image
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                             "class_id": int(cls),
                             "box_caption": "%s %.4f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 4) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            # collect ious
            iou_image = []
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                iou_class = []
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices
                        # only collect iou >= 0.5
                        ious_list = [z for z in ious.tolist() if z >= 0.5]
                        # print(ious_list)
                        iou_class += ious_list

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break
                iou_image += iou_class
            iou_batch += iou_image

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
        iou_dataset += iou_batch
        # save iou as txt
        # NOTE(review): ious.csv is rewritten with the cumulative list on every
        # batch; only the final write (after the last batch) matters.
        with open(save_dir / 'ious.csv', 'w') as f:
            writer = csv.writer(f)
            for iou in iou_dataset:
                writer.writerow([iou])

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        # split AP columns per IoU threshold 0.5 .. 0.95
        ap50, ap55, ap60, ap65, ap70, ap75, ap80, ap85, ap90, ap95, ap = \
            ap[:, 0], ap[:, 1], ap[:, 2], ap[:, 3], ap[:, 4], ap[:, 5], ap[:, 6], ap[:, 7], ap[:, 8], ap[:, 9], ap.mean(1)
        # AP@0.5 to 0.95, AP@0.5:0.95
        mp, mr, map50, map55, map60, map65, map70, map75, map80, map85, map90, map95, map = \
            p.mean(), r.mean(), ap50.mean(), ap55.mean(), ap60.mean(), ap65.mean(), ap70.mean(), ap75.mean(), \
            ap80.mean(), ap85.mean(), ap90.mean(), ap95.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.4g' * 15  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map55, map60, map65, map70, map75, map80, map85, map90, map95, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap55[i], ap60[i], ap65[i], ap70[i], ap75[i],
                        ap80[i], ap85[i], ap90[i], ap95[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
            wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
    # CLI entry point. NOTE: `opt` is a module-level global that test() reads
    # directly (device/project/task), so it must be parsed before calling test().
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    # COCO datasets always get a pycocotools-compatible JSON dump
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data)  # check file
    print(opt)
    check_requirements()

    if opt.task in ['val', 'test']:  # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt | opt.save_hybrid,
             save_hybrid=opt.save_hybrid,
             save_conf=opt.save_conf,
             )

    elif opt.task == 'speed':  # speed benchmarks
        for w in opt.weights:
            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False)

    elif opt.task == 'study':  # run over a range of settings and save/plot
        # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
        x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
        for w in opt.weights:
            f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
            y = []  # y axis
            for i in x:  # img-size
                print(f'\nRunning {f} point {i}...')
                r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_study_txt(x=x)  # plot
| 17,952 | 47.131367 | 122 | py |
Alpha-IoU | Alpha-IoU-main/detect.py | import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
def detect(save_img=False):
    """Run YOLOv5 inference on images, videos, directories, or streams.

    All runtime options are read from the module-level ``opt`` namespace
    (populated by argparse under ``__main__``). Detections are optionally
    displayed, drawn onto the saved media, and/or written as YOLO-format
    *.txt label files under an auto-incremented ``save_dir``.

    Args:
        save_img (bool): force saving of annotated images; automatically
            enabled for non-webcam sources.
    """
    source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    # Webcam/stream sources: a numeric device id, a .txt list of streams, or an RTSP/RTMP/HTTP URL
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://'))
    # Directories
    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if half:
        model.half()  # to FP16
    # Second-stage classifier (disabled by default)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        # BUGFIX: nn.Module.load_state_dict() returns an _IncompatibleKeys NamedTuple,
        # not the module, so chaining .to(device).eval() onto it raised AttributeError.
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz, stride=stride)
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once (warmup)
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()
        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
            # Print time (inference + NMS)
            print(f'{s}Done. ({t2 - t1:.3f}s)')
            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                    vid_writer.write(im0)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
    # Command-line entry point: parses options into the module-level `opt`
    # namespace that detect() reads, then runs inference under no_grad.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='data/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    print(opt)
    check_requirements()
    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            # Re-save each official checkpoint after running it, stripping the optimizer state
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
| 8,218 | 45.698864 | 119 | py |
Alpha-IoU | Alpha-IoU-main/hubconf.py | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
"""
from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
dependencies = ['torch', 'yaml']
set_logging()
def create(name, pretrained, channels, classes, autoshape):
    """Creates a specified YOLOv5 model

    Arguments:
        name (str): name of model, i.e. 'yolov5s'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): wrap the model with autoshape() for file/URI/PIL/cv2/np inputs and NMS

    Returns:
        pytorch model

    Raises:
        Exception: if model creation or weight loading fails, with a pointer to the help URL
    """
    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
    try:
        model = Model(config, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
            state_dict = ckpt['model'].float().state_dict()  # to FP32
            # keep only tensors whose shapes match the freshly-built model
            state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
            model.load_state_dict(state_dict, strict=False)  # load
            if len(ckpt['model'].names) == classes:
                model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model
    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        # BUGFIX: corrected 'maybe be' typo in the user-facing error message
        s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e
def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
    """Build the small YOLOv5 variant ('yolov5s') via create().

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create(name='yolov5s', pretrained=pretrained, channels=channels,
                  classes=classes, autoshape=autoshape)
def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
    """Build the medium YOLOv5 variant ('yolov5m') via create().

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create(name='yolov5m', pretrained=pretrained, channels=channels,
                  classes=classes, autoshape=autoshape)
def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
    """Build the large YOLOv5 variant ('yolov5l') via create().

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create(name='yolov5l', pretrained=pretrained, channels=channels,
                  classes=classes, autoshape=autoshape)
def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
    """Build the extra-large YOLOv5 variant ('yolov5x') via create().

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create(name='yolov5x', pretrained=pretrained, channels=channels,
                  classes=classes, autoshape=autoshape)
def custom(path_or_model='path/to/model.pt', autoshape=True):
    """YOLOv5-custom model from https://github.com/ultralytics/yolov5

    Arguments (3 options):
        path_or_model (str): 'path/to/model.pt'
        path_or_model (dict): torch.load('path/to/model.pt')
        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']

    Returns:
        pytorch model
    """
    # Normalize the three accepted input forms down to a bare nn.Module
    checkpoint = path_or_model
    if isinstance(checkpoint, str):
        checkpoint = torch.load(checkpoint)  # load checkpoint from disk
    if isinstance(checkpoint, dict):
        key = 'ema' if checkpoint.get('ema') else 'model'  # prefer EMA weights when present
        checkpoint = checkpoint[key]
    # Rebuild a fresh model from the checkpoint's yaml, then copy weights and names over
    target_device = next(checkpoint.parameters()).device
    hub_model = Model(checkpoint.yaml).to(target_device)
    hub_model.load_state_dict(checkpoint.float().state_dict())
    hub_model.names = checkpoint.names  # class names
    if autoshape:
        hub_model = hub_model.autoshape()
    return hub_model
if __name__ == '__main__':
    # Smoke test: build a pretrained model and run batched inference on the
    # four supported input forms (PIL image, filename, URI, numpy array).
    model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
    # model = custom(path_or_model='path/to/model.pt') # custom example
    # Verify inference
    import numpy as np
    from PIL import Image
    imgs = [Image.open('data/images/bus.jpg'), # PIL
            'data/images/zidane.jpg', # filename
            'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI
            np.zeros((640, 480, 3))] # numpy
    results = model(imgs) # batched inference
    results.print()
    results.save()
| 5,274 | 34.884354 | 114 | py |
Alpha-IoU | Alpha-IoU-main/train.py | import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
logger = logging.getLogger(__name__)
'''
Before training a model, please select clean or noisy labels and a specific loss.
Train from scratch with initial lr=0.01 for bs=64, 0.02 for bs=128, 0.04 for bs=256.
Use a pretrained model with initial lr=0.002 for bs=32, 0.004 for bs=64, 0.008 for bs=128, 0.016 for bs=256.
Always test on the clean labels.
'''
def train(hyp, opt, device, tb_writer=None, wandb=None):
    """Train a YOLOv5 model.

    Args:
        hyp (dict): hyperparameters (lr0, momentum, loss gains, augmentation settings, ...).
        opt (argparse.Namespace): command-line options (data/cfg/weights paths, epochs,
            batch sizes, DDP ranks, save_dir, flags).
        device (torch.device): device from select_device(); 'cpu' disables CUDA paths.
        tb_writer (SummaryWriter | None): optional TensorBoard writer (rank -1/0 only).
        wandb (module | None): optional Weights & Biases module for logging.

    Returns:
        tuple: results of the last evaluation — (P, R, mAP@.5, mAP@.5:.95, val box/obj/cls loss).
    """
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True) # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'
    # Save run settings (hyperparameters + options) for reproducibility/resume
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)
    # Configure
    plots = not opt.evolve # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict) # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights) # download if not found locally
        ckpt = torch.load(weights, map_location=device) # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
        state_dict = ckpt['model'].float().state_dict() # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
        model.load_state_dict(state_dict, strict=False) # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
    # Freeze
    freeze = [] # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False
    # Optimizer
    nbs = 64 # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
    # Split parameters into three groups: BatchNorm weights (no decay),
    # conv/linear weights (decay), and biases (no decay, separate warmup lr)
    pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias) # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight) # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight) # apply decay
    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)
    # Logging
    if rank in [-1, 0] and wandb and wandb.run is None:
        opt.hyp = hyp # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               entity=opt.entity,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb} # loggers dict
    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None
    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']
        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']
        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results']) # write results.txt
        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch'] # finetune additional epochs
    # Image sizes
    gs = max(int(model.stride.max()), 32) # grid size (max stride)
    nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')
    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
    nb = len(dataloader) # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]
        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0]) # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)
            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float() # pre-reduce anchor precision
    # Model parameters
    hyp['box'] *= 3. / nl # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
    model.nc = nc # attach number of classes to model
    model.hyp = hyp # attach hyperparameters to model
    model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
    model.names = names
    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
    maps = np.zeros(nc) # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1 # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model) # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
        model.train()
        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()
        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
        mloss = torch.zeros(4, device=device) # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb) # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
            ni = i + nb * epoch # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
            # Warmup: linearly ramp lr/momentum (and accumulation count) over the first nw batches
            if ni <= nw:
                xi = [0, nw] # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
                sf = sz / max(imgs.shape[2:]) # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs) # forward
                loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.
            # Backward
            scaler.scale(loss).backward()
            # Optimize: step only every `accumulate` batches (gradient accumulation)
            if ni % accumulate == 0:
                scaler.step(optimizer) # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)
                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg' # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    # tb_writer.add_graph(model, imgs) # add model to tensorboard
                elif plots and ni == 10 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')
                                           if x.exists()]}, commit=False)
            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------
        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
        scheduler.step()
        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch: # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 log_imgs=opt.log_imgs if wandb else 0,
                                                 compute_loss=compute_loss)
            # Write
            # NOTE(review): `s` is assigned inside the batch loop above; an empty
            # dataloader would leave it undefined here — confirm nb > 0 is guaranteed.
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
                    'x/lr0', 'x/lr1', 'x/lr2'] # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch) # tensorboard
                if wandb:
                    wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B
            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': results_file.read_text(),
                        'model': deepcopy(model.module if is_parallel(model) else model).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': wandb_run.id if wandb else None}
                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        # Strip optimizers
        final = best if best.exists() else last # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
        # Plots
        if plots:
            plot_results(save_dir=save_dir) # save as results.png
            if wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
                if opt.log_artifacts:
                    wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
            # NOTE(review): `(last)` is not a tuple — when best.pt is missing this
            # iterates a single Path object; `(last,)` was likely intended. Confirm.
            for m in (last, best) if best.exists() else (last): # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=batch_size * 2,
                                          imgsz=imgsz_test,
                                          conf_thres=0.001,
                                          iou_thres=0.7,
                                          model=attempt_load(m, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=True,
                                          plots=False)
    else:
        dist.destroy_process_group()
    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='', help='initial weights path')
# parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
opt = parser.parse_args()
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements()
# Resume
if opt.resume: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
# Train
logger.info(opt)
try:
import wandb
except ImportError:
wandb = None
prefix = colorstr('wandb: ')
logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer, wandb)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
'mosaic': (1, 0.0, 1.0), # image mixup (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device, wandb=wandb)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
## tensorboard --logdir=../runs/train/voc50_iou
## tensorboard --logdir=../runs/exp14_noise_0_scra_2ciou
# http://localhost:6006/ | 32,757 | 51.4128 | 151 | py |
Alpha-IoU | Alpha-IoU-main/models/yolo.py | import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
    # YOLOv5 detection head: one 1x1 conv per feature level, predicting
    # (x, y, w, h, objectness, nc class scores) for each of `na` anchors.
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        # nc: number of classes; anchors: per-level flat (w, h) anchor pairs;
        # ch: input channels of each incoming feature map.
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        # x: list of nl feature maps. Returns the raw per-level maps in
        # training mode, or (decoded detections, raw maps) in inference.
        # x = x.copy()  # for profiling
        z = []  # inference output
        self.training |= self.export  # ONNX export uses the raw (training-style) outputs
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    # lazily (re)build the cell-offset grid when the map size changes
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
                y = x[i].sigmoid()
                # decode: xy relative to grid cell scaled by stride; wh as anchor multiples
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # Grid of (x, y) cell offsets with shape (1, 1, ny, nx, 2).
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    """YOLOv5 model built from a yaml config dict/file: backbone + head ending in Detect."""

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            # stride per level = input size / feature-map size, probed with a dummy forward
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)  # anchors now expressed in grid units
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        # With augment=True, runs test-time augmentation over scales/flips and
        # merges the de-scaled / de-flipped predictions.
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_once(self, x, profile=False):
        # Single pass through the layer list, honoring each layer's 'from' index.
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        # Debug helper: print mean biases of the Detect() output convs.
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        # Folds each BatchNorm into its preceding conv for faster inference.
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Build an nn.Sequential from the yaml model dict `d`.

    Returns (model, savelist): savelist holds indices of layers whose outputs
    must be kept for later 'from' references. `ch` accumulates per-layer
    output channel counts as layers are created.
    """
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass
        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3]:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)  # apply width multiple, keep divisible by 8
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []  # reset; from here on ch[i] is layer i's output channels
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
    # CLI entry point: build a model from a yaml config and leave it in train mode.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # check file
    set_logging()
    device = select_device(opt.device)

    # Create model
    model = Model(opt.cfg).to(device)
    model.train()

    # Profile
    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
    # y = model(img, profile=True)

    # Tensorboard
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter()
    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
| 12,060 | 42.699275 | 119 | py |
Alpha-IoU | Alpha-IoU-main/models/export.py | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
sys.path.append('./') # to run '$ python *.py' files in subdirectories
import torch
import torch.nn as nn
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import set_logging, check_img_size
from utils.torch_utils import select_device
if __name__ == '__main__':
    # CLI entry point: export a trained *.pt checkpoint to TorchScript, ONNX and CoreML.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
    parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    device = select_device(opt.device)
    model = attempt_load(opt.weights, map_location=device)  # load FP32 model
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model: swap activations for export-friendly implementations
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        # elif isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
    model.model[-1].export = not opt.grid  # set Detect() layer grid export
    y = model(img)  # dry run

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img)
        ts.save(f)
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'],
                          dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
                                        'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
| 4,422 | 41.12381 | 117 | py |
Alpha-IoU | Alpha-IoU-main/models/common.py | # This file contains modules common to various models
import math
from pathlib import Path
import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from utils.plots import color_list, plot_one_box
def autopad(k, p=None):
    """Return 'same' padding for kernel size `k` when `p` is not supplied.

    `k` may be an int or a per-dimension sequence; an explicit `p` wins.
    """
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise convolution: a Conv whose group count is gcd(c1, c2)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)
class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> use it as-is; anything else -> identity
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Full path: conv, batch-norm, activation."""
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        """Path used after conv+bn fusion: conv then activation only."""
        return self.act(self.conv(x))
class Bottleneck(nn.Module):
    """Standard residual bottleneck: 1x1 reduce -> 3x3 expand, optional shortcut."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # residual add only when requested and shapes match
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class BottleneckCSP(nn.Module):
    """CSP bottleneck, https://github.com/WongKinYiu/CrossStagePartialNetworks:
    a bottleneck stack and a bare 1x1 branch, fused through BN + LeakyReLU."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        blocks = [Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        deep = self.cv3(self.m(self.cv1(x)))   # bottleneck-stack branch
        skip = self.cv2(x)                     # partial (bypass) branch
        fused = torch.cat((deep, skip), dim=1)
        return self.cv4(self.act(self.bn(fused)))
class C3(nn.Module):
    """CSP bottleneck variant built from three plain Conv blocks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        deep = self.m(self.cv1(x))  # bottleneck-stack branch
        return self.cv3(torch.cat((deep, self.cv2(x)), dim=1))
class SPP(nn.Module):
    """Spatial pyramid pooling (YOLOv3-SPP): parallel max-pools of several
    kernel sizes, concatenated with the un-pooled features."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        pools = [nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]
        self.m = nn.ModuleList(pools)

    def forward(self, x):
        x = self.cv1(x)
        feats = [x]
        for pool in self.m:
            feats.append(pool(x))
        return self.cv2(torch.cat(feats, 1))
class Focus(nn.Module):
    """Focus wh information into channel space: slice every 2nd pixel into 4
    channel groups, x(b,c,w,h) -> (b,4c,w/2,h/2), then convolve."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):
        tiles = (x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2])
        return self.conv(torch.cat(tiles, 1))
class Contract(nn.Module):
    """Fold spatial detail into channels, e.g. x(1,64,80,80) -> x(1,256,40,40)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        s = self.gain
        n, c, h, w = x.size()  # H and W must be divisible by gain
        x = x.view(n, c, h // s, s, w // s, s)
        # move the sub-pixel offsets in front of the channel axis
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
        return x.view(n, c * s * s, h // s, w // s)
class Expand(nn.Module):
    """Unfold channels into spatial detail, e.g. x(1,64,80,80) -> x(1,16,160,160).
    Inverse layout of Contract."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        s = self.gain
        n, c, h, w = x.size()  # C must be divisible by gain**2
        x = x.view(n, s, s, c // s ** 2, h, w)
        # interleave the sub-pixel offsets back into the spatial axes
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()
        return x.view(n, c // s ** 2, h * s, w * s)
class Concat(nn.Module):
    """Concatenate a list of tensors along one fixed dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension  # concatenation axis

    def forward(self, x):
        tensors = list(x)
        return torch.cat(tensors, self.d)
class NMS(nn.Module):
    """Non-Maximum Suppression wrapped as a module, applied to Detect output."""

    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        preds = x[0]  # x is (inference_out, train_out); NMS consumes the first
        return non_max_suppression(preds, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()

    def autoshape(self):
        # Already wrapped: no-op, return self.
        print('autoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self

    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   filename:  imgs = 'data/samples/zidane.jpg'
        #   URI:            = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:         = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:            = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:          = np.zeros((720,1280,3))  # HWC
        #   torch:          = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple:       = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            # already a batched tensor: run the wrapped model directly
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process: normalize every accepted input form to a letterboxed BCHW batch
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1, files = [], [], []  # image and inference shapes, filenames
        for i, im in enumerate(imgs):
            if isinstance(im, str):  # filename or uri
                im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im  # open
                im.filename = f  # for uri
            files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg')
            im = np.array(im)  # to numpy
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im  # update
        # common inference shape: max over the batch, rounded to a stride multiple
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32

        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process: map boxes back to each original image's coordinates
        for i in range(n):
            scale_coords(shape1, y[i][:, :4], shape0[i])

        return Detections(imgs, y, files, self.names)
class Detections:
    """Container for YOLOv5 inference results (one entry per input image)."""

    def __init__(self, imgs, pred, files, names=None):
        """
        imgs: list of images as numpy arrays
        pred: list of tensors, one per image; rows are (xyxy, conf, cls)
        files: list of image filenames
        names: class names
        """
        super(Detections, self).__init__()
        d = pred[0].device  # device
        # per-image normalization gains (w, h, w, h, 1, 1)
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.files = files  # image filenames
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)

    def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
        """Print, show, save and/or render the annotated results.
        FIX: summary variable renamed from `str`, which shadowed the builtin."""
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                if show or save or render:
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
            img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
            if pprint:
                print(s.rstrip(', '))
            if show:
                img.show(self.files[i])  # show
            if save:
                f = Path(save_dir) / self.files[i]
                img.save(f)  # save
                print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n')
            if render:
                self.imgs[i] = np.asarray(img)

    def print(self):
        self.display(pprint=True)  # print results

    def show(self):
        self.display(show=True)  # show results

    def save(self, save_dir='results/'):
        Path(save_dir).mkdir(exist_ok=True)
        self.display(save=True, save_dir=save_dir)  # save results

    def render(self):
        self.display(render=True)  # render results
        return self.imgs

    def __len__(self):
        return self.n

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        # BUGFIX: the original passed self.names into the `files` positional
        # slot, dropping filenames and losing class names; pass both correctly.
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.names) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x
class Classify(nn.Module):
    """Classification head: global average pool -> 1x1 conv -> flatten,
    mapping x(b,c1,20,20) to x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)
        self.flat = nn.Flatten()

    def forward(self, x):
        inputs = x if isinstance(x, list) else [x]  # accept a list of maps
        pooled = torch.cat([self.aap(y) for y in inputs], 1)
        return self.flat(self.conv(pooled))  # flatten to x(b,c2)
| 12,997 | 41.064725 | 120 | py |
Alpha-IoU | Alpha-IoU-main/models/experimental.py | # This file contains experimental modules
import numpy as np
import torch
import torch.nn as nn
from models.common import Conv, DWConv
from utils.google_utils import attempt_download
class CrossConv(nn.Module):
    """Cross convolution downsample: a 1xk conv followed by a kx1 conv."""

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class Sum(nn.Module):
    """Weighted sum of 2 or more layers, https://arxiv.org/abs/1911.09070."""

    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # indices of the extra inputs
        if weight:
            # learnable per-input weights, squashed to (0, 2) in forward
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)

    def forward(self, x):
        total = x[0]
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                total = total + x[i + 1] * w[i]
        else:
            for i in self.iter:
                total = total + x[i + 1]
        return total
class GhostConv(nn.Module):
    """Ghost convolution, https://github.com/huawei-noah/ghostnet:
    half the channels come from a primary conv, half from a cheap
    depthwise conv applied to the primary output."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(GhostConv, self).__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act)      # primary features
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)     # depthwise "ghost" features

    def forward(self, x):
        primary = self.cv1(x)
        return torch.cat([primary, self.cv2(primary)], 1)
class GhostBottleneck(nn.Module):
    """Ghost bottleneck, https://github.com/huawei-noah/ghostnet:
    pointwise ghost conv -> (depthwise stride conv when s == 2) ->
    linear ghost conv, added to a matching shortcut branch."""

    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
        super(GhostBottleneck, self).__init__()
        c_ = c2 // 2
        main = [GhostConv(c1, c_, 1, 1)]  # pw
        main.append(DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity())  # dw
        main.append(GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
        self.conv = nn.Sequential(*main)
        if s == 2:
            self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
                                          Conv(c1, c2, 1, 1, act=False))
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595: several parallel
    # convs with different kernel sizes; outputs are concatenated channel-wise.
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            # least-squares solve so every kernel size gets roughly the same
            # number of weights (larger kernels receive fewer channels)
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        # NOTE(review): the residual add requires c1 == c2 (and s == 1) to
        # broadcast — confirm callers only use it that way.
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
class Ensemble(nn.ModuleList):
    """Ensemble of models whose per-image detections are concatenated for a joint NMS."""

    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        outputs = [model(x, augment)[0] for model in self]
        # alternatives: torch.stack(outputs).max(0)[0] (max) or torch.stack(outputs).mean(0) (mean)
        merged = torch.cat(outputs, 1)  # nms ensemble
        return merged, None  # inference output, train output
def attempt_load(weights, map_location=None):
    """Load weights=[a,b,c] as an Ensemble, or a single model for weights=[a] / weights=a."""
    model = Ensemble()
    weight_list = weights if isinstance(weights, list) else [weights]
    for w in weight_list:
        attempt_download(w)
        ckpt = torch.load(w, map_location=map_location)  # load checkpoint
        # prefer the EMA weights when the checkpoint carries them
        model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model

    # Compatibility updates for checkpoints created with older PyTorch versions
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    print('Ensemble created with %s\n' % weights)
    for k in ['names', 'stride']:
        setattr(model, k, getattr(model[-1], k))
    return model  # return ensemble
| 5,146 | 37.125926 | 114 | py |
Alpha-IoU | Alpha-IoU-main/utils/loss.py | # Loss functions
import torch
import torch.nn as nn
from utils.general import bbox_iou, bbox_alpha_iou
from utils.torch_utils import is_parallel
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    """Return label-smoothed BCE targets (positive, negative) for smoothing factor eps."""
    positive = 1.0 - 0.5 * eps
    negative = 0.5 * eps
    return positive, negative
class BCEBlurWithLogitsLoss(nn.Module):
    """BCEWithLogitsLoss that down-weights likely missing-label examples."""

    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        raw = self.loss_fcn(pred, true)
        prob = torch.sigmoid(pred)  # prob from logits
        dx = prob - true  # large positive when a confident prediction has no label
        # dx = (prob - true).abs()  # reduce missing label and false label effects
        reduce_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        return (raw * reduce_factor).mean()
class FocalLoss(nn.Module):
    """Wraps focal loss around an existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma  # focusing parameter
        self.alpha = alpha  # class-balance factor
        self.reduction = loss_fcn.reduction  # reduction happens here, after modulation
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss); loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # alt form
        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * prob + (1 - true) * (1 - prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss = loss * alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss  # 'none'
class QFocalLoss(nn.Module):
    """Wraps Quality focal loss around an existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma  # focusing parameter
        self.alpha = alpha  # class-balance factor
        self.reduction = loss_fcn.reduction  # reduction happens here, after modulation
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        # quality focal: modulate by how far the predicted prob is from the target
        modulating_factor = torch.abs(true - prob) ** self.gamma
        loss = loss * alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss  # 'none'
class ComputeLoss:
    """YOLOv5-style training loss: box (alpha-CIoU via bbox_alpha_iou), objectness and classification BCE terms."""

    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 (eps=0.0 -> no smoothing)
        self.cp, self.cn = smooth_BCE(eps=0.0)

        # Focal loss (only when hyp fl_gamma > 0)
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        # per-layer objectness weights; 3-layer models get [4.0, 1.0, 0.4]
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        """Return (total_loss * batch_size, detached [lbox, lobj, lcls, loss]) for predictions p and targets."""
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                # iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, alpha=3, CIoU=True)  # iou(prediction, target)
                iou = bbox_alpha_iou(pbox.T, tbox[i], x1y1x2y2=False, alpha=3, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness: target is a blend of 1 and the detached IoU, controlled by self.gr
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets):
        """Build per-layer (tcls, tbox, indices, anchors) for __call__; targets are (image, class, x, y, w, h)."""
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches: keep targets whose wh ratio to the anchor is within hyp['anchor_t']
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets: replicate targets into the two neighbouring cells closest to the box centre
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
class ComputeLoss2:
    """Variant of ComputeLoss that scales predicted wh by ratio=0.6 and uses bbox_iou(whratio=...) for the box loss."""

    def __init__(self, model, autobalance=False):
        super(ComputeLoss2, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 (eps=0.0 -> no smoothing)
        self.cp, self.cn = smooth_BCE(eps=0.0)

        # Focal loss (only when hyp fl_gamma > 0)
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        """Return (total_loss * batch_size, detached [lbox, lobj, lcls, loss]) for predictions p and targets."""
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                # consistent with yolo2.py
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                ratio = 0.6  # wh shrink factor applied to the predicted box
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] * ratio
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], whratio=ratio, CIoU=True)  # iou(prediction, target)
                # iou = bbox_alpha_iou(pbox.T, tbox[i], x1y1x2y2=False, GIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness: target is a blend of 1 and the detached IoU, controlled by self.gr
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets):
        """Build per-layer (tcls, tbox, indices, anchors) for __call__; targets are (image, class, x, y, w, h)."""
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches: keep targets whose wh ratio to the anchor is within hyp['anchor_t']
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets: replicate targets into the two neighbouring cells closest to the box centre
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
| 15,771 | 44.191977 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/autoanchor.py | # Auto-anchor utils
import numpy as np
import torch
import yaml
from scipy.cluster.vq import kmeans
from tqdm import tqdm
from utils.general import colorstr
def check_anchor_order(m):
    """Check a YOLOv5 Detect() module's anchor order against its stride order and flip anchors if they disagree."""
    areas = m.anchor_grid.prod(-1).view(-1)  # anchor areas
    da = areas[-1] - areas[0]  # delta area; sign gives anchor ordering
    ds = m.stride[-1] - m.stride[0]  # delta stride; sign gives stride ordering
    if da.sign() != ds.sign():  # orderings disagree -> correct by reversing anchors
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
    """Check anchor fit to the dataset's label wh distribution and evolve new anchors if the fit is poor.

    Args (assumed from usage, confirm with callers):
        dataset: LoadImagesAndLabels-style dataset exposing .shapes and .labels
        model: model whose last submodule is Detect()
        thr: anchor-label wh ratio threshold (hyp['anchor_t'])
        imgsz: training image size
    """
    prefix = colorstr('autoanchor: ')
    print(f'\n{prefix}Analyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    # max(shapes)==640, e.g., (640, 480) for an input image
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    # add noise to wh???
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]  # torch.size() == [n,9,2]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric, torch.size() == [n,9]
        best = x.max(1)[0]  # best_x, [n], distance(bbox, closest centroid)
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    # m.anchor_grid.clone().cpu().view(-1, 2) are 9 anchors
    # (10, 13), (16, 30), (33, 23), (30, 61), (62, 45), (59, 119), (116, 90), (156, 198), (373, 326)
    # BPR >0.98 for both VOC and COCO datasets
    bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to improve anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))[0]
        if new_bpr > bpr:  # replace anchors only when they measurably improve BPR
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline
def check_anchors2(dataset, model, thr=4.0, imgsz=640):
    """Variant of check_anchors that evolves replacement anchors via kmean_anchors2 (otherwise identical logic)."""
    prefix = colorstr('autoanchor: ')
    print(f'\n{prefix}Analyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to improve anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors2(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))[0]
        if new_bpr > bpr:  # replace anchors only when they measurably improve BPR
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline
def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors()
    """
    thr = 1. / thr  # invert so that a larger metric value means a better fit
    prefix = colorstr('autoanchor: ')

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans calculation
    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve kmeans result with a simple genetic algorithm (random mutation, keep if fitter)
    npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
            if verbose:
                print_results(k)

    return print_results(k)
def kmean_anchors2(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors2()
    """
    thr = 1. / thr  # invert so that a larger metric value means a better fit
    prefix = colorstr('autoanchor: ')

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        # ratio metric, 2 is the dimension (maybe height?),
        # along which to find the minimal number from torch.min(r, 1. / r)
        # [0] is to obtain the 0 dimension result of torch.min(r, 1. / r).min(2)
        x = torch.min(r, 1. / r).min(2)[0]
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans calculation
    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve kmeans result with a simple genetic algorithm (random mutation, keep if fitter)
    npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
            if verbose:
                print_results(k)

    return print_results(k)
| 13,869 | 45.233333 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/plots.py | # Plotting utils
import glob
import math
import os
import random
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import yaml
from PIL import Image, ImageDraw, ImageFont
from scipy.signal import butter, filtfilt
from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
def color_list():
    """Return the first 10 matplotlib TABLEAU colors as (r, g, b) tuples.
    https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    """
    def hex2rgb(h):  # '#rrggbb' -> (r, g, b)
        return tuple(int(h[j:j + 2], 16) for j in (1, 3, 5))

    # or BASE_ (8), CSS4_ (148), XKCD_ (949)
    return [hex2rgb(c) for c in matplotlib.colors.TABLEAU_COLORS.values()]
def hist2d(x, y, n=100):
    """2d histogram used in labels.png and evolve.png: return the log bin-count at each (x, y) sample."""
    xedges = np.linspace(x.min(), x.max(), n)
    yedges = np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    # map each sample back to its bin index, clipped so edge values stay in range
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    """Zero-phase Butterworth low-pass filter of `data` sampled at `fs` Hz.
    https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    """
    nyq = 0.5 * fs  # Nyquist frequency
    normal_cutoff = cutoff / nyq  # cutoff normalised to [0, 1]
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return filtfilt(b, a, data)  # forward-backward filter (zero phase)
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
    """Draw one bounding box x = (x1, y1, x2, y2) with an optional label onto image img in place."""
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]  # random color when none supplied
    c1 = (int(x[0]), int(x[1]))
    c2 = (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and optional label) on img with PIL; return the result as a numpy array."""
    img = Image.fromarray(img)
    draw = ImageDraw.Draw(img)
    line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
    draw.rectangle(box, width=line_thickness, outline=tuple(color))  # plot
    if label:
        fontsize = max(round(max(img.size) / 40), 12)
        font = ImageFont.truetype("Arial.ttf", fontsize)  # NOTE(review): requires Arial.ttf on the font search path
        txt_width, txt_height = font.getsize(label)  # NOTE(review): getsize is removed in Pillow>=10 — confirm pin
        draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
        draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
    return np.asarray(img)
def plot_wh_methods():  # from utils.plots import *; plot_wh_methods()
    """Compare the exp(x) and (2*sigmoid(x))**p width-height anchor multiplication methods; save comparison.png.
    https://github.com/ultralytics/yolov3/issues/168
    """
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)  # YOLOv3 style
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2  # YOLOv5 style, bounded in (0, 2)

    fig = plt.figure(figsize=(6, 3), tight_layout=True)
    plt.plot(x, ya, '.-', label='YOLOv3')
    plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
    plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.grid()
    plt.legend()
    fig.savefig('comparison.png', dpi=200)
def output_to_target(output):
    """Convert per-image model output to target format [batch_id, class_id, x, y, w, h, conf]."""
    targets = []
    for img_idx, detections in enumerate(output):
        for *box, conf, cls in detections.cpu().numpy():
            targets.append([img_idx, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
    return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    """Plot a square grid ('mosaic') of up to max_subplots images with their target/prediction boxes; save to fname."""
    # Plot image grid with labels

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    colors = color_list()  # list of colors
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init white canvas
    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        # top-left corner of this image's cell in the mosaic
        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)  # CHW -> HWC
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            labels = image_targets.shape[1] == 6  # labels if no conf column
            conf = None if labels else image_targets[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale_factor < 1:  # absolute coords need scale if image scales
                    boxes *= scale_factor
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = colors[cls % len(colors)]
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths:
            label = Path(paths[i]).name[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname:
        r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
        mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
        # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
        Image.fromarray(mosaic).save(fname)  # PIL save
    return mosaic
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    """Simulate `epochs` scheduler steps and plot the LR curve to <save_dir>/LR.png.

    Args:
        optimizer: torch optimizer (kept for interface compatibility; the
            simulated optimizer is read from the copied scheduler).
        scheduler: torch LR scheduler attached to `optimizer`.
        epochs (int): number of steps to simulate.
        save_dir (str | Path): output directory for LR.png.
    """
    from copy import deepcopy  # local import: module-level `copy` is the shallow variant

    # BUGFIX: the original shallow copy shared `param_groups` with the caller's
    # optimizer, and the copied scheduler still referenced the original optimizer,
    # so simulating the schedule mutated the caller's learning rate. A deep copy
    # of the scheduler carries its own private optimizer copy.
    scheduler = deepcopy(scheduler)
    optimizer = scheduler.optimizer  # torch schedulers keep a reference to their optimizer
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()
def plot_test_txt():  # from utils.plots import *; plot_test()
    """Plot 2D and marginal 1D histograms of the box centers found in test.txt."""
    coords = np.loadtxt('test.txt', dtype=np.float32)
    centers = xyxy2xywh(coords[:, :4])
    cx = centers[:, 0]
    cy = centers[:, 1]

    # joint 2D histogram of the centers
    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    # marginal 1D histograms of x and y
    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    """Plot per-component (x, y, width, height) histograms of targets.txt."""
    data = np.loadtxt('targets.txt', dtype=np.float32).T
    titles = ('x targets', 'y targets', 'width targets', 'height targets')
    fig, axes = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    axes = axes.ravel()
    for idx, title in enumerate(titles):
        vals = data[idx]
        axes[idx].hist(vals, bins=100, label='%.3g +/- %.3g' % (vals.mean(), vals.std()))
        axes[idx].legend()
        axes[idx].set_title(title)
    plt.savefig('targets.jpg', dpi=200)
def plot_study_txt(path='', x=None):  # from utils.plots import *; plot_study_txt()
    """Plot the speed/accuracy trade-off from study*.txt files written by test.py.

    Saves '<dirname>.png' containing one curve per study file plus an
    EfficientDet reference curve (GPU ms/img vs COCO AP val).
    """
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in sorted(Path(path).glob('study*.txt')):
        # columns: P, R, mAP@.5, mAP@.5:.95, t_inference, t_NMS, t_total
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        stop = y[3].argmax() + 1  # truncate the curve just past peak mAP@.5:.95
        ax2.plot(y[6, :stop], y[3, :stop] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    # EfficientDet reference points (ms/img derived from published FPS, COCO AP)
    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 30)
    ax2.set_ylim(30, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig(str(Path(path).name) + '.png', dpi=300)
def plot_labels(labels, save_dir=Path(''), loggers=None):
    """Plot dataset label statistics and save them to save_dir.

    Produces labels_correlogram.jpg (seaborn pairplot of x/y/w/h) and
    labels.jpg (class histogram, sample boxes, xy and wh heatmaps), then
    optionally logs the images to wandb.

    Args:
        labels (ndarray): (n, 5) array of [class, x, y, w, h] rows
            (normalized xywh); NOTE: mutated in place for the box sketch.
        save_dir (Path): output directory.
        loggers (dict | None): optional mapping, e.g. {'wandb': wandb_run}.
    """
    print('Plotting labels... ')
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    colors = color_list()
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    ax[0].set_xlabel('classes')
    sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles: sketch up to 1000 boxes, centered, on a white 2000x2000 canvas
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10])  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)
    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()

    # loggers
    # BUGFIX: the original `loggers.items() or {}` raised AttributeError when
    # loggers is None (the default) — `.items()` was evaluated before `or`.
    for k, v in (loggers or {}).items():
        if k == 'wandb' and v:
            v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.plots import *; plot_evolution()
    """Scatter-plot each evolved hyperparameter against fitness (evolve.txt); saves evolve.png."""
    with open(yaml_file) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)
    evolve = np.loadtxt('evolve.txt', ndmin=2)
    fit = fitness(evolve)
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (name, _) in enumerate(hyp.items()):
        vals = evolve[:, i + 7]  # hyperparameter columns start at index 7
        best = vals[fit.argmax()]  # value from the single best-fitness result
        plt.subplot(6, 5, i + 1)
        plt.scatter(vals, fit, c=hist2d(vals, fit, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(best, fit.max(), 'k+', markersize=15)
        plt.title('%s = %.3g' % (name, best), fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:  # y tick labels only on the first column of subplots
            plt.yticks([])
        print('%15s: %.3g' % (name, best))
    plt.savefig('evolve.png', dpi=200)
    print('\nPlot saved as evolve.png')
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    """Plot per-frame iDetection logs (frames*.txt) found in save_dir to idetection_profile.png."""
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    titles = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)',
              'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            data = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            rows = data.shape[1]  # number of rows
            sel = np.arange(start, min(stop, rows) if stop else rows)
            data = data[:, sel]
            t = data[0] - data[0].min()  # set t0=0s
            data[0] = sel
            for i, a in enumerate(ax):
                if i < len(data):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, data[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(titles[i])
                    a.set_xlabel('time (s)')
                    for side in ('top', 'right'):
                        a.spines[side].set_visible(False)
                else:
                    a.remove()  # fewer log columns than subplots
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def plot_results_overlay(start=0, stop=0):  # from utils.plots import *; plot_results_overlay()
    """Plot training results*.txt with train and val curves overlaid per metric."""
    legends = ['train', 'train', 'train', 'Precision', 'mAP@0.5',
               'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']
    titles = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1']
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        data = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        rows = data.shape[1]  # number of rows
        x = range(start, min(stop, rows) if stop else rows)
        fig, axes = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        axes = axes.ravel()
        for i in range(5):
            for j in (i, i + 5):  # overlay: j=i is train, j=i+5 is val
                axes[i].plot(x, data[j, x], marker='.', label=legends[j])
            axes[i].set_title(titles[i])
            axes[i].legend()
            if i == 0:
                axes[i].set_ylabel(f)  # filename on the first subplot
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
    # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
    """Plot the ten training/validation metrics from results*.txt; saves results.png."""
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    titles = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
              'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        # download the requested result files from a GCS bucket first
        files = ['results%g.txt' % x for x in id]
        cmd = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
        os.system(cmd)
    else:
        files = list(Path(save_dir).glob('results*.txt'))
        assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
    for fi, f in enumerate(files):
        try:
            data = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            rows = data.shape[1]  # number of rows
            x = range(start, min(stop, rows) if stop else rows)
            for i in range(10):
                y = data[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                label = labels[fi] if len(labels) else f.stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(titles[i])
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))
    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)
| 18,126 | 41.254079 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/datasets.py | # Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'  # shown in dataset error messages
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)  # module-level logger
# Get orientation exif tag
# Find the numeric EXIF tag id whose name is 'Orientation'; `orientation` is used by exif_size() below
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a cheap aggregate 'hash' of a list of paths: the summed byte
    sizes of those entries that exist as regular files."""
    total = 0
    for f in files:
        if os.path.isfile(f):
            total += os.path.getsize(f)
    return total
def exif_size(img):
    """Return PIL image size (width, height) corrected for EXIF orientation.

    EXIF rotation values 6 and 8 (270/90 degree rotations) swap width/height.

    Args:
        img: a PIL Image (anything exposing `.size` and `._getexif()`).
    Returns:
        (width, height) tuple.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:
        # Best effort: images without EXIF raise (AttributeError/KeyError).
        # BUGFIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset plus a matching DataLoader.

    Returns:
        (dataloader, dataset) tuple.
    """
    # In DDP, let rank 0 build/cache the dataset first; other ranks then reuse the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    num_workers = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    loader_cls = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    dataloader = loader_cls(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            sampler=sampler,
                            pin_memory=True,
                            collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader.
    Swaps the batch_sampler for a _RepeatSampler that never ends, then draws
    batches from a single persistent iterator so worker processes survive
    across epochs instead of being respawned each epoch.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # object.__setattr__ bypasses DataLoader's __setattr__ guard that
        # forbids changing attributes after initialization
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()  # one persistent iterator for the loader's lifetime

    def __len__(self):
        # length of one epoch: the underlying (non-repeating) sampler
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # yield exactly one epoch's worth of batches from the persistent iterator
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:  # for inference
    """Iterate over image files and/or video frames from a path (file, dir or glob).

    Each iteration yields (path, letterboxed CHW RGB image, original BGR image,
    video capture or None).
    """

    def __init__(self, path, img_size=640, stride=32):
        # Resolve the input path into a list of files
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        # Split by suffix into still images and videos
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0  # index into self.files
        return self

    def __next__(self):
        """Return the next image or video frame, letterboxed and converted to CHW RGB."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # current video exhausted: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        """Open a new video capture and reset the per-video frame counter."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterate frames from a single local or IP camera for inference.

    Each iteration yields ('webcam.jpg', letterboxed CHW RGB image,
    original BGR frame, None).
    """

    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride

        if pipe.isnumeric():
            pipe = eval(pipe)  # local camera; eval turns e.g. '0' into int 0 (guarded by isnumeric)
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Grab the next frame; pressing 'q' in an OpenCV window stops iteration."""
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames, retrieve only every 30th grab
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several video streams concurrently, one daemon thread per stream.

    `sources` is either a path to a text file with one stream URL per line or
    a single source string. Each iteration yields (source names, stacked
    letterboxed CHW RGB batch, list of original BGR frames, None).
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, updated by the reader threads
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                self.imgs[index] = im if success else self.imgs[index] * 0  # black frame on failure
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Return the latest frame from every stream as one letterboxed batch."""
        self.count += 1
        img0 = self.imgs.copy()  # snapshot so the reader threads can keep writing
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map image paths to their label-file paths.

    The first '/images/' path segment becomes '/labels/' and the file
    extension becomes 'txt'. (The commented-out variants in the original
    supported alternative noisy-label folder layouts for COCO/VOC.)
    """
    sa = os.sep + 'images' + os.sep  # /images/ substring
    sb = os.sep + 'labels' + os.sep  # /labels/ substring
    out = []
    for p in img_paths:
        ext = p.split('.')[-1]  # extension taken from the original path
        swapped = p.replace(sa, sb, 1)
        if ext in swapped:
            head, _, tail = swapped.rpartition(ext)  # split at last occurrence of ext
            out.append(head + 'txt' + tail)
        else:
            out.append(swapped)
    return out
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Dataset of images plus YOLO-format labels, with optional caching,
    rectangular batching, and mosaic/HSV/flip augmentation.

    __getitem__ returns (image tensor CHW RGB, labels (n, 6) with a leading
    image-index column, image path, shapes-for-rescaling or None).
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
                cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        # BUGFIX: np.int is deprecated since NumPy 1.20 and removed in 1.24;
        # the builtin int is what np.int aliased, so behavior is unchanged.
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            # BUGFIX: astype(int) replaces removed np.int alias (same semantics)
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Cache dataset labels: verify each image/label pair, read image
        shapes, and persist the result dict with torch.save."""
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, duplicate
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                segments = []  # instance segments
                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
                assert im.format.lower() in img_formats, f'invalid image format {im.format}'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = [x.split() for x in f.read().strip().splitlines()]
                        if any([len(x) > 8 for x in l]):  # is segment
                            classes = np.array([x[0] for x in l], dtype=np.float32)
                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                        l = np.array(l, dtype=np.float32)
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape, segments]
            except Exception as e:
                nc += 1
                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')

            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        if nf == 0:
            print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, i + 1
        x['version'] = 0.1  # cache version
        torch.save(x, path)  # save for next time
        logging.info(f'{prefix}New cache created: {path}')
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Load (and optionally augment) one training sample by index."""
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack samples into a batch; labels keep a per-image index column."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge every 4 samples into one (either a 2x upscale of
        the first image or a 2x2 tile of all four), with labels rescaled."""
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Load one dataset image by index.

    Returns (img, (h0, w0), (h, w)): the BGR image, its original hw and its
    resized hw (longest side scaled to self.img_size).
    """
    cached = self.imgs[index]
    if cached is not None:  # image was pre-cached in RAM
        return cached, self.img_hw0[index], self.img_hw[index]
    path = self.img_files[index]
    im = cv2.imread(path)  # BGR
    assert im is not None, 'Image Not Found ' + path
    h0, w0 = im.shape[:2]  # orig hw
    r = self.img_size / max(h0, w0)  # resize image to img_size
    if r != 1:  # always resize down, only resize up if training with augmentation
        interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Apply random HSV color-space augmentation to a BGR image in place."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    idx = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((idx * gains[0]) % 180).astype(dtype)  # OpenCV hue wraps at 180
    lut_sat = np.clip(idx * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(idx * gains[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # writes back into img; no return needed
def hist_equalize(img, clahe=True, bgr=False):
    """Equalize the luma histogram of an image with shape (n, m, 3), range 0-255.

    Converts to YUV, equalizes the Y channel (CLAHE or global), converts back.
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        yuv[:, :, 0] = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # global equalization of Y channel
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image back
def load_mosaic(self, index):
    """Build a 2s x 2s 4-image mosaic: image `index` plus 3 random dataset images,
    with labels merged/clipped and the result passed through random_perspective.
    Returns (mosaic image, labels in pixel xyxy)."""
    # loads images in a 4-mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img4: (a) coords are the destination tile in the big canvas,
        # (b) coords are the matching crop taken from the source image
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b  # offset to map labels from source-image to mosaic coordinates
        padh = y1a - y1b
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)
    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate
    # Augment
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4
def load_mosaic9(self, index):
    """Build a 3s x 3s 9-image mosaic around image `index`, crop a random
    2s x 2s window from it, and apply random_perspective.
    Returns (mosaic image, labels in pixel xyxy)."""
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img9: c is the tile's xyxy in base coordinates; tiles are laid
        # out around the center using the previous tile's size (hp, wp) where needed
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp
        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords (clamped to the canvas)
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)
        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous
    # Offset: crop a random 2s x 2s window out of the 3s x 3s canvas
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc  # shift labels into the cropped window
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]
    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate
    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img9, labels9
def replicate(img, labels):
    """Duplicate the smallest half of the labeled boxes at random image locations.

    Pastes each selected box's pixels elsewhere in img (in place) and appends a
    matching label row. Returns (img, augmented labels [cls, x1, y1, x2, y2]).
    """
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    sides = ((x2 - x1) + (y2 - y1)) / 2  # mean side length per box (pixels)
    for i in sides.argsort()[:round(sides.size * 0.5)]:  # half of the boxes, smallest first
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # random paste offset
        x1a, y1a, x2a, y2a = xc, yc, xc + bw, yc + bh
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # copy the box pixels to the new spot
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """Resize img to fit new_shape keeping aspect ratio, padding the rest with color.

    auto pads only to the next stride multiple; scaleFill stretches with no padding;
    scaleup=False never enlarges (better test mAP).
    Returns (padded image, (w_ratio, h_ratio), (dw, dh) padding per side).
    """
    shape = img.shape[:2]  # current (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale factor (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only shrink, never enlarge
        r = min(r, 1.0)

    # Padding to reach the target shape
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw = new_shape[1] - new_unpad[0]  # total width padding
    dh = new_shape[0] - new_unpad[1]  # total height padding
    if auto:  # minimum rectangle: pad only up to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to exactly new_shape, no padding at all
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # split padding between the two sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize only when the size actually changes
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    """Random affine/perspective warp of img and its targets ([cls, xyxy]) and
    optional polygon segments. Builds one combined 3x3 matrix from center,
    perspective, rotation+scale, shear and translation components, warps the
    image, transforms the labels, and keeps only plausible boxes.
    Returns (warped image, surviving transformed targets)."""
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2
    # Center: move the image center to the origin before the other transforms
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)
    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped
    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments, then take each polygon's bounding box
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine
                # clip
                new[i] = segment2box(xy, width, height)
        else:  # warp boxes via their 4 corners, then re-box with min/max
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine
            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
        # filter candidates: drop boxes made too small/thin/cropped by the warp
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]
    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    """Boolean mask of augmented boxes worth keeping.

    box1 is pre-augment, box2 post-augment, both (4, n) in xyxy. A box is kept when
    it stays larger than wh_thr pixels per side, retains more than area_thr of its
    area, and its aspect ratio stays below ar_thr.
    """
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # worst-case aspect ratio
    big_enough = (w2 > wh_thr) & (h2 > wh_thr)
    area_kept = w2 * h2 / (w1 * h1 + eps) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)  # candidates
def cutout(image, labels):
    """Cutout augmentation (https://arxiv.org/abs/1708.04552): paint random
    rectangles over the image in place and drop labels that become mostly hidden.
    Returns the surviving labels."""
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Intersection of box1 (4,) with each row of box2 (n, 4), over box2 area; xyxy
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        return inter / ((b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16)

    # One half-size mask, two quarter-size, four eighth-size, ... (image-size fractions)
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # random mask rectangle, clamped to the image
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]  # random mid-gray fill

        # drop labels that are >60% covered by this mask (only for non-tiny masks)
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])
            labels = labels[ioa < 0.60]

    return labels
def create_folder(path='./new'):
    """(Re)create an empty directory at path, deleting any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # wipe the previous output folder
    os.makedirs(path)  # fresh, empty output folder
def flatten_recursive(path='../coco128'):
    """Copy every file found under path (recursively) into a flat '<path>_flat' dir."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)  # start from an empty output directory
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, flat_dir / Path(file).name)  # name collisions overwrite
def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    """Convert a detection dataset into a classification dataset: crop every
    labeled box (with 20% + 3px padding) into path/classifier/<class>/*.jpg.

    Args:
        path: images directory; labels are resolved via img2label_paths().
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]
            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)
                    b = x[1:] * [w, h, w, h]  # box: normalized xywh -> pixel xywh
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)):  # from utils.datasets import *; autosplit('../coco128')
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    # Arguments
        path: Path to images directory
        weights: Train, val, test weights (list)
    """
    path = Path(path)  # images dir
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    split_of = random.choices([0, 1, 2], weights=weights, k=n)  # per-image split assignment
    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove stale split lists
    for split, img in tqdm(zip(split_of, files), total=n):
        if img.suffix[1:] in img_formats:  # image files only
            with open(path / txt[split], 'a') as f:
                f.write(str(img) + '\n')  # add image to its split's txt file
| 45,239 | 41.201493 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/torch_utils.py | # PyTorch utils
import logging
import math
import os
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Context manager for distributed training: non-master processes wait at entry
    until the local master (rank -1 or 0) finishes the wrapped work; the master
    then waits at exit so all processes proceed together.
    """
    is_master = local_rank in (-1, 0)
    if not is_master:
        torch.distributed.barrier()  # workers block here until the master arrives
    yield
    if local_rank == 0:
        torch.distributed.barrier()  # master releases the waiting workers
def init_torch_seeds(seed=0):
    """Seed torch; seed 0 trades speed for reproducibility, any other seed the
    reverse (see https://pytorch.org/docs/stable/notes/randomness.html)."""
    torch.manual_seed(seed)
    reproducible = seed == 0
    cudnn.benchmark = not reproducible  # autotuner off when reproducing
    cudnn.deterministic = reproducible  # deterministic kernels only for seed 0
def git_describe():
    """Return a human-readable git description (e.g. v5.0-5-g3e25f1e), or '' when
    not run from a git checkout. https://git-scm.com/docs/git-describe"""
    if not Path('.git').exists():
        return ''
    return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1]
def select_device(device='', batch_size=None):
    """Select the torch device from a request string: '' (auto), 'cpu', or CUDA
    ids like '0' / '0,1,2,3'. Logs the selection and returns a torch.device.
    NOTE: mutates CUDA_VISIBLE_DEVICES, so call before any CUDA initialization."""
    # device = 'cpu' or '0' or '0,1,2,3'
    s = f'YOLOv5 {git_describe()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        n = torch.cuda.device_count()
        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        # one log line per visible GPU, aligned under the header
        for i, d in enumerate(device.split(',') if device else range(n)):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'
    logger.info(s)  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    """Return time.time(), first synchronizing CUDA so pending GPU work is included."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued kernels before reading the clock
    return time.time()
def profile(x, ops, n=100, device=None):
    """Benchmark forward/backward speed (and FLOPS when thop is installed) of one
    or more modules/callables `ops` on input tensor x over n iterations, printing
    one summary row per op.
    Example usage:
        x = torch.randn(16, 3, 640, 640)  # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)  # profile speed over 100 iterations
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True  # required so backward() can be timed
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except:  # thop missing or op unsupported: report 0 FLOPS
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except:  # no backward method
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True iff model is wrapped in DataParallel or DistributedDataParallel."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def intersect_dicts(da, db, exclude=()):
    """Entries of da whose key exists in db with a matching .shape, skipping keys
    that contain any substring in exclude. Values come from da."""
    out = {}
    for k, v in da.items():
        if k in db and v.shape == db[k].shape and not any(pat in k for pat in exclude):
            out[k] = v
    return out
def initialize_weights(model):
    """Apply training defaults to every submodule: tuned BatchNorm eps/momentum and
    in-place activations. Conv2d weights keep PyTorch's default initialization.
    Matches exact module types only (subclasses are deliberately untouched)."""
    for m in model.modules():
        cls = type(m)
        if cls is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif cls in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6):
            m.inplace = True  # save activation memory
        elif cls is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def find_modules(model, mclass=nn.Conv2d):
    """Indices of layers in model.module_list that are instances of mclass."""
    matches = []
    for i, layer in enumerate(model.module_list):
        if isinstance(layer, mclass):
            matches.append(i)
    return matches
def sparsity(model):
    """Global fraction of model parameters that are exactly zero."""
    total, zeros = 0., 0.
    for p in model.parameters():
        total += p.numel()
        zeros += (p == 0).sum()
    return zeros / total
def prune(model, amount=0.3):
    """L1-unstructured prune every Conv2d weight to the requested sparsity, in place."""
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # zero the smallest weights
            prune.remove(m, 'weight')  # bake the pruning mask into the weight tensor
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d and return one equivalent
    Conv2d (inference only). https://tehnokv.com/posts/fusing-batchnorm-and-conv/"""
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)

    # Fold BN scale into the kernel: W' = diag(gamma / sqrt(var + eps)) @ W
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fused.weight.copy_(torch.mm(w_bn, w_conv).view(fused.weight.size()))

    # Fold BN shift into the bias: b' = gamma * (b - mean) / sqrt(var + eps) + beta
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fused
def model_info(model, verbose=False, img_size=640):
    """Log a model summary: layer, parameter and gradient counts, and — when thop
    is installed — estimated GFLOPS at img_size (int or [h, w])."""
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS estimate via thop; any failure (missing thop, odd model) just omits it
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except Exception:  # was `(ImportError, Exception)`: redundant, Exception subsumes ImportError
        fs = ''

    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    """Load a pretrained torchvision model and reshape its final FC layer to n classes."""
    model = torchvision.models.__dict__[name](pretrained=True)

    # ResNet model properties
    # input_size = [3, 224, 224]
    # input_space = 'RGB'
    # input_range = [0, 1]
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]

    # Replace the classification head with a freshly zero-initialized n-way layer
    in_features = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Scale a batched image tensor (bs, 3, y, x) by ratio.

    If same_shape, pad back to the original spatial size; otherwise pad/crop to the
    next gs-multiple. Padding uses 0.447 (the ImageNet mean).
    """
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # round target dims up to the next gs multiple
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy instance attributes from b onto a; optionally restrict to include,
    filter by exclude; private ('_'-prefixed) attributes are always skipped."""
    for k, v in b.__dict__.items():
        if len(include) and k not in include:
            continue  # whitelist is active and k is not on it
        if k.startswith('_') or k in exclude:
            continue  # private or explicitly excluded
        setattr(a, k, v)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA: a frozen FP32 copy of the (unwrapped) model in eval mode
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly

    def update(self, model):
        # Update EMA parameters in place: ema = d * ema + (1 - d) * model
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # skip integer buffers (e.g. num_batches_tracked)
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes (non-tensor bookkeeping copied from the live model)
        copy_attr(self.ema, model, include, exclude)
| 11,956 | 39.532203 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.