| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
GradAug | GradAug-main/models/wideresnet_randwidth.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.randwidth_ops import RWConv2d, RWLinear, RWBatchNorm2d
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = RWBatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = RWConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = RWBatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = RWConv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = None if self.equalInOut else RWConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False)
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(in_planes if i == 0 else out_planes, out_planes, stride if i == 0 else 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet_randwidth(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet_randwidth, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = RWConv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False, us=[False, True])
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = RWBatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = RWLinear(nChannels[3], num_classes, us=[True, False])
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
if m.affine:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
last_dim = out.size()[1]
out = out.view(-1, last_dim)
return self.fc(out)
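# A minimal smoke test (a sketch, not from the original file; it assumes the RW*
# ops run at full width until a width multiplier is set elsewhere):
#   net = WideResNet_randwidth(depth=28, num_classes=100, widen_factor=10)
#   y = net(torch.randn(2, 3, 32, 32))  # 32x32 input matches avg_pool2d(out, 8)
#   print(y.size())                     # expected: torch.Size([2, 100])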
| 3,864 | 43.425287 | 115 | py |
GradAug | GradAug-main/models/resnet_randdepth.py | '''
resnet for cifar in pytorch
Reference:
[1] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.
[2] K. He, X. Zhang, S. Ren, and J. Sun. Identity mappings in deep residual networks. In ECCV, 2016.
'''
import torch
import torch.nn as nn
import math
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
" 3x3 convolution with padding "
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion=1
def __init__(self, prob, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.prob = prob
self.fullnet = False
def forward(self, x):
residual = x.clone()
if self.training:
k = int(np.random.binomial(1, self.prob, 1))
if self.fullnet: # fullnet do not drop
k = 1
if k == 1: # do not drop
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
else: # drop the layer
if self.downsample is not None:
residual = self.downsample(x)
out = residual
else:
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.prob * out + residual
# out = out + residual
out = self.relu(out)
return out
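# Stochastic depth recap: during training each block draws k ~ Bernoulli(prob) and
# is replaced by the identity (plus downsample) when k == 0; self.fullnet forces
# k = 1. At eval time every block runs, but the residual branch is scaled by prob,
# so the output matches the training-time expectation E[out] = prob * F(x) + x.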
class ResNet_Cifar(nn.Module):
def __init__(self, block, prob_0_L, layers, num_classes=10):
super(ResNet_Cifar, self).__init__()
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.prob_now = prob_0_L[0]
self.prob_delta = prob_0_L[0] - prob_0_L[1]
self.prob_step = self.prob_delta / (sum(layers) - 1)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = []
layers.append(block(self.prob_now, self.inplanes, planes, stride, downsample))
self.prob_now = self.prob_now - self.prob_step
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.prob_now, self.inplanes, planes))
self.prob_now = self.prob_now - self.prob_step
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet56_cifar(**kwargs):
prob_0_L = [1.0, 0.8]
model = ResNet_Cifar(BasicBlock, prob_0_L, [9, 9, 9], **kwargs)
return model
def resnet110_cifar(**kwargs):
prob_0_L = [1.0, 0.8]
model = ResNet_Cifar(BasicBlock, prob_0_L, [18, 18, 18], **kwargs)
return model
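# Worked example of the linear survival-probability decay (not from the original
# file): resnet56_cifar has sum(layers) = 27 blocks and prob_0_L = [1.0, 0.8], so
# prob_step = 0.2 / 26 ≈ 0.0077 and block i survives with p_i = 1.0 - i * prob_step
# (p_0 = 1.0 for the first block, p_26 = 0.8 for the last).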
if __name__ == '__main__':
net = resnet56_cifar()
y = net(torch.randn(1, 3, 64, 64))
print(net)
print(y.size())
| 4,787 | 29.113208 | 109 | py |
GradAug | GradAug-main/models/pyramidnet_randwidth.py | import torch
import torch.nn as nn
import math
from models.randwidth_ops import RWLinear, RWConv2d, RWBatchNorm2d
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
outchannel_ratio = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn3 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
if residual_channel != shortcut_channel:
# zero-pad the shortcut channels; torch.zeros works on CPU and GPU (the original
# torch.autograd.Variable / torch.cuda.FloatTensor idiom is deprecated and CUDA-only)
padding = torch.zeros(batch_size, residual_channel - shortcut_channel,
featuremap_size[0], featuremap_size[1],
device=out.device, dtype=out.dtype)
out += torch.cat((shortcut, padding), 1)
else:
out += shortcut
return out
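# PyramidNet widens every block, so the residual branch usually has more channels
# than the shortcut; instead of a 1x1 projection, the gap is closed by concatenating
# a zero tensor to the shortcut (the "zero-padded identity" shortcut used above).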
class Bottleneck(nn.Module):
outchannel_ratio = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16, t=1):
super(Bottleneck, self).__init__()
self.bn1 = RWBatchNorm2d(inplanes, ratio=1)
self.conv1 = RWConv2d(inplanes, planes, kernel_size=1, bias=False, ratio=[1, 1])
self.bn2 = RWBatchNorm2d(planes)
self.conv2 = RWConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, groups=1)
self.bn3 = RWBatchNorm2d(planes)
self.conv3 = RWConv2d(planes, planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False, ratio=[1.0, 1.0])
self.bn4 = RWBatchNorm2d(planes * Bottleneck.outchannel_ratio, ratio=1)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn4(out)
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
if residual_channel != shortcut_channel:
padding = torch.zeros(batch_size, residual_channel - shortcut_channel,
featuremap_size[0], featuremap_size[1],
device=out.device, dtype=out.dtype)
out += torch.cat((shortcut, padding), 1)
else:
out += shortcut
return out
class PyramidNet_randwidth(nn.Module):
def __init__(self, dataset, depth, alpha, num_classes, bottleneck=True):
super(PyramidNet_randwidth, self).__init__()
self.dataset = dataset
if self.dataset.startswith('cifar'):
self.inplanes = 16
if bottleneck == True:
n = int((depth - 2) / 9)
block = Bottleneck
else:
n = int((depth - 2) / 6)
block = BasicBlock
self.addrate = alpha / (3 * n * 1.0)
self.input_featuremap_dim = self.inplanes
self.conv1 = RWConv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False, us=[False, True])
self.bn1 = RWBatchNorm2d(self.input_featuremap_dim)
self.featuremap_dim = self.input_featuremap_dim
self.layer1 = self.pyramidal_make_layer(block, n)
self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
self.final_featuremap_dim = self.input_featuremap_dim
self.bn_final = RWBatchNorm2d(self.final_featuremap_dim)
self.relu_final = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = RWLinear(self.final_featuremap_dim, num_classes, us=[True, False])
elif dataset == 'imagenet':
blocks = {18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3],
200: [3, 24, 36, 3]}
if layers.get(depth) is None:
if bottleneck == True:
blocks[depth] = Bottleneck
temp_cfg = int((depth - 2) / 12)
else:
blocks[depth] = BasicBlock
temp_cfg = int((depth - 2) / 8)
layers[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg]
print('=> the layer configuration for each stage is set to', layers[depth])
self.inplanes = 64
self.addrate = alpha / (sum(layers[depth]) * 1.0)
self.input_featuremap_dim = self.inplanes
self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.featuremap_dim = self.input_featuremap_dim
self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0])
self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2)
self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2)
self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2)
self.final_featuremap_dim = self.input_featuremap_dim
self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
self.relu_final = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def pyramidal_make_layer(self, block, block_depth, stride=1):
downsample = None
if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:
downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
layers = []
self.featuremap_dim = self.featuremap_dim + self.addrate
layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, t=1))
for i in range(1, block_depth):
temp_featuremap_dim = self.featuremap_dim + self.addrate
layers.append(
block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1))
# block(int(round(self.featuremap_dim)), int(round(temp_featuremap_dim)), 1, t=block.outchannel_ratio))
self.featuremap_dim = temp_featuremap_dim
self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio
return nn.Sequential(*layers)
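# Widening rule: every block adds `addrate = alpha / (3n)` channels (kept as a
# float and rounded only when a block is instantiated), so the width grows
# linearly from 16 to roughly 16 + alpha over the network (times outchannel_ratio
# for Bottleneck blocks).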
def forward(self, x):
if self.dataset == 'cifar10' or self.dataset == 'cifar100':
x = self.conv1(x)
x = self.bn1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn_final(x)
x = self.relu_final(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
elif self.dataset == 'imagenet':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn_final(x)
x = self.relu_final(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x | 9,184 | 37.919492 | 129 | py |
GradAug | GradAug-main/utils/mytransforms.py | import torch
import numpy as np
from PIL import Image
from torchvision import transforms
import random
imagenet_pca = {
'eigval': np.asarray([0.2175, 0.0188, 0.0045]),
'eigvec': np.asarray([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
class Lighting(object):
def __init__(self, alphastd,
eigval=imagenet_pca['eigval'],
eigvec=imagenet_pca['eigvec']):
self.alphastd = alphastd
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0.:
return img
rnd = np.random.randn(3) * self.alphastd
rnd = rnd.astype('float32')
v = rnd
old_dtype = np.asarray(img).dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
img = Image.fromarray(img.astype(old_dtype), 'RGB')
return img
def __repr__(self):
return self.__class__.__name__ + '()'
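# Lighting implements AlexNet-style PCA color augmentation: it adds
# eigvec @ (alpha * eigval) to the RGB channels, where alpha ~ N(0, alphastd^2)
# is drawn per call, then clips back to [0, 255] for uint8 inputs.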
class MultiCropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(self, base_transform, dataset='cifar'):
if dataset.startswith('cifar'):
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
resos = [32, 28, 24]
elif dataset.startswith('imagenet'):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
resos = [224, 192, 160, 128]
else:
raise NotImplementedError('dataset not implemented.')
self.reso_idx = [0, 1, 2]
self.base_transform = base_transform
self.fullnet_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
self.subnet_transforms = []
for i in range(len(resos)):
self.subnet_transforms.append(
transforms.Compose([
transforms.Resize((resos[i], resos[i])),
transforms.ToTensor(),
normalize
])
)
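# A hedged usage sketch (not from the original file): base_transform holds the
# spatial augmentations, and __call__ returns one full-resolution tensor followed
# by len(self.reso_idx) downsized sub-network tensors.
#   base = transforms.Compose([transforms.RandomCrop(32, padding=4),
#                              transforms.RandomHorizontalFlip()])
#   crops = MultiCropsTransform(base, dataset='cifar')(img)  # list of 4 tensors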
def __call__(self, x):
output_list = []
output_list.append(self.fullnet_transform(self.base_transform(x)))
for idx in self.reso_idx:
# t = random.randint(0, len(self.subnet_transforms)-1)
# output_list.append(self.subnet_transforms[random.randint(0, 3)](self.base_transform(x)))
output_list.append(self.subnet_transforms[idx](self.base_transform(x)))
return output_list
def set_resoidx(self, reso_idx):
self.reso_idx = reso_idx | 2,934 | 33.127907 | 102 | py |
HDN | HDN-master/tools/test.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import cv2
import torch
import numpy as np
from hdn.core.config import cfg
from hdn.tracker.tracker_builder import build_tracker
from hdn.utils.bbox import get_axis_aligned_bbox, get_min_max_bbox, get_w_h_from_poly, get_points_from_xyxy
from hdn.utils.model_load import load_pretrain
from toolkit.datasets import DatasetFactory
from hdn.models.model_builder_e2e_unconstrained_v2 import ModelBuilder
parser = argparse.ArgumentParser(description='siamese tracking')
parser.add_argument('--dataset', type=str,
help='datasets')
parser.add_argument('--config', default='', type=str,
help='config file')
parser.add_argument('--snapshot', default='', type=str,
help='snapshot of models to eval')
parser.add_argument('--video', default='', type=str,
help='eval one special video')
parser.add_argument('--vis', action='store_true',
help='whether to visualize results')
parser.add_argument("--gpu_id", default="not_set", type=str,
help="gpu id")
parser.add_argument('--img_w', type=int, default=640)
parser.add_argument('--img_h', type=int, default=360)
parser.add_argument('--patch_size_h', type=int, default=315)
parser.add_argument('--patch_size_w', type=int, default=560)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--lr', type=float, default=1e-9, help='learning rate')
parser.add_argument('--model_name', type=str, default='resnet34')
parser.add_argument('--pretrained', type=bool, default=True, help='Use pretrained weights?')
parser.add_argument('--finetune', type=bool, default=True, help='Fine-tune the model?')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = "0" if args.gpu_id == "not_set" else args.gpu_id
torch.set_num_threads(1)
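# Evaluation flow: build the model from --config/--snapshot, wrap it in a tracker,
# iterate over the dataset videos, initialize the tracker on frame 0 with the
# ground truth, call track_new on every later frame, and dump per-video results
# (and timings) in the format each benchmark expects.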
def main():
# load config
print('args.config',args.config)
cfg.merge_from_file(args.config)
cur_dir = os.path.dirname(os.path.realpath(__file__))
dataset_name = args.dataset
if args.dataset.startswith('POT'):
dataset_name = 'POT'
dataset_root = os.path.join(cur_dir, '../testing_dataset', dataset_name)
# create model
model = ModelBuilder()
# load model
print('args.snapshot',args.snapshot)
model = load_pretrain(model, args.snapshot).cuda().eval()
# build tracker
tracker = build_tracker(model)
exp_name = cfg.BASE.PROJ_PATH+'hdn/'
device = torch.cuda.current_device()
# create dataset
dataset = DatasetFactory.create_dataset(name=args.dataset,
dataset_root=dataset_root,
load_img=False)
model_name = args.snapshot.split('/')[-1].split('.')[0]
total_lost = 0
for v_idx, video in enumerate(dataset):
print('video_name', video.name)
# test one special video series
# print('video.attr[0]', video.attr[0])
# if video.attr[0] not in args.video_attr:
# continue
# print('visibledevice',os.environ["CUDA_VISIBLE_DEVICES"])
# if os.environ["CUDA_VISIBLE_DEVICES"] == "0":
# if v_idx > 55 :
# continue
# elif os.environ["CUDA_VISIBLE_DEVICES"] == "1":
# if v_idx > 110 or v_idx < 55:
# continue
# if os.environ["CUDA_VISIBLE_DEVICES"] == "2":
# if v_idx > 165 or v_idx < 110:
# continue
# elif os.environ["CUDA_VISIBLE_DEVICES"] == "3":
# # if v_idx > 210 or v_idx < 165:
# if v_idx < 165:
# continue
if args.video != '':
# test one special video
if video.name not in args.video:
continue
print('v_idx',v_idx)
toc = 0
pred_bboxes = []
scores = []
track_times = []
isPolygon=False
for idx, (img, gt_bbox) in enumerate(video):
###slow down
tic = cv2.getTickCount()
if idx == 0:
cx, cy, w, h = get_min_max_bbox(np.array(gt_bbox))
if(len(gt_bbox)==8):
gt_points = gt_bbox
gt_poly = get_w_h_from_poly(np.array(gt_bbox))
else:
gt_points = get_points_from_xyxy(np.array(gt_bbox))
gt_poly = [cx, cy, w, h, 0]
gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
first_point = np.array([gt_bbox[:2]])
tracker.init(img, gt_bbox_, gt_poly, gt_points, first_point)
pred_bbox = gt_bbox_
#fixme
if dataset_name in ['POT', 'UCSB', 'POIC']:
pred_bbox = np.array(gt_bbox)
scores.append(None)
if 'VOT2018-LT' == args.dataset:
pred_bboxes.append([1])
else:
pred_bboxes.append(pred_bbox)
else:
if idx%2==1:
if (len(gt_bbox) == 8):
gt_points = gt_bbox
gt_poly = get_w_h_from_poly(np.array(gt_bbox))
cx, cy, w, h = gt_poly[0],gt_poly[1], gt_poly[2], gt_poly[3]
else:
gt_points = get_points_from_xyxy(np.array(gt_bbox))
cx, cy, w, h = get_min_max_bbox(np.array(gt_bbox))
gt_poly = [cx, cy, w, h, 0]
gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
outputs = tracker.track_new(idx, img, gt_bbox_, gt_poly, gt_points)
#fixme
if dataset_name in ['POT', 'UCSB', 'POIC']:
polygon = np.array(outputs['polygon']).astype(np.int32)
isPolygon = True
pred_bbox = np.array(outputs['polygon']).astype(np.float32).reshape(1,-1)[0]
bbox_align = outputs['bbox_aligned']
elif 'polygon' in outputs:
polygon = np.array(outputs['polygon']).astype(np.float32)
max_p = np.max(polygon, 0)
min_p = np.min(polygon, 0)
pred_bbox = [min_p[0], min_p[1],
max_p[0]-min_p[0], max_p[1]-min_p[1]]
isPolygon = True
bbox_align = outputs['bbox_aligned']
else:
pred_bbox = outputs['bbox']
pred_bboxes.append(pred_bbox)
scores.append(outputs['best_score'])
toc += cv2.getTickCount() - tic
track_times.append((cv2.getTickCount() - tic)/cv2.getTickFrequency())
if idx == 0:
cv2.destroyAllWindows()
if args.vis and idx > 0:
if dataset_name in ['POT', 'UCSB', 'POIC']:
cv2.polylines(img, [np.array(gt_bbox).reshape(4,2).astype(np.int32)],
True, (0, 255, 255), 2)
else:
gt_box = list(map(int, gt_bbox))
pred_bbox = list(map(int, pred_bbox))
cv2.rectangle(img, (gt_box[0], gt_box[1]),
(gt_box[0]+gt_box[2], gt_box[1]+gt_box[3]), (0, 255, 0), 2)
if isPolygon:
cv2.polylines(img, [polygon],
True, (0, 255, 0), 2)
cv2.rectangle(img, (int(bbox_align[0]), int(bbox_align[1])), (int(bbox_align[0])+int(bbox_align[2]), int(bbox_align[1])+int(bbox_align[3])), (0, 0, 255), 2)
else:
cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
(pred_bbox[0]+pred_bbox[2], pred_bbox[1]+pred_bbox[3]), (0, 255, 255), 2)
cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(video.name, img)
cv2.waitKey(1)
toc /= cv2.getTickFrequency()
# save results
if 'VOT2018-LT' == args.dataset:
video_path = os.path.join('results', args.dataset, model_name,
'longterm', video.name)
if not os.path.isdir(video_path):
os.makedirs(video_path)
result_path = os.path.join(video_path,
'{}_001.txt'.format(video.name))
with open(result_path, 'w') as f:
for x in pred_bboxes:
f.write(','.join([str(i) for i in x])+'\n')
result_path = os.path.join(video_path,
'{}_001_confidence.value'.format(video.name))
with open(result_path, 'w') as f:
for x in scores:
f.write('\n') if x is None else f.write("{:.6f}\n".format(x))
result_path = os.path.join(video_path,
'{}_time.txt'.format(video.name))
with open(result_path, 'w') as f:
for x in track_times:
f.write("{:.6f}\n".format(x))
elif 'GOT-10k' == args.dataset:
video_path = os.path.join('results', args.dataset, model_name, video.name)
if not os.path.isdir(video_path):
os.makedirs(video_path)
result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
with open(result_path, 'w') as f:
for x in pred_bboxes:
f.write(','.join([str(i) for i in x])+'\n')
result_path = os.path.join(video_path,
'{}_time.txt'.format(video.name))
with open(result_path, 'w') as f:
for x in track_times:
f.write("{:.6f}\n".format(x))
else:
model_path = os.path.join('results', args.dataset, model_name)
if not os.path.isdir(model_path):
os.makedirs(model_path)
result_path = os.path.join(model_path, '{}.txt'.format(video.name))
with open(result_path, 'w') as f:
if dataset_name in ['POT', 'UCSB', 'POIC']:
for x in pred_bboxes:
f.write(' '.join([str(i) for i in x]) + '\n')
else:
for x in pred_bboxes:
f.write(','.join([str(i) for i in x])+'\n')
print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
v_idx+1, video.name, toc, idx / toc))
if __name__ == '__main__':
main()
| 10,757 | 42.032 | 176 | py |
HDN | HDN-master/tools/demo.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
import cv2
import torch
import numpy as np
from glob import glob
from hdn.core.config import cfg
from hdn.models.model_builder_e2e_unconstrained_v2 import ModelBuilder
from hdn.tracker.tracker_builder import build_tracker
from hdn.utils.model_load import load_pretrain
from hdn.utils.bbox import get_axis_aligned_bbox, get_min_max_bbox, get_w_h_from_poly, get_points_from_xywh, poly2mask, xywh2xyxy
import matplotlib.pyplot as plt
torch.set_num_threads(1)
parser = argparse.ArgumentParser(description='tracking demo')
parser.add_argument('--config', type=str, help='config file')
parser.add_argument('--snapshot', type=str, help='model name')
parser.add_argument('--video', default='', type=str,
help='videos or image files')
parser.add_argument('--save', action='store_true',
help='whether to save the result video')
parser.add_argument('--mode', help='demo mode', default='tracking')
parser.add_argument('--img_insert', help='img for replacing', default='None')
parser.add_argument('--video_insert', help='video for replacing', default='None')
parser.add_argument('--mosiac_range', type=int, default=30, help='block size in pixels for the mosaic effect')
args = parser.parse_args()
from shapely.geometry import Polygon
pts = [] # save points in init image
def draw_roi(event, x, y, flags,param):
img2 = param[0].copy()
if event == cv2.EVENT_LBUTTONDOWN:
pts.append((x, y))
if event == cv2.EVENT_RBUTTONDOWN:
pts.pop()
if len(pts) > 0:
cv2.circle(img2, pts[-1], 3, (0, 0, 255), -1)
if len(pts) > 1:
for i in range(len(pts) - 1):
cv2.circle(img2, pts[i], 5, (0, 0, 255), -1)
cv2.line(img=img2, pt1=pts[i], pt2=pts[i + 1], color=(255, 0, 0), thickness=2)
cv2.imshow(param[1], img2)
#
def get_frames(video_name):
if not video_name:
cap = cv2.VideoCapture(0)
# warmup
for i in range(5):
cap.read()
while True:
ret, frame = cap.read()
if ret:
yield frame
else:
break
elif video_name.endswith('avi') or \
video_name.endswith('mp4') or \
video_name.endswith('mov'):
cap = cv2.VideoCapture(args.video)
while True:
ret, frame = cap.read()
if ret:
yield frame
else:
break
else:
images = glob(os.path.join(video_name, '*.jp*'))
images = sorted(images,
key=lambda x: int(x.split('/')[-1].split('.')[0]))
for img in images:
frame = cv2.imread(img)
yield frame
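# get_frames is a generator: it yields BGR frames from the webcam (empty --video),
# from a video file (.avi/.mp4/.mov), or from a directory of numbered images
# sorted by their integer file names.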
def main():
# load config
cfg.merge_from_file(args.config)
cfg.CUDA = torch.cuda.is_available() and cfg.CUDA
device = torch.device('cuda' if cfg.CUDA else 'cpu')
# create model
model = ModelBuilder()
# load model
model = load_pretrain(model, args.snapshot).cuda().eval()
# build tracker
tracker = build_tracker(model)
first_frame = True
if args.video:
tmp = args.video.split('/')
video_name = tmp[-1].split('.')[0]
if video_name == '':
video_name = tmp[-2]
else:
video_name = 'webcam'
cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)
print('video_name', video_name, args.video)
if args.mode == 'video_replace':
#prepare img names
video_path = args.video_insert
# insert_img_names = sorted(os.listdir(video_path))
insert_img_names = sorted(os.listdir(video_path), key=lambda x: int(x[:-4]) )
for fr_idx, frame in enumerate(get_frames(args.video)):
if first_frame:
print('if_first_frame', first_frame)
# build video writer
if args.save:
if args.video.endswith('avi') or \
args.video.endswith('mp4') or \
args.video.endswith('mov'):
cap = cv2.VideoCapture(args.video)
fps = int(round(cap.get(cv2.CAP_PROP_FPS)))
else:
fps = 30
# save_video_path = args.video.split(video_name)[0] + video_name + '_tracking.mp4'
save_video_path = args.video.split(video_name)[0] + video_name +'-'+ args.mode + '.mp4'
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
frame_size = (frame.shape[1], frame.shape[0]) # (w, h)
video_writer = cv2.VideoWriter(save_video_path, fourcc, fps, frame_size)
cv2.setMouseCallback(video_name, draw_roi, param=[frame, video_name])
print('replacing with an image/video only supports 4 points for now')
print("[INFO] click left mouse button: choose a point, click right mouse button: remove the last selected point")
print("[INFO] press 's' to complete object selection ")
print("[INFO] ESC to exit")
while True:
key = cv2.waitKey(1) & 0xFF
if key == 27:
break
if key == ord("s"):
break
poly = np.array(pts).reshape(-1,2)
gt_points = poly.reshape(-1)
first_point = pts[0]
geo_poly = Polygon(poly)
cx, cy = geo_poly.centroid.x, geo_poly.centroid.y
max_p = np.max(poly, 0)
min_p = np.min(poly, 0)
align_bbox = [min_p[0], min_p[1],
max_p[0] - min_p[0], max_p[1] - min_p[1]]
gt_rect = [cx, cy, align_bbox[2], align_bbox[3],0]
tracker.init(frame, align_bbox, gt_rect, gt_points, first_point)
first_frame = False
cv2.destroyAllWindows()
else:
outputs = tracker.track_new(fr_idx, frame)
bbox = list(map(int, outputs['bbox']))
if frame.shape[0] < frame.shape[1]:
w_rsz = 1080
h_rsz = int(frame.shape[0] / frame.shape[1] *1080)
else:
h_rsz = 720
w_rsz = int(frame.shape[1]/ frame.shape[0] * 720)
if args.mode == 'tracking':
if 'polygon' in outputs:
polygon = np.array(outputs['polygon']).astype(np.int32)
cv2.polylines(frame, [polygon],
True, (0, 255, 0), 2)
if 'mask' in outputs:
mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
mask = mask.astype(np.uint8)
mask = np.stack([mask, mask*255, mask]).transpose(1, 2, 0)
frame = cv2.addWeighted(frame, 0.77, mask, 0.23, -1)
else:
cv2.rectangle(frame, (bbox[0], bbox[1]),
(bbox[0]+bbox[2], bbox[1]+bbox[3]),
(0, 255, 0), 3)
frame_rsz = cv2.resize(frame, (w_rsz, h_rsz))/255
cv2.imshow(video_name, frame_rsz)
cv2.waitKey(40)
#------------App 1. replace by a single img or a video-----------------------
#This can be used to add a static AD poster or a video AD, or to decorate the image (although we do not track the surface); it is a toy for now.
#More you can do: replace one video object with an object tracked in another video.
#More accurate results require aligning the decorations precisely in the first frame.
if args.mode == 'img_replace' or args.mode == 'video_replace':
if args.mode == 'img_replace' and args.img_insert == 'None':
raise Exception('image for replacing not found')
if args.mode == 'img_replace':
I_ins = cv2.imread(args.img_insert)
if args.mode == 'video_replace':
if len(insert_img_names) > fr_idx:
img_name = os.path.join(video_path, insert_img_names[fr_idx])
else:
img_name = os.path.join(video_path, insert_img_names[-1])
I_ins = cv2.imread(img_name)
# #for similarity, we lower the quality of the inserted image.
# I_ins = cv2.resize(I_ins, (int(box_w), int(box_h)))
I_ins_w, I_ins_h = I_ins.shape[1], I_ins.shape[0]
I_ins_4pts = [0, 0, I_ins_w, 0, I_ins_w, I_ins_h, 0, I_ins_h]
I_ins_4pts = np.array(I_ins_4pts).reshape(4, 2).astype(np.float32)
if 'polygon' in outputs:
polygon = np.array(outputs['polygon']).astype(np.int32)
else:
raise Exception('No polygon output')
H = cv2.getPerspectiveTransform(I_ins_4pts, polygon.astype(np.float32))
mask = np.zeros([I_ins_h, I_ins_w]).astype('float')
if args.img_insert.endswith('.png'):
I_ins_w_alpha = cv2.imread(args.img_insert, cv2.IMREAD_UNCHANGED)
I_alpha = I_ins_w_alpha[:,:,3]
I_alpha_mask = (I_alpha==0).astype('int')
I_alpha_mask_rev = (I_alpha!=0).astype('int')
mask = mask + I_alpha_mask
I_alpha_mask_rev = np.repeat(np.expand_dims(I_alpha_mask_rev,2), 3, axis=2)
I_ins = I_ins * (I_alpha_mask_rev.astype(np.int8))
I_ins_warped = cv2.warpPerspective(I_ins, H, (frame.shape[1], frame.shape[0]),
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
mask_warped = cv2.warpPerspective(mask, H, (frame.shape[1], frame.shape[0]),
borderMode=cv2.BORDER_CONSTANT, borderValue=1)
mask_warped_c3 = np.repeat(np.expand_dims(mask_warped,2), 3, axis=2)
mask_warped = mask_warped_c3[:frame.shape[0],:frame.shape[1], :]
frame = (mask_warped * frame + I_ins_warped).astype('uint8')
frame_rsz = cv2.resize(frame, (w_rsz, h_rsz))
cv2.imshow(video_name, frame_rsz)
cv2.waitKey(40)
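# Compositing recap: H maps the insert image's corner rectangle onto the tracked
# polygon; the frame is then updated as mask * frame + warped_insert, where the
# warped mask is 1 outside the polygon (and wherever a PNG insert is fully
# transparent), so the original pixels show through there.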
#------------App 2: add video mosaic--------------------------#
#For video editing
if args.mode == 'mosiac':
poly = outputs['polygon']
poly_mask = poly2mask((frame.shape[0], frame.shape[1]), [poly])
poly_mask_rev = (poly_mask==0).astype(np.uint8)
frame_mosiac = frame[::args.mosiac_range, ::args.mosiac_range]
frame_mosiac = cv2.resize(frame_mosiac, (frame.shape[1], frame.shape[0] ))
poly_mask = np.repeat(np.expand_dims(poly_mask,2), 3, axis=2)
poly_mask_rev = np.repeat(np.expand_dims(poly_mask_rev,2), 3, axis=2)
frame = frame_mosiac * poly_mask + poly_mask_rev * frame
frame_rsz = cv2.resize(frame, (w_rsz, h_rsz))/255
cv2.imshow(video_name, frame_rsz)
cv2.waitKey(40)
if args.save:
video_writer.write(frame)
if args.save:
video_writer.release()
if __name__ == '__main__':
main()
| 11,417 | 41.764045 | 149 | py |
HDN | HDN-master/tools/train.py | #Copyright 2021, XinruiZhan
# A distributed version of training
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.distributed import DistributedSampler
from hdn.utils.lr_scheduler import build_lr_scheduler
from hdn.utils.log_helper import init_log, print_speed, add_file_handler
from hdn.utils.distributed import dist_init, DistModule, reduce_gradients, \
average_reduce, get_rank, get_world_size
from hdn.utils.model_load import load_pretrain, restore_from
from hdn.utils.average_meter import AverageMeter
from hdn.utils.misc import describe, commit
from hdn.models.model_builder_e2e_unconstrained_v2 import ModelBuilder
# from hdn.datasets.dataset.semi_supervised_dataset import BANDataset
from hdn.datasets.dataset import get_dataset
from hdn.core.config import cfg
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import torch.optim as optim
# from torchviz import make_dot
import cv2
import time
logger = logging.getLogger('global')
parser = argparse.ArgumentParser(description='siamese tracking')
parser.add_argument('--cfg', type=str, default='config.yaml',
help='configuration of tracking')
parser.add_argument('--seed', type=int, default=123456,
help='random seed')
parser.add_argument('--local_rank', type=int, default=0,
help='required by the PyTorch launcher')
args = parser.parse_args()
# CUDA_VISIBLE_DEVICES=0,1,2,3
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def make_mesh(patch_w,patch_h):
x_flat = np.arange(0,patch_w)
x_flat = x_flat[np.newaxis,:]
y_one = np.ones(patch_h)
y_one = y_one[:,np.newaxis]
x_mesh = np.matmul(y_one , x_flat)
y_flat = np.arange(0,patch_h)
y_flat = y_flat[:,np.newaxis]
x_one = np.ones(patch_w)
x_one = x_one[np.newaxis,:]
y_mesh = np.matmul(y_flat,x_one)
return x_mesh,y_mesh
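# make_mesh builds pixel-coordinate grids of shape (patch_h, patch_w) via outer
# products: x_mesh[i, j] = j and y_mesh[i, j] = i, i.e. the same result as
# np.meshgrid(np.arange(patch_w), np.arange(patch_h)).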
def build_data_loader():
logger.info("build train dataset")
# train_dataset
if cfg.BAN.BAN:
print('cfg.DATASET.TYPE',cfg.DATASET.TYPE)
train_dataset = get_dataset(cfg.DATASET.TYPE)
logger.info("build dataset done")
train_sampler = None
if get_world_size() > 1:
train_sampler = DistributedSampler(train_dataset)
print('num_worker',cfg.TRAIN.NUM_WORKERS)
#we don't have enough memory
train_loader = DataLoader(train_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE,
num_workers=cfg.TRAIN.NUM_WORKERS,
pin_memory=False,
sampler=train_sampler)
return train_loader
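# build_opt_lr below freezes/unfreezes sub-modules according to cfg.TRAIN.OBJ:
# 'LP' trains only the log-polar neck/head, 'NM' only the normal neck/head,
# 'SIM' trains both plus the homography net, and 'ALL' additionally scales the
# homography-net LR by cfg.TRAIN.HOMO_LR_RATIO; the backbone layers join in
# after cfg.BACKBONE.TRAIN_EPOCH.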
def build_opt_lr(model, current_epoch=0):
model.train()
for param in model.backbone.parameters():
param.requires_grad = False
for m in model.backbone.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
for layer in cfg.BACKBONE.TRAIN_LAYERS:
for param in getattr(model.backbone, layer).parameters():
param.requires_grad = True
for m in getattr(model.backbone, layer).modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
trainable_params = []
trainable_params += [{'params': filter(lambda x: x.requires_grad,
model.backbone.parameters()),
'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]
if cfg.ADJUST.ADJUST:
if cfg.TRAIN.OBJ == 'LP':
for param in model.neck.parameters():
param.requires_grad = False
trainable_params += [{'params': model.neck_lp.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
elif cfg.TRAIN.OBJ == 'NM':
for param in model.neck_lp.parameters():
param.requires_grad = False
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
elif cfg.TRAIN.OBJ == 'SIM' or cfg.TRAIN.OBJ == 'ALL':
trainable_params += [{'params': model.neck_lp.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
elif cfg.TRAIN.OBJ == 'HOMO':
for param in model.neck.parameters():
param.requires_grad = False
for param in model.neck_lp.parameters():
param.requires_grad = False
# neck & head
if cfg.TRAIN.OBJ == 'LP':
trainable_params += [{'params': model.head_lp.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
for param in model.head.parameters():
param.requires_grad = False
elif cfg.TRAIN.OBJ == 'NM':
trainable_params += [{'params': model.head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
for param in model.head_lp.parameters():
param.requires_grad = False
elif cfg.TRAIN.OBJ == 'SIM':
trainable_params += [{'params': model.head_lp.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.hm_net.parameters(),
'lr': cfg.TRAIN.HOMO_START_LR}]
elif cfg.TRAIN.OBJ == 'ALL':
trainable_params += [{'params': model.head_lp.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.hm_net.parameters(),
'lr': cfg.TRAIN.HOMO_START_LR * cfg.TRAIN.HOMO_LR_RATIO}]
optimizer = torch.optim.Adam(trainable_params, lr=cfg.TRAIN.BASE_LR, amsgrad=True, weight_decay=1e-4) # default as 0.0001
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.8)
lr_scheduler.step(cfg.TRAIN.START_EPOCH)
return optimizer, lr_scheduler
def log_grads(model, tb_writer, tb_index):
def weights_grads(model):
grad = {}
weights = {}
for name, param in model.named_parameters():
if param.grad is not None:
grad[name] = param.grad
weights[name] = param.data
return grad, weights
grad, weights = weights_grads(model)
feature_norm, head_norm = 0, 0
for k, g in grad.items():
_norm = g.data.norm(2)
# print('weight', weights[k])
weight = weights[k]
w_norm = weight.norm(2)
w_norm_avg = weight.norm(1)/weight.shape[0]
# print('w_norm',w_norm)
if 'feature' in k:
feature_norm += _norm ** 2
else:
head_norm += _norm ** 2
tb_writer.add_scalar('grad_all/' + k.replace('.', '/'),
_norm, tb_index)
tb_writer.add_scalar('weight_all/' + k.replace('.', '/'),
w_norm, tb_index)
tb_writer.add_scalar('w-g/' + k.replace('.', '/'),
w_norm / (1e-20 + _norm), tb_index)
tb_writer.add_scalar('w_norm_avg' + k.replace('.', '/'),
w_norm_avg, tb_index)
tot_norm = feature_norm + head_norm
tot_norm = tot_norm ** 0.5
feature_norm = feature_norm ** 0.5
head_norm = head_norm ** 0.5
tb_writer.add_scalar('grad/tot', tot_norm, tb_index)
tb_writer.add_scalar('grad/feature', feature_norm, tb_index)
tb_writer.add_scalar('grad/head', head_norm, tb_index)
def train(train_loader, model, optimizer, lr_scheduler, tb_writer):
rank = get_rank()
average_meter = AverageMeter()
def is_valid_number(x):
return not (math.isnan(x) or math.isinf(x) or x > 1e4)
world_size = get_world_size()
num_per_epoch = len(train_loader.dataset) // \
cfg.TRAIN.EPOCH // (cfg.TRAIN.BATCH_SIZE * world_size)
start_epoch = cfg.TRAIN.START_EPOCH
epoch = start_epoch
print('start epoch', cfg.TRAIN.START_EPOCH)
if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR) and \
get_rank() == 0:
os.makedirs(cfg.TRAIN.SNAPSHOT_DIR)
logger.info("model\n{}".format(describe(model.module)))
end = time.time()
print('num_per_epoch', num_per_epoch)
for idx, data in enumerate(train_loader):
if epoch != idx // num_per_epoch + start_epoch:
epoch = idx // num_per_epoch + start_epoch
if get_rank() == 0:
torch.save(
{'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict()},
cfg.TRAIN.SNAPSHOT_DIR + '/got_e2e_%s_e%d.pth' % (cfg.TRAIN.OBJ, epoch))
if epoch == cfg.TRAIN.EPOCH:
return
if cfg.BACKBONE.TRAIN_EPOCH == epoch:
logger.info('start training backbone.')
optimizer, lr_scheduler = build_opt_lr(model.module, epoch)
logger.info("model\n{}".format(describe(model.module)))
lr_scheduler.step(epoch)
logger.info('epoch: {}'.format(epoch + 1))
tb_idx = idx + start_epoch * num_per_epoch
if idx % num_per_epoch == 0 and idx != 0:
# use a separate loop variable here: reusing `idx` would clobber the outer
# enumerate(train_loader) index and corrupt the epoch/tb_idx bookkeeping
for pg_idx, pg in enumerate(optimizer.param_groups):
logger.info('epoch {} lr {}'.format(epoch + 1, pg['lr']))
if rank == 0:
tb_writer.add_scalar('lr/group{}'.format(pg_idx + 1),
pg['lr'], tb_idx)
data_time = average_reduce(time.time() - end)
if rank == 0:
tb_writer.add_scalar('time/data', data_time, tb_idx)
# with torch.autograd.detect_anomaly():
optimizer.zero_grad()
outputs = model(data)
loss = outputs['total_loss']
if is_valid_number(loss.data.item()):
loss.backward()
reduce_gradients(model)
clip_grad_norm_(model.parameters(), cfg.TRAIN.GRAD_CLIP)
optimizer.step()
batch_time = time.time() - end
batch_info = {}
batch_info['batch_time'] = average_reduce(batch_time)
batch_info['data_time'] = average_reduce(data_time)
for k, v in sorted(outputs.items()):
# pass
batch_info[k] = average_reduce(v.data.item())
average_meter.update(**batch_info)
if rank == 0:
for k, v in batch_info.items():
tb_writer.add_scalar(k, v, tb_idx)
if (idx + 1) % cfg.TRAIN.PRINT_FREQ == 0:
info = "Epoch: [{}][{}/{}] lr: {:.6f}\n".format(
epoch + 1, (idx + 1) % num_per_epoch,
num_per_epoch, lr_scheduler.get_lr()[1])
for cc, (k, v) in enumerate(batch_info.items()):
if cc % 2 == 0:
info += ("\t{:s}\t").format(
getattr(average_meter, k))
else:
info += ("{:s}\n").format(
getattr(average_meter, k))
logger.info(info)
print_speed(idx + 1 + start_epoch * num_per_epoch,
average_meter.batch_time.avg,
cfg.TRAIN.EPOCH * num_per_epoch)
if cfg.TRAIN.LOG_GRADS:
log_grads(model.module, tb_writer, tb_idx)
end = time.time()
def main():
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,3"
rank, world_size = dist_init()
print('rank', rank, 'world_size', world_size)
logger.info("init done")
# load cfg
cfg.merge_from_file(args.cfg)
if rank == 0:
if not os.path.exists(cfg.TRAIN.LOG_DIR):
os.makedirs(cfg.TRAIN.LOG_DIR)
init_log('global', logging.INFO)
if cfg.TRAIN.LOG_DIR:
add_file_handler('global',
os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
logging.INFO)
logger.info("Version Information: \n{}\n".format(commit()))
logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
# create model
model = ModelBuilder().cuda().train()
# load pretrained backbone weights
if cfg.BACKBONE.PRETRAINED:
cur_path = os.path.dirname(os.path.realpath(__file__))
backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED)
print('pretrained path', backbone_path)
load_pretrain(model.backbone, backbone_path)
# create tensorboard writer
if rank == 0 and cfg.TRAIN.LOG_DIR:
tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR)
else:
tb_writer = None
# build dataset loader
train_loader = build_data_loader()
start_epoch = cfg.TRAIN.START_EPOCH
print('start_epoch',start_epoch)
#build optimizer and lr_scheduler
optimizer, lr_scheduler = build_opt_lr(model,
cfg.TRAIN.START_EPOCH)
# resume training
RESUME_PATH = cfg.BASE.PROJ_PATH + cfg.TRAIN.RESUME
if cfg.TRAIN.RESUME:
logger.info("resume from {}".format(RESUME_PATH))
assert os.path.isfile(RESUME_PATH), \
'{} is not a valid file.'.format(RESUME_PATH)
model, optimizer, cfg.TRAIN.START_EPOCH = \
restore_from(model, optimizer, RESUME_PATH)
# # load pretrain
elif cfg.TRAIN.PRETRAINED:
print('if cfg.TRAIN.PRETRAINED')
load_pretrain(model, cfg.TRAIN.PRETRAINED)
dist_model = DistModule(model)
logger.info(lr_scheduler)
logger.info("model prepare done")
cfg.TRAIN.START_EPOCH = start_epoch
# start training
train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer)
if __name__ == '__main__':
# seed_torch(args.seed)
# import heartrate
# from heartrate import trace, files
# heartrate.trace(browser=True,host='10.214.241.12', port=4235, files=files.path_contains('model_builder_e2e_unconstrained', 'train_e2e_unconstrained_dist','unconstrained_dataset'))
seed_torch(args.seed)
main()
| 14,895 | 37.293059 | 185 | py |
HDN | HDN-master/hdn/tracker/base_tracker.py | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import torch
from hdn.core.config import cfg
from hdn.models.logpolar import getPolarImg, getLinearPolarImg
from hdn.utils.transform import img_shift_left_top_2_center
from hdn.utils.point import generate_points, generate_points_lp
from hdn.utils.bbox import corner2center  # needed by _convert_bbox below
class BaseTracker(object):
""" Base tracker of single objec tracking
"""
def init(self, img, bbox):
"""
args:
img(np.ndarray): BGR image
bbox(list): [x, y, width, height]
x, y need to be 0-based
"""
raise NotImplementedError
def track(self, img):
"""
args:
img(np.ndarray): BGR image
return:
bbox(list):[x, y, width, height]
"""
raise NotImplementedError
class SiameseTracker(BaseTracker):
def _convert_bbox(self, delta, point):# delta:1*4*25*25
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
delta = delta.detach().cpu().numpy()#4*625
delta[0, :] = point[:, 0] - delta[0, :]
delta[1, :] = point[:, 1] - delta[1, :]
delta[2, :] = point[:, 0] + delta[2, :]
delta[3, :] = point[:, 1] + delta[3, :]
delta[0, :], delta[1, :], delta[2, :], delta[3, :] = corner2center(delta)
return delta
def _convert_delta(self, delta):
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
delta = delta.detach().cpu().numpy()
return delta
def _convert_c(self, delta, point):
delta = delta.permute(1, 2, 3, 0).contiguous().view(2,-1)
delta = delta.detach().cpu().numpy()
delta[0, :] = point[:, 0] - delta[0, :]*8
delta[1, :] = point[:, 1] - delta[1, :]*8
return delta
def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans, islog=False):
"""
args:
im: bgr based image
pos: center position
model_sz: exemplar size
original_sz: original size
avg_chans: channel average
"""
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
# c = (original_sz + 1) / 2
c = (original_sz - 1) / 2
# context_xmin = round(pos[0] - c) # py2 and py3 round
context_xmin = np.floor(pos[0] - c + 0.5)
context_xmax = context_xmin + sz - 1
# context_ymin = round(pos[1] - c)
context_ymin = np.floor(pos[1] - c + 0.5)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
if len(im.shape)==2:
im = im.reshape(im.shape[0], im.shape[1], 1)
r, c, k = im.shape
# k = 1
else:
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
te_im = np.zeros(size, np.uint8)
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch = te_im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
else:
im_patch = im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
if islog:
if islog == 1:
im_log = getPolarImg(im_patch)
# im_log = shift_left_top_2_center(im_log)
else:
im_log = getLinearPolarImg(im_patch)
# im_log = shift_left_top_2_center(im_log)
im_patch = np.concatenate((im_patch, im_log), 2)
if len(im_patch.shape)==2:
im_patch = im_patch.reshape(im_patch.shape[0],im_patch.shape[1],1)
im_patch = im_patch.transpose(2, 0, 1)
im_patch = im_patch[np.newaxis, :, :, :]
im_patch = im_patch.astype(np.float32)
im_patch = torch.from_numpy(im_patch)
if cfg.CUDA:
im_patch = im_patch.cuda()
return im_patch
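# Cropping recap: get_subwindow extracts a square window of side original_sz
# centered at pos, pads any out-of-image margin with the per-channel mean
# (avg_chans), resizes the patch to model_sz, and, when islog is set, stacks a
# (log-)polar view of the patch as extra channels.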
def get_subwindow_for_homo(self, im, pos, model_sz, original_sz, avg_chans, islog=False):
"""
args:
im: bgr based image
pos: center position
model_sz: exemplar size
original_sz: original size
avg_chans: channel average
"""
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
# c = (original_sz + 1) / 2
c = (original_sz - 1) / 2
# context_xmin = round(pos[0] - c) # py2 and py3 round
context_xmin = np.floor(pos[0] - c + 0.5)
context_xmax = context_xmin + sz - 1
# context_ymin = round(pos[1] - c)
context_ymin = np.floor(pos[1] - c + 0.5)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
if len(im.shape) == 2:
im = im.reshape(im.shape[0], im.shape[1], 1)
r, c, k = im.shape
# k = 1
else:
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
te_im = np.zeros(size, np.uint8)
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch = te_im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
else:
im_patch = im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
if islog:
if islog == 1:
im_log = getPolarImg(im_patch)
# im_log = shift_left_top_2_center(im_log)
else:
im_log = getLinearPolarImg(im_patch)
# im_log = shift_left_top_2_center(im_log)
im_patch = np.concatenate((im_patch, im_log), 2)
if len(im_patch.shape) == 2:
im_patch = im_patch.reshape(im_patch.shape[0], im_patch.shape[1], 1)
im_patch = im_patch.transpose(2, 0, 1)
im_patch = im_patch[np.newaxis, :, :, :]
im_patch = im_patch.astype(np.float32)
im_patch = torch.from_numpy(im_patch)
if cfg.CUDA:
im_patch = im_patch.cuda()
return im_patch, (context_xmin, context_ymin, context_xmax+1, context_ymax+1)
def get_subwindow_for_sift(self, im, pos, obj_sz):
"""
args:
im: bgr based image
pos: center position
model_sz: exemplar size
s_z: original size
avg_chans: channel average
"""
#we only need to mask the non-object field.
if isinstance(pos, float):
pos = [pos, pos]
mask = np.zeros([im.shape[0], im.shape[1]])
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('uint8')
# im = np.mean(im, axis=2, keepdims=True)[:,:,0]
mask[int(int(pos[1])-obj_sz[1]//2+1):int(int(pos[1])+obj_sz[1]//2+1),int(int(pos[0])-obj_sz[0]//2+1):int(int(pos[0])+obj_sz[0]//2+1), ] = 1
im = (im * mask).astype('uint8')
# im = np.expand_dims(im, axis=2)
return im
def get_subwindow_for_SuperGlue(self, im, pos, obj_sz):
"""
args:
im: bgr based image
pos: center position
model_sz: exemplar size
s_z: original size
avg_chans: channel average
"""
#we only need to mask the non-object field.
if isinstance(pos, float):
pos = [pos, pos]
mask = np.zeros([im.shape[0], im.shape[1]])
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('uint8')
mask[int(int(pos[1])-obj_sz[1]//2+1):int(int(pos[1])+obj_sz[1]//2+1),int(int(pos[0])-obj_sz[0]//2+1):int(int(pos[0])+obj_sz[0]//2+1), ] = 1
im = (im * mask).astype('uint8')
return im
def get_subwindow_for_lisrd(self, im, pos, obj_sz):
"""
args:
im: bgr based image
pos: center position
model_sz: exemplar size
s_z: original size
avg_chans: channel average
"""
if isinstance(pos, float):
pos = [pos, pos]
mask = np.zeros([im.shape[0], im.shape[1],3])
mask[int(int(pos[1])-obj_sz[1]//2+1):int(int(pos[1])+obj_sz[1]//2+1),int(int(pos[0])-obj_sz[0]//2+1):int(int(pos[0])+obj_sz[0]//2+1), ] = 1
im = (im * mask).astype('uint8')
return im
| 10,345 | 35.95 | 147 | py |
HDN | HDN-master/hdn/tracker/hdn_tracker_proj_e2e.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
import math
from hdn.tracker.hdn_tracker import hdnTracker
from hdn.core.config import cfg
from hdn.utils.bbox import corner2center, cetner2poly, getRotMatrix, transformPoly,center2corner
from hdn.utils.point import Point
from hdn.utils.transform import img_rot_around_center, img_rot_scale_around_center, img_shift, img_shift_crop_w_h, get_hamming_window, homo_add_shift, \
rot_scale_around_center_shift_tran, img_proj_trans, shift_tran, find_homo_by_imgs_opencv_ransac, get_mask_window, decompose_affine,compose_affine_homo_RKS
from homo_estimator.Deep_homography.Oneline_DLTv1.tools.get_img_info import get_search_info, get_template_info, merge_tmp_search
import cv2
import matplotlib.pyplot as plt
class hdnTrackerHomo(hdnTracker):
def __init__(self, model):
super(hdnTrackerHomo, self).__init__(model)
self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \
cfg.POINT.STRIDE + 1 + cfg.TRACK.BASE_SIZE
hanning = np.hanning(self.score_size)
window = np.outer(hanning, hanning)
self.cls_out_channels = cfg.BAN.KWARGS.cls_out_channels
self.window = window.flatten()
self.points = self.generate_points(cfg.POINT.STRIDE, self.score_size)
self.p = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.EXEMPLAR_SIZE // 2)
self.points_lp = self.generate_points_lp(cfg.POINT.STRIDE_LP, cfg.POINT.STRIDE_LP, cfg.TRAIN.OUTPUT_SIZE_LP) #self.p.points.transpose((1, 2, 0)).reshape(-1, 2)
self.model = model
self.model.eval()
def mask_img(self, img, points):  # points: polygon vertices, shape (N, 2)
mask = np.zeros([img.shape[0], img.shape[1]])
contours = [points.astype(np.int32)]
cv2.drawContours(mask, contours, 0, (1), -1)
img[np.where(mask<=0)] = 0
return img
def homo_estimate(self, tmp, search, tmp_mask):
merge_info = merge_tmp_search(tmp, search)
org_imgs = torch.Tensor(merge_info['org_imgs']).float().unsqueeze(0).cuda()
input_tensors = torch.Tensor(merge_info['input_tensors']).float().unsqueeze(0).cuda()
patch_indices = torch.Tensor(merge_info['patch_indices']).float().unsqueeze(0).cuda()
four_points = torch.Tensor(merge_info['four_points']).float().unsqueeze(0).cuda() # ([1, 2, 360, 640])
tmp_mask = torch.from_numpy(tmp_mask).cuda()
data = {}
data['org_imgs'] = org_imgs
data['input_tensors'] = input_tensors
data['h4p'] = four_points
data['patch_indices'] = patch_indices
H, homo_score, simi_score = self.model.track_proj(data, tmp_mask)
return H, homo_score, simi_score
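# homo_estimate packs the template/search pair (original images, input tensors,
# patch indices and the four corner points) into one batch and queries the
# model's deep-homography branch, returning the 3x3 homography H together with
# a homography confidence score and a similarity score.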
def init(self, img, bbox, poly, gt_points, first_point):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
poly: (cx, cy, w, h, theta)
first_point: (x1, y1) first point of gt
"""
self.center_pos = np.array([poly[0],poly[1]])
self.init_rot = poly[4] # self.init_rot = 0
self.rot = poly[4] # self.rot = 0
polygon = cetner2poly(poly[:4])
tran = getRotMatrix(poly[0], poly[1], poly[4])
polygon = transformPoly(polygon, tran)
fir_dis = (polygon - first_point) ** 2
fir_dis = np.argmin(fir_dis[:,0] + fir_dis[:,1])
self.poly_shift_l = fir_dis
self.scale = 1
self.lp_shift = [0,0]
self.v = 0
self.size = np.array([poly[2], poly[3]])
self.align_size = np.array([bbox[2], bbox[3]])
# print('cotext',cfg.TRACK.CONTEXT_AMOUNT)
# window range
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
w_z_sm = self.size[0] + 0 * np.sum(self.size)#crop size
h_z_sm = self.size[1] + 0 * np.sum(self.size)
s_z = np.sqrt(w_z * h_z)
s_z_sm = np.sqrt(w_z_sm * h_z_sm)
s_z = np.floor(s_z)
s_z_sm = np.floor(s_z_sm)
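        # Worked example (comment only, not from the original source; assumes
        # CONTEXT_AMOUNT = 0.5): for a 100x60 target, w_z = 100 + 0.5*160 = 180
        # and h_z = 60 + 0.5*160 = 140, so s_z = floor(sqrt(180*140)) = 158,
        # while the zero-context crop used for homography estimation is
        # s_z_sm = floor(sqrt(100*60)) = 77.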
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
self.z_crop, self.z_crop_points = self.get_subwindow_for_homo(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z,
self.channel_average, islog=1) # normal template
self.z_crop_sm, self.z_crop_points_sm = self.get_subwindow_for_homo(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z_sm,
self.channel_average, islog=1) #for homo-estimation
self.model.template(self.z_crop)
self.init_img = img
self.init_crop_size = np.array([w_z, h_z])
self.init_size = self.size
self.init_s_z = s_z
self.init_s_z_sm = s_z_sm # self.init_s_z_r = s_z_r
self.init_pos = np.array([poly[0],poly[1]])
self.window_scale_factor = 1.0
self.lost = True
self.lost_count = 0
self.last_lost = False
self.init_points = np.array(gt_points).astype(np.float32)
self.init_homo_tmp, self.print_tmp_img = get_template_info(self.z_crop_sm[:, 0:3, :, :])
self.H_total = np.array([[1, 0, 0],[0, 1, 0],[0, 0, 1]], dtype=np.float32)
self.H_total_sim = np.array([[1, 0, 0],[0, 1, 0],[0, 0, 1]], dtype=np.float32)
self.uncertain = 0
self.recover_H = np.identity(3).astype('float')
def update_template(self):
img = self.init_img
img = img_rot_around_center(img, self.init_pos[0], self.init_pos[1], img.shape[1], img.shape[0], self.lp_shift[1])
self.z_crop = self.get_subwindow(img, self.init_pos,
cfg.TRACK.EXEMPLAR_SIZE,
self.init_s_z,self.channel_average, islog=1)
self.model.template(self.z_crop)
def update_template_window(self,sc):
img = self.init_img
z_crop = self.get_subwindow(img, self.init_pos,
cfg.TRACK.EXEMPLAR_SIZE,
self.init_s_z*sc ,self.channel_average, islog=1)
self.model.template(z_crop)
def get_points_by_homo(self, uni_points, H):
pred_points = H @ uni_points
pred_points = np.vsplit(pred_points,[2])[0]
pred_points = pred_points.transpose([1,0])
return pred_points
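    # Usage sketch (comment only, not from the original source):
    # `uni_points` is expected in homogeneous form, i.e. a 3xN array
    # [[x0, x1, ...], [y0, y1, ...], [1, 1, ...]], so e.g.
    #   pts = self.get_points_by_homo(np.array([[10.], [20.], [1.]]), np.eye(3))
    # returns [[10., 20.]] -- the identity homography leaves points unchanged.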
def track_new(self, fr_idx, img, gt_box=None, gt_poly=None, gt_points=None):
"""
args:
img(np.ndarray): BGR image
return:
bbox(list):[x, y, width, height]
"""
####################################################################
####---------------1.translation estimation---------------------####
if np.linalg.det(self.H_total) == 0:
self.H_total = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]).astype(np.float32) # we will do inv after, so make sure H_total is non-singular
img = cv2.warpPerspective(img, np.linalg.inv(self.H_total), (img.shape[1], img.shape[0]),
borderMode=cv2.BORDER_REPLICATE)
init_points = self.init_points.reshape(-1, 2).astype(np.float32)
s_z = self.init_s_z
cur_sz = self.init_s_z
center_pos = self.init_pos
INS_EXAM_RATIO = np.round(cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
s_x = np.floor(s_z * INS_EXAM_RATIO) #window_scale_factor may influence the accuracy due to the floor in sub window
x_crop = self.get_subwindow(img, center_pos,
cfg.TRACK.INSTANCE_SIZE,
s_x, self.channel_average)
outputs = self.model.track_new(x_crop)
score = self._convert_score(outputs['cls'])
pred_c = self._convert_c(outputs['loc_c'], self.points)
pscore = score
# TODO window penalty
pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx = np.argmax(pscore)
stop_update_flag = 0
if pscore[best_idx] < 0.05 :
center = [0,0]
stop_update_flag = 1
else:
center = pred_c[:, best_idx] / scale_z
cx = center[0] + center_pos[0]
cy = center[1] + center_pos[1]
delta_cx = center[0]
delta_cy = center[1]
self.center_pos = np.array([cx, cy])
####---------------1.translation estimation---------------------####
####################################################################
####################################################################
####--------------------2.scale-rot estimation--------------------------####
# lp_result
x_crop_moved = self.get_subwindow(img, self.center_pos,
cfg.TRACK.INSTANCE_SIZE,
s_x, self.channel_average)
#if update template
outputs = self.model.track_new_lp(x_crop_moved, [0,0])
score_lp = self._convert_score(outputs['cls_lp'])
peak_map = score_lp.copy()
peak_idx = np.argmax(peak_map)
peak_map = np.zeros([13*13])
peak_map[peak_idx] = 1
pred_center_lp = self._convert_logpolar_simi(outputs['loc_lp'], self.points_lp, peak_idx, fr_idx)
pscore_lp = score_lp
best_idx_lp = np.argmax(pscore_lp)
sim_lp = pred_center_lp[:, best_idx_lp]
if stop_update_flag or pscore_lp[best_idx_lp] < 0.25:
sim_lp = [1, 1, 0, 0]
best_score = score[best_idx]
scale_delta = sim_lp[0] * cur_sz / self.init_s_z #actually if there is no big delta, we can just sample the patch the same size as template.
rot_delta = sim_lp[2]
#similarity H
H_sim = rot_scale_around_center_shift_tran(cx, cy, rot_delta, scale_delta, delta_cx, delta_cy)#(cx, cy, rot, scale, sx, sy)
self.rot += rot_delta
self.scale *= scale_delta
####--------------------2.lp estimation--------------------------####
######################################################################
######################################################################
####---------------3.residual estimation------------------------####
rot_img_homo = img_rot_around_center(img, cx, cy, img.shape[1], img.shape[0], -rot_delta)
x_crop_homo, crop_points = self.get_subwindow_for_homo(rot_img_homo, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
self.init_s_z_sm * scale_delta,#self.init_s_z_sm * scale_delta
self.channel_average) # TODO reduce the size to 127*127
crop_points_w = self.z_crop_points_sm[2] - self.z_crop_points_sm[0] + 1
crop_points_h = self.z_crop_points_sm[3] - self.z_crop_points_sm[1] + 1
resize_crop_w = 127
resize_crop_h = 127
#mask window
sc = resize_crop_w/crop_points_w
mask_tmp = get_mask_window(self.size[0]*sc, self.size[1]*sc, self.init_rot, \
cfg.TRAIN.EXEMPLAR_SIZE/2, cfg.TRAIN.EXEMPLAR_SIZE/2, cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE)#w, h, rot, sx, sy, out_size_w, out_size_h
homo_search_img, print_search_img = get_search_info(x_crop_homo[:, 0:3, :, :])
        # homography estimation
        H_hm_comp = np.identity(3)
        for i in range(1):  # single refinement pass; increase the range to iterate the estimate further
H_hm, homo_score, simi_score = self.homo_estimate(self.init_homo_tmp, homo_search_img, mask_tmp)
homo_score = homo_score.detach().cpu().numpy()
H_hm = H_hm.detach().cpu().squeeze(0).numpy()
H_hm = np.linalg.inv(H_hm)
H_hm = (1.0 / H_hm.item(8)) * H_hm
homo_search_img = np.expand_dims(cv2.warpPerspective(homo_search_img[0], np.linalg.inv(H_hm), (127,127),
borderMode=cv2.BORDER_REPLICATE), 0)
H_hm_comp = H_hm_comp @ H_hm
scale_H_1 = np.array([[resize_crop_w/crop_points_w , 0, 0],
[0, resize_crop_h/crop_points_h, 0],
[0, 0, 1]]).astype(np.float32)#recover the square to rect
H_hm_comp = np.linalg.inv(scale_H_1) @ H_hm_comp @ scale_H_1
        shift_H = np.array([[1, 0, -self.z_crop_points_sm[0]],
                       [0, 1, -self.z_crop_points_sm[1]],
                       [0, 0, 1]]).astype(np.float32)  # undo the crop offset of the sub-window
H_hm_comp = np.linalg.inv(shift_H) @ H_hm_comp @ shift_H
H_homo = H_hm_comp
#Total Homography
        if homo_score > 2.5:  # large feature residual: distrust the homography and keep the similarity update only
            H = self.H_total @ H_sim
        else:
            H = self.H_total @ H_sim @ H_homo  # compose the residual homography on top of the similarity update
H = (1.0 / H.item(8)) * H
self.H_total = H
####---------------3.residual estimation------------------------####
######################################################################
#points
pred_points = cv2.perspectiveTransform(np.expand_dims(init_points, 0), self.H_total)[0]
max_p = np.max(pred_points, 0)
min_p = np.min(pred_points, 0)
align_bbox = [min_p[0], min_p[1],
max_p[0] - min_p[0], max_p[1] - min_p[1]]
self.align_size = [align_bbox[2],align_bbox[3]] #real
return {
'bbox_aligned':align_bbox,
'best_score': best_score,
'polygon': pred_points,
'points': pred_points,
'bbox': align_bbox
}
| 14,392 | 49.149826 | 180 | py |
HDN | HDN-master/hdn/tracker/hdn_tracker.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
import math
from hdn.core.config import cfg
from hdn.tracker.base_tracker import SiameseTracker
from hdn.utils.bbox import corner2center, cetner2poly, getRotMatrix, transformPoly,center2corner
from hdn.utils.point import Point, generate_points, generate_points_lp
from hdn.utils.transform import img_rot_around_center, img_rot_scale_around_center, img_shift, img_shift_crop_w_h, get_hamming_window
import cv2
import matplotlib.pyplot as plt
class hdnTracker(SiameseTracker):
def __init__(self, model):
super(hdnTracker, self).__init__()
self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \
cfg.POINT.STRIDE + 1 + cfg.TRACK.BASE_SIZE
hanning = np.hanning(self.score_size)
window = np.outer(hanning, hanning)
self.cls_out_channels = cfg.BAN.KWARGS.cls_out_channels
self.window = window.flatten()
self.points = generate_points(cfg.POINT.STRIDE, self.score_size)
self.p = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.EXEMPLAR_SIZE // 2)
self.points_lp = generate_points_lp(cfg.POINT.STRIDE_LP, cfg.POINT.STRIDE_LP, cfg.TRAIN.OUTPUT_SIZE_LP) #self.p.points.transpose((1, 2, 0)).reshape(-1, 2)
self.model = model
def generate_points(self, stride, size):
ori = - (size // 2) * stride # -96
x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)],
[ori + stride * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
def generate_points_lp(self, stride_w, stride_h, size):
ori_x = - (size // 2) * stride_w # -96
ori_y = - (size // 2) * stride_h # -96
x, y = np.meshgrid([ori_x + stride_w * dx for dx in np.arange(0, size)],
[ori_y + stride_h * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
def _convert_logpolar_simi(self, delta, point, peak_idx, idx=0):
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
delta = delta.detach().cpu().numpy()
# rotation
delta[2, :] = point[:, 1] - delta[2, :] * cfg.POINT.STRIDE_LP
delta[3, :] = point[:, 1] + delta[3, :] * cfg.POINT.STRIDE_LP
delta[0, :] = point[:, 0] - delta[0, :] * cfg.POINT.STRIDE_LP
delta[1, :] = point[:, 0] + delta[1, :] * cfg.POINT.STRIDE_LP
scale = delta[0, :]
rotation = delta[2, :]
rotation = rotation * (2 * np.pi / cfg.TRAIN.EXEMPLAR_SIZE)
mag = np.log(cfg.TRAIN.EXEMPLAR_SIZE / 2) / cfg.TRAIN.EXEMPLAR_SIZE
delta[0, :] = np.exp(scale * mag)
delta[1, :] = delta[0, :]
delta[2, :] = rotation
return delta
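    # Decoding sketch (comment only, not from the original source): with
    # EXEMPLAR_SIZE = 127, an offset of d columns along the radial axis of the
    # log-polar map decodes to a scale of exp(d * log(63.5) / 127), and an
    # offset of d rows along the angular axis to a rotation of d * 2*pi / 127;
    # e.g. d = 63.5 rows corresponds to half a turn (pi radians).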
def _convert_logpolar_simi_in_lp(self, delta, point, peak_idx, idx=0):
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
delta = delta.detach().cpu().numpy()
# rotation
delta[2, :] = point[:, 1] - delta[2, :] * cfg.POINT.STRIDE_LP
delta[3, :] = point[:, 1] + delta[3, :] * cfg.POINT.STRIDE_LP
delta[0, :] = point[:, 0] - delta[0, :] * cfg.POINT.STRIDE_LP
delta[1, :] = point[:, 0] + delta[1, :] * cfg.POINT.STRIDE_LP
return delta
def _convert_score(self, score):
if self.cls_out_channels == 1:
score = score.permute(1, 2, 3, 0).contiguous().view(-1)
score = score.sigmoid().detach().cpu().numpy()
else:
score = score.permute(1, 2, 3, 0).contiguous().view(self.cls_out_channels, -1).permute(1, 0)
score = score.softmax(1).detach()[:, 1].cpu().numpy()
return score
def mask_img(self, img, points ):# cx, cy, w, h, rot
mask = np.zeros([img.shape[0], img.shape[1]])
contours = [points.astype(np.int32)]
cv2.drawContours(mask, contours, 0, (1), -1)
img[np.where(mask<=0)] = 0
return img
def get_window_scale_coef(self,region):
region = region.reshape(8,-1)
x1 = min(region[0::2])
x2 = max(region[0::2])
y1 = min(region[1::2])
y2 = max(region[1::2])
A1 = np.linalg.norm(region[0:2] - region[2:4]) * \
np.linalg.norm(region[2:4] - region[4:6])
A2 = (x2 - x1) * (y2 - y1)
s = np.sqrt(A1 / A2)
return s
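    # Example (comment only, not from the original source): for a square
    # rotated by 45 degrees, the rotated area A1 is half the area A2 of its
    # axis-aligned bounding box, so the window scale coefficient is
    # sqrt(1/2) ~= 0.707 -- the crop window shrinks for tilted targets.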
def init(self, img, bbox, poly, first_point):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
poly: (cx, cy, w, h, theta)
first_point: (x1, y1) first point of gt
"""
self.center_pos = np.array([poly[0],poly[1]])
self.init_rot = poly[4] # self.init_rot = 0
self.rot = poly[4] # self.rot = 0
polygon = cetner2poly(poly[:4])
tran = getRotMatrix(poly[0], poly[1], poly[4])
polygon = transformPoly(polygon, tran)
self.scale_coeff = self.get_window_scale_coef(polygon)
fir_dis = (polygon - first_point) ** 2
fir_dis = np.argmin(fir_dis[:,0] + fir_dis[:,1])
self.poly_shift_l = fir_dis
self.scale = 1
self.lp_shift = [0,0]
self.v = 0
self.size = np.array([poly[2], poly[3]])
self.align_size = np.array([bbox[2], bbox[3]])
# calculate z crop size
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = np.sqrt(w_z * h_z)
s_z = np.floor(s_z)
        # calculate channel average
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
z_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z, self.channel_average, islog=1)
self.model.template(z_crop)
self.init_img = img
self.init_crop_size = np.array([w_z, h_z])
self.init_size = self.size
self.init_s_z = s_z
self.init_pos = np.array([poly[0],poly[1]])
self.window_scale_factor = 1.0
self.lost = True
self.lost_count = 0
self.last_lost = False
def update_template(self):
img = self.init_img
img = img_rot_around_center(img, self.init_pos[0], self.init_pos[1], img.shape[1], img.shape[0], self.lp_shift[1])
z_crop = self.get_subwindow(img, self.init_pos,
cfg.TRACK.EXEMPLAR_SIZE,
self.init_s_z,self.channel_average, islog=1)
self.model.template(z_crop)
def update_template_window(self,sc):
img = self.init_img
z_crop = self.get_subwindow(img, self.init_pos,
cfg.TRACK.EXEMPLAR_SIZE,
self.init_s_z*sc ,self.channel_average, islog=1)
self.model.template(z_crop)
def track_new(self, fr_idx, img, gt_box, gt_poly):
"""
args:
img(np.ndarray): BGR image
return:
bbox(list):[x, y, width, height]
"""
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        self.window_scale_factor = 1
s_z = np.floor(np.sqrt(w_z * h_z))
INS_EXAM_RATIO = np.round(cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
s_x = np.floor(s_z * INS_EXAM_RATIO * self.window_scale_factor) #window_scale_factor may influence the accuracy due to the floor in sub window
self.window_scale_factor = s_x / (s_z * INS_EXAM_RATIO)
#without rot template(if update the template state)
x_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.INSTANCE_SIZE,
s_x, self.channel_average)
outputs = self.model.track_new(x_crop) # add log-polar translation
score = self._convert_score(outputs['cls'])
pred_c = self._convert_c(outputs['loc_c'], self.points)
pscore = score
# window penalty
pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx = np.argmax(pscore)
stop_update_flag = 0
if pscore[best_idx] < 0.05 :
center = [0,0]
stop_update_flag = 1
else:
center = pred_c[:, best_idx] / scale_z * self.window_scale_factor
new_window_scale_factor = 1
if pscore[best_idx] < cfg.TRACK.SCALE_SCORE_THRESH:
new_window_scale_factor = 1.5
            if self.lost_count == 0:
                self.last_lost = True
            self.lost_count += 1
if not self.last_lost and self.lost_count < 5:
self.lost_count = 0
self.last_lost = False
if fr_idx == 1:
self.v = math.sqrt(center[0] * center[0] + center[1] * center[1])
else:
self.v = (self.v + math.sqrt(center[0] * center[0] + center[1] * center[1])) / 2
cx = center[0] + self.center_pos[0]
cy = center[1] + self.center_pos[1]
# smooth bbox
self.center_pos = np.array([cx, cy])
# lp_result
x_crop_moved = self.get_subwindow(img, self.center_pos,
cfg.TRACK.INSTANCE_SIZE,
s_x, self.channel_average)
#if update template
outputs = self.model.track_new_lp(x_crop_moved, [0,0])
score_lp = self._convert_score(outputs['cls_lp'])
peak_map = score_lp.copy()
peak_idx = np.argmax(peak_map)
pred_center_lp = self._convert_logpolar_simi(outputs['loc_lp'], self.points_lp, peak_idx, fr_idx)
pscore_lp = score_lp
best_idx_lp = np.argmax(score_lp)
sim_lp = pred_center_lp[:, best_idx_lp]
if stop_update_flag or pscore_lp[best_idx_lp] < 0.25:
sim_lp = [1, 1, 0, 0]
width = self.size[0] * sim_lp[0] * self.window_scale_factor
height = self.size[1] * sim_lp[1] * self.window_scale_factor
width = max(10*self.init_size[0]/self.init_size[1], min(width, img.shape[:2][1]))
height = max(10, min(height, img.shape[:2][0]))
# clip boundary
self.size = np.array([width, height])
self.lp_shift[1] += sim_lp[2]
self.rot += sim_lp[2]
self.scale = width / self.init_size[0]
bbox = [cx - width / 2,
cy - height / 2,
width,
height]
if self.rot >= 2 *math.pi :
self.rot -= math.pi * 2
self.lp_shift[1] -= math.pi * 2
elif self.rot < -2*math.pi :
self.rot += math.pi * 2
self.lp_shift[1] += math.pi * 2
best_score = score[best_idx]
polygon = cetner2poly([cx, cy, width, height])
tran = getRotMatrix(cx, cy, self.rot)
polygon = transformPoly(polygon, tran)
polygon = np.roll(polygon, 4 - self.poly_shift_l, 0)
max_p = np.max(polygon, 0)
min_p = np.min(polygon, 0)
align_bbox = [min_p[0], min_p[1],
max_p[0] - min_p[0], max_p[1] - min_p[1]]
self.align_size = [align_bbox[2],align_bbox[3]] #real
bbox_align = align_bbox
# update template state
self.update_template()
self.window_scale_factor = new_window_scale_factor
return {
'bbox': bbox,
'bbox_aligned':bbox_align,
'best_score': best_score,
'rot': self.rot,
'polygon': polygon,
}
| 12,537 | 40.379538 | 162 | py |
HDN | HDN-master/hdn/core/xcorr.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
def xcorr_slow(x, kernel):
"""for loop to calculate cross correlation, slow version
"""
batch = x.size()[0]
out = []
for i in range(batch):
px = x[i]
pk = kernel[i]
px = px.view(1, -1, px.size()[1], px.size()[2])
pk = pk.view(1, -1, pk.size()[1], pk.size()[2])
po = F.conv2d(px, pk)
out.append(po)
out = torch.cat(out, 0)
return out
def xcorr_fast(x, kernel):
"""group conv2d to calculate cross correlation, fast version
"""
batch = kernel.size()[0]
pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3])
px = x.view(1, -1, x.size()[2], x.size()[3])
po = F.conv2d(px, pk, groups=batch)
po = po.view(batch, -1, po.size()[2], po.size()[3])
return po
def xcorr_depthwise(x, kernel):
"""depthwise cross correlation
"""
batch = kernel.size(0)
channel = kernel.size(1)
x = x.view(1, batch*channel, x.size(2), x.size(3))
kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3))
out = F.conv2d(x, kernel, groups=batch*channel)
out = out.view(batch, channel, out.size(2), out.size(3))
return out
def xcorr_depthwise_circular(x, kernel):
"""depthwise cross correlation with circular
This corr is specular for logpolar coordinates
"""
batch = kernel.size(0)
channel = kernel.size(1)
# padding the input data
x = F.pad(x, (0, 0, x.size(2)//2, x.size(2)//2), "circular") # rotation is circular
    x = F.pad(x, (x.size(3)//2, x.size(3)//2, 0, 0), "replicate") # the radial axis does not wrap, so replicate the nearest border values
x = x.view(1, batch*channel, x.size(2), x.size(3))
kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3))
out = F.conv2d(x, kernel, groups=batch*channel)
out = out.view(batch, channel, out.size(2), out.size(3)) # the size should be the same as input x
return out
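if __name__ == '__main__':
    # Illustrative shape check, not part of the original file; the tensor
    # sizes below are assumptions chosen for the demo. Plain depthwise
    # correlation shrinks the response map (31 -> 25 with a 7x7 kernel),
    # while the circular variant pads first, so equal-sized inputs keep
    # their size -- the property the log-polar head relies on.
    x = torch.randn(2, 16, 31, 31)
    k = torch.randn(2, 16, 7, 7)
    print(xcorr_depthwise(x, k).shape)                  # torch.Size([2, 16, 25, 25])
    x_lp = torch.randn(2, 16, 13, 13)
    k_lp = torch.randn(2, 16, 13, 13)
    print(xcorr_depthwise_circular(x_lp, k_lp).shape)   # torch.Size([2, 16, 13, 13])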
| 2,109 | 33.032258 | 120 | py |
HDN | HDN-master/hdn/models/iou_loss.py | import torch
from torch import nn
class IOULoss(nn.Module):
def __init__(self, loc_loss_type):
super(IOULoss, self).__init__()
self.loc_loss_type = loc_loss_type
def forward(self, pred, target, weight=None):
pred_left = pred[:, 0]
pred_top = pred[:, 1]
pred_right = pred[:, 2]
pred_bottom = pred[:, 3]
target_left = target[:, 0]
target_top = target[:, 1]
target_right = target[:, 2]
target_bottom = target[:, 3]
pred_area = (pred_left + pred_right) * (pred_top + pred_bottom)
target_area = (target_left + target_right) * (target_top + target_bottom)
w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right)
g_w_intersect = torch.max(pred_left, target_left) + torch.max(pred_right, target_right)
h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)
g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top)
        ac_union = g_w_intersect * g_h_intersect + 1e-7
        area_intersect = w_intersect * h_intersect
        area_union = target_area + pred_area - area_intersect
        ious = (area_intersect + 1.0) / (area_union + 1.0)
        gious = ious - (ac_union - area_union) / ac_union
if self.loc_loss_type == 'iou':
losses = -torch.log(ious)
elif self.loc_loss_type == 'linear_iou':
losses = 1 - ious
elif self.loc_loss_type == 'giou':
losses = 1 - gious
else:
raise NotImplementedError
if weight is not None and weight.sum() > 0:
return (losses * weight).sum() / weight.sum()
else:
assert losses.numel() != 0
return losses.mean()
linear_iou = IOULoss(loc_loss_type='linear_iou')
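if __name__ == '__main__':
    # Minimal sanity check, not part of the original file. Boxes are encoded
    # as (left, top, right, bottom) distances from a sampling point, as the
    # regression head predicts them; identical boxes give IoU = 1, so the
    # linear IoU loss is 0.
    pred = torch.tensor([[5.0, 5.0, 5.0, 5.0]])
    target = torch.tensor([[5.0, 5.0, 5.0, 5.0]])
    print(linear_iou(pred, target))  # tensor(0.)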
| 1,855 | 35.392157 | 95 | py |
HDN | HDN-master/hdn/models/model_builder_e2e_unconstrained_v2.py | #Copyright 2021, XinruiZhan
'''
Designed for end-to-end homo-estimation.
'unconstrained' means the model can be trained whether or not the dataset provides labels.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from hdn.core.config import cfg
from hdn.models.loss import select_cross_entropy_loss, \
select_l1_loss, select_l1_loss_c, \
select_l1_loss_lp, \
select_xr_focal_fuse_smooth_l1_loss_top_k, kalyo_l1_loss
from hdn.models.loss import select_xr_focal_fuse_smooth_l1_loss
from hdn.models.backbone import get_backbone
from hdn.models.head import get_ban_head
from hdn.models.neck import get_neck
from hdn.models.logpolar import STN_Polar, getPolarImg, Polar_Pick, STN_LinearPolar
import matplotlib.pyplot as plt
from hdn.utils.point import Point
from homo_estimator.Deep_homography.Oneline_DLTv1.models.homo_model_builder import HomoModelBuilder, normMask
from hdn.utils.transform import combine_affine_c0, combine_affine_lt0, combine_affine_c0_v2
from hdn.utils.point import generate_points, generate_points_lp, lp_pick, get_center
from homo_estimator.Deep_homography.Oneline_DLTv1.utils import DLT_solve
from homo_estimator.Deep_homography.Oneline_DLTv1.utils import transform as Homo_STN
criterion_l2 = nn.MSELoss(reduction='mean')
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=1, reduction='none')
class ModelBuilder(nn.Module):
# @profile
def __init__(self):
super(ModelBuilder, self).__init__()
# build backbone
self.backbone = get_backbone(cfg.BACKBONE.TYPE,
**cfg.BACKBONE.KWARGS) #200M cpu mem
self.logpolar_instance = STN_Polar(cfg.TRACK.INSTANCE_SIZE)
self.getPolar = Polar_Pick()
# build adjust layer
if cfg.ADJUST.ADJUST:
self.neck = get_neck(cfg.ADJUST.TYPE,
**cfg.ADJUST.KWARGS)
self.neck_lp = get_neck(cfg.ADJUST.TYPE,
**cfg.ADJUST.KWARGS, cut=False)
self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \
cfg.POINT.STRIDE + 1 + cfg.TRACK.BASE_SIZE
self.cls_out_channels = cfg.BAN.KWARGS.cls_out_channels
self.points = generate_points(cfg.POINT.STRIDE, self.score_size)
self.p = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.EXEMPLAR_SIZE // 2)
self.points_lp = generate_points_lp(cfg.POINT.STRIDE_LP, cfg.POINT.STRIDE_LP,
cfg.TRAIN.OUTPUT_SIZE_LP) # self.p.points.transpose((1, 2, 0)).reshape(-1, 2)
# build ban head
if cfg.BAN.BAN:
self.head = get_ban_head(cfg.BAN.TYPE,
**cfg.BAN.KWARGS)
self.head_lp = get_ban_head('MultiCircBAN', **cfg.BAN.KWARGS)
self.hm_net = HomoModelBuilder(pretrained=True)
def _convert_score(self, score):
score = score.contiguous().view(score.shape[0], self.cls_out_channels, -1).permute(0, 2, 1) # [28, 625, 2] or [28, 169, 2]
score = score[:,:,1]
return score
def _convert_c(self, delta, point):
delta = delta.contiguous().view(delta.shape[0], 2, -1) #(28, 2, 625)
point = torch.from_numpy(point).cuda() #(625, 2)
        delta[:, 0, :] = point[:, 0] - delta[:, 0, :] * 8  # 8: the point stride (presumably cfg.POINT.STRIDE)
        delta[:, 1, :] = point[:, 1] - delta[:, 1, :] * 8
return delta
def feature_extractor(self, x):
xf = self.backbone(x)
return xf #+ xf_lp
def template(self, z):
z_lp = z[:, 3:6, :, :]
z = z[:, 0:3, :, :]
zf = self.feature_extractor(z)
zf_lp = self.feature_extractor(z_lp)
if cfg.ADJUST.ADJUST:
zf = self.neck(zf)
            zf_lp = self.neck_lp(zf_lp)  # note: it is an open question whether the uncut neck is the proper choice here
self.zf = zf
self.zf_lp = zf_lp
def update_template(self, z, rot):
zf = self.feature_extractor(z)
polar = torch.zeros(2).unsqueeze(0).cuda() #assuming the target has been moved to the center in the img.
z_lp,_ = self.logpolar_instance(z, polar, rot)
zf_lp = self.feature_extractor(z_lp)
if cfg.ADJUST.ADJUST:
zf = self.neck(zf)
zf_lp = self.neck_lp(zf_lp)
self.zf = zf
self.zf_lp = zf_lp
def track(self, x, delta=[0,0]):
xf = self.feature_extractor(x)
if cfg.ADJUST.ADJUST:
xf = self.neck(xf)
cls, loc, cls_c, loc_c = self.head(self.zf, xf)
polar = self.getPolar.get_polar_from_two_para_loc(cls, loc)
x_lp, _ = self.logpolar_instance(x, polar, delta)
xf_lp = self.feature_extractor(x_lp)
if cfg.ADJUST.ADJUST:
xf_lp = self.neck_lp(xf_lp)
cls_lp, loc_lp = self.head_lp(self.zf_lp, xf_lp)
return {
'cls': cls,
'loc': loc,
'cls_c': cls_c,
'loc_c': loc_c,
'cls_lp': cls_lp,
'loc_lp': loc_lp
}
def track_new(self, x, delta=[0,0]):
# x: [1, 3, 255, 255]
xf = self.feature_extractor(x)
if cfg.ADJUST.ADJUST:
xf = self.neck(xf)
cls, loc_c = self.head(self.zf, xf)
return {
'cls': cls,
'loc_c': loc_c,
}
def track_new_lp(self, x, delta=[0,0]):
polar = torch.zeros(2).unsqueeze(0).cuda() #assuming the target has been moved to the center in the img.
x_lp, grid = self.logpolar_instance(x, polar, delta)
xf_lp = self.feature_extractor(x_lp)
if cfg.ADJUST.ADJUST:
xf_lp = self.neck_lp(xf_lp)
cls_lp, loc_lp = self.head_lp(self.zf_lp, xf_lp)
return {
'x_lp': x_lp,
'cls_lp': cls_lp,
'loc_lp': loc_lp,
'grid': grid,
}
def track_proj(self, data, tmp_mask):
org_imgs = data['org_imgs']
input_tensors = data['input_tensors']
h4p = data['h4p']
patch_inds = data['patch_indices']
batch_size, _, img_h, img_w = org_imgs.size()
_, _, patch_size_h, patch_size_w = input_tensors.size()
y_t = torch.arange(0, batch_size * img_w * img_h,
img_w * img_h)
batch_inds_tensor = y_t.unsqueeze(1).expand(y_t.shape[0], patch_size_h * patch_size_w).reshape(-1)
M_tensor = torch.tensor([[img_w / 2.0, 0., img_w / 2.0],
[0., img_h / 2.0, img_h / 2.0],
[0., 0., 1.]])
if torch.cuda.is_available():
M_tensor = M_tensor.cuda()
batch_indices_tensor = batch_inds_tensor.cuda()
# Inverse of M
#fixme
patch_1 = self.hm_net.ShareFeature(input_tensors[:, :1, ...])
patch_2 = self.hm_net.ShareFeature(input_tensors[:, 1:, ...])
#without mask
patch_1_res = patch_1
patch_2_res = patch_2
x = torch.cat((patch_1_res, patch_2_res), dim=1)
x = self.hm_net.backbone(x)
#fixme
x = self.hm_net.avgpool(x)
x = x.view(x.size(0), -1)
#fixme
x = self.hm_net.fc(x)
H_mat = DLT_solve(h4p, x).squeeze(1) #
w_h_scala = torch.tensor(63.5)
M_tensor = torch.tensor([[w_h_scala, 0., w_h_scala],
[0., w_h_scala, w_h_scala],
[0., 0., 1.]])
if torch.cuda.is_available():
M_tensor = M_tensor.cuda()
batch_indices_tensor = batch_inds_tensor.cuda()
M_tile = M_tensor.unsqueeze(0).expand(batch_size, M_tensor.shape[-2], M_tensor.shape[-1])
# Inverse of M
M_tensor_inv = torch.inverse(M_tensor)
M_tile_inv = M_tensor_inv.unsqueeze(0).expand(batch_size, M_tensor_inv.shape[-2],
M_tensor_inv.shape[-1])
pred_I2 = Homo_STN(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
org_imgs[:, :1, ...], patch_inds, batch_indices_tensor)
pred_I2_CnnFeature = self.hm_net.ShareFeature(pred_I2)
delta_mat = torch.abs(patch_2 - pred_I2_CnnFeature)[0][0]
similarity_norm = torch.sum(delta_mat) / (127*127) # wo mask
delta_sim_mat = torch.abs(patch_2 - patch_1)[0][0]
similarity_norm_simi = torch.sum(delta_sim_mat) / (127*127) # wo mask
return H_mat, similarity_norm, similarity_norm_simi
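    # Note (comment only, not from the original source): the regressor output
    # `x` above holds the predicted displacements of the four patch corners
    # (8 numbers), and DLT_solve turns those corner correspondences into a
    # 3x3 homography; a zero displacement vector would therefore yield the
    # identity matrix up to the usual H[2, 2] normalisation.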
def log_softmax(self, cls):
if cfg.BAN.BAN:
cls = cls.permute(0, 2, 3, 1).contiguous()
cls = F.log_softmax(cls, dim=3)
return cls
def softmax(self, cls):
if cfg.BAN.BAN:
#cls [28, 2, 25, 25]
cls = cls.permute(0, 2, 3, 1).contiguous()
cls = F.softmax(cls, dim=3)
return cls
def stn_with_theta(self, x, theta, size):
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, size)
x = F.grid_sample(x, grid)
return x
def make_mesh(self, patch_w, patch_h):
x_flat = np.arange(0, patch_w)
x_flat = x_flat[np.newaxis, :]
y_one = np.ones(patch_h)
y_one = y_one[:, np.newaxis]
x_mesh = np.matmul(y_one, x_flat)
y_flat = np.arange(0, patch_h)
y_flat = y_flat[:, np.newaxis]
x_one = np.ones(patch_w)
x_one = x_one[np.newaxis, :]
y_mesh = np.matmul(y_flat, x_one)
return x_mesh, y_mesh
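    # Sketch (comment only, not from the original source): make_mesh(3, 2)
    # returns x_mesh = [[0, 1, 2], [0, 1, 2]] and y_mesh = [[0, 0, 0],
    # [1, 1, 1]], i.e. per-pixel column/row indices that get_homo_data
    # flattens into patch_indices = (y + y0) * W + (x + x0) for gathering
    # patch pixels out of the full image.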
def getPatchFromFullimg(self, patch_size_h, patch_size_w, patchIndices, batch_indices_tensor, img_full):
num_batch, num_channels, height, width = img_full.size()
warped_images_flat = img_full.reshape(-1)
patch_indices_flat = patchIndices.reshape(-1)
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
mask_patch = torch.gather(warped_images_flat, 0, pixel_indices)
mask_patch = mask_patch.reshape([num_batch, 1, patch_size_h, patch_size_w])
return mask_patch
    def normMask(self, mask, strength=0.5):
        """
        Normalise the mask by a fraction of its per-sample maximum so that
        more of the high-response region receives attention.
        """
        batch_size, c_m, c_h, c_w = mask.size()
        max_value = mask.reshape(batch_size, -1).max(1)[0]
        max_value = max_value.reshape(batch_size, 1, 1, 1)
        mask = mask / (max_value * strength)
        mask = torch.clamp(mask, 0, 1)
        return mask
# @profile
def get_homo_data(self,warped_template, warped_search, if_pos, if_unsup, template_window=None, search_window=None):
_WIDTH, _HEIGHT = 127, 127
_patch_w, _patch_h = 127, 127
_rho = 0
_x_mesh, _y_mesh = self.make_mesh(_patch_w, _patch_h)
template_mean = torch.mean(warped_template, 1, keepdim=True)
search_mean = torch.mean(warped_search, 1, keepdim=True)
org_imges = torch.cat([template_mean, search_mean], dim=1).float()
h, w = org_imges.shape[2], org_imges.shape[3]
batch_size = org_imges.shape[0]
x, y = 0, 0
y_t_flat = np.reshape(_y_mesh, [-1])
x_t_flat = np.reshape(_x_mesh, [-1])
patch_indices = torch.from_numpy((y_t_flat + y) * w + (x_t_flat + x)).unsqueeze(0).repeat(batch_size, 1).cuda()
top_left_point = (x, y)
bottom_left_point = (x, y + _patch_h)
bottom_right_point = (_patch_w + x, _patch_h + y)
top_right_point = (x + _patch_w, y)
four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
four_points = np.reshape(four_points, (-1))
h4p = torch.from_numpy(four_points).unsqueeze(0).repeat(batch_size, 1).float().cuda()
input_tensors = org_imges[:, :, y: y + _patch_h, x: x + _patch_w]
I = org_imges[:, 0, ...]
I = I[:, np.newaxis, ...]
I2_ori_img = org_imges[:, 1, ...]
I2_ori_img = I2_ori_img[:, np.newaxis, ...]
I1 = input_tensors[:, 0, ...]
I1 = I1[:, np.newaxis, ...]
I2 = input_tensors[:, 1, ...]
I2 = I2[:, np.newaxis, ...]
if torch.cuda.is_available():
input_tensors = input_tensors.cuda()
patch_indices = patch_indices.cuda()
h4p = h4p.cuda()
org_imges = org_imges.cuda()
data = {}
data['org_imgs'] = org_imges
data['input_tensors'] = input_tensors
data['h4p'] = h4p
data['patch_indices'] = patch_indices
data['template_window'] = template_window
data['search_window'] = search_window
data['if_pos'] = if_pos
data['if_unsup'] = if_unsup
return data
def get_simi_data(self,template, search, warped_search):
_WIDTH, _HEIGHT = 127, 127
_patch_w, _patch_h = 127, 127
_rho = 0
_x_mesh, _y_mesh = self.make_mesh(_patch_w, _patch_h)
template_mean = torch.mean(template, 1, keepdim=True)
search_mean = torch.mean(search, 1, keepdim=True)
warped_search_mean = torch.mean(warped_search, 1, keepdim=True)
return template_mean, search_mean, warped_search_mean
def forward(self, data):
""" only used in training
"""
##get data from dataset
template = data['template'].cuda()
template_lp = data['template_lp'].cuda()
search = data['search'].cuda()
label_cls = data['label_cls'].cuda()
label_loc_c = data['label_loc_c'].cuda()
label_cls_lp = data['label_cls_lp'].cuda()
label_loc_lp = data['label_loc_lp'].cuda()
template_poly = data['template_poly'].cuda()
search_poly = data['search_poly'].cuda()
search_hm = data['search_hm'].cuda()
template_hm = data['template_hm'].cuda()
template_window = data['template_window'].cuda().float()
search_window = data['search_window'].cuda().float()
if_pos = data['if_pos'].cuda().float()
if_neg = if_pos.eq(0)
if_unsup = data['if_unsup'].cuda().float()
if_sup = if_unsup.eq(0)
temp_cx = data['temp_cx'].cuda().float()
temp_cy = data['temp_cy'].cuda().float()
tmp = label_cls.unsqueeze(1)
tmp = tmp.expand(tmp.size(0), 2, tmp.size(2), tmp.size(3))
batch_sz = cfg.TRAIN.BATCH_SIZE
cur_device = search.device
# get feature
zf = self.feature_extractor(template)
zf_lp = self.feature_extractor(template_lp)
xf = self.feature_extractor(search)
##cut feature
if cfg.ADJUST.ADJUST:
zf = self.neck(zf)
xf = self.neck(xf)
##normal head
cls, loc = self.head(zf, xf)
## log-polar branch
#fixme e2e or separate
# polar = self.getPolar(tmp, label_loc)#sample according to the label(original method)
polar = self.getPolar.get_polar_from_two_para_loc(cls, loc*cfg.POINT.STRIDE)
# # polar = self.getPolar(cls, loc*cfg.POINT.STRIDE) #sample according to the cls_map. we div the stride before.
x_lp, _ = self.logpolar_instance(search, polar)#[8, 3, 127, 127]
xf_lp = self.feature_extractor(x_lp)
#neck_lp
if cfg.ADJUST.ADJUST:
zf_lp = self.neck_lp(zf_lp)
xf_lp = self.neck_lp(xf_lp)
#head_lp
cls_lp, loc_lp = self.head_lp(zf_lp, xf_lp)#cls_lp :[batch_sz,2,13,13],loc_lp:[batch_sz,4,13,13]
scale, rot = lp_pick(cls_lp, loc_lp, cfg.BAN.KWARGS.cls_out_channels, cfg.POINT.STRIDE, cfg.POINT.STRIDE_LP, cfg.TRAIN.OUTPUT_SIZE_LP, cfg.TRAIN.EXEMPLAR_SIZE)
##homo-estimator
if cfg.TRAIN.OBJ == 'ALL' or cfg.TRAIN.OBJ == 'HOMO':
scale_h = True
scale_ones = torch.ones([batch_sz]).float().cuda()
rot_zero = torch.zeros([batch_sz]).float().cuda()
polar_zero = torch.zeros([batch_sz,2]).float().cuda()
#FIXME use SIM-estimator
affine_m = combine_affine_c0_v2(cfg.TRACK.EXEMPLAR_SIZE/2, cfg.TRACK.EXEMPLAR_SIZE/2, polar, scale, rot, scale_h, cfg.TRACK.INSTANCE_SIZE, cfg.TRACK.EXEMPLAR_SIZE)#warp the search image to 127*127 (self, nm_shift, scale, rot, scale_h, in_sz, out_sz):
affine_m_lt0 = combine_affine_lt0(cfg.TRACK.EXEMPLAR_SIZE/2, cfg.TRACK.EXEMPLAR_SIZE/2, polar, 1/scale, -rot, cfg.TRACK.INSTANCE_SIZE, cfg.TRACK.EXEMPLAR_SIZE)# warp the poly in search to poly in 127*127
            affine_m_c_aug = combine_affine_c0_v2(temp_cx, temp_cy, polar_zero, scale_ones, rot_zero, scale_h, cfg.TRACK.INSTANCE_SIZE, cfg.TRACK.EXEMPLAR_SIZE)  # warp the search to the exemplar size, taking the template location into account
affine_m_lt0_aug = combine_affine_lt0(temp_cx, temp_cy, polar_zero, scale_ones, rot_zero, cfg.TRACK.INSTANCE_SIZE, cfg.TRACK.EXEMPLAR_SIZE)
grid_size = torch.Size([cfg.TRAIN.BATCH_SIZE, 1, cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE])
warped_search_ori = self.stn_with_theta(search_hm, affine_m, grid_size)#directly warped the search_hm
search_hm_simi_ori = self.stn_with_theta(search_hm, affine_m_c_aug, grid_size) #to 127*127
poly_ones_tmp_var = torch.ones([cfg.TRAIN.BATCH_SIZE,1,4]).cuda()
affine_m_ones_tmp_var = torch.tensor([0,0,1]).float().repeat([cfg.TRAIN.BATCH_SIZE, 1, 1]).cuda()
search_poly_hmg = torch.cat([search_poly.permute(0,2,1), poly_ones_tmp_var],1)
search_lt0_hm = torch.cat([affine_m_lt0, affine_m_ones_tmp_var],1)
search_aug_lt0_hm = torch.cat([affine_m_lt0_aug, affine_m_ones_tmp_var],1)#only resize the search poly
aug_sear_points = torch.bmm(search_aug_lt0_hm, search_poly_hmg)
pred_points = torch.bmm(search_lt0_hm, search_poly_hmg)#
            template_hm_simi, search_hm_simi, warped_search_simi = self.get_simi_data(template_hm, search_hm_simi_ori, warped_search_ori)
search_window = search_window.unsqueeze(1).float()
search_window = self.stn_with_theta(search_window, affine_m, grid_size)
#ShareFeature
tmp_f = self.hm_net.ShareFeature(template_hm_simi)
sear_f = self.hm_net.ShareFeature(search_hm_simi)
#without mask
masked_tmp_f = tmp_f
masked_sear_f = sear_f
            pre_sear_f = self.hm_net.ShareFeature(warped_search_simi)
#negative sample
unsup_pos_ids = (if_pos*if_unsup).eq(1).nonzero().squeeze(1)
unsup_ids = if_unsup.eq(1).nonzero().squeeze(1)
if unsup_pos_ids.shape[0] != 0:
masked_tmp_f = masked_tmp_f[unsup_pos_ids]
masked_sear_f = masked_sear_f[unsup_pos_ids]
pre_sear_f = pre_sear_f[unsup_pos_ids]
            sim_loss_mat = triplet_loss(masked_tmp_f, pre_sear_f, masked_sear_f)  # the warped-back search is the positive because warping should align it with the template
#fixme
sim_loss = torch.sum(sim_loss_mat) / (127*127) / unsup_pos_ids.shape[0]
corner_pts_simi = pred_points.permute(0,2,1)[:, :, :-1] #
corner_pts_simi_pos = corner_pts_simi[unsup_pos_ids]
template_poly_simi_pos = template_poly[unsup_pos_ids]
corner_error_simi = torch.sum(torch.abs(corner_pts_simi_pos - template_poly_simi_pos)) / unsup_pos_ids.shape[0] / 4
#todo normalization
if cfg.TRAIN.MODEL_TYPE == 'E2E':
warped_search = warped_search_ori
warped_template = template_hm
homo_data = self.get_homo_data(warped_template, warped_search, if_pos, if_unsup, template_window, search_window)
#fixme use entire homo-net
batch_out = self.hm_net(homo_data)
H_mat = batch_out['H_mat'] #search-> template
if len(unsup_ids) > 0:
loss_feature = batch_out['feature_loss'].mean()
            sup_ids = if_unsup.eq(0).nonzero().squeeze(1)  # note: neg_ids below are designed for the similarity estimator
if len(sup_ids) > 0 :
cls_sup = cls[sup_ids]
label_cls_sup = label_cls[sup_ids]
label_cls_lp_sup = label_cls_lp[sup_ids]
label_loc_c_sup = label_loc_c[sup_ids]
label_loc_lp_sup = label_loc_lp[sup_ids]
cls_lp_sup = cls_lp[sup_ids]
loc_sup = loc[sup_ids]
loc_lp_sup = loc_lp[sup_ids]
cls_sup = self.softmax(cls_sup)#[n, 25, 25, 2]
cls_loss_sup = select_xr_focal_fuse_smooth_l1_loss_top_k(cls_sup, label_cls_sup)
loc_loss_sup = select_l1_loss_c(loc_sup, label_loc_c_sup, label_cls_sup)
if cfg.TRAIN.WEIGHTED_MAP_LP:
cls_lp_sup = self.softmax(cls_lp_sup)
cls_loss_lp_sup = select_xr_focal_fuse_smooth_l1_loss(cls_lp_sup, label_cls_lp_sup)
loc_loss_lp_sup = select_l1_loss_lp(loc_lp_sup, label_loc_lp_sup, label_cls_lp_sup)
else:
cls_lp_sup = self.log_softmax(cls_lp_sup)
cls_loss_lp_sup = select_cross_entropy_loss(cls_lp_sup, label_cls_lp_sup)
loc_loss_lp_sup = select_l1_loss(loc_lp_sup, label_loc_lp_sup, label_cls_lp_sup)
sup_pos_ids = (if_sup*if_pos).eq(1).nonzero().squeeze(1)
sup_neg_ids = (if_sup*if_neg).eq(1).nonzero().squeeze(1)
if len(sup_pos_ids) > 0 :
##corner error for supervised data(the poly)
corner_pts = torch.bmm(H_mat, pred_points).permute(0,2,1)
corner_pts = corner_pts / (corner_pts[:,:,2].unsqueeze(2)) #normalize
corner_pts = corner_pts[:, :, :-1]
corner_pts_pos = corner_pts[sup_pos_ids]
template_poly_pos = template_poly[sup_pos_ids]
corner_error = torch.sum(torch.abs(corner_pts_pos - template_poly_pos)) / sup_pos_ids.shape[0] / 4
corner_pts_aug = aug_sear_points.permute(0,2,1)[:, :, :-1]
corner_pts_aug_pos = corner_pts_aug[sup_pos_ids]
corner_error_aug = torch.sum(torch.abs(corner_pts_aug_pos - template_poly_pos)) / sup_pos_ids.shape[0] / 4
#supervised_loss
if len(sup_pos_ids) > 0 :
#corner error for supervised data(the crop)
#H_gt
homo_points = pred_points.permute(0,2,1)
homo_points = (homo_points / (homo_points[:,:,2].unsqueeze(2)))[:,:,:-1]
H_gt = DLT_solve(template_poly.reshape(-1,8), (homo_points - template_poly).reshape(-1,8)).squeeze(1)[sup_pos_ids] #H: search -> template
#construct a 127*127 search points.
search_corner = torch.tensor([[0,0], [0,127], [127,127], [127,0]]).float().unsqueeze(0).repeat((H_gt.shape[0], 1, 1)).to(H_gt.device)
poly_ones_tmp_var = torch.ones([H_gt.shape[0],1,4]).cuda()
search_corner_hmg = torch.cat([search_corner.permute(0,2,1), poly_ones_tmp_var],1)
            # warp the search corners according to H_gt
search_corner_warped = torch.bmm(H_gt, search_corner_hmg).permute(0,2,1)
search_corner_warped = search_corner_warped / (search_corner_warped[:,:,2].unsqueeze(2)) #normalize
search_corner_warped = search_corner_warped[:, :, :-1]
            # corner displacement (delta) error
search_corner_delta = (search_corner_warped - search_corner).reshape(-1,8)
pred_delta = batch_out['x'][sup_pos_ids]
corner_loss_pos = kalyo_l1_loss(pred_delta, search_corner_delta)
neg_ids = if_neg.eq(1).nonzero().squeeze(1)
if len(neg_ids):
pred_neg_delta = batch_out['x'][neg_ids]
neg_zeros = torch.zeros_like(pred_neg_delta).to(cur_device)
corner_loss_neg = kalyo_l1_loss(pred_neg_delta, neg_zeros)
outputs = {}
if cfg.TRAIN.OBJ == 'ALL':
outputs['cls_loss_sup'] = torch.tensor(0.0).to(cur_device)
outputs['loc_loss_sup'] = torch.tensor(0.0).to(cur_device)
outputs['cls_loss_lp_sup'] = torch.tensor(0.0).to(cur_device)
outputs['loc_loss_lp_sup'] = torch.tensor(0.0).to(cur_device)
outputs['sim_cent_loss'] = torch.tensor(0.0).to(cur_device)
outputs['sim_loss'] = torch.tensor(0.0).to(cur_device)
outputs['cor_err_aug'] = torch.tensor(0.0).to(cur_device)
outputs['cor_err_sim'] = torch.tensor(0.0).to(cur_device)
outputs['cor_err'] = torch.tensor(0.0).to(cur_device)
outputs['cor_loss'] = torch.tensor(0.0).to(cur_device)
outputs['cor_pos_loss'] = torch.tensor(0.0).to(cur_device)
outputs['cor_neg_loss'] = torch.tensor(0.0).to(cur_device)
outputs['homo_unsup_loss'] = torch.tensor(0.0).to(cur_device)
if len(sup_ids) > 0 :
outputs['cls_loss_sup'] = cls_loss_sup
outputs['loc_loss_sup'] = loc_loss_sup
outputs['cls_loss_lp_sup'] = cls_loss_lp_sup
outputs['loc_loss_lp_sup'] = loc_loss_lp_sup
outputs['sim_cent_loss'] = cfg.TRAIN.CLS_WEIGHT * (cls_loss_lp_sup + cls_loss_sup) + \
cfg.TRAIN.LOC_WEIGHT * (loc_loss_sup + loc_loss_lp_sup)#supervised
if len(sup_pos_ids) > 0:
outputs['cor_err_aug'] = corner_error_aug #supervised
outputs['cor_err'] = corner_error #supervised
outputs['cor_loss'] += corner_loss_pos
outputs['cor_pos_loss'] = corner_loss_pos
if len(sup_neg_ids) > 0:
outputs['cor_loss'] += corner_loss_neg
outputs['cor_neg_loss'] = corner_loss_neg
if len(unsup_ids) > 0:
outputs['homo_unsup_loss'] = loss_feature
if len(unsup_pos_ids) > 0:
outputs['sim_loss'] = sim_loss #unsupervised
outputs['cor_err_sim'] = corner_error_simi #supervised
outputs['total_loss'] = outputs['sim_cent_loss']*100 + outputs['homo_unsup_loss'] + outputs['cor_neg_loss'] + outputs['cor_err']/4
return outputs | 26,131 | 45.415631 | 262 | py |
HDN | HDN-master/hdn/models/init_weight.py | import torch.nn as nn
def init_weights(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
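if __name__ == '__main__':
    # Tiny smoke test, not part of the original file: after init_weights the
    # BatchNorm scale parameters of a fresh model should all be 1.
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    init_weights(model)
    print(model[1].weight.data.unique())  # tensor([1.])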
| 386 | 31.25 | 56 | py |
HDN | HDN-master/hdn/models/loss.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from hdn.core.config import cfg
from hdn.models.iou_loss import linear_iou
from torch.autograd import Variable
def get_cls_loss(pred, label, select):
if len(select.size()) == 0 or \
select.size() == torch.Size([0]):
return 0
pred = torch.index_select(pred, 0, select)
label = torch.index_select(label, 0, select)
return F.nll_loss(pred, label)
def select_cross_entropy_loss(pred, label):
pred = pred.view(-1, 2)
label = label.view(-1)
pos = label.data.eq(1).nonzero().squeeze().cuda()
neg = label.data.eq(0).nonzero().squeeze().cuda()
loss_pos = get_cls_loss(pred, label, pos)
loss_neg = get_cls_loss(pred, label, neg)
return loss_pos * 0.5 + loss_neg * 0.5
def select_xr_focal_fuse_smooth_l1_loss_top_k(pred_cls, label_cls, delta_weight=0.1):
"""
smooth_l1_loss, we only choose the top_K neg_loss as neg_loss cause too many neg points pull the loss down.
:param pred_cls:
:param label_cls:
:param delta_weight:
:return:
"""
batch_size = label_cls.shape[0]
label_cls = label_cls.reshape(-1)
label_cls_new = label_cls.clone()
pred_cls = pred_cls.view(-1,2)
neg = label_cls.data.eq(0).nonzero().squeeze().cuda()
pos = label_cls.data.gt(0).nonzero().squeeze().cuda()
cur_device = pred_cls.device
zero_loss = torch.tensor(0.0).to(cur_device)
if len(pos.size()) == 0 or \
pos.size() == torch.Size([0]):
pos_loss = zero_loss
else:
pred_cls_pos = torch.index_select(pred_cls, 0, pos)[:, 1]
absolute_loss_pos = torch.abs(label_cls_new[pos] - pred_cls_pos)
reg_loss_pos = absolute_loss_pos# use l1 loss
pos_loss = reg_loss_pos.sum()/ (reg_loss_pos.shape[0]+1)
if len(neg.size()) == 0 or \
neg.size() == torch.Size([0]):
        neg_loss = zero_loss  # no negative samples in this batch
else:
pred_cls_neg = torch.index_select(pred_cls, 0, neg)[:, 1]
pred_cls_neg = pred_cls_neg.clamp(min=0.000001, max=0.9999999)
reg_loss_neg = - torch.log(1 - pred_cls_neg)
reg_loss_neg = torch.topk(reg_loss_neg, batch_size*100).values
neg_loss = reg_loss_neg.sum() / (reg_loss_neg.shape[0]+1)
reg_loss = pos_loss + neg_loss
return reg_loss
def select_xr_focal_fuse_smooth_l1_loss(pred_cls, label_cls, delta_weight=0.1):
label_cls = label_cls.reshape(-1)
label_cls_new = label_cls.clone()
pred_cls = pred_cls.view(-1,2)
neg = label_cls.data.eq(0).nonzero().squeeze().cuda()
pos = label_cls.data.gt(0).nonzero().squeeze().cuda()
cur_device = pred_cls.device
zero_loss = torch.tensor(0.0).to(cur_device)
pos_loss = zero_loss
neg_loss = zero_loss
if len(pos.size()) == 0 or \
pos.size() == torch.Size([0]):
pos_loss = zero_loss
else:
pred_cls_pos = torch.index_select(pred_cls, 0, pos)[:, 1]
absolute_loss_pos = torch.abs(label_cls_new[pos] - pred_cls_pos)
square_loss_pos = 0.5 * ((label_cls_new[pos] - pred_cls_pos)) ** 2
inds_pos = absolute_loss_pos.le(1).float()
reg_loss_pos = ( inds_pos * square_loss_pos + (1 - inds_pos) * (absolute_loss_pos - 0.5))
pos_loss = reg_loss_pos.sum()/ (reg_loss_pos.shape[0]+1)
if len(neg.size()) == 0 or \
neg.size() == torch.Size([0]):
        neg_loss = zero_loss  # no negative samples in this batch
else:
pred_cls_neg = torch.index_select(pred_cls, 0, neg)[:, 1]
pred_cls_neg = pred_cls_neg.clamp(min=0.000001, max=0.9999999)
absolute_loss_neg = torch.abs(label_cls_new[neg] - pred_cls_neg)
reg_loss_neg = -0.5*absolute_loss_neg * torch.log(1 - pred_cls_neg)
neg_loss = reg_loss_neg.sum() / (reg_loss_neg.shape[0]+1)
reg_loss = pos_loss + neg_loss
return reg_loss
def select_l1_loss(pred_loc, label_loc, label_cls):
label_cls = label_cls.reshape(-1)
pos = label_cls.data.gt(0).nonzero().squeeze().cuda()
pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 4)
pred_loc = torch.index_select(pred_loc, 0, pos)
label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 4)
label_loc = torch.index_select(label_loc, 0, pos)
return kalyo_l1_loss(pred_loc, label_loc) #+ 0.5 * kalyo_l1_loss(pred_loc_add, label_loc_add)
def select_l1_loss_c(pred_loc, label_loc, label_cls):
label_cls = label_cls.reshape(-1)
max_c = torch.max(label_cls.data)
pos = label_cls.data.gt(max_c - 0.2).nonzero().squeeze().cuda()
cur_device = pred_loc.device
zero_loss = torch.tensor(0.0).to(cur_device)
if len(pos.size()) == 0 or \
pos.size() == torch.Size([0]):
loss_pos = zero_loss
return loss_pos
pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 2)
pred_loc = torch.index_select(pred_loc, 0, pos)
label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 2)
label_loc = torch.index_select(label_loc, 0, pos)
absolute_loss = torch.abs(pred_loc - label_loc)
square_loss = 0.5 * ((label_loc - pred_loc)) ** 2
inds = absolute_loss.lt(1).float()
reg_loss = (inds * square_loss + (1 - inds) * (absolute_loss - 0.5))
tsz = label_loc.size()[0] * label_loc.size()[1]+1
reg_loss = reg_loss.sum()/tsz#weighted loss
return reg_loss
def select_l1_loss_lp(pred_loc, label_loc, label_cls):
label_cls = label_cls.reshape(-1)
label_cls_new = label_cls.clone()
pos = label_cls_new.data.gt(0).nonzero().squeeze().cuda()
pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 4)
pred_loc = torch.index_select(pred_loc, 0, pos)
label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 4)
label_loc = torch.index_select(label_loc, 0, pos)
absolute_loss = torch.abs(pred_loc - label_loc)
square_loss = 0.5 * ((label_loc - pred_loc)) ** 2
inds = absolute_loss.lt(1).float()
reg_loss = (inds * square_loss + (1 - inds) * (absolute_loss - 0.5))
    tsz = label_loc.size()[0] * label_loc.size()[1] + 1
    reg_loss = reg_loss.sum() / tsz  # weighted loss
return reg_loss
def kalyo_l1_loss(output, target, norm=False):
tsz = output.size()[0] * output.size()[1]+1
# w, h
absolute_loss = torch.abs(target - output)
square_loss = 0.5 * (target - output) ** 2
if norm:
absolute_loss = absolute_loss / (target[:, :2] + 1e-10)
square_loss = square_loss / (target[:, :2] + 1e-10) ** 2
inds = absolute_loss.lt(1).float()
reg_loss = (inds * square_loss + (1 - inds) * (absolute_loss - 0.5))
return reg_loss.sum()/tsz
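if __name__ == '__main__':
    # Quick numeric check, not part of the original file: for a single 4-dim
    # prediction whose absolute error is below 1 everywhere, every element
    # takes the quadratic branch 0.5 * err**2, and the sum is normalised by
    # tsz = N * D + 1 = 5.
    out = torch.zeros(1, 4)
    tgt = torch.full((1, 4), 0.4)
    print(kalyo_l1_loss(out, tgt))  # 4 * 0.5 * 0.4**2 / 5 = tensor(0.0640)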
| 8,330 | 38.112676 | 111 | py |
HDN | HDN-master/hdn/models/logpolar.py | import cv2
import numpy as np
import math
import torch.nn as nn
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
from hdn.core.config import cfg
def getPolarImg(img, original = None):
"""
some assumption that img W==H
:param img: image
:return: polar image
"""
sz = img.shape
# maxRadius = math.hypot(sz[0] / 2, sz[1] / 2)
maxRadius = sz[1]/2
m = sz[1] / math.log(maxRadius)
o = tuple(np.round(original)) if original is not None else (sz[0] // 2, sz[1] // 2)
result = cv2.logPolar(img, o, m, cv2.WARP_FILL_OUTLIERS + cv2.INTER_LINEAR )
return result
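# Mapping sketch (comment only, not from the original source): with
# maxRadius = W/2 and m = W / log(maxRadius), a source pixel at radius r from
# the centre lands near column m * log(r) of the output, and its angle is
# spread linearly over the rows, so an in-plane rotation of the input becomes
# a (circular) vertical shift and an isotropic scaling becomes a horizontal
# shift -- the property the log-polar branch of the tracker exploits.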
def getLinearPolarImg(img, original = None):
"""
some assumption that img W==H
:param img: image
:return: polar image
"""
sz = img.shape
# maxRadius = math.hypot(sz[0] / 2, sz[1] / 2)
maxRadius = sz[1]/2
o = tuple(np.round(original)) if original is not None else (sz[0] // 2, sz[1] // 2)
result = cv2.linearPolar(img, o, maxRadius, cv2.WARP_FILL_OUTLIERS + cv2.INTER_LINEAR )
return result
class STN_Polar(nn.Module):
"""
    Spatial transformer that resamples the input on a log-polar grid
"""
def __init__(self, image_sz):
super(STN_Polar, self).__init__()
self._orignal_sz = [image_sz//2, image_sz//2] # sample center position
def _prepare_grid(self, sz, delta):
assert len(sz) == 2 # W, H
x_ls = torch.linspace(0, sz[0]-1, sz[0])
y_ls = torch.linspace(0, sz[1]-1, sz[1])
# get log polar coordinates
mag = math.log(sz[0]/2) / sz[0]
# rho = (torch.exp(mag * x_ls) - 1.0) + delta[0]
rho = (torch.exp(mag * x_ls) - 1.0)
theta = y_ls * 2.0 * math.pi / sz[1] + delta[1]# add rotation
y, x = torch.meshgrid([theta, rho])
cosy = torch.cos(y)
siny = torch.sin(y)
# construct final indices
self.indices_x = torch.mul(x, cosy)
self.indices_y = torch.mul(x, siny)
def _prepare_batch_grid(self, sz, delta, batch):
assert len(sz) == 2 # W, H
x_ls = torch.linspace(0, sz[0]-1, sz[0])
y_ls = torch.linspace(0, sz[1]-1, sz[1])
# get log polar coordinates
mag = math.log(sz[0]/2) / sz[0]
rho_batch = delta[0] + (torch.exp(mag * x_ls) - 1.0)
theta_batch = delta[1] + y_ls * 2.0 * math.pi / sz[1]
        for rho, theta in zip(rho_batch, theta_batch):
y, x = torch.meshgrid([theta, rho])
cosy = torch.cos(y)
siny = torch.sin(y)
# construct final indices
self.indices_x = torch.mul(x, cosy)
self.indices_y = torch.mul(x, siny)
def get_logpolar_grid(self, polar, sz):
"""
This implementation is based on OpenCV source code to match the transformation.
        :param polar: N*2, N pairs of origin coordinates in [-1.0, 1.0]
:param sz: 4 the size of the output
:return: N*W*H*2 the grid we generated
"""
assert len(sz) == 4 # N, C, W, H
batch = sz[0]
# generate grid mesh
x = self.indices_x.cuda() # for multi-gpus
y = self.indices_y.cuda()
indices_x = x.repeat([batch, 1, 1]) + polar[:, 0].unsqueeze(1).unsqueeze(1)
indices_y = y.repeat([batch, 1, 1]) + polar[:, 1].unsqueeze(1).unsqueeze(1)
# print('indices_x.shape',indices_x.shape)
# print('indices_y.shape',indices_y.shape)
indices = torch.cat((indices_x.unsqueeze(3)/(sz[2]//2), indices_y.unsqueeze(3)/(sz[3]//2)), 3)
return indices
def forward(self, x, polar, delta=[0,0]):
self._prepare_grid(self._orignal_sz, delta)
grid = self.get_logpolar_grid(polar, x.size())#[1 127 127 2]
x = F.grid_sample(x, grid, mode='bilinear', padding_mode='border')
return x, grid
class STN_LinearPolar(nn.Module):
"""
    Spatial transformer that resamples the input on a linear polar grid
"""
def __init__(self, image_sz):
super(STN_LinearPolar, self).__init__()
self._orignal_sz = [image_sz//2, image_sz//2] # sample center position
self._prepare_grid(self._orignal_sz)
def _prepare_grid(self, sz):
assert len(sz) == 2 # W, H
x_ls = torch.linspace(0, sz[0]-1, sz[0])
y_ls = torch.linspace(0, sz[1]-1, sz[1])
# get linear polar coordinates
maxR =sz[0]/2
rho = maxR * x_ls / sz[0]
theta = y_ls * 2.0 * math.pi / sz[1]
y, x = torch.meshgrid([theta, rho])
cosy = torch.cos(y)
siny = torch.sin(y)
# construct final indices
self.indices_x = torch.mul(x, cosy)
self.indices_y = torch.mul(x, siny)
def get_logpolar_grid(self, polar, sz):
"""
This implementation is based on OpenCV source code to match the transformation.
        :param polar: N*2, N pairs of origin coordinates in [-1.0, 1.0]
:param sz: 4 the size of the output
:return: N*W*H*2 the grid we generated
"""
assert len(sz) == 4 # N, C, W, H
batch = sz[0]
# generate grid mesh
x = self.indices_x.cuda() # for multi-gpus
y = self.indices_y.cuda()
indices_x = x.repeat([batch, 1, 1]) + polar[:, 0].unsqueeze(1).unsqueeze(1)
indices_y = y.repeat([batch, 1, 1]) + polar[:, 1].unsqueeze(1).unsqueeze(1)
indices = torch.cat((indices_x.unsqueeze(3)/(sz[2]//2), indices_y.unsqueeze(3)/(sz[3]//2)), 3)
return indices
def forward(self, x, polar):
grid = self.get_logpolar_grid(polar, x.size())
# x = F.grid_sample(x, grid, mode='bilinear', padding_mode='border', align_corners=False)
x = F.grid_sample(x, grid, mode='bilinear', padding_mode='border')
return x
class Polar_Pick(nn.Module):
"""
    Picks the polar sampling centre from the classification/regression maps
"""
def __init__(self):
super(Polar_Pick, self).__init__()
points = self.generate_points(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE)
self.points = torch.from_numpy(points)
self.points_cuda = self.points.cuda()
def generate_points(self, stride, size):
# print('stride',stride,'size',size)
ori = - (size // 2) * stride # -96
x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)],
[ori + stride * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
def _getArgMax(self, r):
sizes = r.size()
batch = sizes[0]
m = r.view(batch, -1).argmax(1).view(-1, 1)
indices = torch.cat((m // sizes[2], m % sizes[2]), dim=1)
        # normalize the argmax indices to [-1, 1] around the map center
        indices = (indices - (sizes[2] - 1) / 2) / ((sizes[2] - 1) / 2)
return indices
def _getSoftArgMax(self, r):
r = r.squeeze(1)
sizes = r.size()
assert len(sizes) == 3
batch = sizes[0]
sm = r.view(batch, -1).softmax(1).view(sizes)
x_ls = torch.linspace(0, sizes[1] - 1, sizes[1])
y_ls = torch.linspace(0, sizes[2] - 1, sizes[2])
x, y = torch.meshgrid([x_ls, y_ls])
indices_x = torch.mul(sm, x.unsqueeze(0).cuda()).sum([1, 2]) / (sizes[1] - 1)
indices_y = torch.mul(sm, y.unsqueeze(0).cuda()).sum([1, 2]) / (sizes[2] - 1)
indices = torch.cat((indices_x.view(-1, 1), indices_y.view(-1, 1)), 1)
return indices
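    # Annotation: _getSoftArgMax is a differentiable (soft) argmax: it returns
    # the softmax-weighted expectation of the row/column indices as (B, 2)
    # coordinates normalized to [0, 1], whereas _getArgMax above is the hard
    # counterpart.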
def test_self_points(self):
points = self.points
points = points.permute(1,0)
plt.scatter(points[0],points[1])
    # decode the center of the best-scoring 4-parameter (l, t, r, b) box
    def forward(self, cls, loc):
sizes = cls.size()
batch = sizes[0]
score = cls.view(batch, cfg.BAN.KWARGS.cls_out_channels, -1).permute(0, 2, 1)
best_idx = torch.argmax(score[:, :, 1], 1)
idx = best_idx.unsqueeze(1)
idx = idx.unsqueeze(2)
delta = loc.view(batch, 4, -1).permute(0, 2, 1)
dummy = idx.expand(batch, 1, delta.size(2))
point = self.points.cuda()
point = point.expand(batch, point.size(0), point.size(1))
delta = torch.gather(delta, 1, dummy).squeeze(1)
point = torch.gather(point, 1, dummy[:,:,0:2]).squeeze(1)
out = torch.zeros(batch, 2).cuda()
out[:, 0] = (point[:, 0] - delta[:, 0] + point[:, 0] + delta[:, 2]) / 2
out[:, 1] = (point[:, 1] - delta[:, 1] + point[:, 1] + delta[:, 3]) / 2
return out
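    # Annotation: with point p = (px, py) and offsets (l, t, r, b), the box
    # corners are (px - l, py - t) and (px + r, py + b), so the lines above
    # return its center ((px - l + px + r) / 2, (py - t + py + b) / 2).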
    # decode the polar origin from the 2-parameter loc branch; uses the cached
    # self.points_cuda to avoid a host-to-device copy on every call
    def get_polar_from_two_para_loc(self, cls, loc):
sizes = cls.size()
batch = sizes[0]
score = cls.view(batch, cfg.BAN.KWARGS.cls_out_channels, -1).permute(0, 2, 1)
best_idx = torch.argmax(score[:, :, 1], 1)
idx = best_idx.unsqueeze(1)
idx = idx.unsqueeze(2)
delta = loc.view(batch, 2, -1).permute(0, 2, 1)
dummy = idx.expand(batch, 1, delta.size(2))
point = self.points_cuda
point = point.expand(batch, point.size(0), point.size(1))
delta = torch.gather(delta, 1, dummy).squeeze(1)
point = torch.gather(point, 1, dummy[:,:,0:2]).squeeze(1)
out = torch.zeros(batch, 2).cuda()
out[:, 0] = point[:, 0] - delta[:, 0]
out[:, 1] = point[:, 1] - delta[:, 1]
return out | 11,196 | 33.558642 | 102 | py |
HDN | HDN-master/hdn/models/backbone/resnet_atrous.py | import math
import torch.nn as nn
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50']
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1,
downsample=None, dilation=1):
super(BasicBlock, self).__init__()
padding = 2 - stride
if dilation > 1:
padding = dilation
dd = dilation
pad = padding
if downsample is not None and dilation > 1:
dd = dilation // 2
pad = dd
self.conv1 = nn.Conv2d(inplanes, planes,
stride=stride, dilation=dd, bias=False,
kernel_size=3, padding=pad)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1,
downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
padding = 2 - stride
if downsample is not None and dilation > 1:
dilation = dilation // 2
padding = dilation
        assert stride == 1 or dilation == 1, \
            "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, used_layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.feature_size = 128 * block.expansion
self.used_layers = used_layers
layer3 = True if 3 in used_layers or 4 in used_layers else False
layer4 = True if 4 in used_layers else False
if layer3:
self.layer3 = self._make_layer(block, 256, layers[2],
stride=1, dilation=2) # 15x15, 7x7
self.feature_size = (256 + 128) * block.expansion
else:
self.layer3 = lambda x: x # identity
if layer4:
self.layer4 = self._make_layer(block, 512, layers[3],
stride=1, dilation=4) # 7x7, 3x3
self.feature_size = 512 * block.expansion
else:
self.layer4 = lambda x: x # identity
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
dd = dilation
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1 and dilation == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride,
downsample, dilation=dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
# @profile
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x_ = self.relu(x)
x = self.maxpool(x_)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
p4 = self.layer4(p3)
out = [x_, p1, p2, p3, p4]
out = [out[i] for i in self.used_layers]
if len(out) == 1:
return out[0]
else:
return out
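# Annotation: with the settings above, layer3/layer4 keep stride 1 and use
# dilation 2/4, so the backbone's total output stride stays at 8, a common
# choice for Siamese trackers that need spatially dense features; used_layers
# selects which of [stem, layer1, layer2, layer3, layer4] outputs are returned.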
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
if __name__ == '__main__':
net = resnet50(used_layers=[2, 3, 4])
print(net)
net = net.cuda()
template_var = torch.FloatTensor(1, 3, 127, 127).cuda()
search_var = torch.FloatTensor(1, 3, 255, 255).cuda()
t = net(template_var)
s = net(search_var)
print(t[-1].shape, s[-1].shape)
| 7,286 | 29.746835 | 78 | py |
HDN | HDN-master/hdn/models/backbone/mobile_v2.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
def conv_bn(inp, oup, stride, padding=1):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, padding, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, dilation=1):
super(InvertedResidual, self).__init__()
self.stride = stride
self.use_res_connect = self.stride == 1 and inp == oup
padding = 2 - stride
if dilation > 1:
padding = dilation
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3,
stride, padding, dilation=dilation,
groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Sequential):
def __init__(self, width_mult=1.0, used_layers=[3, 5, 7]):
super(MobileNetV2, self).__init__()
        # original MobileNetV2 setting (kept for reference):
        # self.interverted_residual_setting = [
        #     # t (expand), c (channels), n (blocks), s (stride), d (dilation)
        #     [1, 16, 1, 1, 1],
        #     [6, 24, 2, 2, 1],
        #     [6, 32, 3, 2, 1],
        #     [6, 64, 4, 2, 1],
        #     [6, 96, 3, 1, 1],
        #     [6, 160, 3, 2, 1],
        #     [6, 320, 1, 1, 1],
        # ]
        # dilated variant used here: the last stages trade stride for dilation
        # so the backbone's output stride stays at 8
        self.interverted_residual_setting = [
            # t, c, n, s, d
            [1, 16, 1, 1, 1],
            [6, 24, 2, 2, 1],
            [6, 32, 3, 2, 1],
            [6, 64, 4, 1, 2],
            [6, 96, 3, 1, 2],
            [6, 160, 3, 1, 4],
            [6, 320, 1, 1, 4],
        ]
self.channels = [24, 32, 96, 320]
self.channels = [int(c * width_mult) for c in self.channels]
input_channel = int(32 * width_mult)
self.last_channel = int(1280 * width_mult) \
if width_mult > 1.0 else 1280
self.add_module('layer0', conv_bn(3, input_channel, 2, 0))
last_dilation = 1
self.used_layers = used_layers
for idx, (t, c, n, s, d) in \
enumerate(self.interverted_residual_setting, start=1):
output_channel = int(c * width_mult)
layers = []
for i in range(n):
if i == 0:
if d == last_dilation:
dd = d
else:
dd = max(d // 2, 1)
layers.append(InvertedResidual(input_channel,
output_channel, s, t, dd))
else:
layers.append(InvertedResidual(input_channel,
output_channel, 1, t, d))
input_channel = output_channel
last_dilation = d
self.add_module('layer%d' % (idx), nn.Sequential(*layers))
def forward(self, x):
outputs = []
for idx in range(8):
name = "layer%d" % idx
x = getattr(self, name)(x)
outputs.append(x)
p0, p1, p2, p3, p4 = [outputs[i] for i in [1, 2, 3, 5, 7]]
out = [outputs[i] for i in self.used_layers]
return out
def mobilenetv2(**kwargs):
model = MobileNetV2(**kwargs)
return model
if __name__ == '__main__':
net = mobilenetv2()
print(net)
from torch.autograd import Variable
tensor = Variable(torch.Tensor(1, 3, 255, 255)).cuda()
net = net.cuda()
out = net(tensor)
for i, p in enumerate(out):
print(i, p.size())
| 4,315 | 27.20915 | 77 | py |
HDN | HDN-master/hdn/models/backbone/alexnet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
class AlexNetLegacy(nn.Module):
configs = [3, 96, 256, 384, 384, 256]
def __init__(self, width_mult=1):
        configs = list(map(lambda x: 3 if x == 3 else
                           int(x*width_mult), AlexNetLegacy.configs))
super(AlexNetLegacy, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2),
nn.BatchNorm2d(configs[1]),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(configs[1], configs[2], kernel_size=5),
nn.BatchNorm2d(configs[2]),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(configs[2], configs[3], kernel_size=3),
nn.BatchNorm2d(configs[3]),
nn.ReLU(inplace=True),
nn.Conv2d(configs[3], configs[4], kernel_size=3),
nn.BatchNorm2d(configs[4]),
nn.ReLU(inplace=True),
nn.Conv2d(configs[4], configs[5], kernel_size=3),
nn.BatchNorm2d(configs[5]),
)
self.feature_size = configs[5]
def forward(self, x):
x = self.features(x)
return x
class AlexNet(nn.Module):
configs = [3, 96, 256, 384, 384, 256]
def __init__(self, width_mult=1):
configs = list(map(lambda x: 3 if x == 3 else
int(x*width_mult), AlexNet.configs))
super(AlexNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2),
nn.BatchNorm2d(configs[1]),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True),
)
self.layer2 = nn.Sequential(
nn.Conv2d(configs[1], configs[2], kernel_size=5),
nn.BatchNorm2d(configs[2]),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True),
)
self.layer3 = nn.Sequential(
nn.Conv2d(configs[2], configs[3], kernel_size=3),
nn.BatchNorm2d(configs[3]),
nn.ReLU(inplace=True),
)
self.layer4 = nn.Sequential(
nn.Conv2d(configs[3], configs[4], kernel_size=3),
nn.BatchNorm2d(configs[4]),
nn.ReLU(inplace=True),
)
self.layer5 = nn.Sequential(
nn.Conv2d(configs[4], configs[5], kernel_size=3),
nn.BatchNorm2d(configs[5]),
)
self.feature_size = configs[5]
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
return x
def alexnetlegacy(**kwargs):
return AlexNetLegacy(**kwargs)
def alexnet(**kwargs):
return AlexNet(**kwargs)
| 2,991 | 31.521739 | 72 | py |
HDN | HDN-master/hdn/models/neck/neck.py | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
class AdjustLayer(nn.Module):
def __init__(self, in_channels, out_channels, cut=True, cut_left=4, cut_num=7):
super(AdjustLayer, self).__init__()
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
)
self.cut = cut
self.cut_left = cut_left
self.cut_num = cut_num
def forward(self, x):
x = self.downsample(x)
if self.cut and x.size(3) < 20:
l = self.cut_left
r = l + self.cut_num
x = x[:, :, l:r, l:r]
return x
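    # Annotation: the center crop (rows/cols cut_left .. cut_left + cut_num)
    # trims the border of small (template) feature maps, the usual way Siamese
    # trackers discard padding-affected border features; the x.size(3) < 20
    # guard leaves the larger search-region feature map untouched.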
class AdjustAllLayer(nn.Module):
def __init__(self, in_channels, out_channels, cut=True, cut_left=4, cut_num=7):
super(AdjustAllLayer, self).__init__()
self.num = len(out_channels)
if self.num == 1:
self.downsample = AdjustLayer(in_channels[0], out_channels[0], cut, cut_left, cut_num)
else:
for i in range(self.num):
self.add_module('downsample'+str(i+2),
AdjustLayer(in_channels[i], out_channels[i], cut, cut_left, cut_num))
def forward(self, features):
if self.num == 1:
return self.downsample(features)
else:
out = []
for i in range(self.num):
adj_layer = getattr(self, 'downsample'+str(i+2))
out.append(adj_layer(features[i]))
return out
| 1,709 | 31.884615 | 101 | py |
HDN | HDN-master/hdn/models/neck/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hdn.models.neck.neck import AdjustLayer, AdjustAllLayer
NECKS = {
'AdjustLayer': AdjustLayer,
'AdjustAllLayer': AdjustAllLayer,
}
def get_neck(name, **kwargs):
return NECKS[name](**kwargs)
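# Usage sketch (hypothetical channel counts, e.g. ResNet-50 layers 2-4):
#   neck = get_neck('AdjustAllLayer',
#                   in_channels=[512, 1024, 2048],
#                   out_channels=[256, 256, 256])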
| 445 | 21.3 | 60 | py |
HDN | HDN-master/hdn/models/head/ban.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from hdn.core.xcorr import xcorr_fast, xcorr_depthwise
class BAN(nn.Module):
def __init__(self):
super(BAN, self).__init__()
def forward(self, z_f, x_f):
raise NotImplementedError
class UPChannelBAN(BAN):
def __init__(self, feature_in=256, cls_out_channels=2):
super(UPChannelBAN, self).__init__()
cls_output = cls_out_channels
loc_output = 4
self.template_cls_conv = nn.Conv2d(feature_in,
feature_in * cls_output, kernel_size=3)
self.template_loc_conv = nn.Conv2d(feature_in,
feature_in * loc_output, kernel_size=3)
self.search_cls_conv = nn.Conv2d(feature_in,
feature_in, kernel_size=3)
self.search_loc_conv = nn.Conv2d(feature_in,
feature_in, kernel_size=3)
self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1)
def forward(self, z_f, x_f):
cls_kernel = self.template_cls_conv(z_f)
loc_kernel = self.template_loc_conv(z_f)
cls_feature = self.search_cls_conv(x_f)
loc_feature = self.search_loc_conv(x_f)
cls = xcorr_fast(cls_feature, cls_kernel)
loc = self.loc_adjust(xcorr_fast(loc_feature, loc_kernel))
return cls, loc
class DepthwiseXCorr(nn.Module):
def __init__(self, in_channels, hidden, out_channels, kernel_size=3):
super(DepthwiseXCorr, self).__init__()
self.conv_kernel = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.conv_search = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.head = nn.Sequential(
nn.Conv2d(hidden, hidden, kernel_size=1, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
nn.Conv2d(hidden, out_channels, kernel_size=1)
)
def forward(self, kernel, search):
kernel = self.conv_kernel(kernel)
search = self.conv_search(search)
feature = xcorr_depthwise(search, kernel)
out = self.head(feature)
return out
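    # Shape sketch (annotation, assuming e.g. a 7x7 template and a 31x31
    # search feature map): conv_kernel/conv_search project both inputs to
    # `hidden` channels, xcorr_depthwise correlates each search channel with
    # its matching template channel (a 25x25 map here), and `head` maps the
    # correlation to out_channels with 1x1 convolutions.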
class DepthwiseBAN(BAN):
def __init__(self, in_channels=256, out_channels=256, cls_out_channels=2, weighted=False):
super(DepthwiseBAN, self).__init__()
self.cls = DepthwiseXCorr(in_channels, out_channels, cls_out_channels)
self.loc = DepthwiseXCorr(in_channels, out_channels, 2)
def forward(self, z_f, x_f):
cls = self.cls(z_f, x_f)
loc = self.loc(z_f, x_f)
return cls, loc
class MultiBAN(BAN):
def __init__(self, in_channels, cls_out_channels, weighted=False):
super(MultiBAN, self).__init__()
self.weighted = weighted
for i in range(len(in_channels)):
self.add_module('box'+str(i+2), DepthwiseBAN(in_channels[i], in_channels[i], cls_out_channels))
if self.weighted:
self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))
self.loc_weight = nn.Parameter(torch.ones(len(in_channels)))
self.loc_scale = nn.Parameter(torch.ones(len(in_channels)))
def forward(self, z_fs, x_fs):
cls = []
loc = []
for idx, (z_f, x_f) in enumerate(zip(z_fs, x_fs), start=2):
box = getattr(self, 'box'+str(idx))
c, l = box(z_f, x_f)
cls.append(c)
loc.append(l*self.loc_scale[idx-2])
if self.weighted:
cls_weight = F.softmax(self.cls_weight, 0)
loc_weight = F.softmax(self.loc_weight, 0)
def avg(lst):
return sum(lst) / len(lst)
def weighted_avg(lst, weight):
s = 0
for i in range(len(weight)):
s += lst[i] * weight[i]
return s
if self.weighted:
return weighted_avg(cls, cls_weight), weighted_avg(loc, loc_weight)
else:
return avg(cls), avg(loc)
| 4,392 | 33.054264 | 107 | py |
HDN | HDN-master/hdn/models/head/ban_lp.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from hdn.core.xcorr import xcorr_fast, xcorr_depthwise, xcorr_depthwise_circular
from hdn.models.head.ban import BAN
class DepthwiseXCorrCirc(nn.Module):
def __init__(self, in_channels, hidden, out_channels, kernel_size=3):
super(DepthwiseXCorrCirc, self).__init__()
self.conv_kernel = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.conv_search = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.head = nn.Sequential(
nn.Conv2d(hidden, hidden, kernel_size=1, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
nn.Conv2d(hidden, out_channels, kernel_size=1)
)
def forward(self, kernel, search):
kernel = self.conv_kernel(kernel)
search = self.conv_search(search)
feature = xcorr_depthwise_circular(search, kernel)
out = self.head(feature)
return out
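    # Annotation: this differs from DepthwiseXCorr only in the correlation op;
    # xcorr_depthwise_circular presumably wraps the correlation (circular
    # padding), which suits the log-polar branch where the angular axis is
    # periodic and rotation wraps around 2*pi.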
class DepthwiseCircBAN(BAN):
def __init__(self, in_channels=256, out_channels=256, cls_out_channels=2, weighted=False):
super(DepthwiseCircBAN, self).__init__()
self.cls = DepthwiseXCorrCirc(in_channels, out_channels, cls_out_channels)
self.loc = DepthwiseXCorrCirc(in_channels, out_channels, 4)
def forward(self, z_f, x_f):
cls = self.cls(z_f, x_f)
loc = self.loc(z_f, x_f)
return cls, loc
class MultiCircBAN(BAN):
def __init__(self, in_channels, cls_out_channels, weighted=False):
super(MultiCircBAN, self).__init__()
self.weighted = weighted
for i in range(len(in_channels)):
self.add_module('box'+str(i+2), DepthwiseCircBAN(in_channels[i], in_channels[i], cls_out_channels))
if self.weighted:
self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))
self.loc_weight = nn.Parameter(torch.ones(len(in_channels)))
self.loc_scale = nn.Parameter(torch.ones(len(in_channels)))
def forward(self, z_fs, x_fs):
cls = []
loc = []
for idx, (z_f, x_f) in enumerate(zip(z_fs, x_fs), start=2):
box = getattr(self, 'box'+str(idx))
c, l = box(z_f, x_f)
cls.append(c)
loc.append(l*self.loc_scale[idx-2])
if self.weighted:
cls_weight = F.softmax(self.cls_weight, 0)
loc_weight = F.softmax(self.loc_weight, 0)
def avg(lst):
return sum(lst) / len(lst)
def weighted_avg(lst, weight):
s = 0
for i in range(len(weight)):
s += lst[i] * weight[i]
return s
if self.weighted:
return weighted_avg(cls, cls_weight), weighted_avg(loc, loc_weight)
else:
return avg(cls), avg(loc)
| 3,288 | 34.365591 | 111 | py |
HDN | HDN-master/hdn/datasets/custom_transforms.py | import torch
import numpy as np
import cv2
class Normalize(object):
    """Scale pixel values to [0, 1] and standardize with ImageNet statistics."""
    def __init__(self):
        self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
def __call__(self, sample):
return (sample / 255. - self.mean) / self.std
class ToTensor(object):
    """Convert an HWC numpy image to a CHW float32 torch tensor."""
    def __call__(self, sample):
        sample = sample.transpose(2, 0, 1)  # HWC -> CHW
        return torch.from_numpy(sample.astype(np.float32))
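# Usage sketch (annotation): the two transforms are meant to be chained, e.g.
#   preprocess = transforms.Compose([Normalize(), ToTensor()])
# with torchvision.transforms imported as `transforms`.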
| 478 | 27.176471 | 69 | py |
HDN | HDN-master/hdn/datasets/dataset/unconstrained_v2_dataset.py | #Copyright 2021, XinruiZhan
"""
this file implements the perspective transforma augmentation on template image as search, or just use sampled two images from video as template and search.
we just need to adjust the interval, if there is interval we use unsupervised, if not, then use supervised
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torchvision.transforms as transforms
from hdn.datasets.custom_transforms import Normalize, ToTensor
import time
import json
import logging
import sys
import os
import math
import cv2
import numpy as np
from torch.utils.data import Dataset
from hdn.utils.transform import img_shift_crop_w_h
from hdn.utils.bbox import center2corner, Center, corner2center, SimT
from hdn.datasets.point_target.point_target import PointTarget, PointTargetLP, PointTargetRot
from hdn.datasets.augmentation.homo_augmentation_e2e import Augmentation
from hdn.core.config import cfg
from hdn.models.logpolar import getPolarImg
import matplotlib.pyplot as plt
logger = logging.getLogger("global")
from hdn.datasets.dataset.dataset import SubDataset
# setting opencv
from memory_profiler import profile
pyv = sys.version[0]
if pyv == '3':
cv2.ocl.setUseOpenCL(False)
class BANDataset(Dataset):
# @profile
def __init__(self,):
super(BANDataset, self).__init__()
self.transforms = transforms.Compose([
transforms.Grayscale(1),
# Normalize(),
# ToTensor()
])
self.maxscale = 0.0
self.totalscale = 0.0
self.curScale=0
desired_size = (cfg.TRAIN.SEARCH_SIZE - cfg.TRAIN.EXEMPLAR_SIZE) / \
cfg.POINT.STRIDE + 1 + cfg.TRAIN.BASE_SIZE
if desired_size != cfg.TRAIN.OUTPUT_SIZE:
raise Exception('size not match!')
# create point target
self.point_target = PointTarget()
self.point_target_lp = PointTargetLP()
self.point_target_c = PointTargetRot()
# create sub dataset
self.all_dataset = []
start = 0
self.num = 0
for name in cfg.DATASET.NAMES:
subdata_cfg = getattr(cfg.DATASET, name)
sub_dataset = SubDataset(
name,
subdata_cfg.ROOT,
subdata_cfg.ANNO,
subdata_cfg.FRAME_RANGE,
subdata_cfg.NUM_USE,
# unsup_start,
start,
subdata_cfg.IF_UNSUP
)
start += sub_dataset.num
self.num += sub_dataset.num_use
sub_dataset.log()
self.all_dataset.append(sub_dataset)
# data augmentation
self.template_sup_aug = Augmentation(# rho, shift, scale, blur, flip, color, rotation
cfg.DATASET.TEMPLATE.RHO,
cfg.DATASET.TEMPLATE.SHIFT,
cfg.DATASET.TEMPLATE.SCALE,
cfg.DATASET.TEMPLATE.BLUR,
cfg.DATASET.TEMPLATE.FLIP,
cfg.DATASET.TEMPLATE.COLOR,
cfg.DATASET.TEMPLATE.ROTATION,
cfg.DATASET.TEMPLATE.DISTORTION,
cfg.DATASET.TEMPLATE.AFFINE_A,
cfg.DATASET.TEMPLATE.AFFINE_C,
cfg.DATASET.TEMPLATE.AFFINE_D
)
self.search_sup_aug = Augmentation(
cfg.DATASET.SEARCH.RHO,
cfg.DATASET.SEARCH.SHIFT,
cfg.DATASET.SEARCH.SCALE,
cfg.DATASET.SEARCH.BLUR,
cfg.DATASET.SEARCH.FLIP,
cfg.DATASET.SEARCH.COLOR,
cfg.DATASET.SEARCH.ROTATION,
cfg.DATASET.SEARCH.DISTORTION,
cfg.DATASET.SEARCH.AFFINE_A,
cfg.DATASET.SEARCH.AFFINE_C,
cfg.DATASET.SEARCH.AFFINE_D,
cfg.DATASET.SEARCH.IMG_COMP_ALPHA,
cfg.DATASET.SEARCH.IMG_COMP_BETA,
cfg.DATASET.SEARCH.IMG_COMP_GAMMA,
)
self.template_unsup_aug = Augmentation(# rho, shift, scale, blur, flip, color, rotation
cfg.DATASET.TEMPLATE.UNSUPERVISED.RHO,
cfg.DATASET.TEMPLATE.UNSUPERVISED.SHIFT,
cfg.DATASET.TEMPLATE.UNSUPERVISED.SCALE,
cfg.DATASET.TEMPLATE.UNSUPERVISED.BLUR,
cfg.DATASET.TEMPLATE.UNSUPERVISED.FLIP,
cfg.DATASET.TEMPLATE.UNSUPERVISED.COLOR,
cfg.DATASET.TEMPLATE.UNSUPERVISED.ROTATION,
cfg.DATASET.TEMPLATE.UNSUPERVISED.DISTORTION,
cfg.DATASET.TEMPLATE.UNSUPERVISED.AFFINE_A,
cfg.DATASET.TEMPLATE.UNSUPERVISED.AFFINE_C,
cfg.DATASET.TEMPLATE.UNSUPERVISED.AFFINE_D,
)
self.search_unsup_aug = Augmentation(
cfg.DATASET.SEARCH.UNSUPERVISED.RHO,
cfg.DATASET.SEARCH.UNSUPERVISED.SHIFT,
cfg.DATASET.SEARCH.UNSUPERVISED.SCALE,
cfg.DATASET.SEARCH.UNSUPERVISED.BLUR,
cfg.DATASET.SEARCH.UNSUPERVISED.FLIP,
cfg.DATASET.SEARCH.UNSUPERVISED.COLOR,
cfg.DATASET.SEARCH.UNSUPERVISED.ROTATION,
cfg.DATASET.SEARCH.UNSUPERVISED.DISTORTION,
cfg.DATASET.SEARCH.UNSUPERVISED.AFFINE_A,
cfg.DATASET.SEARCH.UNSUPERVISED.AFFINE_C,
cfg.DATASET.SEARCH.UNSUPERVISED.AFFINE_D,
cfg.DATASET.SEARCH.IMG_COMP_ALPHA,
cfg.DATASET.SEARCH.IMG_COMP_BETA,
cfg.DATASET.SEARCH.IMG_COMP_GAMMA,
)
videos_per_epoch = cfg.DATASET.VIDEOS_PER_EPOCH * cfg.TRAIN.EPOCH #30000
self.num = videos_per_epoch if videos_per_epoch > 0 else self.num
self.pick = self.shuffle()
def shuffle(self):
pick = []
m = 0
while m < self.num:
            p = []
            for sub_dataset in self.all_dataset:
                sub_p = sub_dataset.pick
                p += sub_p
            np.random.shuffle(p)
pick += p
m = len(pick)
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
return pick[:self.num]
def _find_dataset(self, index):
for dataset in self.all_dataset:
if dataset.start_idx + dataset.num > index:
return dataset, index - dataset.start_idx
def _get_bbox(self, image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = cfg.TRAIN.EXEMPLAR_SIZE
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox, scale_z
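    # Worked example (annotation, assuming EXEMPLAR_SIZE = 127): for a 100x50
    # target, wc_z = 100 + 0.5 * 150 = 175 and hc_z = 50 + 0.5 * 150 = 125, so
    # s_z = sqrt(175 * 125) ~= 147.9 and scale_z = 127 / 147.9 ~= 0.86, the
    # standard SiamFC context-padded crop.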
def _get_trans_of_poly_from_ori_to_patch(self, poly, patch_size, scale):
poly_mat = np.array(poly).reshape(4, 2)
# we use outer bounding box here
max_p = np.max(poly_mat, 0)
min_p = np.min(poly_mat, 0)
aligned_bbox_in_ori = [int(min_p[0]), int(min_p[1]), int(max_p[0]),
int(max_p[1]), ] # (xmin, ymin, xmax, ymax)
center_in_ori = [(aligned_bbox_in_ori[0] + aligned_bbox_in_ori[2])/2 , (aligned_bbox_in_ori[1] + aligned_bbox_in_ori[3])/2]
center_in_patch = [patch_size/2, patch_size/2]
shift_trans_before = np.array([[1, 0, -center_in_ori[0]],
[0, 1, -center_in_ori[1]],
[0, 0, 1]]).astype(np.float32)
scale_trans = np.array([[scale, 0, 0],
[0, scale, 0],
[0, 0, 1]]).astype(np.float32)
shift_trans_after = np.array([[1, 0, center_in_patch[0]],
[0, 1, center_in_patch[1]],
[0, 0, 1]]).astype(np.float32)
trans = shift_trans_after@scale_trans@shift_trans_before
poly_new = cv2.perspectiveTransform(np.expand_dims(poly_mat, 0).astype('float32'), trans)[0]
return trans, poly_new
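    # Annotation: the returned trans is the composition
    #   T(+center_in_patch) @ S(scale) @ T(-center_in_ori),
    # i.e. shift the box center to the origin, scale, then shift to the patch
    # center, mapping original-image coordinates into patch coordinates
    # (applied to the polygon above via cv2.perspectiveTransform).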
def __len__(self):
return self.num
def __getitem__(self, index):
while True:
index = self.pick[index]
dataset, index = self._find_dataset(index)
if_unsup = dataset.if_unsup
gray = cfg.DATASET.GRAY and cfg.DATASET.GRAY > np.random.random()
neg = cfg.DATASET.NEG and cfg.DATASET.NEG > np.random.random()
if_comp = False
if_light = False
if_dark = False
img_add = None
if cfg.DATASET.COMP > np.random.random():
if_comp = True
img_add = cv2.imread(np.random.choice(self.all_dataset).get_random_target()[0])
if cfg.DATASET.LIGHT > np.random.random():
if_light = True
if cfg.DATASET.DARK > np.random.random():
if_dark = True
if neg:
template = dataset.get_random_target(index)# #(x_min, y_min, x_max, y_max)#[w, h, theta, center_x, center_y]
search = np.random.choice(self.all_dataset).get_random_target()
else:
template, search = dataset.get_positive_pair(index)
# get image
template_image = cv2.imread(template[0])#511*511*3
search_image = cv2.imread(search[0])#511*511*3
template_box, tmp_scale_z = self._get_bbox(template_image, template[1][0])
search_box, sea_scale_z = self._get_bbox(search_image, search[1][0])#aligned bbox and scale_z
#poly points
tmp_ori_bbox = template[1][0] # upright
sear_ori_bbox = search[1][0]
template_poly = [tmp_ori_bbox[0], tmp_ori_bbox[1], tmp_ori_bbox[2], tmp_ori_bbox[1], \
tmp_ori_bbox[2], tmp_ori_bbox[3], tmp_ori_bbox[0], tmp_ori_bbox[3]]
search_poly = [sear_ori_bbox[0], sear_ori_bbox[1], sear_ori_bbox[2], sear_ori_bbox[1], \
sear_ori_bbox[2], sear_ori_bbox[3], sear_ori_bbox[0], sear_ori_bbox[3]]
# get tranformation the polygon representation of target from ori to patch
temp_points_trans, temp_poly = self._get_trans_of_poly_from_ori_to_patch(template_poly, template_image.shape[0], tmp_scale_z)
search_points_trans, sear_poly = self._get_trans_of_poly_from_ori_to_patch(search_poly, search_image.shape[0], sea_scale_z)
tmp1 = corner2center(template_box)
tmp2 = corner2center(search_box)
tmp_theta = template[1][1][2]
search_theta = search[1][1][2]
sx = tmp2.w / tmp1.w
sy = tmp2.h / tmp1.h
# augmentation
if if_unsup:
template, box_lp, temp_poly, sim_tmp, init_wh_tmp = self.template_unsup_aug(template_image,
template_box,
temp_poly,
cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray,
theta=tmp_theta)
else:
template, box_lp, temp_poly, sim_tmp, init_wh_tmp = self.template_sup_aug(template_image,
template_box,
temp_poly,
cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray,
theta=tmp_theta)
tmp_box = corner2center(box_lp)
template_lp = getPolarImg(template, (tmp_box.x, tmp_box.y))
if if_unsup:
search, bbox, sear_poly, sim, init_wh = self.search_unsup_aug(search_image,
search_box,
sear_poly,
cfg.TRAIN.SEARCH_SIZE,
# cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray,
theta=search_theta,
if_comp=if_comp,
if_light=if_light,
if_dark=if_dark,
                                                               img_add=img_add)  # original rotation removed: the template is kept at rot = 0
else:
search, bbox, sear_poly, sim, init_wh = self.search_sup_aug(search_image,
search_box,
sear_poly,
cfg.TRAIN.SEARCH_SIZE,
# cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray,
theta=search_theta,
if_comp=if_comp,
if_light=if_light,
if_dark=if_dark,
                                                             img_add=img_add)  # original rotation removed: the template is kept at rot = 0
            # masks for the loss: all-ones for positive pairs, all-zeros for
            # negatives (used to ignore regions unrelated to the object, e.g.
            # black borders)
if neg:
mask_tmp = np.zeros([cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE])
mask_search = np.zeros([cfg.TRAIN.SEARCH_SIZE, cfg.TRAIN.SEARCH_SIZE])
if_pos = 0
else:
if_pos = 1
mask_tmp = np.ones([cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE])
mask_search = np.ones([cfg.TRAIN.SEARCH_SIZE, cfg.TRAIN.SEARCH_SIZE])
search_rot = sim.rot
delta_theta = sim.rot - sim_tmp.rot
delta_sx = sim.sx / sim_tmp.sx
if delta_theta > (math.pi * 2):
delta_theta -= math.pi * 2
elif delta_theta <= (-math.pi * 2):
delta_theta += math.pi * 2
sim = SimT(sim.x, sim.y, delta_sx, delta_sx, delta_theta)
# checking point
shape = search_image.shape
crop_bbox = center2corner(Center(shape[0]//2, shape[1]//2,
cfg.TRAIN.SEARCH_SIZE-1, cfg.TRAIN.SEARCH_SIZE-1))
crop_bbox_center = corner2center(crop_bbox)
h, w = crop_bbox_center.h, crop_bbox_center.w
if sx > float(shape[0]) / w or sy > float(shape[1]) / h:
continue
self.totalscale = self.totalscale + sim.sx
self.curScale = self.curScale + 1
if sim.sx > self.maxscale:
self.maxscale = sim.sx
#fixme for homo-estimator test
if cfg.TRAIN.MODEL_TYPE == 'SEP':
search = img_shift_crop_w_h(search, 0, 0, cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE)
#fixme for e2e
elif cfg.TRAIN.MODEL_TYPE == 'E2E':
search_127 = img_shift_crop_w_h(search, 0, 0, cfg.TRAIN.EXEMPLAR_SIZE, cfg.TRAIN.EXEMPLAR_SIZE)
search, cls, delta, window_map = self.point_target(search, bbox, cfg.TRAIN.OUTPUT_SIZE, search_rot, neg, init_wh)
#for homo-estimator training
if cfg.TRAIN.MODEL_TYPE == 'SEP':
_mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
_std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
template = (template - _mean_I) / _std_I
search = (search - _mean_I) / _std_I
template = template.transpose((2, 0, 1)).astype(np.float32)
search = search.transpose((2, 0, 1)).astype(np.float32)
return {
'template': template,
'template_lp': template_lp,
'search': search,
'template_poly': temp_poly,
'search_poly': sear_poly,
}
#for e2e
elif cfg.TRAIN.MODEL_TYPE == 'E2E':
cls_c, delta_c = self.point_target_c(bbox, cfg.TRAIN.OUTPUT_SIZE, search_rot, neg, init_wh)
cls_lp, delta_lp = self.point_target_lp(sim, bbox, cfg.TRAIN.OUTPUT_SIZE_LP, neg, init_wh)
_mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
_std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
template_hm = (template - _mean_I) / _std_I
search_hm = (search - _mean_I) / _std_I#255*255
template = template.transpose((2, 0, 1)).astype(np.float32)
search = search.transpose((2, 0, 1)).astype(np.float32)
template_lp = template_lp.transpose((2, 0, 1)).astype(np.float32)
template_hm = template_hm.transpose((2, 0, 1)).astype(np.float32)
search_hm = search_hm.transpose((2, 0, 1)).astype(np.float32)
return {
'template': template,
'template_lp': template_lp,
'search': search,
'template_poly': temp_poly,
'search_poly': sear_poly,
'label_cls': cls,
'label_loc': delta,
'label_cls_lp': cls_lp,
'label_loc_lp': delta_lp,
'scale_dist': sim.sx,
'label_cls_c': cls_c,
'label_loc_c': delta_c,
'window_map': window_map,
'template_hm':template_hm,
'search_hm': search_hm,
'template_window': mask_tmp,
'search_window': mask_search,
'if_pos': if_pos,
'temp_cx': sim_tmp.x,
'temp_cy': sim_tmp.y,
'if_unsup': if_unsup
} | 19,251 | 46.535802 | 156 | py |
HDN | HDN-master/hdn/datasets/dataset/dataset.py | #Copyright 2021, XinruiZhan
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torchvision.transforms as transforms
from hdn.datasets.custom_transforms import Normalize, ToTensor
import orjson as json
import logging
import sys
import os
import math
import cv2
import numpy as np
from torch.utils.data import Dataset
from hdn.utils.bbox import center2corner, Center, corner2center, SimT
from hdn.datasets.point_target.point_target import PointTarget, PointTargetLP, PointTargetRot
from hdn.datasets.augmentation.homo_augmentation_e2e import Augmentation
from hdn.core.config import cfg
from hdn.models.logpolar import getPolarImg
import matplotlib.pyplot as plt
logger = logging.getLogger("global")
pyv = sys.version[0]
if pyv == '3':
cv2.ocl.setUseOpenCL(False)
class SubDataset(object):
# @profile
def __init__(self, name, root, anno, frame_range, num_use, start_idx, if_unsup=False):
cur_path = os.path.dirname(os.path.realpath(__file__))
self.name = name
self.root = os.path.join(cur_path, '../../../', root)
self.anno = os.path.join(cur_path, '../../../', anno)
self.frame_range = frame_range
self.num_use = num_use
self.start_idx = start_idx
self.if_unsup = if_unsup
logger.info("loading " + name)
with open(self.anno, 'r') as f:
            meta_data = json.loads(f.read())
meta_data = self._filter_zero(meta_data)
for video in list(meta_data.keys()):
for track in meta_data[video]:
frames = meta_data[video][track]
frames = list(map(int,
filter(lambda x: x.isdigit(), frames.keys())))
frames.sort()
meta_data[video][track]['frames'] = frames
if len(frames) <= 0:
logger.warning("{}/{} has no frames".format(video, track))
del meta_data[video][track]
for video in list(meta_data.keys()):
if len(meta_data[video]) <= 0:
logger.warning("{} has no tracks".format(video))
del meta_data[video]
self.labels = meta_data
self.num = len(self.labels) #video_num
self.num_use = self.num if self.num_use == -1 else self.num_use
self.videos = list(meta_data.keys())
logger.info("{} loaded".format(self.name))
self.path_format = '{}.{}.{}.jpg'
self.pick = self.shuffle()
def _filter_zero(self, meta_data):
meta_data_new = {}
for video, tracks in meta_data.items():
new_tracks = {}
for trk, frames in tracks.items():
new_frames = {}
for frm, bbox in frames.items():
if not isinstance(bbox[0], dict):
if len(bbox[0]) == 4:
x1, y1, x2, y2 = bbox[0]
w, h = x2 - x1, y2 - y1
else:
w, h = bbox[0]
if w <= 1 or h <= 1: # 1 pixel too small to handle
continue
new_frames[frm] = bbox
if len(new_frames) > 0:
new_tracks[trk] = new_frames
if len(new_tracks) > 0:
meta_data_new[video] = new_tracks
return meta_data_new
def log(self):
logger.info("{} start-index {} select [{}/{}] path_format {}".format(
self.name, self.start_idx, self.num_use,
self.num, self.path_format))
def shuffle(self):
        logger.info("start_idx {} num {} num_use {}".format(
            self.start_idx, self.num, self.num_use))
lists = list(range(self.start_idx, self.start_idx + self.num))
pick = []
while len(pick) < self.num_use:
np.random.shuffle(lists)
pick += lists
return pick[:self.num_use]
def get_image_anno(self, video, track, frame):
frame = "{:06d}".format(frame)
image_path = os.path.join(self.root, video,
self.path_format.format(frame, track, 'x'))
image_anno = self.labels[video][track][frame]
return image_path, image_anno
def get_positive_pair(self, index):
video_name = self.videos[index]
video = self.labels[video_name]
track = np.random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
template_frame = np.random.randint(0, len(frames))
left = max(template_frame - self.frame_range, 0)
right = min(template_frame + self.frame_range, len(frames)-1) + 1
search_range = frames[left:right]
template_frame = frames[template_frame]
search_frame = np.random.choice(search_range)
return self.get_image_anno(video_name, track, template_frame), \
self.get_image_anno(video_name, track, search_frame)
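    # Example (annotation): with frame_range = 100 and template frame index i,
    # the search frame is drawn uniformly from frames[max(i - 100, 0) : i + 101]
    # (clamped to the sequence length), i.e. at most 100 frames from the
    # template on either side.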
def get_random_target(self, index=-1):
if index == -1:
index = np.random.randint(0, self.num)
video_name = self.videos[index]
video = self.labels[video_name]
track = np.random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
frame = np.random.choice(frames)
return self.get_image_anno(video_name, track, frame)
def __len__(self):
return self.num
class BANDataset(Dataset):
def __init__(self,):
super(BANDataset, self).__init__()
self.transforms = transforms.Compose([
Normalize(),
ToTensor()
])
self.maxscale = 0.0
self.totalscale = 0.0
self.curScale=0
desired_size = (cfg.TRAIN.SEARCH_SIZE - cfg.TRAIN.EXEMPLAR_SIZE) / \
cfg.POINT.STRIDE + 1 + cfg.TRAIN.BASE_SIZE
if desired_size != cfg.TRAIN.OUTPUT_SIZE:
raise Exception('size not match!')
# create point target
self.point_target = PointTarget()
self.point_target_lp = PointTargetLP()
self.point_target_c = PointTargetRot()
# create sub dataset
self.all_dataset = []
start = 0
self.num = 0
for name in cfg.DATASET.NAMES:
subdata_cfg = getattr(cfg.DATASET, name)
sub_dataset = SubDataset(
name,
subdata_cfg.ROOT,
subdata_cfg.ANNO,
subdata_cfg.FRAME_RANGE,
subdata_cfg.NUM_USE,
start
)
start += sub_dataset.num
self.num += sub_dataset.num_use
sub_dataset.log()
self.all_dataset.append(sub_dataset)
# data augmentation
self.template_aug = Augmentation(
cfg.DATASET.TEMPLATE.SHIFT,
cfg.DATASET.TEMPLATE.SCALE,
cfg.DATASET.TEMPLATE.BLUR,
cfg.DATASET.TEMPLATE.FLIP,
cfg.DATASET.TEMPLATE.COLOR,
cfg.DATASET.TEMPLATE.ROTATION
)
self.search_aug = Augmentation(
cfg.DATASET.SEARCH.SHIFT,
cfg.DATASET.SEARCH.SCALE,
cfg.DATASET.SEARCH.BLUR,
cfg.DATASET.SEARCH.FLIP,
cfg.DATASET.SEARCH.COLOR,
cfg.DATASET.SEARCH.ROTATION
)
videos_per_epoch = cfg.DATASET.VIDEOS_PER_EPOCH
self.num = videos_per_epoch if videos_per_epoch > 0 else self.num
self.num *= cfg.TRAIN.EPOCH
self.pick = self.shuffle()
def shuffle(self):
pick = []
m = 0
while m < self.num:
p = []
for sub_dataset in self.all_dataset:
sub_p = sub_dataset.pick
p += sub_p
np.random.shuffle(p)
pick += p
m = len(pick)
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
return pick[:self.num]
def _find_dataset(self, index):
for dataset in self.all_dataset:
if dataset.start_idx + dataset.num > index:
return dataset, index - dataset.start_idx
def _get_bbox(self, image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = cfg.TRAIN.EXEMPLAR_SIZE
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox, scale_z
def __len__(self):
return self.num
def __getitem__(self, index):
while True:
index = self.pick[index]
dataset, index = self._find_dataset(index)
gray = cfg.DATASET.GRAY and cfg.DATASET.GRAY > np.random.random()
neg = cfg.DATASET.NEG and cfg.DATASET.NEG > np.random.random()
same = cfg.DATASET.SAME and cfg.DATASET.SAME > np.random.random()
if neg:
template = dataset.get_random_target(index)
search = np.random.choice(self.all_dataset).get_random_target()
else:
template, search = dataset.get_positive_pair(index)
# get image
template_image = cv2.imread(template[0])#511*511*3
search_image = cv2.imread(search[0])#511*511*3
template_box, tmp_scale_z = self._get_bbox(template_image, template[1][0])
search_box, sea_scale_z = self._get_bbox(search_image, search[1][0]) #aligned bbox and scale_z
tmp1 = corner2center(template_box)
tmp2 = corner2center(search_box)
tmp_theta = template[1][1][2]
search_theta = search[1][1][2]
            # bring template and search angles into the same branch: rotated-box
            # annotations are ambiguous up to pi (and up to pi/2 for
            # near-square boxes)
            if not neg:
                if abs(search_theta - tmp_theta) > 0.8:
if abs(search_theta - tmp_theta) < 2:
if search_theta > tmp_theta: # eg: square
search_theta -= math.pi / 2
else:
tmp_theta -= math.pi / 2
else:
if search_theta > tmp_theta: # normal
search_theta -= math.pi
else:
tmp_theta -= math.pi
# get rot_rect ratio and theata, theata is hard to obtain
template_rot_rect = template[1][1]
search_rot_rect = search[1][1]
ori_tmp_w = template_rot_rect[0]
ori_tmp_h = template_rot_rect[1]
ori_sea_w = search_rot_rect[0]
ori_sea_h = search_rot_rect[1]
sx_rot = ori_sea_w / ori_tmp_w * (sea_scale_z / tmp_scale_z)
sy_rot = ori_sea_h / ori_tmp_h * (sea_scale_z / tmp_scale_z)
sx = tmp2.w / tmp1.w
sy = tmp2.h / tmp1.h
# augmentation
template, box_lp, sim_tmp, img_sx1, img_sy1, _ = self.template_aug(template_image,
template_box,
cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray,
theta=tmp_theta)
tmp_box = corner2center(box_lp)
template_lp = getPolarImg(template, (tmp_box.x, tmp_box.y))
search, bbox, sim, img_sx2, img_sy2, init_wh = self.search_aug(search_image,
search_box,
cfg.TRAIN.SEARCH_SIZE,
gray=gray,
same=same,
sx=sx, sy=sy,
sx_rot=sx_rot,
sy_rot=sy_rot,
                                                           theta=search_theta)  # original rotation removed: the template is kept at rot = 0
sx_rot = sx_rot * (img_sx2 / img_sx1)
sy_rot = sy_rot * (img_sy2 / img_sy1)
search_rot = sim.rot
delta_theta = sim.rot - sim_tmp.rot
if delta_theta > (math.pi * 2):
delta_theta -= math.pi * 2
elif delta_theta <= (-math.pi * 2):
delta_theta += math.pi * 2
sim = SimT(sim.x,sim.y, sx_rot, sy_rot, delta_theta)
# checking point
shape = search_image.shape
crop_bbox = center2corner(Center(shape[0]//2, shape[1]//2,
cfg.TRAIN.SEARCH_SIZE-1, cfg.TRAIN.SEARCH_SIZE-1))
crop_bbox_center = corner2center(crop_bbox)
h, w = crop_bbox_center.h, crop_bbox_center.w
if sx > float(shape[0]) / w or sy > float(shape[1]) / h:
continue
self.totalscale = self.totalscale + sim.sx
self.curScale = self.curScale + 1
if sim.sx > self.maxscale:
self.maxscale = sim.sx
search, cls, delta, window_map = self.point_target(search, bbox, cfg.TRAIN.OUTPUT_SIZE, search_rot, neg, init_wh)
cls_c, delta_c = self.point_target_c(bbox, cfg.TRAIN.OUTPUT_SIZE, search_rot, neg, init_wh)
cls_lp, delta_lp = self.point_target_lp(sim, bbox, cfg.TRAIN.OUTPUT_SIZE_LP, neg, init_wh)
template = template.transpose((2, 0, 1)).astype(np.float32)
search = search.transpose((2, 0, 1)).astype(np.float32)
template_lp = template_lp.transpose((2, 0, 1)).astype(np.float32)
return {
'template': template,
'template_lp': template_lp,
'search': search,
'label_cls': cls,
'label_loc': delta,
'label_cls_lp': cls_lp,
'label_loc_lp': delta_lp,
'scale_dist': sim.sx,
'label_cls_c': cls_c,
'label_loc_c': delta_c,
'window_map': window_map
}
| 15,007 | 39.128342 | 146 | py |
HDN | HDN-master/hdn/utils/lr_scheduler.py | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
from hdn.core.config import cfg
class LRScheduler(_LRScheduler):
def __init__(self, optimizer, last_epoch=-1):
if 'lr_spaces' not in self.__dict__:
            raise Exception('lr_spaces must be set in "LRScheduler"')
super(LRScheduler, self).__init__(optimizer, last_epoch)
def get_cur_lr(self):
return self.lr_spaces[self.last_epoch]
def get_lr(self):
epoch = self.last_epoch
return [self.lr_spaces[epoch] * pg['initial_lr'] / self.start_lr
for pg in self.optimizer.param_groups]
def __repr__(self):
return "({}) lr spaces: \n{}".format(self.__class__.__name__,
self.lr_spaces)
class LogScheduler(LRScheduler):
def __init__(self, optimizer, start_lr=0.03, end_lr=5e-4,
epochs=50, last_epoch=-1, **kwargs):
self.start_lr = start_lr
self.end_lr = end_lr
self.epochs = epochs
self.lr_spaces = np.logspace(math.log10(start_lr),
math.log10(end_lr),
epochs)
super(LogScheduler, self).__init__(optimizer, last_epoch)
class StepScheduler(LRScheduler):
def __init__(self, optimizer, start_lr=0.01, end_lr=None,
step=10, mult=0.1, epochs=50, last_epoch=-1, **kwargs):
if end_lr is not None:
if start_lr is None:
start_lr = end_lr / (mult ** (epochs // step))
else: # for warm up policy
mult = math.pow(end_lr/start_lr, 1. / (epochs // step))
self.start_lr = start_lr
self.lr_spaces = self.start_lr * (mult**(np.arange(epochs) // step))
self.mult = mult
self._step = step
super(StepScheduler, self).__init__(optimizer, last_epoch)
class MultiStepScheduler(LRScheduler):
def __init__(self, optimizer, start_lr=0.01, end_lr=None,
steps=[10, 20, 30, 40], mult=0.5, epochs=50,
last_epoch=-1, **kwargs):
if end_lr is not None:
if start_lr is None:
start_lr = end_lr / (mult ** (len(steps)))
else:
mult = math.pow(end_lr/start_lr, 1. / len(steps))
self.start_lr = start_lr
self.lr_spaces = self._build_lr(start_lr, steps, mult, epochs)
self.mult = mult
self.steps = steps
super(MultiStepScheduler, self).__init__(optimizer, last_epoch)
def _build_lr(self, start_lr, steps, mult, epochs):
lr = [0] * epochs
lr[0] = start_lr
for i in range(1, epochs):
lr[i] = lr[i-1]
if i in steps:
lr[i] *= mult
return np.array(lr, dtype=np.float32)
class LinearStepScheduler(LRScheduler):
def __init__(self, optimizer, start_lr=0.01, end_lr=0.005,
epochs=50, last_epoch=-1, **kwargs):
self.start_lr = start_lr
self.end_lr = end_lr
self.lr_spaces = np.linspace(start_lr, end_lr, epochs)
super(LinearStepScheduler, self).__init__(optimizer, last_epoch)
class CosStepScheduler(LRScheduler):
def __init__(self, optimizer, start_lr=0.01, end_lr=0.005,
epochs=50, last_epoch=-1, **kwargs):
self.start_lr = start_lr
self.end_lr = end_lr
self.lr_spaces = self._build_lr(start_lr, end_lr, epochs)
super(CosStepScheduler, self).__init__(optimizer, last_epoch)
def _build_lr(self, start_lr, end_lr, epochs):
index = np.arange(epochs).astype(np.float32)
lr = end_lr + (start_lr - end_lr) * \
(1. + np.cos(index * np.pi / epochs)) * 0.5
return lr.astype(np.float32)
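    # Annotation: standard cosine annealing,
    #   lr(i) = end_lr + (start_lr - end_lr) * (1 + cos(i * pi / epochs)) / 2,
    # so lr(0) = start_lr and lr decays smoothly toward end_lr as i -> epochs.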
class WarmUPScheduler(LRScheduler):
def __init__(self, optimizer, warmup, normal, epochs=50, last_epoch=-1):
        warmup = warmup.lr_spaces
normal = normal.lr_spaces
self.lr_spaces = np.concatenate([warmup, normal])
self.start_lr = normal[0]
super(WarmUPScheduler, self).__init__(optimizer, last_epoch)
LRs = {
'log': LogScheduler,
'step': StepScheduler,
'multi-step': MultiStepScheduler,
'linear': LinearStepScheduler,
'cos': CosStepScheduler}
def _build_lr_scheduler(optimizer, config, epochs=50, last_epoch=-1):
return LRs[config.TYPE](optimizer, last_epoch=last_epoch,
epochs=epochs, **config.KWARGS)
def _build_warm_up_scheduler(optimizer, epochs=50, last_epoch=-1):
warmup_epoch = cfg.TRAIN.LR_WARMUP.EPOCH
sc1 = _build_lr_scheduler(optimizer, cfg.TRAIN.LR_WARMUP,
warmup_epoch, last_epoch)
sc2 = _build_lr_scheduler(optimizer, cfg.TRAIN.LR,
epochs - warmup_epoch, last_epoch)
return WarmUPScheduler(optimizer, sc1, sc2, epochs, last_epoch)
def build_lr_scheduler(optimizer, epochs=50, last_epoch=-1):
    if cfg.TRAIN.LR_WARMUP.WARMUP:
        return _build_warm_up_scheduler(optimizer, epochs, last_epoch)
    else:
        return _build_lr_scheduler(optimizer, cfg.TRAIN.LR,
                                   epochs, last_epoch)
if __name__ == '__main__':
import torch.nn as nn
from torch.optim import SGD
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(10, 10, kernel_size=3)
net = Net().parameters()
optimizer = SGD(net, lr=0.01)
    # The schedulers are exercised directly below; build_lr_scheduler() reads
    # the global cfg, so it cannot take these ad-hoc configs as arguments.
    lr = StepScheduler(optimizer, start_lr=0.01, step=10, mult=0.1)
    print(lr)
    lr = LogScheduler(optimizer, start_lr=0.03, end_lr=5e-4)
    print(lr)
    lr = MultiStepScheduler(optimizer, start_lr=0.01, mult=0.1,
                            steps=[10, 15, 20])
    print(lr)
    lr = CosStepScheduler(optimizer, start_lr=0.01, end_lr=0.0005)
    print(lr)
    # warm-up: a short step ramp followed by the normal log schedule
    warmup_sc = StepScheduler(optimizer, start_lr=0.001, end_lr=0.03,
                              step=1, epochs=5)
    normal_sc = LogScheduler(optimizer, start_lr=0.03, end_lr=5e-4, epochs=50)
    lr = WarmUPScheduler(optimizer, warmup_sc, normal_sc, epochs=55)
    print(lr)
    lr.step()
    print(lr.last_epoch)
| 7,253 | 31.097345 | 107 | py |
HDN | HDN-master/hdn/utils/model_load.py | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import torch
from memory_profiler import profile
logger = logging.getLogger('global')
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
# filter 'num_batches_tracked'
missing_keys = [x for x in missing_keys
if not x.endswith('num_batches_tracked')]
if len(missing_keys) > 0:
logger.info('[Warning] missing keys: {}'.format(missing_keys))
logger.info('missing keys:{}'.format(len(missing_keys)))
if len(unused_pretrained_keys) > 0:
logger.info('[Warning] unused_pretrained_keys: {}'.format(
unused_pretrained_keys))
logger.info('unused checkpoint keys:{}'.format(
len(unused_pretrained_keys)))
logger.info('used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, \
'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters
share common prefix 'module.' '''
logger.info('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
# @profile # no memory increment
def load_pretrain(model, pretrained_path):
logger.info('load pretrained model from {}'.format(pretrained_path))
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path,
map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'],
'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
try:
check_keys(model, pretrained_dict)
except:
logger.info('[Warning]: using pretrain as features.\
Adding "features." as prefix')
new_dict = {}
for k, v in pretrained_dict.items():
k = 'features.' + k
new_dict[k] = v
pretrained_dict = new_dict
check_keys(model, pretrained_dict)
    # drop the refinement branches; filtering on 'rf' also covers 'rf_lp' keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if 'rf' not in k}
    model.load_state_dict(pretrained_dict, strict=False)
return model
# @profile
def restore_from(model, optimizer, ckpt_path):
device = torch.cuda.current_device()
ckpt = torch.load(ckpt_path,
map_location=lambda storage, loc: storage.cuda(device))
    epoch = 0  # ckpt['epoch'] is ignored; the epoch counter restarts from 0
model_state_dict = model.state_dict()
ckpt_model_dict = remove_prefix(ckpt['state_dict'], 'module.')
    check_keys(model, ckpt_model_dict)
    # merge the checkpoint weights into the current state dict and load it
    pretrained_dict = ckpt_model_dict
    model_state_dict.update(pretrained_dict)
model.load_state_dict(model_state_dict)
    # note: the optimizer state is not restored here
return model, optimizer, epoch
| 4,183 | 36.026549 | 108 | py |
HDN | HDN-master/hdn/utils/point.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
"""
cpu version
"""
#generate grid for NM
def generate_points(stride, size):
ori = - (size // 2) * stride # -96
x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)],
[ori + stride * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
#generate grid for LP
def generate_points_lp(stride_w, stride_h, size):
ori_x = - (size // 2) * stride_w # -96
ori_y = - (size // 2) * stride_h # -96
x, y = np.meshgrid([ori_x + stride_w * dx for dx in np.arange(0, size)],
[ori_y + stride_h * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
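def _example_generate_points():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): stride-8 points on a 25x25 grid span [-96, 96] on both axes and
    # come back as a flat, row-major (625, 2) array.
    pts = generate_points(8, 25)
    return pts  # pts[0] == [-96., -96.], pts[-1] == [96., 96.]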
"""
gpu version
"""
def lp_pick(cls, loc, CLS_OUT_CHANNELS, STRIDE, STRIDE_LP, OUTPUT_SIZE_LP, MAP_SIZE):
sizes = cls.size()#[batch_size, 2, 13, 13]
batch = sizes[0]
score = cls.view(batch, CLS_OUT_CHANNELS, -1).permute(0, 2, 1)#[batch_size,13*13,2]
best_idx = torch.argmax(score.softmax(2)[:, :, 1], 1)#batch_size
idx = best_idx.unsqueeze(1).unsqueeze(2)
delta = loc.view(batch, 4, -1).permute(0, 2, 1)#torch.Size([8, 4, 13, 13])-> torch.Size([8, 13*13, 4])
dummy = idx.expand(batch, 1, delta.size(2)) ##torch.Size([batch_size, 1, 4])
points = generate_points(STRIDE, OUTPUT_SIZE_LP)
point = torch.from_numpy(points).cuda()
point = point.expand(batch, point.size(0), point.size(1))#torch.Size([batch_size, 625, 2])
delta = torch.gather(delta, 1, dummy).squeeze(1)
point = torch.gather(point, 1, dummy[:, :, 0:2]).squeeze(1)
scale = point[:, 0] - delta[:, 0] * STRIDE_LP
rot = point[:, 1] - delta[:, 2] * STRIDE_LP
rot = rot * (2 * np.pi / MAP_SIZE)
mag = np.log(MAP_SIZE / 2) / MAP_SIZE
scale = torch.exp(scale * mag)
return scale, rot
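# Worked decoding example for lp_pick (hedged; numbers assume MAP_SIZE == 127):
# a peak displaced dx pixels along the log-radius axis decodes to
#   scale = exp(dx * log(127 / 2) / 127) ~= exp(0.0327 * dx)
# and a dy displacement along the angle axis decodes to
#   rot = dy * 2 * pi / 127 radians.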
def get_center(self, score_o, loc_c_o, label_c_o, points, REFINE_CLS_POS_THRESH):
"""
:param score_o: score [28 25 25 2]
:param loc_c_o: loc_c [28 2 25 25]
:param label_c_o: label_c[28 2 25 25]
:param points:
:return:
"""
score = score_o.clone().detach().permute(0,3,1,2)
loc_c = loc_c_o.clone().detach()
label_c = label_c_o.clone().detach()
score = self._convert_score(score)#(28, 625)
pred_c = self._convert_c(loc_c, points)#(28, 2, 625)
        label_c = self._convert_c(label_c, points)
best_idx = score.argmax(1) #28
tmp_idx = range(0,best_idx.shape[0])
cls_pos = score[tmp_idx, best_idx].gt(REFINE_CLS_POS_THRESH).nonzero().squeeze()
pred_c = pred_c[tmp_idx, :, best_idx] #(28, 2)
label_c = label_c[tmp_idx, :, best_idx]#(28, 2)
best_idx = best_idx[cls_pos]
pred_c = pred_c[cls_pos]
label_c = label_c[cls_pos]
return best_idx, pred_c, label_c, cls_pos
class Point:
"""
This class generate points.
"""
def __init__(self, stride, size, image_center):
self.stride = stride
self.size = size
self.image_center = image_center
self.points = self.generate_points(self.stride, self.size, self.image_center)
def generate_points(self, stride, size, im_c):
ori = im_c - size // 2 * stride
x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)],
[ori + stride * dy for dy in np.arange(0, size)])
points = np.zeros((2, size, size), dtype=np.float32)
points[0, :, :], points[1, :, :] = x.astype(np.float32), y.astype(np.float32)
return points
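# Illustrative usage (hedged; the values follow the common 255-pixel search
# region convention used elsewhere in this repo):
#   point = Point(8, 25, 255 // 2)
#   point.points.shape  # (2, 25, 25): x/y coordinates of every grid cell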
| 3,894 | 37.186275 | 106 | py |
HDN | HDN-master/hdn/utils/homo_utils.py | import torch
import torch.nn as nn
import numpy as np
import cv2
# L2 loss used by getBatchHLoss below; it was referenced but never defined
# in this module (nn.MSELoss with mean reduction is assumed)
criterion_l2 = nn.MSELoss()
def DLT_solve(src_p, off_set):
# src_p: shape=(bs, n, 4, 2)
# off_set: shape=(bs, n, 4, 2)
# can be used to compute mesh points (multi-H)
bs, _ = src_p.shape
divide = int(np.sqrt(len(src_p[0]) / 2) - 1)
row_num = (divide + 1) * 2
for i in range(divide):
for j in range(divide):
h4p = src_p[:, [2 * j + row_num * i, 2 * j + row_num * i + 1,
2 * (j + 1) + row_num * i, 2 * (j + 1) + row_num * i + 1,
2 * (j + 1) + row_num * i + row_num, 2 * (j + 1) + row_num * i + row_num + 1,
2 * j + row_num * i + row_num, 2 * j + row_num * i + row_num + 1]].reshape(bs, 1, 4, 2)
pred_h4p = off_set[:, [2 * j + row_num * i, 2 * j + row_num * i + 1,
2 * (j + 1) + row_num * i, 2 * (j + 1) + row_num * i + 1,
2 * (j + 1) + row_num * i + row_num, 2 * (j + 1) + row_num * i + row_num + 1,
2 * j + row_num * i + row_num, 2 * j + row_num * i + row_num + 1]].reshape(bs, 1, 4,
2)
if i + j == 0:
src_ps = h4p
off_sets = pred_h4p
else:
src_ps = torch.cat((src_ps, h4p), axis=1)
off_sets = torch.cat((off_sets, pred_h4p), axis=1)
bs, n, h, w = src_ps.shape
N = bs * n
src_ps = src_ps.reshape(N, h, w)
off_sets = off_sets.reshape(N, h, w)
dst_p = src_ps + off_sets
ones = torch.ones(N, 4, 1)
if torch.cuda.is_available():
ones = ones.cuda()
xy1 = torch.cat((src_ps, ones), 2)
zeros = torch.zeros_like(xy1)
if torch.cuda.is_available():
zeros = zeros.cuda()
xyu, xyd = torch.cat((xy1, zeros), 2), torch.cat((zeros, xy1), 2)
M1 = torch.cat((xyu, xyd), 2).reshape(N, -1, 6)
M2 = torch.matmul(
dst_p.reshape(-1, 2, 1),
src_ps.reshape(-1, 1, 2),
).reshape(N, -1, 2)
A = torch.cat((M1, -M2), 2)
b = dst_p.reshape(N, -1, 1)
Ainv = torch.inverse(A)
h8 = torch.matmul(Ainv, b).reshape(N, 8)
H = torch.cat((h8, ones[:, 0, :]), 1).reshape(N, 3, 3)
H = H.reshape(bs, n, 3, 3)
return H
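def _example_dlt_identity():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): with zero corner offsets DLT_solve recovers, up to numerics, the
    # identity homography. Corners are row-major: (TL, TR) then (BL, BR).
    src_p = torch.tensor([[0., 0., 10., 0., 0., 10., 10., 10.]])
    if torch.cuda.is_available():
        src_p = src_p.cuda()
    H = DLT_solve(src_p, torch.zeros_like(src_p))
    return H  # shape (1, 1, 3, 3), approximately torch.eye(3)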
def transformer(U, theta, out_size, **kwargs):
"""Spatial Transformer Layer
Implements a spatial transformer layer as described in [1]_.
Based on [2]_ and edited by David Dao for Tensorflow.
Parameters
----------
U : float
The output of a convolutional net should have the
shape [num_batch, height, width, num_channels].
theta: float
The output of the
localisation network should be [num_batch, 6].
out_size: tuple of two ints
The size of the output of the network (height, width)
References
----------
.. [1] Spatial Transformer Networks
Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
Submitted on 5 Jun 2015
.. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
Notes
-----
To initialize the network to the identity transform init
``theta`` to :
identity = np.array([[1., 0., 0.],
[0., 1., 0.]])
identity = identity.flatten()
theta = tf.Variable(initial_value=identity)
"""
def _repeat(x, n_repeats):
rep = torch.ones([n_repeats, ]).unsqueeze(0)
rep = rep.int()
x = x.int()
x = torch.matmul(x.reshape([-1, 1]), rep)
return x.reshape([-1])
def _interpolate(im, x, y, out_size, scale_h):
num_batch, num_channels, height, width = im.size()
height_f = height
width_f = width
out_height, out_width = out_size[0], out_size[1]
zero = 0
max_y = height - 1
max_x = width - 1
if scale_h:
x = (x + 1.0) * (width_f) / 2.0
y = (y + 1.0) * (height_f) / 2.0
# do sampling
x0 = torch.floor(x).int()
x1 = x0 + 1
y0 = torch.floor(y).int()
y1 = y0 + 1
x0 = torch.clamp(x0, zero, max_x)
x1 = torch.clamp(x1, zero, max_x)
y0 = torch.clamp(y0, zero, max_y)
y1 = torch.clamp(y1, zero, max_y)
dim2 = torch.from_numpy(np.array(width))
dim1 = torch.from_numpy(np.array(width * height))
base = _repeat(torch.arange(0, num_batch) * dim1, out_height * out_width)
if torch.cuda.is_available():
dim2 = dim2.cuda()
dim1 = dim1.cuda()
y0 = y0.cuda()
y1 = y1.cuda()
x0 = x0.cuda()
x1 = x1.cuda()
base = base.cuda()
base_y0 = base + y0 * dim2
base_y1 = base + y1 * dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# channels dim
im = im.permute(0, 2, 3, 1)
im_flat = im.reshape([-1, num_channels]).float()
idx_a = idx_a.unsqueeze(-1).long()
idx_a = idx_a.expand(height * width * num_batch, num_channels)
Ia = torch.gather(im_flat, 0, idx_a)
idx_b = idx_b.unsqueeze(-1).long()
idx_b = idx_b.expand(height * width * num_batch, num_channels)
Ib = torch.gather(im_flat, 0, idx_b)
idx_c = idx_c.unsqueeze(-1).long()
idx_c = idx_c.expand(height * width * num_batch, num_channels)
Ic = torch.gather(im_flat, 0, idx_c)
idx_d = idx_d.unsqueeze(-1).long()
idx_d = idx_d.expand(height * width * num_batch, num_channels)
Id = torch.gather(im_flat, 0, idx_d)
x0_f = x0.float()
x1_f = x1.float()
y0_f = y0.float()
y1_f = y1.float()
wa = torch.unsqueeze(((x1_f - x) * (y1_f - y)), 1)
wb = torch.unsqueeze(((x1_f - x) * (y - y0_f)), 1)
wc = torch.unsqueeze(((x - x0_f) * (y1_f - y)), 1)
wd = torch.unsqueeze(((x - x0_f) * (y - y0_f)), 1)
output = wa * Ia + wb * Ib + wc * Ic + wd * Id
return output
def _meshgrid(height, width, scale_h):
if scale_h:
x_t = torch.matmul(torch.ones([height, 1]),
torch.transpose(torch.unsqueeze(torch.linspace(-1.0, 1.0, width), 1), 1, 0))
y_t = torch.matmul(torch.unsqueeze(torch.linspace(-1.0, 1.0, height), 1),
torch.ones([1, width]))
else:
            x_t = torch.matmul(torch.ones([height, 1]),
                               torch.transpose(torch.unsqueeze(torch.linspace(0.0, float(width), width), 1), 1, 0))
            y_t = torch.matmul(torch.unsqueeze(torch.linspace(0.0, float(height), height), 1),
                               torch.ones([1, width]))
x_t_flat = x_t.reshape((1, -1)).float()
y_t_flat = y_t.reshape((1, -1)).float()
ones = torch.ones_like(x_t_flat)
grid = torch.cat([x_t_flat, y_t_flat, ones], 0)
if torch.cuda.is_available():
grid = grid.cuda()
return grid
def _transform(theta, input_dim, out_size, scale_h):
num_batch, num_channels, height, width = input_dim.size()
# Changed
theta = theta.reshape([-1, 3, 3]).float()
out_height, out_width = out_size[0], out_size[1]
grid = _meshgrid(out_height, out_width, scale_h)
grid = grid.unsqueeze(0).reshape([1, -1])
shape = grid.size()
grid = grid.expand(num_batch, shape[1])
grid = grid.reshape([num_batch, 3, -1])
T_g = torch.matmul(theta, grid)
x_s = T_g[:, 0, :]
y_s = T_g[:, 1, :]
t_s = T_g[:, 2, :]
t_s_flat = t_s.reshape([-1])
# smaller
small = 1e-7
smallers = 1e-6 * (1.0 - torch.ge(torch.abs(t_s_flat), small).float())
t_s_flat = t_s_flat + smallers
condition = torch.sum(torch.gt(torch.abs(t_s_flat), small).float())
# Ty changed
x_s_flat = x_s.reshape([-1]) / t_s_flat
y_s_flat = y_s.reshape([-1]) / t_s_flat
input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size, scale_h)
output = input_transformed.reshape([num_batch, out_height, out_width, num_channels])
return output, condition
img_w = U.size()[2]
img_h = U.size()[1]
scale_h = True
output, condition = _transform(theta, U, out_size, scale_h)
return output, condition
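def _example_transformer_identity():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): warping with the identity homography reproduces the input up to
    # bilinear sampling at the borders. Input is NCHW, output comes back NHWC.
    U = torch.rand(1, 1, 8, 8)
    theta = torch.eye(3).reshape(1, 9)
    if torch.cuda.is_available():
        U, theta = U.cuda(), theta.cuda()
    warped, _ = transformer(U, theta, (8, 8))
    return warped  # shape (1, 8, 8, 1)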
def transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile, I1, patch_indices, batch_indices_tensor):
# Transform H_mat since we scale image indices in transformer
batch_size, num_channels, img_h, img_w = I1.size()
if torch.cuda.is_available():
M_tile_inv = M_tile_inv.cuda()
H_mat = torch.matmul(torch.matmul(M_tile_inv, H_mat), M_tile)
# Transform image 1 (large image) to image 2
out_size = (img_h, img_w)
warped_images, _ = transformer(I1, H_mat, out_size)
warped_images_flat = warped_images.reshape([-1, num_channels])
patch_indices_flat = patch_indices.reshape([-1])
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
pixel_indices = pixel_indices.unsqueeze(-1).long()
pixel_indices = pixel_indices.expand(patch_size_h * patch_size_w * batch_size, num_channels)
pred_I2_flat = torch.gather(warped_images_flat, 0, pixel_indices)
pred_I2 = pred_I2_flat.reshape([batch_size, patch_size_h, patch_size_w, num_channels])
return pred_I2.permute(0, 3, 1, 2)
def getBatchHLoss(H, H_inv):
batch_size = H.size()[0]
Identity = torch.eye(3)
if torch.cuda.is_available():
Identity = Identity.cuda()
Identity = Identity.unsqueeze(0).expand(batch_size, 3, 3)
return criterion_l2(H.bmm(H_inv), Identity)
def display_using_tensorboard(I, I2_ori_img, I2, pred_I2, I2_dataMat_CnnFeature, pred_I2_dataMat_CnnFeature, triMask,
loss_map, writer):
I1_ori_img = cv2.normalize(I.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
I2_ori_img_ = cv2.normalize(I2_ori_img.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
input_I2 = cv2.normalize(I2.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
pred_I2 = cv2.normalize(pred_I2.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
I2_channel_1 = cv2.normalize(I2_dataMat_CnnFeature.cpu().detach().numpy()[0, 0, ...], None, 0, 255,
cv2.NORM_MINMAX, cv2.CV_8U)
pred_I2_channel_1 = cv2.normalize(pred_I2_dataMat_CnnFeature.cpu().detach().numpy()[0, 0, ...], None, 0,
255, cv2.NORM_MINMAX, cv2.CV_8U)
mask_1 = cv2.normalize(triMask.cpu().detach().numpy()[0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
loss_fig = cv2.normalize(loss_map.cpu().detach().numpy()[0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
writer.add_image('I1 and I2',
I1_ori_img,
global_step=1,
dataformats='HW')
writer.add_image('I1 and I2',
I2_ori_img_,
global_step=2,
dataformats='HW')
writer.add_image('I2 and pred_I2',
input_I2,
global_step=1,
dataformats='HW')
writer.add_image('I2 and pred_I2',
pred_I2,
global_step=2,
dataformats='HW')
writer.add_image('I2 and pred I2 feature_1',
I2_channel_1,
global_step=1,
dataformats='HW')
writer.add_image('I2 and pred I2 feature_1',
pred_I2_channel_1,
global_step=2,
dataformats='HW')
writer.add_image('loss_map and mask',
loss_fig,
global_step=1,
dataformats='HW')
writer.add_image('loss_map and mask',
mask_1,
global_step=2,
dataformats='HW')
| 12,410 | 35.183673 | 119 | py |
HDN | HDN-master/hdn/utils/transform.py | #Copyright 2021, XinruiZhan
import cv2
import matplotlib.pyplot as plt
import math
import numpy as np
import torch
def img_padding(img, sx, sy):
"""
add padding to an image [w,h] => [w+sx*2, h+sy*2]
:param img:
:param sx:
:param sy:
:return:
"""
padd_w = img.shape[1] + sx*2
padd_h = img.shape[0] + sy*2
    mapping_shift = np.array([[1, 0, sx],
                              [0, 1, sy],
                              [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
    sh_img = cv2.warpAffine(img, project @ mapping_shift, (padd_w, padd_h),  # cv2 dsize is (width, height)
                            flags=2, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0))
return sh_img
def img_shift(img, sx, sy):
"""
shifting the image
:param img:
:param sx: shift x
:param sy: shift y
:return:
"""
    mapping_shift = np.array([[1, 0, sx],
                              [0, 1, sy],
                              [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
    sh_img = cv2.warpAffine(img, project @ mapping_shift, (img.shape[1], img.shape[0]),  # cv2 dsize is (width, height)
                            flags=2, borderMode=cv2.BORDER_REPLICATE)
return sh_img
def img_shift_crop_w_h(img, sx, sy, nw, nh):
"""
template obj center is placed on image center.
use [nw,nh] to corp the image, obj still on the center.
:param img:
:param sx:
:param sy:
:param nw:
:param nh:
:return:
"""
crop_shift_x = sx - img.shape[1]/2 + nh/2
crop_shift_y = sy - img.shape[0]/2 + nw/2
    mapping_shift = np.array([[1, 0, crop_shift_x],
                              [0, 1, crop_shift_y],
                              [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
sh_img = cv2.warpAffine(img, project @ mapping_shift, (nw, nh), \
flags=2, borderMode=cv2.BORDER_REPLICATE)
return sh_img
def img_rot_around_center(img, cx, cy, w, h, rot):
"""
rotate the image(center as the pivot)
:param img:
:param cx:
:param cy:
:param w:
:param h:
:param rot:
:return:
"""
aa = cx
bb = cy # rot center
cc = math.cos(rot)
ss = math.sin(rot)
"""
crop image with rot
we first do the rotation before the original _crop_hwc
[ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
[ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c), cos(c), 0][ 0, 1, -b]
[ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
"""
    mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss],
                            [ss, cc, bb - bb * cc - aa * ss],
                            [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
rot_img = cv2.warpAffine(img, project @ mapping_rot, (w, h),
flags=2,borderMode=cv2.BORDER_REPLICATE)
return rot_img
def img_rot_scale_around_center(img, cx, cy, w, h, rot, scale):
"""
rotate and scale the image(center as the pivot)
:param img:
:param cx:
:param cy:
:param w:
:param h:
:param rot:
:param scale:
:return:
"""
a = scale #isometry
b = scale
c = cx*(1-scale)
d = cy*(1-scale)
    mapping = np.array([[a, 0, c],
                        [0, b, d],
                        [0, 0, 1]]).astype(np.float64)
aa = cx
bb = cy # rot center
cc = math.cos(rot)
ss = math.sin(rot)
"""
crop image with rot
we first do the rotation before the original _crop_hwc
[ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
[ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c), cos(c), 0][ 0, 1, -b]
[ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
"""
    mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss],
                            [ss, cc, bb - bb * cc - aa * ss],
                            [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
rot_img = cv2.warpAffine(img, project @ mapping_rot@mapping, (w, h),
borderMode=cv2.BORDER_REPLICATE)
return rot_img
def img_shift_left_top_2_center(img):
# shift the img left_top point to center
    center = [(img.shape[1] - 1) / 2, (img.shape[0] - 1) / 2]
    mapping_shift = np.array([[1, 0, center[0]],
                              [0, 1, center[1]],
                              [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
    rot_img = cv2.warpAffine(img, project @ mapping_shift,
                             (img.shape[1], img.shape[0]))  # cv2 dsize is (width, height)
return rot_img
def get_hamming_window(w, h, rot, sx, sy, out_size_w, out_size_h):
"""
A hamming window map
:param w: init ellipse w
:param h: init ellipse h
:param rot: rotation angle(rad)
:param sx: center x coordinate of the window
:param sy: center y-coordinate of the window
:param out_size_w: output window size width
:param out_size_h: output window size height
:return:
"""
alpha = 1
w = math.floor(w * alpha)
h = math.floor(h * alpha)
ham_window = np.outer(np.hamming(h), np.hamming(w))
sx -= w / 2
sy -= h / 2
aa = w / 2
bb = h / 2 # rot center
cc = math.cos(rot)
ss = math.sin(rot)
# """
# we first do the rotation before the original _crop_hwc
# [ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
# [ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c), cos(c), 0][ 0, 1, -b]
# [ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
# """
    mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss + sx],
                            [ss, cc, bb - bb * cc - aa * ss + sy],
                            [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
new_ham_window = cv2.warpAffine(ham_window, project @ mapping_rot, (out_size_w, out_size_h),
)
# plt.close('all')
# plt.imshow(new_ham_window)
# plt.show()
# plt.close('all')
return new_ham_window
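def _example_hamming_window():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): a 40x30 Hamming window rotated by 45 degrees and centered at
    # (64, 64) inside a 128x128 response map.
    win = get_hamming_window(40, 30, np.pi / 4, 64, 64, 128, 128)
    return win  # shape (128, 128) with its peak near (64, 64)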
def get_mask_window(w, h, rot, sx, sy, out_size_w, out_size_h):
"""
A hamming window map
:param w: init ellipse w
:param h: init ellipse h
:param rot: rotation angle(rad)
:param sx: center x coordinate of the window
:param sy: center y-coordinate of the window
:param out_size_w: output window size width
:param out_size_h: output window size height
:return:
"""
alpha = 1
w = math.floor(w * alpha)
h = math.floor(h * alpha)
window = np.ones([h,w]).astype('float32')
sx -= w / 2
sy -= h / 2
aa = w / 2
bb = h / 2 # rot center
cc = math.cos(rot)
ss = math.sin(rot)
# """
# we first do the rotation before the original _crop_hwc
# [ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
# [ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c), cos(c), 0][ 0, 1, -b]
# [ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
# """
    mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss + sx],
                            [ss, cc, bb - bb * cc - aa * ss + sy],
                            [0, 0, 1]]).astype(np.float64)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(np.float64)
new_ham_window = cv2.warpAffine(window, project @ mapping_rot, (out_size_w, out_size_h),
)
# plt.close('all')
# plt.imshow(new_ham_window)
# plt.show()
# plt.close('all')
return new_ham_window
def homo_add_shift(H, shift):
H[0][2] += shift[0]
H[1][2] += shift[1]
return H
def rot_scale_around_center_shift_tran(cx, cy, rot, scale, sx, sy):
"""
this is designed for the simi estimation, after we got the sim estimation, if we need the transformed points, we need the homography matrix.
this function is used to calculate the homography accroding to the rotation scale around center etc.
:param cx: center x
:param cy: center y
:param rot: rot angle, clockwise
:param scale: scale ratio
:param sx: shift x
:param sy: shift y
:return:
"""
# scale_H = rot_scale_around_center_shift_tran(cfg.TRACK.EXEMPLAR_SIZE // 2, cfg.TRACK.EXEMPLAR_SIZE // 2, 0,
# crop_points_w / cfg.TRACK.EXEMPLAR_SIZE, 0, 0)
    mapping_shift = np.array([[1, 0, sx],
                              [0, 1, sy],
                              [0, 0, 1]]).astype(np.float64)
tran = mapping_shift
if abs(scale)>0 and scale!=1:
a = scale
b = scale
c = cx*(1-scale)
d = cy*(1-scale)
        mapping_scale = np.array([[a, 0, c],
                                  [0, b, d],
                                  [0, 0, 1]]).astype(np.float64)
tran = mapping_scale @ tran
if abs(rot) > 0:
aa = cx
bb = cy # rot center
cc = math.cos(rot)
ss = math.sin(rot)
"""
we first do the rotation before the original _crop_hwc
[ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
[ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c), cos(c), 0][ 0, 1, -b]
[ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
"""
        mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss],
                                [ss, cc, bb - bb * cc - aa * ss],
                                [0, 0, 1]]).astype(np.float64)
tran = mapping_rot @ tran
# tran = mapping_rot @ mapping_scale @ mapping_shift
return tran
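def _example_center_pivot():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): a rotation/scale built around (cx, cy) with zero shift must map
    # the pivot onto itself.
    H = rot_scale_around_center_shift_tran(63.5, 63.5, np.pi / 6, 1.5, 0, 0)
    c = H @ np.array([63.5, 63.5, 1.0])
    return c  # np.allclose(c[:2], [63.5, 63.5]) should hold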
def shift_tran(sx, sy):
    mapping_shift = np.array([[1, 0, sx],
                              [0, 1, sy],
                              [0, 0, 1]]).astype(np.float64)
return mapping_shift
def img_proj_trans(img, trans, w, h):
rot_img = cv2.warpPerspective(img, trans, (w, h),
borderMode=cv2.BORDER_REPLICATE)
return rot_img
def find_homo_by_imgs_opencv_ransac(im1, im2):
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY).astype('uint8')
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY).astype('uint8')
# if len(im1.shape) == 3:
# im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
# im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# else:
# im1Gray = im1
# im2Gray = im2
# Detect ORB features and compute descriptors.
# plt.imshow(im1Gray)
# plt.show()
# im1Gray = np.expand_dims(im1Gray, 2)
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
# imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
# cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
# height, width, channels = im2.shape
# im1Reg = cv2.warpPerspective(im1, h, (width, height))
return h
def decompose_affine(H):
"""
[ A, B] [ cos(c), -sin(c)][ 1, m][sx 0]
[ C, D] = [ sin(c), cos(c)][ 0, 1][0 sy]
:param H:
:return:
"""
A = H[0][0]
B = H[0][1]
C = H[1][0]
D = H[1][1]
sx = np.sqrt(A**2 + C**2)
theta = np.arctan2(C, A)
msy = B*np.cos(theta) + D*np.sin(theta)
if np.sin(theta) != 0:
sy = (msy * np.cos(theta) - B) / np.sin(theta)
else:
sy = (D - msy * np.sin(theta)) / np.cos(theta)
m = msy / sy
sh_x = H[0][2]
sh_y = H[1][2]
return sx, sy, theta, sh_x, sh_y, m
def compose_affine_homo_RKS(sx, sy, theta, sh_x, sh_y, m =0):
"""
compose affine homography matrix accroding to rotation, K, shift matrix
[ A, B] [ cos(c), -sin(c)][ 1, m][sx 0]
[ C, D] = [ sin(c), cos(c)][ 0, 1][0 sy]
:param H:
:return:
"""
p = np.cos(theta)
q = np.sin(theta)
A = sx * p
B = sy * m * p - sy * q
C = sx * q
D = sy * m * q + sy * p
    H = np.array([[A, B, sh_x],
                  [C, D, sh_y],
                  [0, 0, 1]]).astype(np.float32)  # recover the square to a rectangle
return H
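def _example_affine_roundtrip():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): compose_affine_homo_RKS is the inverse of decompose_affine.
    H = compose_affine_homo_RKS(1.2, 0.9, 0.3, 5.0, -2.0, 0.1)
    sx, sy, theta, sh_x, sh_y, m = decompose_affine(H)
    # np.allclose([sx, sy, theta, sh_x, sh_y, m],
    #             [1.2, 0.9, 0.3, 5.0, -2.0, 0.1]) should hold
    return sx, sy, theta, sh_x, sh_y, m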
"""
GPU version (with batch)
"""
#combine_affine if center is origin, designed for image, because image is shrink,but groundtruth probably not.
def combine_affine_c0(nm_shift, scale, rot, scale_h, in_sz, out_sz):
# tmp-> search transformation
#first, move the obj to img center, second, scale and rot. (sear_obj --rot--> --shift--> tmp_obj)
"""
crop image with similarity
we first do the rotation before the original _crop_hwc
[ cos(c), -sin(c), a - a*cos(c) + b*sin(c)] [ 1, 0, a][ sc*cos(c), -sc*sin(c), 0][ 1, 0, -a]
[ sin(c), cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sc*sin(c), sc*cos(c), 0][ 0, 1, -b]
[ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1]
"""
cc = torch.cos(rot)
ss = torch.sin(rot)
sc = out_sz / in_sz * scale
a = sc*cc
b = -sc*ss
c = sc*ss
d = sc*cc
if scale_h:
affine_matrix = torch.stack([a, b, nm_shift[:, 0]/out_sz, \
c, d, nm_shift[:, 1]/out_sz]) \
.permute(1, 0).reshape([-1, 2, 3]).float() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
else:
affine_matrix = torch.stack([a, b, nm_shift[:, 0], \
c, d, nm_shift[:, 1]]) \
            .permute(1, 0).reshape([-1, 2, 3]).float()  # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
return affine_matrix
#combine_affine if center is origin, designed for image, because image is shrink,but groundtruth probably not.
def combine_affine_c0_v2(cx, cy, nm_shift, scale, rot, scale_h, in_sz, out_sz):
# tmp-> search transformation
#first, move the obj to img center, second, scale and rot. (sear_obj --rot--> --shift--> tmp_obj)
#nm_shift: the shift origin as the reference point
"""
crop image with similarity
[ sc*cos(c), -sc*sin(c), -o(cx)-p(cy)+sx] [ 1, 0, sx][ o, p, 0][ 1, 0, -cx] [ 1, 0, sx][ sc*cos(c), -sc*sin(c), 0][ 1, 0, -cx]
[ sc*sin(c), sc*cos(c), -q(cx)-l(cy)+sy] <= [ 0, 1, sy][ q, l, 0][ 0, 1, -cy] <= [ 0, 1, sy][ sc*sin(c), sc*cos(c), 0][ 0, 1, -cy]
[ 0, 0, 1] [ 0, 0, 1][ 0, 0, 1][ 0, 0, 1] [ 0, 0, 1 ][ 0, 0, 1][ 0, 0, 1 ]
"""
cc = torch.cos(rot)
ss = torch.sin(rot)
sc = out_sz / in_sz * scale
a = sc*cc
b = -sc*ss
c = sc*ss
d = sc*cc
cx_s = (cx - out_sz/2) * in_sz / out_sz # i don't know why we need to scale here
cy_s = (cy - out_sz/2) * in_sz / out_sz
sx = nm_shift[:, 0]
sy = nm_shift[:, 1]
# if scale_h:
# affine_matrix = torch.stack([a, b, (-a*(sx)-b*(sy)+cx)/out_sz, \
# c, d, (-c*(sx)-d*(sy)+cy)/out_sz]) \
# .permute(1, 0).reshape([-1, 2, 3]).float() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
# else:
# affine_matrix = torch.stack([a, b, (-a*(sx)-b*(sy)+cx), \
# c, d, (-c*(sx)-d*(sy)+cy)]) \
# .permute(1, 0).reshape([-1, 2, 3]).floatl() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
if scale_h:
affine_matrix = torch.stack([a, b, (-a*(cx_s)-b*(cy_s)+sx)/out_sz, \
c, d, (-c*(cx_s)-d*(cy_s)+sy)/out_sz]) \
.permute(1, 0).reshape([-1, 2, 3]).float() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
else:
affine_matrix = torch.stack([a, b, (-a*(cx_s)-b*(cy_s)+sx), \
c, d, (-c*(cx_s)-d*(cy_s)+sy)]) \
.permute(1, 0).reshape([-1, 2, 3]).float() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
return affine_matrix
#combine_affine if left-top point is origin, designed for image, because image is shrink,but groundtruth probably not.
def combine_affine_lt0(cx, cy, nm_shift, scale, rot, in_sz, o_sz):
"""
cal search -> temp
:param cx: template center (we estimate the image move, not the obj center move)
:param cy:
:param nm_shift: search shift
:param scale:
:param rot:
:param in_sz: 255
:param o_sz: 127
:return:
"""
#first, move the obj center to origin , second, scale and rot. then move back to tmp center,
"""
crop image with similarity
[ sc*cos(c), -sc*sin(c), -o(sx+cx)-p(sy+cy)+cx] [ 1, 0, cx][ o, p, 0][ 1, 0, -sx] [ 1, 0, cx][ sc*cos(c), -sc*sin(c), 0][ 1, 0, -sx ]
[ sc*sin(c), sc*cos(c), -q(sx+cx)-l(sy+cy)+cy] <= [ 0, 1, cy][ q, l, 0][ 0, 1, -sy)] <= [ 0, 1, cy][ sc*sin(c), sc*cos(c), 0][ 0, 1, -sy ]
[ 0, 0, 1] [ 0, 0, 1 ][ 0, 0, 1][ 0, 0, 1 ] [ 0, 0, 1 ][ 0, 0, 1][ 0, 0, 1]
"""
#need to compute first shift then rot finally shift_back
cc = torch.cos(rot)
ss = torch.sin(rot)
sc = scale
o = sc*cc
p = -sc*ss
q = sc*ss
l = sc*cc
# delta_x_tmp = cx - o_sz/2
# delta_y_tmp = cy - o_sz/2
# sx = -delta_x_tmp + nm_shift[:, 0] + in_sz/2
# sy = -delta_y_tmp + nm_shift[:, 1] + in_sz/2
sx = nm_shift[:, 0] + in_sz/2
sy = nm_shift[:, 1] + in_sz/2
affine_matrix = torch.stack([o, p, -o*(sx)-p*(sy)+cx, \
q, l, -q*(sx)-l*(sy)+cy]) \
.permute(1, 0).reshape([-1, 2, 3]).float() # [6,batch_size] -> [batch_size, 6] -> [batch_size, 2, 3]
return affine_matrix
| 19,034 | 35.326336 | 147 | py |
HDN | HDN-master/hdn/utils/basic_trackers.py | import cv2
import matplotlib.pyplot as plt
import math
import numpy as np
import torch
from math import sin, cos, atan2, sqrt, degrees
from hdn.core.config import cfg
# SIFT was promoted out of the xfeatures2d contrib module in OpenCV >= 4.4
sift = cv2.SIFT_create() if hasattr(cv2, 'SIFT_create') else cv2.xfeatures2d.SIFT_create()
def find_homo_by_imgs_opencv_ORB_ransac(im1, im2):
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY).astype('uint8')
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY).astype('uint8')
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
# imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
# cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
return h
def find_homo_by_imgs_opencv_SIFT_ransac_ori(im1, im2, idx=0, keypoints1=None, descriptors1=None):
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
MIN_MATCH_COUNT = 10
im1Gray = im1
im2Gray = im2
    if keypoints1 is None:
keypoints1, descriptors1 = sift.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = sift.detectAndCompute(im2Gray, None)
    FLANN_INDEX_KDTREE = 0  # kd-tree
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
if type(descriptors2) is np.ndarray and type(descriptors1) is np.ndarray:
if descriptors2.shape[0] >=10:
matches = flann.knnMatch(descriptors1, descriptors2, k=2)
else:
M = np.identity(3).astype('float')
return M
else:
M = np.identity(3).astype('float')
# print('1',M)
return M
good = []
for m, n in matches:
if m.distance < 0.4 * n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:#original
src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 4.0)
        if not isinstance(M, np.ndarray):
            M = np.identity(3).astype('float')
else:
M = np.identity(3).astype('float')
return M
def find_homo_by_imgs_opencv_SIFT_ransac(im1, im2, idx=0):
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
MIN_MATCH_COUNT = 10
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY).astype('uint8')
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY).astype('uint8')
keypoints1, descriptors1 = sift.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = sift.detectAndCompute(im2Gray, None)
    FLANN_INDEX_KDTREE = 0  # kd-tree
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
if type(descriptors2) is np.ndarray and type(descriptors1) is np.ndarray:
if descriptors2.shape[0] >=10:
matches = flann.knnMatch(descriptors1, descriptors2, k=2)
else:
M = np.identity(3).astype('float')
return M
else:
M = np.identity(3).astype('float')
return M
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:#original
src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if not isinstance(M, np.ndarray):
            M = np.identity(3).astype('float')
    else:
        M = np.identity(3).astype('float')
    return M
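# Illustrative usage (hedged; the frames are BGR uint8 arrays from a tracker):
#   H = find_homo_by_imgs_opencv_SIFT_ransac(prev_frame, cur_frame)
# H is a 3x3 homography; the identity is returned whenever too few good
# matches survive the ratio test.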
| 4,807 | 32.158621 | 98 | py |
HDN | HDN-master/hdn/utils/distributed.py | """
distributed training method
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import logging
import torch
import torch.nn as nn
import torch.distributed as dist
from hdn.utils.log_helper import log_once
# from memory_profiler import profile  # only needed when profiling is enabled
logger = logging.getLogger('global')
def average_reduce(v):
if get_world_size() == 1:
return v
tensor = torch.cuda.FloatTensor(1)
tensor[0] = v
dist.all_reduce(tensor)
v = tensor[0] / get_world_size()
return v
class DistModule(nn.Module):
def __init__(self, module, bn_method=0):
super(DistModule, self).__init__()
self.module = module
self.bn_method = bn_method
if get_world_size() > 1:
broadcast_params(self.module)
else:
self.bn_method = 0 # single proccess
def forward(self, *args, **kwargs):
broadcast_buffers(self.module, self.bn_method)
return self.module(*args, **kwargs)
def train(self, mode=True):
super(DistModule, self).train(mode)
self.module.train(mode)
return self
def broadcast_params(model):
""" broadcast model parameters """
for p in model.state_dict().values():
dist.broadcast(p, 0)
def broadcast_buffers(model, method=0):
""" broadcast model buffers """
if method == 0:
return
world_size = get_world_size()
    for b in model.buffers():  # _all_buffers() was removed in newer PyTorch
if method == 1: # broadcast from main proccess
dist.broadcast(b, 0)
elif method == 2: # average
dist.all_reduce(b)
b /= world_size
else:
raise Exception('Invalid buffer broadcast code {}'.format(method))
inited = False
def _dist_init():
'''
    presumed environment mapping:
        ntasks: world_size (number of processes)
        proc_id: rank
'''
rank = int(os.environ['RANK'])
# rank = 0
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend='nccl')
world_size = dist.get_world_size()
return rank, world_size
def _get_local_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def dist_init():
global rank, world_size, inited
try:
rank, world_size = _dist_init()
except RuntimeError as e:
if 'public' in e.args[0]:
logger.info(e)
logger.info('Warning: use single process')
rank, world_size = 0, 1
else:
raise RuntimeError(*e.args)
inited = True
return rank, world_size
def get_rank():
if not inited:
raise(Exception('dist not inited'))
return rank
def get_world_size():
if not inited:
raise(Exception('dist not inited'))
return world_size
def reduce_gradients(model, _type='sum'):
types = ['sum', 'avg']
assert _type in types, 'gradients method must be in "{}"'.format(types)
log_once("gradients method is {}".format(_type))
if get_world_size() > 1:
for param in model.parameters():
            if param.requires_grad and param.grad is not None:
                dist.all_reduce(param.grad.data)
                if _type == 'avg':
                    param.grad.data /= get_world_size()
else:
return None | 3,606 | 23.705479 | 78 | py |
HDN | HDN-master/hdn/utils/general.py | # coding: utf-8
import imageio
import numpy as np
def geometricDistance(correspondence, h):
"""
Correspondence err
:param correspondence: Coordinate
:param h: Homography
:return: L2 distance
"""
p1 = np.transpose(np.matrix([correspondence[0][0], correspondence[0][1], 1]))
estimatep2 = np.dot(h, p1)
estimatep2 = (1/estimatep2.item(2))*estimatep2
p2 = np.transpose(np.matrix([correspondence[1][0], correspondence[1][1], 1]))
error = p2 - estimatep2
return np.linalg.norm(error)
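def _example_geometric_distance():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): a perfect correspondence under the identity homography has zero
    # reprojection error.
    h = np.eye(3)
    return geometricDistance(([10.0, 20.0], [10.0, 20.0]), h)  # 0.0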
def create_gif(image_list, gif_name, duration=0.35):
"""
create the gif
:param image_list:
:param gif_name:
:param duration:
:return:
"""
    frames = list(image_list)
    imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
return | 983 | 23.6 | 81 | py |
HDN | HDN-master/toolkit/datasets/DeepHomo.py | from torch.utils.data import Dataset
import numpy as np
import cv2
import torch
import os
def make_mesh(patch_w, patch_h):
x_flat = np.arange(0, patch_w)
x_flat = x_flat[np.newaxis, :]
y_one = np.ones(patch_h)
y_one = y_one[:, np.newaxis]
x_mesh = np.matmul(y_one, x_flat)
y_flat = np.arange(0, patch_h)
y_flat = y_flat[:, np.newaxis]
x_one = np.ones(patch_w)
x_one = x_one[np.newaxis, :]
y_mesh = np.matmul(y_flat, x_one)
return x_mesh, y_mesh
class DeepHomoTrainDataset(Dataset):
def __init__(self, name, dataset_root, exp_path, patch_w=560, patch_h=315, rho=16):
self.imgs = open(dataset_root, 'r').readlines()
self.mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
self.std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
self.patch_h = patch_h
self.patch_w = patch_w
self.WIDTH = 640
self.HEIGHT = 360
self.rho = rho
self.x_mesh, self.y_mesh = make_mesh(self.patch_w, self.patch_h)
self.train_path = os.path.join(exp_path, 'Data/Train/')
def __getitem__(self, index):
value = self.imgs[index]
img_names = value.split(' ')
img_1 = cv2.imread(self.train_path + img_names[0])
height, width = img_1.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_1 = cv2.resize(img_1, (self.WIDTH, self.HEIGHT))
img_1 = (img_1 - self.mean_I) / self.std_I
img_1 = np.mean(img_1, axis=2, keepdims=True)
img_1 = np.transpose(img_1, [2, 0, 1])
img_2 = cv2.imread(self.train_path + img_names[1][:-1])
height, width = img_2.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_2 = cv2.resize(img_2, (self.WIDTH, self.HEIGHT))
img_2 = (img_2 - self.mean_I) / self.std_I
img_2 = np.mean(img_2, axis=2, keepdims=True)
img_2 = np.transpose(img_2, [2, 0, 1])
org_img = np.concatenate([img_1, img_2], axis=0)
x = np.random.randint(self.rho, self.WIDTH - self.rho - self.patch_w)
y = np.random.randint(self.rho, self.HEIGHT - self.rho - self.patch_h)
input_tesnor = org_img[:, y: y + self.patch_h, x: x + self.patch_w]
y_t_flat = np.reshape(self.y_mesh, (-1))
x_t_flat = np.reshape(self.x_mesh, (-1))
patch_indices = (y_t_flat + y) * self.WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + self.patch_h)
bottom_right_point = (self.patch_w + x, self.patch_h + y)
top_right_point = (x + self.patch_w, y)
h4p = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
h4p = np.reshape(h4p, (-1))
org_img = torch.tensor(org_img)
input_tesnor = torch.tensor(input_tesnor)
patch_indices = torch.tensor(patch_indices)
h4p = torch.tensor(h4p)
        return (org_img, input_tesnor, patch_indices, h4p)  # original image, cropped image, grid, 4 points of the image
def __len__(self):
return len(self.imgs)
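# Illustrative usage (hedged; the list and experiment paths are placeholders):
#   dataset = DeepHomoTrainDataset('deephomo', '/path/to/Train_List.txt',
#                                  '/path/to/exp_root')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
#   org_img, patch, patch_indices, h4p = next(iter(loader))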
class DeepHomoTestDataset(Dataset):
# name, dataset_root, load_img = False
def __init__(self, name, dataset_root, patch_w=560, patch_h=315, rho=16, WIDTH=640, HEIGHT=360):
self.mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
self.std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
self.patch_h = patch_h
self.patch_w = patch_w
self.WIDTH = WIDTH
self.HEIGHT = HEIGHT
self.rho = rho
self.x_mesh, self.y_mesh = make_mesh(self.patch_w, self.patch_h)
self.work_dir = os.path.join(dataset_root, 'Data')
self.pair_list = list(open(os.path.join(self.work_dir, 'Test_List.txt')))
print(len(self.pair_list))
self.img_path = os.path.join(self.work_dir, 'Test/')
self.npy_path = os.path.join(self.work_dir, 'Coordinate/')
print('img_path,', self.img_path)
print('npy_path', self.npy_path)
def __getitem__(self, index):
img_pair = self.pair_list[index]
print('img_pair', img_pair)
        pair_id = img_pair.split(' ')
        print('pair_id', pair_id)
        npy_id = pair_id[0].split('/')[1] + '_' + pair_id[1].split('/')[1][:-1] + '.npy'
        npy_id = self.npy_path + npy_id
        video_name = img_pair.split('/')[0]
        # load img1
        if pair_id[0][-1] == 'M':
            img_1 = cv2.imread(self.img_path + pair_id[0][:-2])
        else:
            img_1 = cv2.imread(self.img_path + pair_id[0])
        # load img2
        if pair_id[1][-2] == 'M':
            img_2 = cv2.imread(self.img_path + pair_id[1][:-3])
        else:
            img_2 = cv2.imread(self.img_path + pair_id[1][:-1])
height, width = img_1.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_1 = cv2.resize(img_1, (self.WIDTH, self.HEIGHT))
print_img_1 = img_1.copy()
print_img_1 = np.transpose(print_img_1, [2, 0, 1])
img_1 = (img_1 - self.mean_I) / self.std_I
img_1 = np.mean(img_1, axis=2, keepdims=True)
img_1 = np.transpose(img_1, [2, 0, 1])
height, width = img_2.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_2 = cv2.resize(img_2, (self.WIDTH, self.HEIGHT))
print_img_2 = img_2.copy()
print_img_2 = np.transpose(print_img_2, [2, 0, 1])
img_2 = (img_2 - self.mean_I) / self.std_I
img_2 = np.mean(img_2, axis=2, keepdims=True)
img_2 = np.transpose(img_2, [2, 0, 1])
org_img = np.concatenate([img_1, img_2], axis=0)
WIDTH = org_img.shape[2]
HEIGHT = org_img.shape[1]
        # fixed offsets so the patch sits in the middle of the full image when
        # testing (the training-time random crop is intentionally bypassed)
        x = 40
        y = 23
input_tesnor = org_img[:, y: y + self.patch_h, x: x + self.patch_w]
y_t_flat = np.reshape(self.y_mesh, [-1])
x_t_flat = np.reshape(self.x_mesh, [-1])
patch_indices = (y_t_flat + y) * WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + self.patch_h)
bottom_right_point = (self.patch_w + x, self.patch_h + y)
top_right_point = (x + self.patch_w, y)
four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
four_points = np.reshape(four_points, (-1))
return (org_img, input_tesnor, patch_indices, four_points, print_img_1, print_img_2, video_name, npy_id)
def __len__(self):
return len(self.pair_list)
| 6,774 | 36.021858 | 118 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/resnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch, imageio
from homo_estimator.Deep_homography.Oneline_DLTv1.utils import transform, DLT_solve
import matplotlib.pyplot as plt
criterion_l2 = nn.MSELoss(reduction='mean')  # reduce/size_average args are deprecated
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=1, reduction='none')
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def create_gif(image_list, gif_name, duration=0.35):
    frames = list(image_list)
    imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
return
def getPatchFromFullimg(patch_size_h, patch_size_w, patchIndices, batch_indices_tensor, img_full):
num_batch, num_channels, height, width = img_full.size()
warped_images_flat = img_full.reshape(-1)
patch_indices_flat = patchIndices.reshape(-1)
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
mask_patch = torch.gather(warped_images_flat, 0, pixel_indices)
mask_patch = mask_patch.reshape([num_batch, 1, patch_size_h, patch_size_w])
return mask_patch
def normMask(mask, strenth=0.5):
"""
:return: to attention more region
"""
batch_size, c_m, c_h, c_w = mask.size()
max_value = mask.reshape(batch_size, -1).max(1)[0]
max_value = max_value.reshape(batch_size, 1, 1, 1)
mask = mask / (max_value * strenth)
mask = torch.clamp(mask, 0, 1)
return mask
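def _example_norm_mask():
    # Illustrative sketch (added for this write-up, not part of the original
    # API): with the default strenth=0.5, every value at or above half of the
    # per-sample maximum saturates to 1.
    mask = torch.tensor([[[[0.1, 0.4], [0.5, 1.0]]]])
    return normMask(mask)  # tensor([[[[0.2, 0.8], [1.0, 1.0]]]])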
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# define and forward (because the load is unbalanced when using torch.nn.DataParallel, we define the warp inside forward)
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.ShareFeature = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(4),
nn.ReLU(inplace=True),
nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.Conv2d(8, 1, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True),
)
self.genMask = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(4),
nn.ReLU(inplace=True),
nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 1, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(1),
nn.Sigmoid(),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, org_imges, input_tesnors, h4p, patch_indices):
batch_size, _, img_h, img_w = org_imges.size()
_, _, patch_size_h, patch_size_w = input_tesnors.size()
y_t = torch.arange(0, batch_size * img_w * img_h,
img_w * img_h)
batch_indices_tensor = y_t.unsqueeze(1).expand(y_t.shape[0], patch_size_h * patch_size_w).reshape(-1)
# M_tensor = torch.tensor([[img_w / 2.0, 0., img_w / 2.0],
# [0., img_h / 2.0, img_h / 2.0],
# [0., 0., 1.]])
M_tensor = torch.tensor([[63.5000, 0.0000, 63.5000],
[ 0.0000, 63.5000, 63.5000],
[ 0.0000, 0.0000, 1.0000]])# assume 127*127 image.
if torch.cuda.is_available():
M_tensor = M_tensor.cuda()
batch_indices_tensor = batch_indices_tensor.cuda()
M_tile = M_tensor.unsqueeze(0).expand(batch_size, M_tensor.shape[-2], M_tensor.shape[-1])
# Inverse of M
        if img_h == 0 or img_h is None:
            print('M_tensor has a problem!')
            print('M_tensor', M_tensor)
        M_tensor_inv = torch.inverse(M_tensor)  # exact inverse instead of rounded constants
M_tile_inv = M_tensor_inv.unsqueeze(0).expand(batch_size, M_tensor_inv.shape[-2],
M_tensor_inv.shape[-1])
        # try to remove the mask because we track the foreground
mask_I1_full = self.genMask(org_imges[:, :1, ...]) # mask
mask_I2_full = self.genMask(org_imges[:, 1:, ...]) # mask
mask_I1 = getPatchFromFullimg(patch_size_h, patch_size_w, patch_indices, batch_indices_tensor,
mask_I1_full) # mask
mask_I2 = getPatchFromFullimg(patch_size_h, patch_size_w, patch_indices, batch_indices_tensor,
mask_I2_full) # mask
mask_I1 = normMask(mask_I1) # mask
mask_I2 = normMask(mask_I2) # mask
patch_1 = self.ShareFeature(input_tesnors[:, :1, ...])
patch_2 = self.ShareFeature(input_tesnors[:, 1:, ...])
patch_1_res = torch.mul(patch_1, mask_I1) # mask
patch_2_res = torch.mul(patch_2, mask_I2) # mask
x = torch.cat((patch_1_res, patch_2_res), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
        H_mat = DLT_solve(h4p, x).squeeze(1)  # 4-point offsets -> 3x3 homography
pred_I2 = transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
org_imges[:, :1, ...], patch_indices, batch_indices_tensor)
pred_Mask = transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
mask_I1_full, patch_indices, batch_indices_tensor)
pred_Mask = normMask(pred_Mask)
mask_ap = torch.mul(mask_I2, pred_Mask)
sum_value = torch.sum(mask_ap)
pred_I2_CnnFeature = self.ShareFeature(pred_I2)
feature_loss_mat = triplet_loss(patch_2, pred_I2_CnnFeature, patch_1)
batch_size = feature_loss_mat.shape[0]
feature_loss = torch.sum(torch.mul(feature_loss_mat, mask_ap)) / sum_value / batch_size
feature_loss = torch.unsqueeze(feature_loss, 0)
pred_I2_d = pred_I2[:1, ...]
patch_2_res_d = patch_2_res[:1, ...]
pred_I2_CnnFeature_d = pred_I2_CnnFeature[:1, ...]
mask_ap_d = mask_ap[:1, ...]
feature_loss_mat_d = feature_loss_mat[:1, ...]
out_dict = {}
out_dict.update(feature_loss=feature_loss, pred_I2_d=pred_I2_d, x=x, H_mat=H_mat, patch_2_res_d=patch_2_res_d,
pred_I2_CnnFeature_d=pred_I2_CnnFeature_d, mask_ap_d=mask_ap_d.squeeze(1),
feature_loss_mat_d=feature_loss_mat_d)
return out_dict
def inference(self, org_imges, input_tesnors, h4p, patch_indices):
batch_size, _, img_h, img_w = org_imges.size()
_, _, patch_size_h, patch_size_w = input_tesnors.size()
y_t = torch.arange(0, batch_size * img_w * img_h,
img_w * img_h)
batch_indices_tensor = y_t.unsqueeze(1).expand(y_t.shape[0], patch_size_h * patch_size_w).reshape(-1)
M_tensor = torch.tensor([[img_w / 2.0, 0., img_w / 2.0],
[0., img_h / 2.0, img_h / 2.0],
[0., 0., 1.]])
if torch.cuda.is_available():
M_tensor = M_tensor.cuda()
batch_indices_tensor = batch_indices_tensor.cuda()
M_tile = M_tensor.unsqueeze(0).expand(batch_size, M_tensor.shape[-2], M_tensor.shape[-1])
# Inverse of M
M_tensor_inv = torch.inverse(M_tensor)
M_tile_inv = M_tensor_inv.unsqueeze(0).expand(batch_size, M_tensor_inv.shape[-2],
M_tensor_inv.shape[-1])
mask_I1_full = self.genMask(org_imges[:, :1, ...])
mask_I2_full = self.genMask(org_imges[:, 1:, ...])
mask_I1 = getPatchFromFullimg(patch_size_h, patch_size_w, patch_indices, batch_indices_tensor, mask_I1_full)
mask_I2 = getPatchFromFullimg(patch_size_h, patch_size_w, patch_indices, batch_indices_tensor, mask_I2_full)
mask_I1 = normMask(mask_I1)
mask_I2 = normMask(mask_I2)
patch_1 = self.ShareFeature(input_tesnors[:, :1, ...])
patch_2 = self.ShareFeature(input_tesnors[:, 1:, ...])
patch_1_res = torch.mul(patch_1, mask_I1)
patch_2_res = torch.mul(patch_2, mask_I2)
x = torch.cat((patch_1_res, patch_2_res), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
        H_mat = DLT_solve(h4p, x).squeeze(1)  # 4-point offsets -> 3x3 homography
pred_I2 = transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
org_imges[:, :1, ...], patch_indices, batch_indices_tensor)
pred_Mask = transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
mask_I1_full, patch_indices, batch_indices_tensor)
pred_Mask = normMask(pred_Mask)
mask_ap = torch.mul(mask_I2, pred_Mask)
        # step 1: freeze mask_ap via "mask_ap = torch.ones_like(mask_ap)" so its
        #         gradient does not update and the mask stays 1
        # step 2: delete that line to update the gradient of genMask
# ######
# mask_ap = torch.ones_like(mask_ap)
# ######
sum_value = torch.sum(mask_ap)
pred_I2_CnnFeature = self.ShareFeature(pred_I2)
feature_loss_mat = triplet_loss(patch_2, pred_I2_CnnFeature, patch_1)
feature_loss = torch.sum(torch.mul(feature_loss_mat, mask_ap)) / sum_value
feature_loss = torch.unsqueeze(feature_loss, 0)
pred_I2_d = pred_I2[:1, ...]
patch_2_res_d = patch_2_res[:1, ...]
pred_I2_CnnFeature_d = pred_I2_CnnFeature[:1, ...]
mask_ap_d = mask_ap[:1, ...]
feature_loss_mat_d = feature_loss_mat[:1, ...]
out_dict = {}
out_dict.update(feature_loss=feature_loss, pred_I2_d=pred_I2_d, x=x, H_mat=H_mat,
patch_2_res_d=patch_2_res_d,
pred_I2_CnnFeature_d=pred_I2_CnnFeature_d, mask_ap_d=mask_ap_d.squeeze(1),
feature_loss_mat_d=feature_loss_mat_d)
return out_dict
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| 16,688 | 36.672686 | 118 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/utils.py | import torch
import numpy as np
import cv2
import subprocess
import psutil
def DLT_solve(src_p, off_set):
# src_p: shape=(bs, n, 4, 2)
# off_set: shape=(bs, n, 4, 2)
# can be used to compute mesh points (multi-H)
bs, _ = src_p.shape
divide = int(np.sqrt(len(src_p[0])/2)-1)
row_num = (divide+1)*2
for i in range(divide):
for j in range(divide):
h4p = src_p[:,[2*j+row_num*i, 2*j+row_num*i+1,
2*(j+1)+row_num*i, 2*(j+1)+row_num*i+1,
2*(j+1)+row_num*i+row_num, 2*(j+1)+row_num*i+row_num+1,
2*j+row_num*i+row_num, 2*j+row_num*i+row_num+1]].reshape(bs, 1, 4, 2)
pred_h4p = off_set[:,[2*j+row_num*i, 2*j+row_num*i+1,
2*(j+1)+row_num*i, 2*(j+1)+row_num*i+1,
2*(j+1)+row_num*i+row_num, 2*(j+1)+row_num*i+row_num+1,
2*j+row_num*i+row_num, 2*j+row_num*i+row_num+1]].reshape(bs, 1, 4, 2)
if i+j==0:
src_ps = h4p
off_sets = pred_h4p
else:
src_ps = torch.cat((src_ps, h4p), axis = 1)
off_sets = torch.cat((off_sets, pred_h4p), axis = 1)
bs, n, h, w = src_ps.shape
N = bs*n
src_ps = src_ps.reshape(N, h, w)
off_sets = off_sets.reshape(N, h, w)
dst_p = src_ps + off_sets
ones = torch.ones(N, 4, 1)
if torch.cuda.is_available():
ones = ones.cuda()
xy1 = torch.cat((src_ps, ones), 2)
zeros = torch.zeros_like(xy1)
if torch.cuda.is_available():
zeros = zeros.cuda()
xyu, xyd = torch.cat((xy1, zeros), 2), torch.cat((zeros, xy1), 2)
M1 = torch.cat((xyu, xyd), 2).reshape(N, -1, 6)
M2 = torch.matmul(
dst_p.reshape(-1, 2, 1),
src_ps.reshape(-1, 1, 2),
).reshape(N, -1, 2)
A = torch.cat((M1, -M2), 2)
b = dst_p.reshape(N, -1, 1)
Ainv = torch.inverse(A)
h8 = torch.matmul(Ainv, b).reshape(N, 8)
H = torch.cat((h8, ones[:,0,:]), 1).reshape(N, 3, 3)
H = H.reshape(bs, n, 3, 3)
return H
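# --- Usage sketch (added; not from the original repo) ---
# Solve one 4-point homography and check that H maps every source corner to
# source + offset after the perspective divide. Corners use the flattened
# (bs, 8) layout expected above; tensors go to CUDA when available because
# DLT_solve allocates its own CUDA buffers in that case.
def _demo_dlt_solve():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    bs = 2
    src = torch.tensor([[0., 0., 127., 0., 127., 127., 0., 127.]]).repeat(bs, 1).to(device)
    off = (torch.randn(bs, 8) * 4.0).to(device)
    H = DLT_solve(src, off)[:, 0]                        # (bs, 3, 3)
    src_h = torch.cat((src.reshape(bs, 4, 2), torch.ones(bs, 4, 1, device=device)), 2)
    dst = torch.matmul(H, src_h.transpose(1, 2)).transpose(1, 2)
    dst = dst[..., :2] / dst[..., 2:]                    # perspective divide
    print((dst - (src + off).reshape(bs, 4, 2)).abs().max().item())  # ~0 (sub-pixel)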
def transformer(U, theta, out_size, **kwargs):
"""Spatial Transformer Layer
Implements a spatial transformer layer as described in [1]_.
Based on [2]_ and edited by David Dao for Tensorflow.
Parameters
----------
U : float
The output of a convolutional net should have the
shape [num_batch, height, width, num_channels].
theta: float
The output of the
localisation network should be [num_batch, 6].
out_size: tuple of two ints
The size of the output of the network (height, width)
References
----------
.. [1] Spatial Transformer Networks
Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
Submitted on 5 Jun 2015
.. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
Notes
-----
To initialize the network to the identity transform init
``theta`` to :
identity = np.array([[1., 0., 0.],
[0., 1., 0.]])
identity = identity.flatten()
theta = tf.Variable(initial_value=identity)
"""
def _repeat(x, n_repeats):
rep = torch.ones([n_repeats, ]).unsqueeze(0)
rep = rep.int()
x = x.int()
x = torch.matmul(x.reshape([-1,1]), rep)
return x.reshape([-1])
def _interpolate(im, x, y, out_size, scale_h):
num_batch, num_channels , height, width = im.size()
height_f = height
width_f = width
out_height, out_width = out_size[0], out_size[1]
zero = 0
max_y = height - 1
max_x = width - 1
if scale_h:
x = (x + 1.0)*(width_f) / 2.0
y = (y + 1.0) * (height_f) / 2.0
# do sampling
x0 = torch.floor(x).int()
x1 = x0 + 1
y0 = torch.floor(y).int()
y1 = y0 + 1
x0 = torch.clamp(x0, zero, max_x)
x1 = torch.clamp(x1, zero, max_x)
y0 = torch.clamp(y0, zero, max_y)
y1 = torch.clamp(y1, zero, max_y)
dim2 = torch.from_numpy( np.array(width) )
dim1 = torch.from_numpy( np.array(width * height) )
base = _repeat(torch.arange(0,num_batch) * dim1, out_height * out_width)
if torch.cuda.is_available():
dim2 = dim2.cuda()
dim1 = dim1.cuda()
y0 = y0.cuda()
y1 = y1.cuda()
x0 = x0.cuda()
x1 = x1.cuda()
base = base.cuda()
base_y0 = base + y0 * dim2
base_y1 = base + y1 * dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# channels dim
im = im.permute(0,2,3,1)
im_flat = im.reshape([-1, num_channels]).float()
idx_a = idx_a.unsqueeze(-1).long()
idx_a = idx_a.expand(height * width * num_batch,num_channels)
Ia = torch.gather(im_flat, 0, idx_a)
idx_b = idx_b.unsqueeze(-1).long()
idx_b = idx_b.expand(height * width * num_batch, num_channels)
Ib = torch.gather(im_flat, 0, idx_b)
idx_c = idx_c.unsqueeze(-1).long()
idx_c = idx_c.expand(height * width * num_batch, num_channels)
Ic = torch.gather(im_flat, 0, idx_c)
idx_d = idx_d.unsqueeze(-1).long()
idx_d = idx_d.expand(height * width * num_batch, num_channels)
Id = torch.gather(im_flat, 0, idx_d)
x0_f = x0.float()
x1_f = x1.float()
y0_f = y0.float()
y1_f = y1.float()
wa = torch.unsqueeze(((x1_f - x) * (y1_f - y)), 1)
wb = torch.unsqueeze(((x1_f - x) * (y - y0_f)), 1)
wc = torch.unsqueeze(((x - x0_f) * (y1_f - y)), 1)
wd = torch.unsqueeze(((x - x0_f) * (y - y0_f)), 1)
output = wa*Ia+wb*Ib+wc*Ic+wd*Id
return output
def _meshgrid(height, width, scale_h):
if scale_h:
x_t = torch.matmul(torch.ones([height, 1]),
torch.transpose(torch.unsqueeze(torch.linspace(-1.0, 1.0, width), 1), 1, 0))
y_t = torch.matmul(torch.unsqueeze(torch.linspace(-1.0, 1.0, height), 1),
torch.ones([1, width]))#grid:[-1, 1]
else:
x_t = torch.matmul(torch.ones([height, 1]),
torch.transpose(torch.unsqueeze(torch.linspace(0.0, width.float(), width), 1), 1, 0))
y_t = torch.matmul(torch.unsqueeze(torch.linspace(0.0, height.float(), height), 1),
torch.ones([1, width]))#grid:[0, width(or height)]
x_t_flat = x_t.reshape((1, -1)).float()
y_t_flat = y_t.reshape((1, -1)).float()
ones = torch.ones_like(x_t_flat)
grid = torch.cat([x_t_flat, y_t_flat, ones], 0)
if torch.cuda.is_available():
grid = grid.cuda()
return grid
def _transform(theta, input_dim, out_size, scale_h):
num_batch, num_channels , height, width = input_dim.size()
        # reshape theta into a batch of 3x3 homographies
theta = theta.reshape([-1, 3, 3]).float()
out_height, out_width = out_size[0], out_size[1]
grid = _meshgrid(out_height, out_width, scale_h)
grid = grid.unsqueeze(0).reshape([1,-1])
shape = grid.size()
grid = grid.expand(num_batch,shape[1])
grid = grid.reshape([num_batch, 3, -1])
T_g = torch.matmul(theta, grid)
x_s = T_g[:,0,:]
y_s = T_g[:,1,:]
t_s = T_g[:,2,:]
t_s_flat = t_s.reshape([-1])
        # guard against division by a (near-)zero homogeneous coordinate
small = 1e-7
smallers = 1e-6*(1.0 - torch.ge(torch.abs(t_s_flat), small).float())
t_s_flat = t_s_flat + smallers
condition = torch.sum(torch.gt(torch.abs(t_s_flat), small).float())
        # perspective divide
x_s_flat = x_s.reshape([-1]) / t_s_flat
y_s_flat = y_s.reshape([-1]) / t_s_flat
input_transformed = _interpolate( input_dim, x_s_flat, y_s_flat,out_size,scale_h)
output = input_transformed.reshape([num_batch, out_height, out_width, num_channels ])
return output, condition
img_w = U.size()[2]
img_h = U.size()[1]
scale_h = True
output, condition = _transform(theta, U, out_size, scale_h)
return output, condition
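# --- Usage sketch (added; not from the original repo) ---
# Warp a batch with the identity homography. The warped output comes back
# channels-last (NHWC); inputs should live on CUDA when it is available,
# since the sampling grid is moved there unconditionally in that case.
def _demo_transformer():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    img = torch.rand(2, 1, 32, 32, device=device)
    theta = torch.eye(3, device=device).reshape(1, 9).repeat(2, 1)
    warped, _ = transformer(img, theta, (32, 32))
    print(warped.shape)  # torch.Size([2, 32, 32, 1])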
def transform(patch_size_h,patch_size_w,M_tile_inv,H_mat,M_tile,I1,patch_indices,batch_indices_tensor):
# Transform H_mat since we scale image indices in transformer
batch_size, num_channels, img_h, img_w = I1.size()
if torch.cuda.is_available():
M_tile_inv = M_tile_inv.cuda()
H_mat = torch.matmul(torch.matmul(M_tile_inv, H_mat), M_tile)
# Transform image 1 (large image) to image 2
out_size = (img_h, img_w)
warped_images, _ = transformer(I1, H_mat, out_size)
warped_images_flat = warped_images.reshape([-1,num_channels])
patch_indices_flat = patch_indices.reshape([-1])
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
pixel_indices = pixel_indices.unsqueeze(-1).long()
pixel_indices = pixel_indices.expand(patch_size_h*patch_size_w*batch_size, num_channels)
pred_I2_flat = torch.gather(warped_images_flat, 0, pixel_indices)
pred_I2 = pred_I2_flat.reshape([batch_size, patch_size_h, patch_size_w, num_channels])
return pred_I2.permute(0,3,1,2)
def display_using_tensorboard(I, I2_ori_img, I2, pred_I2, I2_dataMat_CnnFeature, pred_I2_dataMat_CnnFeature, triMask, loss_map, writer):
I1_ori_img = cv2.normalize(I.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
I2_ori_img_ = cv2.normalize(I2_ori_img.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
input_I2 = cv2.normalize(I2.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
pred_I2 = cv2.normalize(pred_I2.cpu().detach().numpy()[0, 0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
I2_channel_1 = cv2.normalize(I2_dataMat_CnnFeature.cpu().detach().numpy()[0, 0, ...], None, 0, 255,
cv2.NORM_MINMAX, cv2.CV_8U)
pred_I2_channel_1 = cv2.normalize(pred_I2_dataMat_CnnFeature.cpu().detach().numpy()[0, 0, ...], None, 0,
255, cv2.NORM_MINMAX, cv2.CV_8U)
mask_1 = cv2.normalize(triMask.cpu().detach().numpy()[0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
loss_fig = cv2.normalize(loss_map.cpu().detach().numpy()[0, ...], None, 0, 255, cv2.NORM_MINMAX,
cv2.CV_8U)
writer.add_image('I1 and I2',
I1_ori_img,
global_step=1,
dataformats='HW')
writer.add_image('I1 and I2',
I2_ori_img_,
global_step=2,
dataformats='HW')
writer.add_image('I2 and pred_I2',
input_I2,
global_step=1,
dataformats='HW')
writer.add_image('I2 and pred_I2',
pred_I2,
global_step=2,
dataformats='HW')
writer.add_image('I2 and pred I2 feature_1',
I2_channel_1,
global_step=1,
dataformats='HW')
writer.add_image('I2 and pred I2 feature_1',
pred_I2_channel_1,
global_step=2,
dataformats='HW')
writer.add_image('loss_map and mask',
loss_fig,
global_step=1,
dataformats='HW')
writer.add_image('loss_map and mask',
mask_1,
global_step=2,
dataformats='HW')
def get_gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split('\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def get_memory_map():
mem = psutil.virtual_memory()
# total mem
total = float(mem.total) / 1024 / 1024 / 1024
# used mem
used = float(mem.used) / 1024 / 1024 / 1024
# free mem
free = float(mem.free) / 1024 / 1024 / 1024
#page cache
page_cache = float(mem.cached) / 1024 / 1024 / 1024
#buffer cache
buffer_cache = float(mem.buffers) / 1024 / 1024 / 1024
return {'total': total,
'used':used,
'free':free,
'page_cache':page_cache,
'buffer_cache':buffer_cache}
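# --- Usage sketch (added; not from the original repo) ---
# get_memory_map() needs only psutil; get_gpu_memory_map() shells out to
# nvidia-smi, so it is left commented for machines without NVIDIA GPUs.
def _demo_memory_maps():
    stats = get_memory_map()
    print('host memory (GiB):', {k: round(v, 2) for k, v in stats.items()})
    # print(get_gpu_memory_map())  # {0: used_MB, 1: used_MB, ...}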
| 12,962 | 33.293651 | 136 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/dataset.py | from torch.utils.data import Dataset
import numpy as np
import cv2, torch
import os
"""
Train and test datasets from [Deep_Homography](https://github.com/JirongZhang/DeepHomography),
originally used to train homography estimation between two images; we do not use these for training.
"""
def make_mesh(patch_w,patch_h):
x_flat = np.arange(0,patch_w)
x_flat = x_flat[np.newaxis,:]
y_one = np.ones(patch_h)
y_one = y_one[:,np.newaxis]
x_mesh = np.matmul(y_one , x_flat)
y_flat = np.arange(0,patch_h)
y_flat = y_flat[:,np.newaxis]
x_one = np.ones(patch_w)
x_one = x_one[np.newaxis,:]
y_mesh = np.matmul(y_flat, x_one)
return x_mesh,y_mesh
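# --- Usage sketch (added; not from the original repo) ---
# make_mesh returns per-pixel x/y coordinate grids of shape (patch_h, patch_w):
def _demo_make_mesh():
    x_mesh, y_mesh = make_mesh(3, 2)
    print(x_mesh)  # [[0. 1. 2.], [0. 1. 2.]]
    print(y_mesh)  # [[0. 0. 0.], [1. 1. 1.]]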
class TrainDataset(Dataset):
def __init__(self, data_path, exp_path, patch_w=560, patch_h=315, rho=16):
self.imgs = open(data_path, 'r').readlines()
self.mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
self.std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
self.patch_h = patch_h
self.patch_w = patch_w
self.WIDTH = 640
self.HEIGHT = 360
self.rho = rho
self.x_mesh, self.y_mesh = make_mesh(self.patch_w, self.patch_h)
self.train_path = os.path.join(exp_path, 'Data/Train/')
def __getitem__(self, index):
value = self.imgs[index]
img_names = value.split(' ')
img_1 = cv2.imread(self.train_path + img_names[0])
height, width = img_1.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_1 = cv2.resize(img_1, (self.WIDTH, self.HEIGHT))
img_1 = (img_1 - self.mean_I) / self.std_I
img_1 = np.mean(img_1, axis=2, keepdims=True)
img_1 = np.transpose(img_1, [2, 0, 1])
img_2 = cv2.imread(self.train_path + img_names[1][:-1])
height, width = img_2.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_2 = cv2.resize(img_2, (self.WIDTH, self.HEIGHT))
img_2 = (img_2 - self.mean_I) / self.std_I
img_2 = np.mean(img_2, axis=2, keepdims=True)
img_2 = np.transpose(img_2, [2, 0, 1])
org_img = np.concatenate([img_1, img_2], axis=0)
x = np.random.randint(self.rho, self.WIDTH - self.rho - self.patch_w)
y = np.random.randint(self.rho, self.HEIGHT - self.rho - self.patch_h)
        input_tensor = org_img[:, y: y + self.patch_h, x: x + self.patch_w]
y_t_flat = np.reshape(self.y_mesh, (-1))
x_t_flat = np.reshape(self.x_mesh, (-1))
patch_indices = (y_t_flat + y) * self.WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + self.patch_h)
bottom_right_point = (self.patch_w + x, self.patch_h + y)
top_right_point = (x + self.patch_w, y)
h4p = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
h4p = np.reshape(h4p, (-1))
org_img = torch.tensor(org_img)
        input_tensor = torch.tensor(input_tensor)
patch_indices = torch.tensor(patch_indices)
h4p = torch.tensor(h4p)
        return (org_img, input_tensor, patch_indices, h4p)  # original image, cropped patch, grid indices, 4 corner points of the patch
def __len__(self):
return len(self.imgs)
class TestDataset(Dataset):
def __init__(self, data_path, patch_w=560, patch_h=315, rho=16, WIDTH=640, HEIGHT=360):
self.mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
self.std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
self.patch_h = patch_h
self.patch_w = patch_w
self.WIDTH = WIDTH
self.HEIGHT = HEIGHT
self.rho = rho
self.x_mesh, self.y_mesh = make_mesh(self.patch_w,self.patch_h)
self.work_dir = os.path.join(data_path, 'Data')
self.pair_list = list(open(os.path.join(self.work_dir, 'Test_List.txt')))
self.img_path = os.path.join(self.work_dir, 'Test/')
self.npy_path = os.path.join(self.work_dir, 'Coordinate/')
def __getitem__(self, index):
img_pair = self.pair_list[index]
pari_id = img_pair.split(' ')
npy_id = pari_id[0].split('/')[1] + '_' + pari_id[1].split('/')[1][:-1] + '.npy'
npy_id = self.npy_path + npy_id
video_name = img_pair.split('/')[0]
# load img1
if pari_id[0][-1] == 'M':
img_1 = cv2.imread(self.img_path + pari_id[0][:-2])
else:
img_1 = cv2.imread(self.img_path + pari_id[0])
# load img2
if pari_id[1][-2] == 'M':
img_2 = cv2.imread(self.img_path + pari_id[1][:-3])
else:
img_2 = cv2.imread(self.img_path + pari_id[1][:-1])
height, width = img_1.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_1 = cv2.resize(img_1, (self.WIDTH, self.HEIGHT))
print_img_1 = img_1.copy()
print_img_1 = np.transpose(print_img_1, [2, 0, 1])
img_1 = (img_1 - self.mean_I) / self.std_I
img_1 = np.mean(img_1, axis=2, keepdims=True)
img_1 = np.transpose(img_1, [2, 0, 1])
height, width = img_2.shape[:2]
if height != self.HEIGHT or width != self.WIDTH:
img_2 = cv2.resize(img_2, (self.WIDTH, self.HEIGHT))
print_img_2 = img_2.copy()
print_img_2 = np.transpose(print_img_2, [2, 0, 1])
img_2 = (img_2 - self.mean_I) / self.std_I
img_2 = np.mean(img_2, axis=2, keepdims=True)
img_2 = np.transpose(img_2, [2, 0, 1])
org_img = np.concatenate([img_1, img_2], axis=0)
WIDTH = org_img.shape[2]
HEIGHT = org_img.shape[1]
x = np.random.randint(self.rho, WIDTH - self.rho - self.patch_w)
y = np.random.randint(self.rho, HEIGHT - self.rho - self.patch_h)
        input_tensor = org_img[:, y: y + self.patch_h, x: x + self.patch_w]
y_t_flat = np.reshape(self.y_mesh, [-1])
x_t_flat = np.reshape(self.x_mesh, [-1])
patch_indices = (y_t_flat + y) * WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + self.patch_h)
bottom_right_point = (self.patch_w + x, self.patch_h + y)
top_right_point = (x + self.patch_w, y)
four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
four_points = np.reshape(four_points, (-1))
        return (org_img, input_tensor, patch_indices, four_points, print_img_1, print_img_2, video_name, npy_id)
def __len__(self):
return len(self.pair_list)
| 6,550 | 36.221591 | 115 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/backbone/resnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch, imageio
# from utils import transform, DLT_solve
import matplotlib.pyplot as plt
"""
Backbone of the homography estimator, reconstructed from the original DeepHomography.
"""
criterion_l2 = nn.MSELoss(reduction='mean')
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=1, reduction='none')
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def create_gif(image_list, gif_name, duration=0.35):
frames = []
for image_name in image_list:
frames.append(image_name)
imageio.mimsave(gif_name, frames, 'GIF', duration=0.5)
return
def getPatchFromFullimg(patch_size_h, patch_size_w, patchIndices, batch_indices_tensor, img_full):
num_batch, num_channels, height, width = img_full.size()
warped_images_flat = img_full.reshape(-1)
patch_indices_flat = patchIndices.reshape(-1)
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
mask_patch = torch.gather(warped_images_flat, 0, pixel_indices)
mask_patch = mask_patch.reshape([num_batch, 1, patch_size_h, patch_size_w])
return mask_patch
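# --- Usage sketch (added; not from the original repo) ---
# Gather the top-left 2x2 patch of each image in a batch via flat pixel
# indices; batch_indices_tensor offsets every sample into the flattened image
# (here H*W = 16 per sample).
def _demo_getPatchFromFullimg():
    img = torch.arange(2 * 1 * 4 * 4.).reshape(2, 1, 4, 4)
    patch_idx = torch.tensor([[0., 1., 4., 5.]] * 2)
    batch_off = torch.tensor([0] * 4 + [16] * 4)
    patch = getPatchFromFullimg(2, 2, patch_idx, batch_off, img)
    print(patch.reshape(2, 4))  # tensor([[ 0.,  1.,  4.,  5.], [16., 17., 20., 21.]])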
def normMask(mask, strenth=0.5):
"""
    :return: mask rescaled by (max * strenth) and clamped to [0, 1], so more of the region gets attention
"""
batch_size, c_m, c_h, c_w = mask.size()
max_value = mask.reshape(batch_size, -1).max(1)[0]
max_value = max_value.reshape(batch_size, 1, 1, 1)
mask = mask / (max_value * strenth)
mask = torch.clamp(mask, 0, 1)
return mask
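# --- Usage sketch (added; not from the original repo) ---
# normMask divides by (max * strenth) and clamps, so with the default 0.5 the
# upper half of the value range saturates at 1.
def _demo_normMask():
    m = torch.arange(16.).reshape(1, 1, 4, 4)
    out = normMask(m)              # divides by 15 * 0.5 = 7.5, then clamps
    print(out.min().item(), out.max().item())  # 0.0 1.0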
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):  # for resnet18, resnet34
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride) # p=1
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)# p=1
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# define and forward (because the load is unbalanced when using torch.nn.DataParallel, we define warp inside forward)
class ResNet(nn.Module):
def __init__(self, block, layers, used_layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.used_layers = used_layers
self.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# self.avgpool = nn.AvgPool2d(7, stride=1)
print('block.expansion', block.expansion)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
p4 = self.layer4(p3)
out = [x, p1, p2, p3, p4]
out = [out[i] for i in self.used_layers]
if len(out) == 1:
return out[0]
else:
return out
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| 8,165 | 30.774319 | 115 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/backbone/__init__.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from torch import nn
import homo_estimator.Deep_homography.Oneline_DLTv1.backbone.resnet as resnet
import torch.utils.model_zoo as model_zoo
# from test_ideas.net.unet import UNet_fuse
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def get_backbone(model_name, pretrained=False, **kwargs):
print('**kwargs',kwargs)
    if model_name == 'resnet18':
        model = resnet.resnet18(pretrained=False, **kwargs)
    elif model_name == 'resnet34':
        model = resnet.resnet34(pretrained=False, **kwargs)
    elif model_name == 'resnet50':
        model = resnet.resnet50(pretrained=False, **kwargs)
    elif model_name == 'resnet101':
        model = resnet.resnet101(pretrained=False, **kwargs)
    elif model_name == 'resnet152':
        model = resnet.resnet152(pretrained=False, **kwargs)
if model_name == 'resnet18':
model.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
elif model_name == 'resnet34':
model.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
else:
model.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
    if pretrained:
print('load_pretrained from',model_urls[model_name])
exclude_dict = ['conv1.weight','fc.weight','fc.bias']
pretrained_dict = model_zoo.load_url(model_urls[model_name])
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k not in exclude_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
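# --- Usage sketch (added; not from the original repo) ---
# The backbones take 2-channel input (the two single-channel feature maps);
# `used_layers` is forwarded to ResNet and selects which stages are returned.
def _demo_get_backbone():
    import torch
    net = get_backbone('resnet34', pretrained=False, used_layers=[4])
    x = torch.randn(1, 2, 127, 127)
    print(net(x).shape)  # torch.Size([1, 512, 4, 4])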
# def get_backbone_unet(name, **kwargs):
# if name == 'Unet_fuse':
# return UNet_fuse(**kwargs)
# return UNet_fuse(**kwargs) | 2,249 | 38.473684 | 93 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/tools/get_img_info.py | # coding: utf-8
import argparse
from homo_estimator.Deep_homography.Oneline_DLTv1.dataset import *
import numpy as np
"""
Build template and search image info as input to the homography-estimator network.
"""
def get_template_info(template):
"""
    To save time, we separate the procedures for obtaining template and search info.
    :param template: template image
    :return: template info (tmp[1, H, W]: numpy.ndarray, print_tmp[3, H, W]: numpy.ndarray)
"""
#fixme remove norm
_mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
_std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
_patch_w = 127 #560
_patch_h = 127 #315
_rho = 0 #8 16
_WIDTH = 127 # we still leave to space to adjust the patch and original image. 640 560
_HEIGHT = 127 # 360 315
_x_mesh, _y_mesh = make_mesh(_patch_w, _patch_h)
    tmp_tensor = template[0]  # template 1*3*127*127, already cropped
    tmp = tmp_tensor.cpu().permute(1, 2, 0).numpy()  # H,W,C
# load img2
height, width = tmp.shape[:2]
if height != _HEIGHT or width != _WIDTH:
tmp = cv2.resize(tmp, (_WIDTH, _HEIGHT))
print_tmp = tmp.copy()
print_tmp = np.transpose(print_tmp, [2, 0, 1]) # C,H,W
tmp = (tmp - _mean_I) / _std_I
tmp = np.mean(tmp, axis=2, keepdims=True)
tmp = np.transpose(tmp, [2, 0, 1])
return tmp, print_tmp
def get_search_info(search):
    # currently identical to the get_template_info method
    """
    To save time, we separate the procedures for obtaining template and search info.
    :param search: search image [N, 3, H, W] torch.tensor
    :return: search info (search_image[1, H, W]: numpy.ndarray, print_search[3, H, W]: numpy.ndarray)
"""
#fixme remove norm
_mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
_std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
_patch_w = 127 #560
_patch_h = 127 #315
_rho = 0 #16 8
_WIDTH = 127 # 640 560
_HEIGHT = 127 # 360 315
    search_tensor = search[0]  # search image 1*3*127*127, already cropped
    search = search_tensor.cpu().permute(1, 2, 0).numpy()  # H,W,C
# load img2
height, width = search.shape[:2]
if height != _HEIGHT or width != _WIDTH:
search = cv2.resize(search, (_WIDTH, _HEIGHT))
print_search = search.copy()
print_search = np.transpose(print_search, [2, 0, 1]) # C,H,W
# search = search/255
#fixme remove norm
search = (search - _mean_I) / _std_I
search = np.mean(search, axis=2, keepdims=True)
search = np.transpose(search, [2, 0, 1])
return search, print_search
def merge_tmp_search(tmp,search):
"""
:param tmp: template_image[1, H, W]
:param search: search image[1, H, W]
:return: (org_img, input_tensor, patch_indices, four_points)
"""
_patch_w = 127
_patch_h = 127
# merge
org_img = np.concatenate([tmp, search], axis=0)
WIDTH = org_img.shape[2]
HEIGHT = org_img.shape[1]
_x_mesh, _y_mesh = make_mesh(_patch_w, _patch_h) #[_patch_w, _patch_h]
_rho = 0 #16
x, y = 0, 0 #4,4
input_tensor = org_img[:, y: y + _patch_h, x: x + _patch_w]
y_t_flat = np.reshape(_y_mesh, [-1])
x_t_flat = np.reshape(_x_mesh, [-1])
patch_indices = (y_t_flat + y) * WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + _patch_h)
bottom_right_point = (_patch_w + x, _patch_h + y)
top_right_point = (x + _patch_w, y)
four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
four_points = np.reshape(four_points, (-1))
return {'org_imgs': org_img,
'input_tensors':input_tensor,
'patch_indices': patch_indices,
'four_points': four_points
}
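# --- Usage sketch (added; not from the original repo) ---
# End-to-end: normalize a template and a search crop, then pack them into the
# dict consumed by the homography estimator. Inputs are [N, 3, 127, 127].
def _demo_merge_tmp_search():
    import torch
    tmp_img = torch.rand(1, 3, 127, 127) * 255
    sea_img = torch.rand(1, 3, 127, 127) * 255
    tmp, _ = get_template_info(tmp_img)
    sear, _ = get_search_info(sea_img)
    data = merge_tmp_search(tmp, sear)
    print(data['org_imgs'].shape, data['patch_indices'].shape)  # (2, 127, 127) (16129,)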
def get_img_info_from_dir(img_dir, img_pair_path):
"""
:param img_dir: dataset dir("DeepHomography/Data/POT/")
:param img_pair_path: ( "V02_2/img/0015.jpg V02_2/img/0018.jpg\n")
:return: list ( (org_img, input_tensor, patch_indices, four_points, print_img_1, print_img_2) )
"""
_mean_I = np.reshape(np.array([118.93, 113.97, 102.60]), (1, 1, 3))
_std_I = np.reshape(np.array([69.85, 68.81, 72.45]), (1, 1, 3))
_patch_w = 560
_patch_h = 315
_rho = 16
_WIDTH = 640
_HEIGHT = 360
_x_mesh, _y_mesh = make_mesh(_patch_w, _patch_h)
img_pair = img_pair_path
_img_path = img_dir
pari_id = img_pair.split(' ')
# video_name = img_pair.split('/')[0]
print('img_path = ',_img_path + pari_id[0])
# load img1
if pari_id[0][-1] == 'M':
img_1 = cv2.imread(_img_path + pari_id[0][:-2])
else:
img_1 = cv2.imread(_img_path + pari_id[0])
print('img_path = ',_img_path + pari_id[1][:-1])
# load img2
if pari_id[1][-2] == 'M':
img_2 = cv2.imread(_img_path + pari_id[1][:-3])
else:
img_2 = cv2.imread(_img_path + pari_id[1][:-1])
height, width = img_1.shape[:2]
if height != _HEIGHT or width != _WIDTH:
img_1 = cv2.resize(img_1, (_WIDTH, _HEIGHT))
print_img_1 = img_1.copy()
print_img_1 = np.transpose(print_img_1, [2, 0, 1])
img_1 = (img_1 - _mean_I) / _std_I
img_1 = np.mean(img_1, axis=2, keepdims=True)
img_1 = np.transpose(img_1, [2, 0, 1])
height, width = img_2.shape[:2]
if height != _HEIGHT or width != _WIDTH:
img_2 = cv2.resize(img_2, (_WIDTH, _HEIGHT))
print_img_2 = img_2.copy()
print_img_2 = np.transpose(print_img_2, [2, 0, 1])
img_2 = (img_2 - _mean_I) / _std_I
img_2 = np.mean(img_2, axis=2, keepdims=True)
img_2 = np.transpose(img_2, [2, 0, 1])
org_img = np.concatenate([img_1, img_2], axis=0)
WIDTH = org_img.shape[2]
HEIGHT = org_img.shape[1]
x = np.random.randint(_rho, WIDTH - _rho - _patch_w)
y = np.random.randint(_rho, HEIGHT - _rho - _patch_h)
input_tensor = org_img[:, y: y + _patch_h, x: x + _patch_w]
y_t_flat = np.reshape(_y_mesh, [-1])
x_t_flat = np.reshape(_x_mesh, [-1])
patch_indices = (y_t_flat + y) * WIDTH + (x_t_flat + x)
top_left_point = (x, y)
bottom_left_point = (x, y + _patch_h)
bottom_right_point = (_patch_w + x, _patch_h + y)
top_right_point = (x + _patch_w, y)
four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]
four_points = np.reshape(four_points, (-1))
return (org_img, input_tensor, patch_indices, four_points, print_img_1, print_img_2)
| 6,470 | 36.842105 | 104 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/models/homo_model_builder.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
import torch.nn.functional as F
import imageio
from hdn.core.config import cfg
from homo_estimator.Deep_homography.Oneline_DLTv1.backbone import get_backbone
from homo_estimator.Deep_homography.Oneline_DLTv1.preprocess import get_pre
import torch
from homo_estimator.Deep_homography.Oneline_DLTv1.utils import transform, DLT_solve
import matplotlib.pyplot as plt
"""
The model_builder we use right now.
"""
criterion_l2 = nn.MSELoss(reduction='mean')
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=1, reduction='none')  # anchor, p, n
'''
Try to use Huber loss to enhance robustness:
>>> # Custom Distance Function
>>> def l_infinity(x1, x2):
>>> return torch.max(torch.abs(x1 - x2), dim=1).values
>>>
>>> triplet_loss = \
>>> nn.TripletMarginWithDistanceLoss(distance_function=l_infinity, margin=1.5)
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
'''
def triplet_loss_xr(anchor, positive, negative, p_mask, n_mask, mask_ap, margin=0.5):#a, p, n, p_mask, n_mask
"""
:param anchor:
:param positive:
:param negative:
    :param p_mask: positive weight window
    :param n_mask: negative weight window
    :param mask_ap: mask from the mask-generator
    :param margin: triplet margin
:return: loss (scalar)
"""
loss_neg = smooth_l1_loss(anchor, negative, p_mask, mask_ap)
loss_pos = smooth_l1_loss(anchor, positive, n_mask, mask_ap)
loss = loss_pos - loss_neg + margin
return loss
def smooth_l1_loss(output, target, o_mask, mask_ap):
absolute_loss = torch.abs(target - output) * o_mask * mask_ap
square_loss = 0.5 * (target - output) ** 2 * o_mask * mask_ap
inds = absolute_loss.lt(1).float()
reg_loss = (inds * square_loss + (1 - inds) * (absolute_loss - 0.5))
tot = (mask_ap).sum()
loss = reg_loss.sum() / tot
return loss
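# --- Worked example (added; not from the original repo) ---
# With |output - target| = 0.5 everywhere and all-ones masks, every element
# takes the quadratic branch, 0.5 * 0.5**2 = 0.125, averaged over the mask:
def _demo_masked_smooth_l1():
    import torch
    target = torch.zeros(2, 1, 4, 4)
    output = torch.full((2, 1, 4, 4), 0.5)
    mask = torch.ones(2, 1, 4, 4)
    print(smooth_l1_loss(output, target, mask, mask))  # tensor(0.1250)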
def create_gif(image_list, gif_name, duration=0.35):
frames = []
for image_name in image_list:
frames.append(image_name)
imageio.mimsave(gif_name, frames, 'GIF', duration=0.5)
return
def getPatchFromFullimg(patch_size_h, patch_size_w, patchIndices, batch_indices_tensor, img_full):
num_batch, num_channels, height, width = img_full.size()
warped_images_flat = img_full.reshape(-1)
patch_indices_flat = patchIndices.reshape(-1)
pixel_indices = patch_indices_flat.long() + batch_indices_tensor
mask_patch = torch.gather(warped_images_flat, 0, pixel_indices)
mask_patch = mask_patch.reshape([num_batch, 1, patch_size_h, patch_size_w])
return mask_patch
def normMask(mask, strenth=0.5):
"""
    :return: mask rescaled by (max * strenth) and clamped to [0, 1], so more of the region gets attention
"""
batch_size, c_m, c_h, c_w = mask.size()
max_value = mask.reshape(batch_size, -1).max(1)[0]
# print('max_value.shape',max_value.shape)
max_value = max_value.reshape(batch_size, 1, 1, 1)
mask = mask / (max_value * strenth)
mask = torch.clamp(mask, 0, 1)
return mask
class HomoModelBuilder(nn.Module):
def __init__(self, pretrained = False):
super(HomoModelBuilder, self).__init__()
# build head
self.ShareFeature = get_pre('PreShareFeature')
model_name = cfg.BACKBONE_HOMO.TYPE
print('pretrained:',pretrained)
self.backbone = get_backbone(model_name,
pretrained, **cfg.BACKBONE_HOMO.KWARGS)
self.avgpool = nn.AdaptiveAvgPool2d(1)
print('self.avgpool',self.avgpool)
if model_name == 'resnet18' or model_name == 'resnet34':
self.fc = nn.Linear(512, 8)
elif model_name == 'resnet50':
self.fc = nn.Linear(2048, 8)
def forward(self, data):
org_imgs = data['org_imgs']
input_tensors = data['input_tensors']
h4p = data['h4p']
patch_inds = data['patch_indices']
_device = 'cuda' if str(org_imgs.device)[:4] =='cuda' else 'cpu'
# tmp_window = data['template_mask'] #[8,127,127]
        if 'search_windowx' in data:  # key is actually 'search_window'; the trailing 'x' keeps this branch disabled
sear_window = data['search_window'].squeeze(1) #[8,127,127]
else:
sear_window = torch.ones([input_tensors.shape[0], 127,127]).to(_device)
if 'if_pos' in data:
if_pos = data['if_pos']
else:
if_pos = torch.ones([input_tensors.shape[0],1, 127,127]).float().to(_device)
if 'if_unsup' in data:
if_unsup = data['if_unsup']
else:
if_unsup = torch.ones([input_tensors.shape[0],1, 127,127]).float().to(_device)
batch_size, _, img_h, img_w = org_imgs.size()
_, _, patch_size_h, patch_size_w = input_tensors.size()
y_t = torch.arange(0, batch_size * img_w * img_h,
img_w * img_h)
batch_inds_tensor = y_t.unsqueeze(1).expand(y_t.shape[0], patch_size_h * patch_size_w).reshape(-1)
w_h_scala = torch.tensor(63.5)
M_tensor = torch.tensor([[w_h_scala, 0., w_h_scala],
[0., w_h_scala, w_h_scala],
[0., 0., 1.]])
if torch.cuda.is_available():
M_tensor = M_tensor.cuda()
batch_indices_tensor = batch_inds_tensor.cuda()
M_tile = M_tensor.unsqueeze(0).expand(batch_size, M_tensor.shape[-2], M_tensor.shape[-1])
# Inverse of M
M_tensor_inv = torch.inverse(M_tensor)
M_tile_inv = M_tensor_inv.unsqueeze(0).expand(batch_size, M_tensor_inv.shape[-2],
M_tensor_inv.shape[-1])
#original feature
patch_1 = self.ShareFeature(input_tensors[:, :1, ...])
patch_2 = self.ShareFeature(input_tensors[:, 1:, ...])
#feature normed
patch_1_res = patch_1
patch_2_res = patch_2
x = torch.cat((patch_1_res, patch_2_res), dim=1)
x = self.backbone(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)#[bsz, 8]
        H_mat = DLT_solve(h4p, x).squeeze(1)  # H (solved via DLT): search -> template
pred_I2 = transform(patch_size_h, patch_size_w, M_tile_inv, H_mat, M_tile,
org_imgs[:, :1, ...], patch_inds, batch_indices_tensor)
pred_I2_CnnFeature = self.ShareFeature(pred_I2)
## handle the negative samples loss
neg_ids = if_pos.eq(0).nonzero().squeeze(1)
#only unsupervised homo
pos_ids = (if_pos*if_unsup).eq(1).nonzero().squeeze(1)
#add center mask
mask_sear = sear_window.gt(0).unsqueeze(1).float()
#do not use mask at all
patch_1_m = patch_1
patch_2_m = patch_2
pred_I2_CnnFeature_m = pred_I2_CnnFeature
        ## use negative samples; the loss does not seem to decrease with them
if neg_ids.shape[0] != 0:
tmp_pos = patch_1_m[pos_ids]
sear_pos = patch_2_m[pos_ids]
pred_pos = pred_I2_CnnFeature_m[pos_ids]
#only use the pos samples
tmp_replace = tmp_pos
sear_replace = sear_pos
pred_replace = pred_pos
else:
mask_num = mask_sear.nonzero().shape[0]
tmp_replace = patch_1_m
sear_replace = patch_2_m
pred_replace = pred_I2_CnnFeature_m
feature_loss_mat = triplet_loss(sear_replace, pred_replace, tmp_replace)
feature_loss = torch.sum(feature_loss_mat) / pos_ids.shape[0] /(127*127)
feature_loss = torch.unsqueeze(feature_loss, 0)
#neg loss
cur_device = feature_loss.device
homo_neg_loss = torch.tensor(0.0).to(cur_device)
if neg_ids.shape[0] > 0:
homo_neg_loss = torch.sum(torch.norm(x[neg_ids,:], p=2, dim=1)) / neg_ids.shape[0]
pred_I2_d = pred_I2[:1, ...]
patch_2_res_d = patch_2_res[:1, ...]
pred_I2_CnnFeature_d = pred_I2_CnnFeature[:1, ...]
out_dict = {}
out_dict.update(feature_loss=feature_loss, pred_I2_d=pred_I2_d, x=x, H_mat=H_mat, patch_2_res_d=patch_2_res_d,
pred_I2_CnnFeature_d=pred_I2_CnnFeature_d, homo_neg_loss=homo_neg_loss)
return out_dict
| 8,336 | 37.243119 | 118 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/preprocess/input_mask_generator.py | import torch.nn as nn
class MaskGenerator(nn.Module):
def __init__(self, ):
super(MaskGenerator, self).__init__()
self.genMask = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(4),
nn.ReLU(inplace=True),
nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 1, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(1),
nn.Sigmoid(),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out = self.genMask(x)
return out
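# --- Usage sketch (added; not from the original repo) ---
# The generator maps a single-channel patch to a same-size soft mask in (0, 1)
# thanks to the final Sigmoid.
def _demo_mask_generator():
    import torch
    net = MaskGenerator().eval()
    with torch.no_grad():
        mask = net(torch.rand(1, 1, 127, 127))
    print(mask.shape)  # torch.Size([1, 1, 127, 127]), values in (0, 1)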
| 1,164 | 30.486486 | 68 | py |
HDN | HDN-master/homo_estimator/Deep_homography/Oneline_DLTv1/preprocess/input_feature_extractor.py | import torch.nn as nn
class PreShareFeature(nn.Module):
def __init__(self, ):
super(PreShareFeature, self).__init__()
self.ShareFeature = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(4),
nn.ReLU(inplace=True),
nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.Conv2d(8, 1, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True),
)
# print('ShareFeature.param', self.ShareFeature[0].weight)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out = self.ShareFeature(x)
return out
| 982 | 29.71875 | 66 | py |
SIGIR2021 | SIGIR2021-master/src/utils.py | import os
import torch
import datetime
def print_message(*s):
s = ' '.join([str(x) for x in s])
print("[{}] {}".format(datetime.datetime.utcnow().strftime("%b %d, %H:%M:%S"), s), flush=True)
def save_checkpoint(path, epoch_idx, mb_idx, model, optimizer):
print("#> Saving a checkpoint..")
checkpoint = {}
checkpoint['epoch'] = epoch_idx
checkpoint['batch'] = mb_idx
checkpoint['model_state_dict'] = model.state_dict()
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, path)
def load_checkpoint(path, model, optimizer=None):
print_message("#> Loading checkpoint", path)
checkpoint = torch.load(path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print_message("#> checkpoint['epoch'] =", checkpoint['epoch'])
print_message("#> checkpoint['batch'] =", checkpoint['batch'])
return checkpoint
def create_directory(path):
if not os.path.exists(path):
print_message("#> Creating", path)
os.makedirs(path)
def batch(group, bsize):
offset = 0
while offset < len(group):
L = group[offset: offset + bsize]
yield L
offset += len(L)
return
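# --- Usage sketch (added; not from the original repo) ---
# batch() yields fixed-size chunks, plus a final short chunk when the length
# is not a multiple of bsize:
def _demo_batch():
    print(list(batch(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]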
| 1,309 | 24.686275 | 98 | py |
SIGIR2021 | SIGIR2021-master/src/model.py | import torch
import torch.nn as nn
from nltk.stem import PorterStemmer
from random import sample, shuffle, randint
from itertools import accumulate
from transformers import *
import re
from src.parameters import DEVICE
from src.utils2 import cleanQ, cleanD
stem = PorterStemmer().stem
MAX_LENGTH = 300
def unique(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
class MultiBERT(BertPreTrainedModel):
def __init__(self, config):
super(MultiBERT, self).__init__(config)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.regex_drop_char = re.compile(r'[^a-z0-9\s]+')
        self.regex_multi_space = re.compile(r'\s+')
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def myforward(self, input_ids, attention_mask, token_type_ids):
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.bert.embeddings(input_ids, position_ids=None, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
return (sequence_output,)
def encoder(self, hidden_states, attention_mask, head_mask):
for i, layer_module in enumerate(self.bert.encoder.layer):
if i == 7:
break
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
return (hidden_states,)
def convert_example(self, d, max_seq_length):
max_length = min(MAX_LENGTH, max_seq_length)
inputs = self.tokenizer.encode_plus(d, add_special_tokens=True, max_length=max_length, truncation=True)
padding_length = max_length - len(inputs["input_ids"])
attention_mask = ([1] * len(inputs["input_ids"])) + ([0] * padding_length)
input_ids = inputs["input_ids"] + ([0] * padding_length)
token_type_ids = inputs["token_type_ids"] + ([0] * padding_length)
return {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
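    def _demo_convert_example(self):
        # Usage sketch (added; not from the original repo). convert_example
        # pads or truncates to min(MAX_LENGTH, max_seq_length); with the
        # standard bert-base-uncased vocab, 'hello world' encodes to
        # [CLS] hello world [SEP] = 4 ids, leaving 4 padded positions.
        feats = self.convert_example('hello world', max_seq_length=8)
        print(len(feats['input_ids']), sum(feats['attention_mask']))  # 8 4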
def tokenize(self, q, d):
query_tokens = list(set(cleanQ(q).strip().split())) # [:10]
content = cleanD(d).strip()
doc_tokens = content.split()
# NOTE: The following line accounts for CLS!
tokenized = self.tokenizer.tokenize(content)
word_indexes = list(accumulate([-1] + tokenized, lambda a, b: a + int(not b.startswith('##'))))
match_indexes = list(set([doc_tokens.index(t) for t in query_tokens if t in doc_tokens]))
term_indexes = [word_indexes.index(idx) for idx in match_indexes]
a = [idx for i, idx in enumerate(match_indexes) if term_indexes[i] < MAX_LENGTH]
b = [idx for idx in term_indexes if idx < MAX_LENGTH]
return content, tokenized, a, b, len(word_indexes) + 2
def forward(self, Q, D):
bsize = len(Q)
pairs = []
X, pfx_sum, pfx_sumX = [], [], []
total_size, total_sizeX, max_seq_length = 0, 0, 0
doc_partials = []
pre_pairs = []
for q, d in zip(Q, D):
tokens, tokenized, term_idxs, token_idxs, seq_length = self.tokenize(q, d)
max_seq_length = max(max_seq_length, seq_length)
pfx_sumX.append(total_sizeX)
total_sizeX += len(term_idxs)
tokens_split = tokens.split()
doc_partials.append([(total_size + idx, tokens_split[i]) for idx, i in enumerate(term_idxs)])
total_size += len(doc_partials[-1])
pfx_sum.append(total_size)
pre_pairs.append((tokenized, token_idxs))
if max_seq_length % 10 == 0:
print("#>>> max_seq_length = ", max_seq_length)
for tokenized, token_idxs in pre_pairs:
pairs.append(self.convert_example(tokenized, max_seq_length))
X.append(token_idxs)
input_ids = torch.tensor([f['input_ids'] for f in pairs], dtype=torch.long).to(DEVICE)
attention_mask = torch.tensor([f['attention_mask'] for f in pairs], dtype=torch.long).to(DEVICE)
token_type_ids = torch.tensor([f['token_type_ids'] for f in pairs], dtype=torch.long).to(DEVICE)
outputs = self.bert.forward(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
hidden_state = outputs[0]
def one(i):
if len(X[i]) > 0:
l = [hidden_state[i, j] for j in X[i]] # + [mismatch_scores[i, j] for j in all_mismatches[i]]
return torch.stack(l)
return torch.tensor([]).to(DEVICE)
pooled_output = torch.cat([one(i) for i in range(bsize)])
bsize = len(pooled_output)
if bsize == 0:
term_scores = []
for doc in doc_partials:
term_scores.append([])
for (idx, term) in doc:
term_scores[-1].append((term, 0.0))
return torch.tensor([[0.0]] * len(Q)).to(DEVICE), term_scores
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.ReLU()(pooled_output)
pooled_output = self.dropout(pooled_output)
y_score = self.classifier(pooled_output)
y_score = torch.nn.functional.relu(y_score)
x = torch.arange(bsize).expand(len(pfx_sum), bsize) < torch.tensor(pfx_sum).unsqueeze(1)
y = torch.arange(bsize).expand(len(pfx_sum), bsize) >= torch.tensor([0] + pfx_sum[:-1]).unsqueeze(1)
mask = (x & y).to(DEVICE)
y_scorex = list(y_score.cpu())
term_scores = []
for doc in doc_partials:
term_scores.append([])
for (idx, term) in doc:
term_scores[-1].append((term, y_scorex[idx]))
return (mask.type(torch.float32) @ y_score), term_scores #, ordered_terms #, num_exceeding_fifth
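# --- Worked example (added; not from the original repo) ---
# forward() sums per-term scores into per-document scores with a
# (num_docs x num_terms) 0/1 mask built from prefix sums instead of a loop:
def _demo_prefix_sum_pooling():
    y_score = torch.tensor([[1.], [2.], [3.], [4.], [5.]])
    pfx_sum = [2, 5]                 # doc0 owns rows 0:2, doc1 owns rows 2:5
    n = y_score.size(0)
    upper = torch.arange(n).expand(len(pfx_sum), n) < torch.tensor(pfx_sum).unsqueeze(1)
    lower = torch.arange(n).expand(len(pfx_sum), n) >= torch.tensor([0] + pfx_sum[:-1]).unsqueeze(1)
    print((upper & lower).float() @ y_score)  # tensor([[ 3.], [12.]])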
| 6,377 | 37.421687 | 119 | py |
SIGIR2021 | SIGIR2021-master/src/model_multibert.py | import torch
import torch.nn as nn
from nltk.stem import PorterStemmer
from random import sample, shuffle, randint
from transformers import *
import re
from itertools import accumulate
from src.parameters import DEVICE
from src.utils2 import cleanQ, cleanD
stem = PorterStemmer().stem
MAX_LENGTH = 300
def unique(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
class MultiBERT(BertPreTrainedModel):
def __init__(self, config):
super(MultiBERT, self).__init__(config)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.regex_drop_char = re.compile(r'[^a-z0-9\s]+')
        self.regex_multi_space = re.compile(r'\s+')
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def encoder(self, hidden_states, attention_mask, head_mask):
for i, layer_module in enumerate(self.bert.encoder.layer):
if i == 7:
break
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
return (hidden_states,)
def myforward(self, input_ids, attention_mask, token_type_ids):
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.bert.embeddings(input_ids, position_ids=None, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
return (sequence_output,)
def convert_example(self, d, max_seq_length):
max_length = min(MAX_LENGTH, max_seq_length)
inputs = self.tokenizer.encode_plus(d, add_special_tokens=True, max_length=max_length, truncation=True)
padding_length = max_length - len(inputs["input_ids"])
attention_mask = ([1] * len(inputs["input_ids"])) + ([0] * padding_length)
input_ids = inputs["input_ids"] + ([0] * padding_length)
token_type_ids = inputs["token_type_ids"] + ([0] * padding_length)
return {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
def index(self, D, max_seq_length):
if max_seq_length % 10 == 0:
print("#>>> max_seq_length = ", max_seq_length)
bsize = len(D)
offset = 0
pairs, X = [], []
for tokenized_content, terms in D:
terms = [(t, idx, offset + pos) for pos, (t, idx) in enumerate(terms)]
offset += len(terms)
pairs.append(self.convert_example(tokenized_content, max_seq_length))
X.append(terms)
input_ids = torch.tensor([f['input_ids'] for f in pairs], dtype=torch.long).to(DEVICE)
attention_mask = torch.tensor([f['attention_mask'] for f in pairs], dtype=torch.long).to(DEVICE)
token_type_ids = torch.tensor([f['token_type_ids'] for f in pairs], dtype=torch.long).to(DEVICE)
outputs = self.bert.forward(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
hidden_state = outputs[0]
pooled_output = torch.cat([hidden_state[i, list(map(lambda x: x[1], X[i]))] for i in range(bsize)])
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.ReLU()(pooled_output)
pooled_output = self.dropout(pooled_output)
y_score = self.classifier(pooled_output)
y_score = torch.nn.functional.relu(y_score)
y_score = y_score.squeeze().cpu().numpy().tolist()
term_scores = [[(term, y_score[pos]) for term, _, pos in terms] for terms in X]
return term_scores
| 4,114 | 38.951456 | 119 | py |
SIGIR2021 | SIGIR2021-master/src/parameters.py | import torch
DEVICE = torch.device("cuda:0")
DEFAULT_DATA_DIR = './data_download/'
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
| 166 | 19.875 | 79 | py |
SIGIR2021 | SIGIR2021-master/src/train.py | import os
import random
import torch
from argparse import ArgumentParser
from src.training.data_reader import train
from src.utils import print_message, create_directory
def main():
random.seed(12345)
torch.manual_seed(1)
parser = ArgumentParser(description='Training ColBERT with <query, positive passage, negative passage> triples.')
parser.add_argument('--lr', dest='lr', default=3e-06, type=float)
parser.add_argument('--maxsteps', dest='maxsteps', default=400000, type=int)
parser.add_argument('--bsize', dest='bsize', default=32, type=int)
parser.add_argument('--accum', dest='accumsteps', default=2, type=int)
parser.add_argument('--triples', dest='triples', default='triples.train.small.tsv')
parser.add_argument('--output_dir', dest='output_dir', default='outputs.train/')
parser.add_argument('--similarity', dest='similarity', default='cosine', choices=['cosine', 'l2'])
parser.add_argument('--dim', dest='dim', default=128, type=int)
parser.add_argument('--query_maxlen', dest='query_maxlen', default=32, type=int)
parser.add_argument('--doc_maxlen', dest='doc_maxlen', default=180, type=int)
# TODO: Add resume functionality
# TODO: Save the configuration to the checkpoint.
# TODO: Extract common parser arguments/behavior into a class.
args = parser.parse_args()
args.input_arguments = args
create_directory(args.output_dir)
assert args.bsize % args.accumsteps == 0, ((args.bsize, args.accumsteps),
"The batch size must be divisible by the number of gradient accumulation steps.")
assert args.query_maxlen <= 512
assert args.doc_maxlen <= 512
train(args)
if __name__ == "__main__":
main()
| 1,761 | 34.959184 | 128 | py |
SIGIR2021 | SIGIR2021-master/src/index.py | import random
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from time import time
from math import ceil
from src.model_multibert import *
from multiprocessing import Pool
from src.evaluation.loaders import load_checkpoint
MB_SIZE = 1024
def print_message(*s):
s = ' '.join(map(str, s))
print("[{}] {}".format(datetime.datetime.utcnow().strftime("%b %d, %H:%M:%S"), s), flush=True)
print_message("#> Loading model checkpoint.")
net = MultiBERT.from_pretrained('bert-base-uncased')
net = net.to(DEVICE)
load_checkpoint("/scratch/am8949/MultiBERT/colbert-12layers-max300-50000.dnn", net)
net.eval()
def tok(d):
d = cleanD(d, join=False)
content = ' '.join(d)
tokenized_content = net.tokenizer.tokenize(content)
terms = list(set([(t, d.index(t)) for t in d])) # Quadratic!
word_indexes = list(accumulate([-1] + tokenized_content, lambda a, b: a + int(not b.startswith('##'))))
terms = [(t, word_indexes.index(idx)) for t, idx in terms]
terms = [(t, idx) for (t, idx) in terms if idx < MAX_LENGTH]
return tokenized_content, terms
def process_batch(g, super_batch):
print_message("Start process_batch()", "")
with torch.no_grad():
super_batch = list(p.map(tok, super_batch))
sorted_super_batch = sorted([(v, idx) for idx, v in enumerate(super_batch)], key=lambda x: len(x[0][0]))
super_batch = [v for v, _ in sorted_super_batch]
super_batch_indices = [idx for _, idx in sorted_super_batch]
print_message("Done sorting", "")
every_term_score = []
for batch_idx in range(ceil(len(super_batch) / MB_SIZE)):
D = super_batch[batch_idx * MB_SIZE: (batch_idx + 1) * MB_SIZE]
IDXs = super_batch_indices[batch_idx * MB_SIZE: (batch_idx + 1) * MB_SIZE]
all_term_scores = net.index(D, len(D[-1][0])+2)
every_term_score += zip(IDXs, all_term_scores)
every_term_score = sorted(every_term_score)
lines = []
for _, term_scores in every_term_score:
term_scores = ', '.join([term + ": " + str(round(score, 3)) for term, score in term_scores])
lines.append(term_scores)
g.write('\n'.join(lines) + "\n")
g.flush()
p = Pool(16)
start_time = time()
COLLECTION = "/scratch/am8949"
with open(COLLECTION + '/index-Feb23.txt', 'w') as g:
with open(COLLECTION + '/collection-dT5q-newterms_unique.tsv') as f:
for idx, passage in enumerate(f):
if idx % (50*1024) == 0:
if idx > 0:
process_batch(g, super_batch)
throughput = round(idx / (time() - start_time), 1)
print_message("Processed", str(idx), "passages so far [rate:", str(throughput), "passages per second]")
super_batch = []
passage = passage.strip()
pid, passage = passage.split('\t')
super_batch.append(passage)
assert int(pid) == idx
process_batch(g, super_batch)
| 3,047 | 29.787879 | 119 | py |
SIGIR2021 | SIGIR2021-master/src/evaluation/ranking.py | import os
import random
import time
import torch
from src.utils import print_message, load_checkpoint, batch
from src.evaluation.metrics import Metrics
def rerank(args, query, pids, passages, index=None):
colbert = args.colbert
#tokenized_passages = list(args.pool.map(colbert.tokenizer.tokenize, passages))
scores = [colbert.forward([query] * len(D), D)[0].cpu() for D in batch(passages, args.bsize)]
scores = torch.cat(scores).squeeze(1).sort(descending=True)
ranked = scores.indices.tolist()
ranked_scores = scores.values.tolist()
ranked_pids = [pids[position] for position in ranked]
ranked_passages = [passages[position] for position in ranked]
assert len(ranked_pids) == len(set(ranked_pids))
return list(zip(ranked_scores, ranked_pids, ranked_passages))
def evaluate(args, index=None):
qrels, queries, topK_docs, topK_pids = args.qrels, args.queries, args.topK_docs, args.topK_pids
metrics = Metrics(mrr_depths={10}, recall_depths={50, 200, 1000}, total_queries=None)
if index:
args.buffer = torch.zeros(1000, args.doc_maxlen, args.dim, dtype=index[0].dtype)
output_path = '.'.join([str(x) for x in [args.run_name, 'tsv', int(time.time())]])
output_path = os.path.join(args.output_dir, output_path)
# TODO: Save an associated metadata file with the args.input_args
with open(output_path, 'w') as outputfile:
with torch.no_grad():
keys = sorted(list(queries.keys()))
random.shuffle(keys)
for query_idx, qid in enumerate(keys):
query = queries[qid]
print_message(query_idx, qid, query, '\n')
if qrels and args.shortcircuit and len(set.intersection(set(qrels[qid]), set(topK_pids[qid]))) == 0:
continue
ranking = rerank(args, query, topK_pids[qid], topK_docs[qid], index)
for i, (score, pid, passage) in enumerate(ranking):
outputfile.write('\t'.join([str(x) for x in [qid, pid, i+1]]) + "\n")
if i+1 in [1, 2, 5, 10, 20, 100]:
print("#> " + str(i+1) + ") ", pid, ":", score, ' ', passage)
if qrels:
metrics.add(query_idx, qid, ranking, qrels[qid])
for i, (score, pid, passage) in enumerate(ranking):
if pid in qrels[qid]:
print("\n#> Found", pid, "at position", i+1, "with score", score)
print(passage)
metrics.print_metrics(query_idx)
print_message("#> checkpoint['batch'] =", args.checkpoint['batch'], '\n')
print("output_path =", output_path)
print("\n\n")
| 2,777 | 38.126761 | 116 | py |
SIGIR2021 | SIGIR2021-master/src/training/data_reader.py | import os
import random
import torch
import torch.nn as nn
from argparse import ArgumentParser
from transformers import AdamW
from src.parameters import DEVICE, SAVED_CHECKPOINTS
from src.model import MultiBERT
from src.utils import print_message, save_checkpoint
import re
import datetime
class TrainReader:
def __init__(self, data_file):
print_message("#> Training with the triples in", data_file, "...\n\n")
self.reader = open(data_file, mode='r', encoding="utf-8")
def get_minibatch(self, bsize):
return [self.reader.readline().split('\t') for _ in range(bsize)]
def manage_checkpoints(colbert, optimizer, batch_idx):
if batch_idx % 2000 == 0:
save_checkpoint("colbert-12layers-max300.dnn", 0, batch_idx, colbert, optimizer)
if batch_idx in SAVED_CHECKPOINTS:
save_checkpoint("colbert-12layers-max300-" + str(batch_idx) + ".dnn", 0, batch_idx, colbert, optimizer)
def train(args):
colbert = MultiBERT.from_pretrained('bert-base-uncased')
colbert = colbert.to(DEVICE)
colbert.train()
criterion = nn.CrossEntropyLoss()
optimizer = AdamW(colbert.parameters(), lr=args.lr, eps=1e-8)
optimizer.zero_grad()
labels = torch.zeros(args.bsize, dtype=torch.long, device=DEVICE)
reader = TrainReader(args.triples)
train_loss = 0.0
for batch_idx in range(args.maxsteps):
Batch = reader.get_minibatch(args.bsize)
Batch = sorted(Batch, key=lambda x: max(len(x[1]), len(x[2])))
for B_idx in range(args.accumsteps):
size = args.bsize // args.accumsteps
B = Batch[B_idx * size: (B_idx+1) * size]
Q, D1, D2 = zip(*B)
colbert_out, _ = colbert(Q + Q, D1 + D2)
            colbert_out = colbert_out.squeeze(1)
colbert_out1, colbert_out2 = colbert_out[:len(Q)], colbert_out[len(Q):]
out = torch.stack((colbert_out1, colbert_out2), dim=-1)
positive_score, negative_score = round(colbert_out1.mean().item(), 2), round(colbert_out2.mean().item(), 2)
print("#>>> ", positive_score, negative_score, '\t\t|\t\t', positive_score - negative_score)
loss = criterion(out, labels[:out.size(0)])
loss = loss / args.accumsteps
loss.backward()
train_loss += loss.item()
torch.nn.utils.clip_grad_norm_(colbert.parameters(), 2.0)
optimizer.step()
optimizer.zero_grad()
print_message(batch_idx, train_loss / (batch_idx+1))
manage_checkpoints(colbert, optimizer, batch_idx+1)
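# Minimal sketch (made-up scores) of the pairwise objective used in train() above:
# positive/negative scores are stacked as 2-way logits and the target is always
# class 0, so cross-entropy maximizes the positive-minus-negative score gap.
def _pairwise_loss_demo():
    pos = torch.tensor([3.1, 2.4])  # scores for (query, positive passage)
    neg = torch.tensor([1.2, 2.9])  # scores for (query, negative passage)
    out = torch.stack((pos, neg), dim=-1)  # shape (bsize, 2)
    labels = torch.zeros(out.size(0), dtype=torch.long)
    return nn.CrossEntropyLoss()(out, labels)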
| 2,567 | 31.923077 | 119 | py |
LoGo | LoGo-main/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.9
import os
import sys
import json
import random
import copy
import pickle
import numpy as np
import pandas as pd
import medmnist
from medmnist import INFO
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import get_model
from fl_methods import get_fl_method_class
from query_strategies import random_query_samples, algo_query_samples
from util.args import args_parser
from util.path import set_result_dir, set_dict_user_path
from util.data_simulator import shard_balance, dir_balance
from util.longtail_dataset import IMBALANCECIFAR10, IMBALANCECIFAR100
from util.misc import adjust_learning_rate
def get_dataset(args):
MEAN = {'mnist': (0.1307,), 'fmnist': (0.5,), 'emnist': (0.5,), 'svhn': [0.4376821, 0.4437697, 0.47280442],
'cifar10': [0.485, 0.456, 0.406], 'cifar100': [0.507, 0.487, 0.441], 'pathmnist': (0.5,),
'octmnist': (0.5,), 'organamnist': (0.5,), 'dermamnist': (0.5,), 'bloodmnist': (0.5,)}
STD = {'mnist': (0.3081,), 'fmnist': (0.5,), 'emnist': (0.5,), 'svhn': [0.19803012, 0.20101562, 0.19703614],
'cifar10': [0.229, 0.224, 0.225], 'cifar100': [0.267, 0.256, 0.276], 'pathmnist': (0.5,),
'octmnist': (0.5,), 'organamnist': (0.5,), 'dermamnist': (0.5,), 'bloodmnist': (0.5,)}
if 'lt' not in args.dataset:
noaug = [transforms.ToTensor(),
transforms.Normalize(mean=MEAN[args.dataset], std=STD[args.dataset])]
weakaug = [transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN[args.dataset], std=STD[args.dataset])]
trans_noaug = transforms.Compose(noaug)
trans_weakaug = transforms.Compose(weakaug)
# standard benchmarks
print('Load Dataset {}'.format(args.dataset))
if args.dataset == 'mnist':
dataset_train = datasets.MNIST(args.data_dir, train=True, download=True, transform=trans_weakaug)
dataset_query = datasets.MNIST(args.data_dir, train=True, download=True, transform=trans_noaug)
dataset_test = datasets.MNIST(args.data_dir, train=False, download=True, transform=trans_noaug)
elif args.dataset == "fmnist":
dataset_train = datasets.FashionMNIST(args.data_dir, download=True, train=True, transform=trans_weakaug)
dataset_query = datasets.FashionMNIST(args.data_dir, download=True, train=True, transform=trans_noaug)
dataset_test = datasets.FashionMNIST(args.data_dir, download=True, train=False, transform=trans_noaug)
elif args.dataset == 'emnist':
dataset_train = datasets.EMNIST(args.data_dir, split='byclass', train=True, download=True, transform=trans_weakaug)
dataset_query = datasets.EMNIST(args.data_dir, split='byclass', train=True, download=True, transform=trans_noaug)
dataset_test = datasets.EMNIST(args.data_dir, split='byclass', train=False, download=True, transform=trans_noaug)
elif args.dataset == 'svhn':
dataset_train = datasets.SVHN(args.data_dir, 'train', download=True, transform=trans_weakaug)
dataset_query = datasets.SVHN(args.data_dir, 'train', download=True, transform=trans_noaug)
dataset_test = datasets.SVHN(args.data_dir, 'test', download=True, transform=trans_noaug)
elif args.dataset == 'cifar10':
dataset_train = datasets.CIFAR10(args.data_dir, train=True, download=True, transform=trans_weakaug)
dataset_query = datasets.CIFAR10(args.data_dir, train=True, download=True, transform=trans_noaug)
dataset_test = datasets.CIFAR10(args.data_dir, train=False, download=True, transform=trans_noaug)
elif args.dataset == 'cifar10_lt':
dataset_train = IMBALANCECIFAR10('train', args.imb_ratio, args.data_dir)
dataset_query = IMBALANCECIFAR10('train', args.imb_ratio, args.data_dir, train_aug=False)
dataset_test = IMBALANCECIFAR10('test', args.imb_ratio, args.data_dir)
elif args.dataset == 'cifar100':
dataset_train = datasets.CIFAR100(args.data_dir, train=True, download=True, transform=trans_weakaug)
dataset_query = datasets.CIFAR100(args.data_dir, train=True, download=True, transform=trans_noaug)
dataset_test = datasets.CIFAR100(args.data_dir, train=False, download=True, transform=trans_noaug)
    elif args.dataset == 'cifar100_lt':
dataset_train = IMBALANCECIFAR100('train', args.imb_ratio, args.data_dir)
dataset_query = IMBALANCECIFAR100('train', args.imb_ratio, args.data_dir, train_aug=False)
dataset_test = IMBALANCECIFAR100('test', args.imb_ratio, args.data_dir)
# medical benchmarks
elif args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
DataClass = getattr(medmnist, INFO[args.dataset]['python_class'])
dataset_train = DataClass(download=True, split='train', transform=trans_weakaug)
dataset_query = DataClass(download=True, split='train', transform=trans_noaug)
dataset_test = DataClass(download=True, split='test', transform=trans_noaug)
else:
exit('Error: unrecognized dataset')
args.dataset_train = dataset_train
args.total_data = len(dataset_train)
if args.partition == "shard_balance":
dict_users_train_total = shard_balance(dataset_train, args)
dict_users_test_total = shard_balance(dataset_test, args)
elif args.partition == "dir_balance":
dict_users_train_total, sample = dir_balance(dataset_train, args)
dict_users_test_total, _ = dir_balance(dataset_test, args, sample)
args.n_query = round(args.total_data, -2) * args.query_ratio
args.n_data = round(args.total_data, -2) * args.current_ratio
return dataset_train, dataset_query, dataset_test, dict_users_train_total, dict_users_test_total, args
def train_test(net_glob, dataset_train, dataset_test, dict_users_train_label, args):
results_save_path = os.path.join(args.result_dir, 'results.csv')
fl_method = get_fl_method_class(args.fl_algo)(args, dict_users_train_label)
if args.fl_algo == 'scaffold':
fl_method.init_c_nets(net_glob)
results = []
for round in range(args.rounds):
w_glob = None
loss_locals = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
lr = adjust_learning_rate(args, round)
print("Round {}, lr: {:.6f}, momentum:{}, weight decay:{}, idx_users: {}".format(round+1, lr, args.momentum, args.weight_decay, idxs_users))
total_data_num = sum([len(dict_users_train_label[idx]) for idx in idxs_users])
fl_method.on_round_start(net_glob=net_glob)
for idx in idxs_users:
fl_method.on_user_iter_start(dataset_train, idx)
net_local = copy.deepcopy(net_glob)
w_local, loss = fl_method.train(net=net_local.to(args.device),
user_idx=idx,
lr=lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
loss_locals.append(copy.deepcopy(loss))
fl_method.on_user_iter_end()
w_glob = fl_method.aggregate(w_glob=w_glob, w_local=w_local, idx_user=idx, total_data_num=total_data_num)
fl_method.on_round_end(idxs_users)
net_glob.load_state_dict(w_glob, strict=False)
acc_test, loss_test = fl_method.test(net_glob, dataset_test)
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}, Test loss {:.3f}, Test accuracy: {:.2f}'.format(
round+1, loss_avg, loss_test, acc_test))
results.append(np.array([round, loss_avg, loss_test, acc_test]))
last_save_path = os.path.join(args.result_dir, 'last.pt')
torch.save(net_glob.state_dict(), last_save_path)
final_results = np.array(results)
final_results = pd.DataFrame(final_results, columns=['epoch', 'loss_avg', 'loss_test', 'acc_test'])
final_results.to_csv(results_save_path, index=False)
return net_glob.state_dict()
if __name__ == '__main__':
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# print("device:", args.device)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(args.seed)
random.seed(args.seed)
args = set_result_dir(args)
args = set_dict_user_path(args)
# total dataset for each client
dataset_train, dataset_query, dataset_test, dict_users_train_total, dict_users_test_total, args = get_dataset(args)
dict_users_train_label = None
while round(args.current_ratio, 2) <= args.end_ratio:
print('[Current data ratio] %.3f' % args.current_ratio)
net_glob = get_model(args)
if args.query_ratio == args.current_ratio:
dict_users_train_label, args = random_query_samples(dict_users_train_total, dict_users_test_total, args)
else:
if dict_users_train_label is None:
path = os.path.join(args.dict_user_path, 'dict_users_train_label_{:.3f}.pkl'.format(args.current_ratio - args.query_ratio))
with open(path, 'rb') as f:
dict_users_train_label = pickle.load(f)
            args.dict_users_total_path = os.path.join(args.dict_user_path, 'dict_users_train_test_total.pkl')
last_ckpt = torch.load(args.query_model)
print("Load Total Data Idxs from {}".format(args.dict_users_total_path))
with open(args.dict_users_total_path, 'rb') as f:
dict_users_train_total, dict_users_test_total = pickle.load(f)
dict_users_train_label = algo_query_samples(dataset_train, dataset_query, dict_users_train_total, args)
if args.reset == 'continue' and args.query_model:
query_net_state_dict = torch.load(args.query_model)
net_glob.load_state_dict(query_net_state_dict)
last_ckpt = train_test(net_glob, dataset_train, dataset_test, dict_users_train_label, args)
args.current_ratio += args.query_ratio
# update path
args = set_result_dir(args)
args = set_dict_user_path(args) | 10,805 | 47.457399 | 148 | py |
LoGo | LoGo-main/models/resnet.py | import torch
import torch.nn as nn
__all__ = ['resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'wide_resnet50_2', 'wide_resnet101_2']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class Block(nn.Module):
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, expansion=1, block_type='basic'):
super(Block, self).__init__()
if block_type not in ['basic', 'bottleneck']:
raise ValueError('Block_Type only supports basic and bottleneck')
self.block_type = block_type
self.expansion = expansion
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if block_type == 'basic':
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
width = int(planes * (base_width / 64.)) * groups
# Both conv3*3 with stride and self.downsample layers downsample the input when stride != 1
if block_type == 'basic':
self.conv1 = conv3x3(inplanes, width, stride)
self.conv2 = conv3x3(width, width)
if block_type == 'bottleneck':
self.conv1 = conv1x1(inplanes, width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.bn1 = norm_layer(width)
self.bn2 = norm_layer(width)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.block_type == 'bottleneck':
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
BasicBlock_arch = ['resnet10', 'resnet18', 'resnet34']
Bottleneck_arch = ['resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
def __init__(self, arch, repeats, in_channels=3, num_classes=100, zero_init_residual=True,
groups=1, width_per_group=64, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.groups = groups
self.base_width = width_per_group
if arch in self.BasicBlock_arch:
self.expansion = 1
self.block_type = 'basic'
elif arch in self.Bottleneck_arch:
self.expansion = 4
self.block_type = 'bottleneck'
else:
raise NotImplementedError('%s arch is not supported in ResNet' % arch)
self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.planes = [64, 128, 256, 512] # first plane is for input channel
self.strides = [1, 2, 2, 2]
self.block_layers = self._make_layer(self.planes, repeats, self.strides)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(512 * self.expansion, num_classes)
self.linear.bias.data.fill_(0)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Block):
if self.block_type == 'basic':
nn.init.constant_(m.bn2.weight, 0)
if self.block_type == 'bottleneck':
nn.init.constant_(m.bn3.weight, 0)
def _make_layer(self, planes, repeats, strides):
assert len(planes) == len(repeats) == len(strides) == 4, 'Number of Block should be 4'
block_layers = []
norm_layer = self._norm_layer
for i in range(4):
plane = planes[i]
repeat = repeats[i]
stride = strides[i]
downsample = None
if stride != 1 or self.inplanes != plane * self.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, plane * self.expansion, stride),
norm_layer(plane * self.expansion),
)
layers = []
layers.append(nn.Sequential(*[Block(self.inplanes, plane, stride, downsample, self.groups,
self.base_width, self.dilation, norm_layer, self.expansion,
self.block_type)]))
self.inplanes = plane * self.expansion
for _ in range(1, repeat):
layers.append(nn.Sequential(*[Block(self.inplanes, plane, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, expansion=self.expansion,
block_type=self.block_type)]))
block_layers.append(nn.Sequential(*layers))
return nn.Sequential(*block_layers)
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.block_layers(x)
features = self.avgpool(x)
features = features.view(x.size(0), -1)
logits = self.linear(features)
return logits, features
def get_embedding_dim(self):
return 512 * self.expansion
def _resnet(arch, repeats, **kwargs):
model = ResNet(arch, repeats, **kwargs)
return model
def resnet10(**kwargs):
return _resnet('resnet10', [1, 1, 1, 1], **kwargs)
def resnet18(**kwargs):
return _resnet('resnet18', [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
return _resnet('resnet34', [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
return _resnet('resnet50', [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
return _resnet('resnet101', [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
return _resnet('resnet152', [3, 8, 36, 3], **kwargs)
def wide_resnet50_2(**kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', [3, 4, 6, 3], **kwargs)
def wide_resnet101_2(**kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', [3, 4, 23, 3], **kwargs)
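# Usage sketch (illustrative, not part of the original file). With the 3x3 stride-1
# stem above, a CIFAR-style 32x32 input is downsampled only by the three stride-2
# stages, and forward() returns both logits and the pooled features.
def _resnet_demo():
    net = resnet18(in_channels=3, num_classes=10)
    logits, feats = net(torch.randn(2, 3, 32, 32))
    assert logits.shape == (2, 10)
    assert feats.shape == (2, net.get_embedding_dim())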
| 9,005 | 37.323404 | 109 | py |
LoGo | LoGo-main/models/mobilenet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.9
import torch
from torch import nn
import torch.nn.functional as F
'''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
class Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes, track_running_stats=True)
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes, track_running_stats=True)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
return out
class MobileNetCifar(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
def __init__(self, in_channels, num_classes):
super(MobileNetCifar, self).__init__()
self.emb_dim = 1024
self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32, track_running_stats=True)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(self.emb_dim, num_classes)
self.linear.bias.data.fill_(0)
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 2)
features = out.view(out.size(0), -1)
logits = self.linear(features)
return logits, features
def get_embedding_dim(self):
return self.emb_dim | 2,277 | 35.15873 | 123 | py |
LoGo | LoGo-main/models/cnn4conv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.9
import torch
from torch import nn
def conv3x3(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, **kwargs),
nn.BatchNorm2d(out_channels, track_running_stats=True),
nn.ReLU(),
nn.MaxPool2d(2)
)
class CNN4Conv(nn.Module):
    def __init__(self, in_channels, num_classes, args):
        super(CNN4Conv, self).__init__()
        hidden_size = 64
        # each conv3x3 block below ends in MaxPool2d(2), so four blocks shrink
        # 32x32 inputs to 2x2 and 28x28 inputs to 1x1 feature maps
        if args.img_size == 32:
            self.emb_dim = hidden_size * 2 * 2
        elif args.img_size == 28:
            self.emb_dim = hidden_size
        else:
            raise NotImplementedError('unsupported img_size: %s' % args.img_size)
self.features = nn.Sequential(
conv3x3(in_channels, hidden_size),
conv3x3(hidden_size, hidden_size),
conv3x3(hidden_size, hidden_size),
conv3x3(hidden_size, hidden_size)
)
self.linear = nn.Linear(self.emb_dim, num_classes)
self.linear.bias.data.fill_(0)
def forward(self, x):
features = self.features(x)
features = features.view((features.size(0), -1))
logits = self.linear(features)
return logits, features
def get_embedding_dim(self):
return self.emb_dim | 1,416 | 27.34 | 81 | py |
LoGo | LoGo-main/util/longtail_dataset.py | import numpy as np
from PIL import Image
from torchvision import datasets, transforms
class IMBALANCECIFAR10(datasets.CIFAR10):
cls_num = 10
def __init__(self, phase, imbalance_ratio, root='data/cifar10_lt/', imb_type='exp', train_aug=True):
train = True if phase == 'train' else False
super(IMBALANCECIFAR10, self).__init__(root, train, transform=None, target_transform=None, download=True)
self.train = train
if self.train:
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imbalance_ratio)
self.gen_imbalanced_data(img_num_list)
if train_aug:
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
self.labels = self.targets
print('{} Mode: Contain {} images'.format(phase, len(self.data)))
def _get_class_dict(self):
class_dict = dict()
for i, anno in enumerate(self.get_annotations()):
cat_id = anno["category_id"]
if not cat_id in class_dict:
class_dict[cat_id] = []
class_dict[cat_id].append(i)
return class_dict
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_max = len(self.data) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
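    # Worked example (illustrative): with cls_num=10, imb_type='exp', imb_factor=0.01
    # and 50000 CIFAR-10 images, img_max=5000 and class c keeps
    # int(5000 * 0.01 ** (c / 9)) samples, i.e. 5000, 2997, ..., 50 -- an exponential
    # decay from head to tail classes.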
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_np = np.array(self.targets, dtype=np.int64)
classes = np.unique(targets_np)
self.num_per_cls_dict = dict()
for the_class, the_img_num in zip(classes, img_num_per_cls):
self.num_per_cls_dict[the_class] = the_img_num
idx = np.where(targets_np == the_class)[0]
np.random.shuffle(idx)
selec_idx = idx[:the_img_num]
new_data.append(self.data[selec_idx, ...])
new_targets.extend([the_class, ] * the_img_num)
new_data = np.vstack(new_data)
self.data = new_data
self.targets = new_targets
def __getitem__(self, index):
img, label = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
label = self.target_transform(label)
return img, label #, index
def __len__(self):
return len(self.labels)
def get_num_classes(self):
return self.cls_num
def get_annotations(self):
annos = []
for label in self.labels:
annos.append({'category_id': int(label)})
return annos
def get_cls_num_list(self):
cls_num_list = []
for i in range(self.cls_num):
cls_num_list.append(self.num_per_cls_dict[i])
return cls_num_list
class IMBALANCECIFAR100(IMBALANCECIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
cls_num = 100
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
} | 4,753 | 34.214815 | 113 | py |
LoGo | LoGo-main/util/misc.py | import numpy as np
from torch.utils.data import Dataset
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label
def adjust_learning_rate(args, r):
lr = args.lr
    lr_decay_epochs = [int(args.rounds * 3 / 4)]
steps = np.sum(r > np.asarray(lr_decay_epochs))
if steps > 0:
lr = lr * (args.lr_decay ** steps)
return lr | 696 | 21.483871 | 52 | py |
LoGo | LoGo-main/util/data_simulator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.9
import os
import math
import pickle
import random
import numpy as np
import torch
def shard_balance(dataset, args):
K = args.num_classes
y_train_dict = {i: [] for i in range(K)}
for idx, d in enumerate(dataset):
if args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
y_train_dict[d[1][0]].append(idx)
else:
y_train_dict[d[1]].append(idx)
allocate_data_cnt_dict = {i: int(len(y_train_dict[i]) / args.num_classes_per_user) for i in range(K)}
all_label_lst = list(range(K)) * args.num_classes_per_user
labels_lst = []
for _ in range(args.num_users):
temp_lst = []
idx = np.random.choice(all_label_lst)
temp_lst.append(idx)
while len(temp_lst) != args.num_classes_per_user:
idx = np.random.choice(all_label_lst)
if idx not in temp_lst:
temp_lst.append(idx)
labels_lst.append(temp_lst)
for idx in temp_lst:
del all_label_lst[all_label_lst.index(idx)]
net_dataidx_map = {i: [] for i in range(args.num_users)}
for user_idx, labels in enumerate(labels_lst):
for label in labels:
allocate_idx = np.random.choice(y_train_dict[label], allocate_data_cnt_dict[label], replace=False)
y_train_dict[label] = list(set(y_train_dict[label]) - set(allocate_idx))
net_dataidx_map[user_idx] += list(allocate_idx)
return dict(net_dataidx_map)
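# Illustrative sketch of the Dirichlet sampling used by dir_balance() below: each
# client draws a class-proportion vector from Dir(alpha); a small alpha yields
# skewed (non-IID) clients while a large alpha approaches uniform. All numbers here
# are made up for the example.
def _dirichlet_demo(num_users=4, num_classes=10, alpha=0.5):
    diri = torch.distributions.dirichlet.Dirichlet(alpha * torch.ones(num_classes))
    sample = torch.stack([diri.sample() for _ in range(num_users)])  # (users, classes)
    return sample  # row i is client i's class mixture; each row sums to 1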
def dir_balance(dataset, args, sample=None):
""" for the fairness of annotation cost, each client has same number of samples
"""
C = args.num_classes
K = args.num_users
alpha = args.dd_beta
# Generate the set of clients dataset.
clients_data = {}
for i in range(K):
clients_data[i] = []
# Divide the dataset into each class of dataset.
total_num = len(dataset)
total_data = {}
data_num = np.array([0 for _ in range(C)])
for i in range(C):
total_data[str(i)] = []
for idx, data in enumerate(dataset):
if args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
total_data[str(data[1][0])].append(idx)
data_num[int(data[1][0])] += 1
else:
total_data[str(data[1])].append(idx)
data_num[int(data[1])] += 1
clients_data_num = {}
for client in range(K):
clients_data_num[client] = [0] * C
    # Distribute the data with the Dirichlet distribution.
if sample is None:
diri_dis = torch.distributions.dirichlet.Dirichlet(alpha * torch.ones(C))
sample = torch.cat([diri_dis.sample().unsqueeze(0) for _ in range(K)], 0)
# get balanced matrix
rsum = sample.sum(1)
csum = sample.sum(0)
    epsilon = min(1, K / C, C / K) / 1000
if alpha < 10:
r, c = 1, K / C
while (torch.any(rsum <= r - epsilon)) or (torch.any(csum <= c - epsilon)):
sample /= sample.sum(0)
sample /= sample.sum(1).unsqueeze(1)
rsum = sample.sum(1)
csum = sample.sum(0)
else:
r, c = C / K, 1
while (torch.any(abs(rsum - r) >= epsilon)) or (torch.any(abs(csum - c) >= epsilon)):
sample = sample / sample.sum(1).unsqueeze(1)
sample /= sample.sum(0)
rsum = sample.sum(1)
csum = sample.sum(0)
x = sample * torch.tensor(data_num)
x = torch.ceil(x).long()
x = torch.where(x <= 1, 0, x+1) if alpha < 10 else torch.where(x <= 1, 0, x)
# print(x)
print('Dataset total num', len(dataset))
print('Total dataset class num', data_num)
if alpha < 10:
remain = np.inf
nums = math.ceil(len(dataset) / K)
i = 0
while remain != 0:
i += 1
for client_idx in clients_data.keys():
for cls in total_data.keys():
tmp_set = random.sample(total_data[cls], min(len(total_data[cls]), x[client_idx, int(cls)].item()))
if len(clients_data[client_idx]) + len(tmp_set) > nums:
tmp_set = tmp_set[:nums-len(clients_data[client_idx])]
clients_data[client_idx] += tmp_set
clients_data_num[client_idx][int(cls)] += len(tmp_set)
total_data[cls] = list(set(total_data[cls])-set(tmp_set))
remain = sum([len(d) for _, d in total_data.items()])
if i == 100:
break
# to make same number of samples for each client
index = np.where(np.array([sum(clients_data_num[k]) for k in clients_data_num.keys()]) <= nums-1)[0]
for client_idx in index:
n = nums - len(clients_data[client_idx])
add = 0
for cls in total_data.keys():
tmp_set = total_data[cls][:n-add]
clients_data[client_idx] += tmp_set
clients_data_num[client_idx][int(cls)] += len(tmp_set)
total_data[cls] = list(set(total_data[cls])-set(tmp_set))
add += len(tmp_set)
else:
cumsum = x.T.cumsum(dim=1)
for cls, data in total_data.items():
cum = list(cumsum[int(cls)].numpy())
tmp = np.split(np.array(data), cum)
for client_idx in clients_data.keys():
clients_data[client_idx] += list(tmp[client_idx])
clients_data_num[client_idx][int(cls)] += len(list(tmp[client_idx]))
print('clients_data_num', clients_data_num)
print('clients_data_num', [sum(clients_data_num[k]) for k in clients_data_num.keys()])
with open(os.path.join(args.result_dir, 'clients_data_num.pickle'), 'wb') as f:
pickle.dump(clients_data_num, f)
return clients_data, sample | 6,082 | 34.782353 | 119 | py |
LoGo | LoGo-main/fl_methods/base.py | import copy
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from util.misc import DatasetSplit
class FederatedLearning:
def __init__(self, args, dict_users_train_label=None):
self.args = args
self.dict_users_train_label = dict_users_train_label
self.loss_func = nn.CrossEntropyLoss()
def train(self):
pass
def aggregate(self, w_glob, w_local, idx_user, total_data_num):
if w_glob is None:
w_glob = copy.deepcopy(w_local)
for k in w_glob.keys():
w_glob[k] = w_local[k] * len(self.dict_users_train_label[idx_user]) / total_data_num
else:
for k in w_glob.keys():
w_glob[k] += w_local[k] * len(self.dict_users_train_label[idx_user]) / total_data_num
return w_glob
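    # Worked example (illustrative): if client A holds 100 samples and client B holds
    # 300, total_data_num = 400 and aggregate() above yields
    #   w_glob = 0.25 * w_A + 0.75 * w_B,
    # i.e. standard FedAvg weighting by local dataset size.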
def test(self, net_g, dataset):
data_loader = DataLoader(dataset, batch_size=self.args.test_bs)
data_nums = len(data_loader.dataset)
net_g.eval()
test_loss, correct = 0, 0
probs = []
for idx, (data, target) in enumerate(data_loader):
if self.args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
target = target.squeeze().long()
if self.args.gpu != -1:
data, target = data.to(self.args.device), target.to(self.args.device)
output, emb = net_g(data)
# sum up batch loss
test_loss += self.loss_func(output, target).item()
# get the index of the max log-probability
y_pred = output.data.max(1, keepdim=True)[1]
correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()
test_loss /= data_nums
accuracy = 100.00 * float(correct) / data_nums
return accuracy, test_loss
def on_round_start(self, net_glob=None):
pass
def on_user_iter_start(self, dataset, user_idx):
data_idx = self.dict_users_train_label[user_idx]
self.data_loader = DataLoader(DatasetSplit(dataset, data_idx), batch_size=self.args.local_bs, shuffle=True)
def on_round_end(self, idxs_users=None):
pass
def on_user_iter_end(self):
pass | 2,247 | 32.058824 | 115 | py |
LoGo | LoGo-main/fl_methods/fedprox.py | import copy
import torch
from .base import FederatedLearning
class FedProx(FederatedLearning):
def __init__(self, args, dict_users_train_label=None):
super().__init__(args, dict_users_train_label)
def train(self, net, user_idx=None, lr=0.01, momentum=0.9, weight_decay=0.00001):
net.train()
g_net = copy.deepcopy(net)
# train and update
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
epoch_loss = []
for epoch in range(self.args.local_ep):
batch_loss = []
for images, labels in self.data_loader:
if self.args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
labels = labels.squeeze().long()
images, labels = images.to(self.args.device), labels.to(self.args.device)
optimizer.zero_grad()
output, emb = net(images)
if output.shape[0] == 1:
labels = labels.reshape(1,)
loss = self.loss_func(output, labels)
fed_prox_reg = 0.0
for l_param, g_param in zip(net.parameters(), g_net.parameters()):
fed_prox_reg += (self.args.mu / 2 * torch.norm((l_param - g_param)) ** 2)
loss += fed_prox_reg
loss.backward()
optimizer.step()
batch_loss.append(loss.item())
epoch_loss.append(sum(batch_loss)/len(batch_loss))
return net.state_dict(), sum(epoch_loss) / len(epoch_loss)
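    # The fed_prox_reg term above implements the FedProx objective
    #   min_w  F_k(w) + (mu / 2) * ||w - w_glob||^2;
    # e.g. with mu = 0.01, a parameter drifting 1.0 away from its global value adds
    # 0.005 to the loss, pulling local updates back toward the global model.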
| 1,709 | 33.897959 | 109 | py |
LoGo | LoGo-main/fl_methods/fedavg.py | import torch
from .base import FederatedLearning
class FedAvg(FederatedLearning):
def __init__(self, args, dict_users_train_label=None):
super().__init__(args, dict_users_train_label)
def train(self, net, user_idx=None, lr=0.01, momentum=0.9, weight_decay=0.00001):
net.train()
# train and update
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
epoch_loss = []
for epoch in range(self.args.local_ep):
batch_loss = []
for images, labels in self.data_loader:
if self.args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
labels = labels.squeeze().long()
images, labels = images.to(self.args.device), labels.to(self.args.device)
optimizer.zero_grad()
output, emb = net(images)
if output.shape[0] == 1:
labels = labels.reshape(1,)
loss = self.loss_func(output, labels)
loss.backward()
optimizer.step()
batch_loss.append(loss.item())
epoch_loss.append(sum(batch_loss)/len(batch_loss))
return net.state_dict(), sum(epoch_loss) / len(epoch_loss)
| 1,393 | 33.85 | 109 | py |
LoGo | LoGo-main/fl_methods/scaffold.py | import copy
import torch
from .base import FederatedLearning
class SCAFFOLD(FederatedLearning):
def __init__(self, args, dict_users_train_label=None):
super().__init__(args, dict_users_train_label)
def init_c_nets(self, net_glob):
self.c_nets = {}
for i in range(self.args.num_users):
self.c_nets[i] = copy.deepcopy(net_glob)
self.c_net_glob = copy.deepcopy(net_glob)
def train(self, net, user_idx=None, lr=0.01, momentum=0.9, weight_decay=0.00001):
net.train()
g_net = copy.deepcopy(net)
c_global_para = self.c_net_glob.state_dict()
c_local_para = self.c_nets[user_idx].state_dict()
cnt = 0
# train and update
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
epoch_loss = []
for epoch in range(self.args.local_ep):
batch_loss = []
for images, labels in self.data_loader:
if self.args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
labels = labels.squeeze().long()
images, labels = images.to(self.args.device), labels.to(self.args.device)
optimizer.zero_grad()
output, emb = net(images)
if output.shape[0] == 1:
labels = labels.reshape(1,)
loss = self.loss_func(output, labels)
loss.backward()
optimizer.step()
batch_loss.append(loss.item())
net_para = net.state_dict()
for key in net_para:
net_para[key] = net_para[key] - lr * (c_global_para[key] - c_local_para[key])
net.load_state_dict(net_para)
cnt += 1
epoch_loss.append(sum(batch_loss)/len(batch_loss))
c_new_para = self.c_nets[user_idx].state_dict()
self.c_delta_para = copy.deepcopy(self.c_nets[user_idx].state_dict())
global_model_para = g_net.state_dict()
net_para = net.state_dict()
for key in net_para:
c_new_para[key] = c_new_para[key] - c_global_para[key] + (global_model_para[key] - net_para[key]) / (cnt * self.args.lr)
self.c_delta_para[key] = c_new_para[key] - c_local_para[key]
self.c_nets[user_idx].load_state_dict(c_new_para)
return net.state_dict(), sum(epoch_loss) / len(epoch_loss)
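    # The update above follows SCAFFOLD's client control-variate rule
    #   c_i^+ = c_i - c + (w_glob - w_i) / (cnt * lr),
    # where cnt counts the local passes; c_delta_para = c_i^+ - c_i is accumulated in
    # on_user_iter_end and averaged into the server control variate in on_round_end.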
def on_round_start(self, net_glob):
self.total_delta = copy.deepcopy(net_glob.state_dict())
for key in self.total_delta:
self.total_delta[key] = 0.0
def on_round_end(self, idxs_users):
for key in self.total_delta:
self.total_delta[key] /= len(idxs_users)
c_global_para = self.c_net_glob.state_dict()
for key in c_global_para:
if c_global_para[key].type() == 'torch.LongTensor':
c_global_para[key] += self.total_delta[key].type(torch.LongTensor)
elif c_global_para[key].type() == 'torch.cuda.LongTensor':
c_global_para[key] += self.total_delta[key].type(torch.cuda.LongTensor)
else:
c_global_para[key] += self.total_delta[key]
self.c_net_glob.load_state_dict(c_global_para)
def on_user_iter_end(self):
for key in self.total_delta:
self.total_delta[key] += self.c_delta_para[key]
| 3,530 | 37.380435 | 132 | py |
LoGo | LoGo-main/query_strategies/margin_sampling.py | import copy
import numpy as np
import torch
import torch.nn as nn
from .strategy import Strategy
class MarginSampling(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
if self.args.query_model_mode == "global":
net = self.net
probs = self.predict_prob(unlabel_idxs, net)
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
probs = self.predict_prob(unlabel_idxs, net)
probs_sorted, idxs = probs.sort(descending=True)
U = probs_sorted[:, 0] - probs_sorted[:,1]
return unlabel_idxs[U.sort()[1].numpy()[:n_query]]
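    # Worked example (illustrative): for per-sample sorted probabilities
    #   [0.50, 0.45, ...] -> margin U = 0.05 (ambiguous, queried first),
    #   [0.90, 0.05, ...] -> margin U = 0.85 (confident, queried last);
    # U.sort() is ascending, so the n_query smallest margins are selected.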
| 753 | 26.925926 | 69 | py |
LoGo | LoGo-main/query_strategies/dbal.py | import copy
import numpy as np
from tqdm import tqdm
from sklearn.cluster import KMeans
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from .strategy import Strategy
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label, item
class DBAL(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
beta = 10
unlabel_idxs = np.array(unlabel_idxs)
if self.args.query_model_mode == "global":
net = self.net
probs = self.predict_prob(unlabel_idxs, self.net)
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
probs = self.predict_prob(unlabel_idxs, net)
probs_sorted, idxs = probs.sort(descending=True)
U = probs_sorted[:, 0] - probs_sorted[:,1]
unlabel_idxs = unlabel_idxs[U.sort()[1].numpy()[:n_query * beta]]
U = U.sort()[0].numpy()[:n_query * beta]
        feats = self.get_embedding(unlabel_idxs, net=net).numpy()
# Avoids ValueErrors when we try to sample more instances than we have data points
n_clusters = min(n_query, feats.shape[0])
# Fit kmeans to data
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(feats, sample_weight=1-U)
return unlabel_idxs[np.argmin(kmeans.transform(feats), axis=0)] | 1,679 | 30.111111 | 90 | py |
LoGo | LoGo-main/query_strategies/alfa_mix.py | import copy
import math
import numpy as np
from select import select
from sklearn.cluster import KMeans
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torch.autograd import Variable
from .strategy import Strategy, DatasetSplit
class ALFAMix(Strategy):
def __init__(self, dataset_query, dataset_train, net, args):
super(ALFAMix, self).__init__(dataset_query, dataset_train, net, args)
self.alpha_cap = 0.2
self.alpha_opt = True
self.alpha_closed_form_approx = True
self.alpha_learning_rate = 0.1
        self.alpha_clf_coef = 1.0
        # l2 penalty weight referenced in learn_alpha; assumed default, missing from the original
        self.alpha_l2_coef = 0.01
self.alpha_learning_iters = 5
self.alpha_learn_batch_size = 1000000
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
label_idxs = np.array(label_idxs)
if self.args.query_model_mode == "global":
net = self.net
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
ulb_probs = self.predict_prob(unlabel_idxs, net)
org_ulb_embedding = self.get_embedding(unlabel_idxs, net)
_, probs_sort_idxs = ulb_probs.sort(descending=True)
pred_1 = probs_sort_idxs[:, 0]
org_lb_embedding = self.get_embedding(label_idxs, net)
ulb_embedding = org_ulb_embedding
lb_embedding = org_lb_embedding
unlabeled_size = ulb_embedding.size(0)
embedding_size = ulb_embedding.size(1)
min_alphas = torch.ones((unlabeled_size, embedding_size), dtype=torch.float)
candidate = torch.zeros(unlabeled_size, dtype=torch.bool)
if self.alpha_closed_form_approx:
var_emb = Variable(ulb_embedding, requires_grad=True).to(self.args.device)
out = self.net.linear(var_emb)
loss = F.cross_entropy(out, pred_1.to(self.args.device))
grads = torch.autograd.grad(loss, var_emb)[0].data.cpu()
del loss, var_emb, out
else:
grads = None
alpha_cap = 0.
while alpha_cap < 1.0:
alpha_cap += self.alpha_cap
tmp_pred_change, tmp_min_alphas = \
self.find_candidate_set(
lb_embedding, ulb_embedding, pred_1, ulb_probs, alpha_cap=alpha_cap,
Y=self.get_labels(label_idxs),
grads=grads)
is_changed = min_alphas.norm(dim=1) >= tmp_min_alphas.norm(dim=1)
min_alphas[is_changed] = tmp_min_alphas[is_changed]
candidate += tmp_pred_change
print('With alpha_cap set to %f, number of inconsistencies: %d' % (alpha_cap, int(tmp_pred_change.sum().item())))
if candidate.sum() > n_query:
break
if candidate.sum() > 0:
print('Number of inconsistencies: %d' % (int(candidate.sum().item())))
print('alpha_mean_mean: %f' % min_alphas[candidate].mean(dim=1).mean().item())
print('alpha_std_mean: %f' % min_alphas[candidate].mean(dim=1).std().item())
print('alpha_mean_std %f' % min_alphas[candidate].std(dim=1).mean().item())
c_alpha = F.normalize(org_ulb_embedding[candidate].view(candidate.sum(), -1), p=2, dim=1).detach()
selected_idxs = self.sample(min(n_query, candidate.sum().item()), feats=c_alpha)
# u_selected_idxs = candidate.nonzero(as_tuple=True)[0][selected_idxs]
selected_idxs = unlabel_idxs[candidate][selected_idxs]
else:
            selected_idxs = np.array([], dtype=int)
if len(selected_idxs) < n_query:
remained = n_query - len(selected_idxs)
not_selected_idxs = np.array(list(set(unlabel_idxs) - set(selected_idxs)))
selected_idxs = np.concatenate([selected_idxs, np.random.choice(not_selected_idxs, remained)])
print('picked %d samples from RandomSampling.' % (remained))
return np.array(selected_idxs)
def get_labels(self, data_idxs):
loader = DataLoader(DatasetSplit(self.dataset_query, data_idxs), shuffle=False)
with torch.no_grad():
for i, (_, y, _) in enumerate(loader):
if i == 0:
labels = copy.deepcopy(y)
else:
labels = torch.cat([labels, copy.deepcopy(y)], dim=0)
return labels.numpy()
def find_candidate_set(self, lb_embedding, ulb_embedding, pred_1, ulb_probs, alpha_cap, Y, grads):
unlabeled_size = ulb_embedding.size(0)
embedding_size = ulb_embedding.size(1)
min_alphas = torch.ones((unlabeled_size, embedding_size), dtype=torch.float)
pred_change = torch.zeros(unlabeled_size, dtype=torch.bool)
if self.alpha_closed_form_approx:
alpha_cap /= math.sqrt(embedding_size)
grads = grads.to(self.args.device)
for i in range(self.args.num_classes):
y_idx = (Y == i).reshape(-1,)
emb = lb_embedding[y_idx]
if emb.size(0) == 0:
emb = lb_embedding
anchor_i = emb.mean(dim=0).view(1, -1).repeat(unlabeled_size, 1)
if self.alpha_closed_form_approx:
embed_i, ulb_embed = anchor_i.to(self.args.device), ulb_embedding.to(self.args.device)
alpha = self.calculate_optimum_alpha(alpha_cap, embed_i, ulb_embed, grads)
embedding_mix = (1 - alpha) * ulb_embed + alpha * embed_i
out = self.net.linear(embedding_mix)
out = out.detach().cpu()
alpha = alpha.cpu()
pc = out.argmax(dim=1) != pred_1
else:
alpha = self.generate_alpha(unlabeled_size, embedding_size, alpha_cap)
if self.alpha_opt:
alpha, pc = self.learn_alpha(ulb_embedding, pred_1, anchor_i, alpha, alpha_cap,
log_prefix=str(i))
else:
embedding_mix = (1 - alpha) * ulb_embedding + alpha * anchor_i
out = self.net.linear(embedding_mix.to(self.args.device))
out = out.detach().cpu()
pc = out.argmax(dim=1) != pred_1
torch.cuda.empty_cache()
alpha[~pc] = 1.
pred_change[pc] = True
is_min = min_alphas.norm(dim=1) > alpha.norm(dim=1)
min_alphas[is_min] = alpha[is_min]
return pred_change, min_alphas
def calculate_optimum_alpha(self, eps, lb_embedding, ulb_embedding, ulb_grads):
z = (lb_embedding - ulb_embedding) #* ulb_grads
alpha = (eps * z.norm(dim=1) / ulb_grads.norm(dim=1)).unsqueeze(dim=1).repeat(1, z.size(1)) * ulb_grads / (z + 1e-8)
return alpha
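    # Sketch of the closed form above: with z = anchor - ulb_embedding, ALFA-Mix seeks
    # the smallest interpolation alpha that could flip the prediction. A first-order
    # approximation of the loss gives, elementwise,
    #   alpha* ~ eps * (||z|| / ||grad||) * grad / z,
    # so directions with large loss gradients need only a small alpha.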
def sample(self, n, feats):
feats = feats.numpy()
cluster_learner = KMeans(n_clusters=n)
cluster_learner.fit(feats)
cluster_idxs = cluster_learner.predict(feats)
centers = cluster_learner.cluster_centers_[cluster_idxs]
dis = (feats - centers) ** 2
dis = dis.sum(axis=1)
return np.array(
[np.arange(feats.shape[0])[cluster_idxs == i][dis[cluster_idxs == i].argmin()] for i in range(n) if
(cluster_idxs == i).sum() > 0])
def retrieve_anchor(self, embeddings, count):
return embeddings.mean(dim=0).view(1, -1).repeat(count, 1)
def generate_alpha(self, size, embedding_size, alpha_cap):
alpha = torch.normal(
mean=alpha_cap / 2.0,
std=alpha_cap / 2.0,
size=(size, embedding_size))
alpha[torch.isnan(alpha)] = 1
return self.clamp_alpha(alpha, alpha_cap)
def clamp_alpha(self, alpha, alpha_cap):
return torch.clamp(alpha, min=1e-8, max=alpha_cap)
def learn_alpha(self, org_embed, labels, anchor_embed, alpha, alpha_cap, log_prefix=''):
        labels = labels.to(self.args.device)
min_alpha = torch.ones(alpha.size(), dtype=torch.float)
pred_changed = torch.zeros(labels.size(0), dtype=torch.bool)
loss_func = torch.nn.CrossEntropyLoss(reduction='none')
self.net.linear.eval()
for i in range(self.alpha_learning_iters):
tot_nrm, tot_loss, tot_clf_loss = 0., 0., 0.
for b in range(math.ceil(float(alpha.size(0)) / self.alpha_learn_batch_size)):
self.net.linear.zero_grad()
start_idx = b * self.alpha_learn_batch_size
end_idx = min((b + 1) * self.alpha_learn_batch_size, alpha.size(0))
l = alpha[start_idx:end_idx]
l = torch.autograd.Variable(l.to(self.args.device), requires_grad=True)
opt = torch.optim.Adam([l], lr=self.alpha_learning_rate / (1. if i < self.alpha_learning_iters * 2 / 3 else 10.))
e = org_embed[start_idx:end_idx].to(self.args.device)
c_e = anchor_embed[start_idx:end_idx].to(self.args.device)
embedding_mix = (1 - l) * e + l * c_e
out = self.net.linear(embedding_mix)
label_change = out.argmax(dim=1) != labels[start_idx:end_idx]
tmp_pc = torch.zeros(labels.size(0), dtype=torch.bool).to(self.args.device)
tmp_pc[start_idx:end_idx] = label_change
pred_changed[start_idx:end_idx] += tmp_pc[start_idx:end_idx].detach().cpu()
tmp_pc[start_idx:end_idx] = tmp_pc[start_idx:end_idx] * (l.norm(dim=1) < min_alpha[start_idx:end_idx].norm(dim=1).to(self.args.device))
min_alpha[tmp_pc] = l[tmp_pc[start_idx:end_idx]].detach().cpu()
clf_loss = loss_func(out, labels[start_idx:end_idx].to(self.args.device))
l2_nrm = torch.norm(l, dim=1)
clf_loss *= -1
loss = self.alpha_clf_coef * clf_loss + self.alpha_l2_coef * l2_nrm
loss.sum().backward(retain_graph=True)
opt.step()
l = self.clamp_alpha(l, alpha_cap)
alpha[start_idx:end_idx] = l.detach().cpu()
tot_clf_loss += clf_loss.mean().item() * l.size(0)
tot_loss += loss.mean().item() * l.size(0)
tot_nrm += l2_nrm.mean().item() * l.size(0)
del l, e, c_e, embedding_mix
torch.cuda.empty_cache()
count = pred_changed.sum().item()
return min_alpha.cpu(), pred_changed.cpu() | 10,525 | 39.484615 | 151 | py |
LoGo | LoGo-main/query_strategies/egl.py | import copy
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from .strategy import Strategy
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label, item
class EGL(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
if self.args.query_model_mode == "global":
net = self.net
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
# gradient for bias term
loss_func = nn.CrossEntropyLoss()
dataloader = DataLoader(DatasetSplit(self.dataset_query, unlabel_idxs), shuffle=False)
norms = torch.zeros(len(unlabel_idxs))
        for x, _, idxs in tqdm(dataloader):
            x = x.to(self.args.device)
            output, _ = net(x)
            probs = F.softmax(output, dim=1)
            for cls in range(self.args.num_classes):
                net.zero_grad()
                y = torch.tensor([cls]).to(self.args.device)
                loss = loss_func(output, y)
                # the same forward graph is reused for every class label
                loss.backward(retain_graph=True)
                for name, param in net.named_parameters():
                    if "linear" in name and 'weight' in name:
                        # expected gradient norm, weighted by the predicted class probability
                        norms[idxs.item()] += param.grad.cpu().flatten().norm() * probs[0, cls].item()
        return unlabel_idxs[norms.sort(descending=True)[1][:n_query].numpy()] | 1,812 | 31.963636 | 94 | py |
LoGo | LoGo-main/query_strategies/entropy_sampling.py | import copy
import numpy as np
import torch
from .strategy import Strategy
class EntropySampling(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
if self.args.query_model_mode == "global":
probs = self.predict_prob(unlabel_idxs, self.net)
elif self.args.query_model_mode == "local_only":
local_net = self.training_local_only(label_idxs)
probs = self.predict_prob(unlabel_idxs, local_net)
log_probs = torch.log(probs)
log_probs[log_probs == float("-inf")] = 0
log_probs[log_probs == float("inf")] = 0
U = (probs*log_probs).sum(1)
return unlabel_idxs[U.sort()[1][:n_query]]
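    # Worked example (illustrative): U is the negative entropy sum(p * log p), e.g.
    #   p = [0.5, 0.5]   -> U = -0.693 (uncertain),
    #   p = [0.99, 0.01] -> U = -0.056 (confident);
    # the ascending sort therefore queries the highest-entropy samples first.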
| 766 | 28.5 | 69 | py |
LoGo | LoGo-main/query_strategies/strategy.py | import copy
import numpy as np
from copy import deepcopy
from datetime import datetime
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label, item
class Strategy:
def __init__(self, dataset_query, dataset_train, net, args):
self.dataset_query = dataset_query
self.dataset_train = dataset_train
self.net = net
self.args = args
self.local_net_dict = {}
self.loss_func = nn.CrossEntropyLoss()
def query(self, label_idx, unlabel_idx):
pass
def predict_prob(self, unlabel_idxs, net=None):
loader_te = DataLoader(DatasetSplit(self.dataset_query, unlabel_idxs), shuffle=False)
if net is None:
net = self.net
net.eval()
probs = torch.zeros([len(unlabel_idxs), self.args.num_classes])
with torch.no_grad():
for x, y, idxs in loader_te:
x, y = Variable(x.to(self.args.device)), Variable(y.to(self.args.device))
output, emb = net(x)
probs[idxs] = torch.nn.functional.softmax(output, dim=1).cpu().data
return probs
def get_embedding(self, data_idxs, net=None):
loader_te = DataLoader(DatasetSplit(self.dataset_query, data_idxs), shuffle=False)
if net is None:
net = self.net
net.eval()
embedding = torch.zeros([len(data_idxs), net.get_embedding_dim()])
with torch.no_grad():
for x, y, idxs in loader_te:
x, y = Variable(x.to(self.args.device)), Variable(y.to(self.args.device))
out, e1 = net(x)
embedding[idxs] = e1.data.cpu()
return embedding
# gradient embedding (assumes cross-entropy loss)
def get_grad_embedding(self, data_idxs, net=None):
if net is None:
net = self.net
embDim = net.get_embedding_dim()
net.eval()
nLab = self.args.num_classes
embedding = np.zeros([len(data_idxs), embDim * nLab])
loader_te = DataLoader(DatasetSplit(self.dataset_query, data_idxs), shuffle=False)
with torch.no_grad():
for x, y, idxs in loader_te:
x, y = Variable(x.to(self.args.device)), Variable(y.to(self.args.device))
cout, out = net(x)
out = out.data.cpu().numpy()
batchProbs = F.softmax(cout, dim=1).data.cpu().numpy()
maxInds = np.argmax(batchProbs, 1)
for j in range(len(y)):
for c in range(nLab):
if c == maxInds[j]:
embedding[idxs[j]][embDim * c : embDim * (c+1)] = deepcopy(out[j]) * (1 - batchProbs[j][c])
else:
embedding[idxs[j]][embDim * c : embDim * (c+1)] = deepcopy(out[j]) * (-1 * batchProbs[j][c])
return torch.Tensor(embedding)
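    # The loop above builds the BADGE gradient embedding: for cross-entropy with the
    # hypothesized label y_hat = argmax p, the last-layer gradient per class c is
    #   g_c = (p_c - 1[c == y_hat]) * emb.
    # The code stores -g (the (1 - p) / (-p) scaling), which leaves the pairwise
    # distances used for clustering unchanged.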
def get_grad_embedding_maxInd(self, data_idxs, net=None):
if net is None:
net = self.net
embDim = net.get_embedding_dim()
net.eval()
nLab = self.args.num_classes
embedding = np.zeros([len(data_idxs), embDim])
loader_te = DataLoader(DatasetSplit(self.dataset_query, data_idxs), shuffle=False)
with torch.no_grad():
for x, y, idxs in loader_te:
x, y = Variable(x.to(self.args.device)), Variable(y.to(self.args.device))
cout, out = net(x)
out = out.data.cpu().numpy()
batchProbs = F.softmax(cout, dim=1).data.cpu().numpy()
maxInds = np.argmax(batchProbs, 1)
for j in range(len(y)):
for c in range(nLab):
if c == maxInds[j]:
                            embedding[idxs[j]] = deepcopy(out[j]) * (1 - batchProbs[j][c])
return torch.Tensor(embedding)
def training_local_only(self, label_idxs, finetune=False):
finetune_ep = 50
local_net = deepcopy(self.net)
if not finetune:
# Training Local Model from the scratch
local_net.load_state_dict(self.args.raw_ckpt)
# else: fine-tune from global model checkpoint
# train and update
label_train = DataLoader(DatasetSplit(self.dataset_train, label_idxs), batch_size=self.args.local_bs, shuffle=True)
optimizer = torch.optim.SGD(local_net.parameters(),
lr=self.args.lr,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [int(finetune_ep * 3 / 4)], gamma=self.args.lr_decay)
# start = datetime.now()
for epoch in range(finetune_ep):
local_net.train()
for images, labels, _ in label_train:
if self.args.dataset in ['pathmnist', 'octmnist', 'organamnist', 'dermamnist', 'bloodmnist']:
labels = labels.squeeze().long()
images, labels = images.to(self.args.device), labels.to(self.args.device)
optimizer.zero_grad()
output, emb = local_net(images)
if output.shape[0] == 1:
labels = labels.reshape(1,)
loss = self.loss_func(output, labels)
loss.backward()
optimizer.step()
scheduler.step()
correct, cnt = 0., 0.
local_net.eval()
with torch.no_grad():
for images, labels, _ in label_train:
images, labels = images.to(self.args.device), labels.to(self.args.device)
output, _ = local_net(images)
y_pred = output.data.max(1, keepdim=True)[1]
correct += y_pred.eq(labels.data.view_as(y_pred)).long().cpu().sum()
cnt += len(labels)
acc = correct / cnt
if acc >= 0.99:
break
# time = datetime.now() - start
# print('Local-only model fine-tuning takes {}'.format(time))
return local_net | 6,979 | 36.326203 | 123 | py |
LoGo | LoGo-main/query_strategies/gcnal.py | import math
import numpy as np
from tqdm import tqdm
from sklearn.metrics import pairwise_distances
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.nn.parameter import Parameter
from .strategy import Strategy
class GCNAL(Strategy):
def __init__(self, dataset_query, dataset_train, net, args):
super(GCNAL, self).__init__(dataset_query, dataset_train, net, args)
self.method = "CoreGCN" # UncertainGCN / CoreGCN
self.hidden_units = 128
self.dropout_rate = 0.3
self.LR_GCN = 1e-3
self.WDECAY = 5e-4
self.lambda_loss = 1.2
self.s_margin = 0.1
self.subset_size = 10000
def furthest_first(self, X, X_set, n):
m = np.shape(X)[0]
if np.shape(X_set)[0] == 0:
min_dist = np.tile(float("inf"), m)
else:
dist_ctr = pairwise_distances(X, X_set)
min_dist = np.amin(dist_ctr, axis=1)
idxs = []
for i in range(n):
idx = min_dist.argmax()
idxs.append(idx)
dist_new_ctr = pairwise_distances(X, X[[idx], :])
for j in range(m):
min_dist[j] = min(min_dist[j], dist_new_ctr[j, 0])
return idxs
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
label_idxs = np.array(label_idxs)
random_unlabel_idxs = np.random.choice(unlabel_idxs, min(self.subset_size, len(unlabel_idxs)), replace=False)
data_idxs = list(random_unlabel_idxs) + list(label_idxs)
if self.args.query_model_mode == "global":
net = self.net
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
embeds = self.get_embedding(data_idxs, net)
u_features = embeds[:len(random_unlabel_idxs), :]
l_features = embeds[len(random_unlabel_idxs):, :]
features = torch.cat([u_features, l_features], dim=0)
features = nn.functional.normalize(features.to(self.args.device))
adj = aff_to_adj(features)
gcn_model = GCN(nfeat=features.shape[1],
nhid=self.hidden_units,
nclass=1,
dropout=self.dropout_rate).to(self.args.device)
optim_gcn = optim.Adam(gcn_model.parameters(), lr=self.LR_GCN, weight_decay=self.WDECAY)
nlbl = np.arange(0, u_features.size(0), 1)
lbl = np.arange(u_features.size(0), features.size(0), 1)
print('Learning Graph Convolution Network...')
gcn_model.train()
for _ in tqdm(range(200)):
optim_gcn.zero_grad()
outputs, _, _ = gcn_model(features, adj)
loss = BCEAdjLoss(outputs, lbl, nlbl, self.lambda_loss)
loss.backward()
optim_gcn.step()
gcn_model.eval()
with torch.no_grad():
with torch.cuda.device(self.args.device):
inputs = features.cuda()
#labels = binary_labels.cuda()
scores, _, feat = gcn_model(inputs, adj)
if self.method == "CoreGCN":
feat = feat.detach().cpu().numpy()
chosen = self.furthest_first(feat[nlbl, :], feat[lbl, :], n_query)
else:
s_margin = self.s_margin
scores_median = np.squeeze(torch.abs(scores[nlbl] - s_margin).detach().cpu().numpy())
chosen = np.argsort(-(scores_median))[-n_query:]
del gcn_model, optim_gcn, feat, features
torch.cuda.empty_cache()
return random_unlabel_idxs[chosen]
class GCNDataset(Dataset):
def __init__(self, features, adj, labeled):
self.features = features
self.labeled = labeled
self.adj = adj
def __getitem__(self, index):
return self.features[index], self.adj[index], self.labeled[index]
def __len__(self):
return len(self.features)
def aff_to_adj(x):
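    # build a graph adjacency from feature affinities: A = X X^T with the
    # self-affinity removed, column-normalized by node degree, then the
    # identity added back as self-loops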
x = x.detach().cpu().numpy()
adj = np.matmul(x, x.transpose())
adj += -1.0*np.eye(adj.shape[0])
    adj_diag = np.sum(adj, axis=0)  # node degrees (adj is symmetric, so row and column sums coincide)
adj = np.matmul(adj, np.diag(1/adj_diag))
adj = adj + np.eye(adj.shape[0])
adj = torch.Tensor(adj).cuda()
return adj
def BCEAdjLoss(scores, lbl, nlbl, l_adj):
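    # binary cross-entropy over the GCN scores: labeled nodes (lbl) are pushed
    # toward 1, unlabeled nodes (nlbl) toward 0, with l_adj weighting the unlabeled term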
lnl = torch.log(scores[lbl])
lnu = torch.log(1 - scores[nlbl])
labeled_score = torch.mean(lnl)
unlabeled_score = torch.mean(lnu)
bce_adj_loss = -labeled_score - l_adj*unlabeled_score
return bce_adj_loss
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nhid)
self.gc3 = GraphConvolution(nhid, nclass)
self.dropout = dropout
self.linear = nn.Linear(nclass, 1)
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
feat = F.dropout(x, self.dropout, training=self.training)
x = self.gc3(feat, adj)
#x = self.linear(x)
# x = F.softmax(x, dim=1)
return torch.sigmoid(x), feat, torch.cat((feat,x),1) | 6,451 | 31.918367 | 117 | py |
LoGo | LoGo-main/query_strategies/__init__.py | import os
import sys
import copy
import pickle
import random
import datetime
import numpy as np
import torch
from models import get_model
from .random_sampling import RandomSampling
from .least_confidence import LeastConfidence
from .margin_sampling import MarginSampling
from .entropy_sampling import EntropySampling
from .core_set import CoreSet
from .badge_sampling import BadgeSampling
from .adversial_deepfool import AdversarialDeepFool
from .dbal import DBAL
from .egl import EGL
from .gcnal import GCNAL
from .alfa_mix import ALFAMix
from .fal import EnsLogitEntropy, EnsLogitBadge
from .fal import EnsRankEntropy, EnsRankBadge
from .fal import FTEntropy, FTBadge
from .fal import LoGo
def random_query_samples(dict_users_train_total, dict_users_test_total, args):
""" randomly select the labeled samples at the first round
"""
    args.dict_users_total_path = os.path.join(args.dict_user_path, 'dict_users_train_test_total.pkl')
with open(args.dict_users_total_path, 'wb') as handle:
pickle.dump((dict_users_train_total, dict_users_test_total), handle)
dict_users_train_label_path = os.path.join(args.dict_user_path, 'dict_users_train_label_{:.3f}.pkl'.format(args.current_ratio))
dict_users_train_label = {user_idx: [] for user_idx in dict_users_train_total.keys()}
# sample n_start example on each client
for idx in dict_users_train_total.keys():
dict_users_train_label[idx] = np.random.choice(np.array(list(dict_users_train_total[idx])), int(args.n_data / args.num_users), replace=False)
with open(dict_users_train_label_path, 'wb') as handle:
pickle.dump(dict_users_train_label, handle)
return dict_users_train_label, args
def algo_query_samples(dataset_train, dataset_query, dict_users_train_total, args):
""" query samples from the unlabeled pool
"""
previous_ratio = args.current_ratio - args.query_ratio
path = os.path.join(args.dict_user_path, 'dict_users_train_label_{:.3f}.pkl'.format(previous_ratio))
with open(path, 'rb') as f:
dict_users_train_label = pickle.load(f)
print("Before Querying")
total_data_cnt = 0
for user_idx in range(args.num_users):
print(user_idx, len(dict_users_train_label[user_idx]))
total_data_cnt += len(dict_users_train_label[user_idx])
print(total_data_cnt)
print("-" * 20)
# Build model
query_net = get_model(args)
args.raw_ckpt = copy.deepcopy(query_net.state_dict())
query_net_state_dict = torch.load(args.query_model)
query_net.load_state_dict(query_net_state_dict)
# AL baselines
if args.al_method == "random":
strategy = RandomSampling(dataset_query, dataset_train, query_net, args)
elif args.al_method == "conf":
strategy = LeastConfidence(dataset_query, dataset_train, query_net, args)
elif args.al_method == "margin":
strategy = MarginSampling(dataset_query, dataset_train, query_net, args)
elif args.al_method == "entropy":
strategy = EntropySampling(dataset_query, dataset_train, query_net, args)
elif args.al_method == "coreset":
strategy = CoreSet(dataset_query, dataset_train, query_net, args)
elif args.al_method == "badge":
strategy = BadgeSampling(dataset_query, dataset_train, query_net, args)
elif args.al_method == "gcnal":
strategy = GCNAL(dataset_query, dataset_train, query_net, args)
elif args.al_method == "alfa_mix":
strategy = ALFAMix(dataset_query, dataset_train, query_net, args)
# FAL baselines
elif args.al_method == "ens_logit_entropy":
strategy = EnsLogitEntropy(dataset_query, dataset_train, query_net, args)
elif args.al_method == "ens_logit_badge":
strategy = EnsLogitBadge(dataset_query, dataset_train, query_net, args)
elif args.al_method == "ens_rank_entropy":
strategy = EnsRankEntropy(dataset_query, dataset_train, query_net, args)
elif args.al_method == "ens_rank_badge":
strategy = EnsRankBadge(dataset_query, dataset_train, query_net, args)
elif args.al_method == "ft_entropy":
strategy = FTEntropy(dataset_query, dataset_train, query_net, args)
elif args.al_method == "ft_badge":
strategy = FTBadge(dataset_query, dataset_train, query_net, args)
# our LoGo algorithm
elif args.al_method == "logo":
strategy = LoGo(dataset_query, dataset_train, query_net, args)
else:
        exit('There is no such AL method')
time = datetime.timedelta()
for user_idx in dict_users_train_total.keys():
total_idxs = dict_users_train_total[user_idx]
label_idxs = dict_users_train_label[user_idx]
unlabel_idxs = list(set(total_idxs) - set(label_idxs))
start = datetime.datetime.now()
new_data = strategy.query(user_idx, label_idxs, unlabel_idxs, int(args.n_query / args.num_users))
time += datetime.datetime.now() - start
print(args.al_method, user_idx)
print("(Before) Label examples: {}".format(len(label_idxs)))
if len(new_data) < int(args.n_query / args.num_users):
sys.exit("too few remaining examples to query")
dict_users_train_label[user_idx] = np.array(list(new_data) + list(label_idxs))
print("(After) Label examples: {}".format(len(list(new_data)) + len(label_idxs)))
time /= len(dict_users_train_total)
print('Querying instances takes {}'.format(time))
# Save dict_users for next round
path = os.path.join(args.dict_user_path, 'dict_users_train_label_{:.3f}.pkl'.format(args.current_ratio))
with open(path, 'wb') as handle:
pickle.dump(dict_users_train_label, handle)
return dict_users_train_label
| 5,878 | 40.695035 | 149 | py |
LoGo | LoGo-main/query_strategies/adversial_deepfool.py | import copy
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from .strategy import Strategy
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label, item
class AdversarialDeepFool(Strategy):
def __init__(self, dataset_query, dataset_train, net, args):
super(AdversarialDeepFool, self).__init__(dataset_query, dataset_train, net, args)
self.max_iter = 10
def cal_dis(self, net, x):
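        # DeepFool-style distance: accumulate the minimal perturbation eta that
        # flips the predicted class; ||eta||^2 approximates the distance to the
        # decision boundary (smaller = closer = more informative)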
nx = torch.unsqueeze(x, 0)
nx.requires_grad_()
eta = torch.zeros(nx.shape)
out, e1 = net(nx+eta)
n_class = out.shape[1]
py = out.max(1)[1].item()
ny = out.max(1)[1].item()
i_iter = 0
while py == ny and i_iter < self.max_iter:
out[0, py].backward(retain_graph=True)
grad_np = nx.grad.data.clone()
value_l = np.inf
ri = None
for i in range(n_class):
if i == py:
continue
nx.grad.data.zero_()
out[0, i].backward(retain_graph=True)
grad_i = nx.grad.data.clone()
wi = grad_i - grad_np
fi = out[0, i] - out[0, py]
value_i = np.abs(fi.item()) / np.linalg.norm(wi.numpy().flatten())
                if value_i < value_l:
                    value_l = value_i  # keep the smallest boundary distance found so far
                    ri = value_i/np.linalg.norm(wi.numpy().flatten()) * wi
eta += ri.clone()
nx.grad.data.zero_()
out, e1 = net(nx+eta)
py = out.max(1)[1].item()
i_iter += 1
return (eta*eta).numpy().sum()
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabeld_data = DatasetSplit(self.dataset_query, unlabel_idxs)
if self.args.query_model_mode == "global":
net = self.net
elif self.args.query_model_mode == "local_only":
net = self.training_local_only(label_idxs)
net.cpu()
net.eval()
dis = np.zeros(len(unlabel_idxs))
for i in range(len(unlabeld_data)):
x, _, _ = unlabeld_data[i]
dis[i] = self.cal_dis(net, x)
net.cuda()
unlabel_idxs = np.array(unlabel_idxs)
return unlabel_idxs[dis.argsort()[:n_query]] | 2,570 | 27.566667 | 90 | py |
LoGo | LoGo-main/query_strategies/fal/ensemble_logit.py | import pdb
import copy
import numpy as np
from scipy import stats
from sklearn.metrics import pairwise_distances
import torch
from ..strategy import Strategy
class EnsLogitConf(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
g_net = self.net
l_net = self.training_local_only(label_idxs)
probs1 = self.predict_prob(unlabel_idxs, g_net)
probs2 = self.predict_prob(unlabel_idxs, l_net)
probs = (probs1 + probs2) / 2
U = probs.max(1)[0]
return unlabel_idxs[U.sort()[1][:n_query]]
class EnsLogitMargin(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
g_net = self.net
l_net = self.training_local_only(label_idxs)
probs1 = self.predict_prob(unlabel_idxs, g_net)
probs2 = self.predict_prob(unlabel_idxs, l_net)
probs = (probs1 + probs2) / 2
probs_sorted, idxs = probs.sort(descending=True)
U = probs_sorted[:, 0] - probs_sorted[:,1]
return unlabel_idxs[U.sort()[1][:n_query]]
class EnsLogitEntropy(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
g_net = self.net
l_net = self.training_local_only(label_idxs)
probs1 = self.predict_prob(unlabel_idxs, g_net)
probs2 = self.predict_prob(unlabel_idxs, l_net)
probs = (probs1 + probs2) / 2
log_probs = torch.log(probs)
log_probs[log_probs == float("-inf")] = 0
log_probs[log_probs == float("inf")] = 0
U = (probs*log_probs).sum(1)
return unlabel_idxs[U.sort()[1][:n_query]]
class EnsLogitCoreSet(Strategy):
def furthest_first(self, X, X_set, n):
m = np.shape(X)[0]
if np.shape(X_set)[0] == 0:
min_dist = np.tile(float("inf"), m)
else:
dist_ctr = pairwise_distances(X, X_set)
min_dist = np.amin(dist_ctr, axis=1)
idxs = []
for i in range(n):
idx = min_dist.argmax()
idxs.append(idx)
dist_new_ctr = pairwise_distances(X, X[[idx], :])
for j in range(m):
min_dist[j] = min(min_dist[j], dist_new_ctr[j, 0])
return idxs
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
self.tor = 1e-4
data_idxs = list(unlabel_idxs) + list(label_idxs)
unlabel_idxs = np.array(unlabel_idxs)
label_idxs = np.array(label_idxs)
g_net = self.net
l_net = self.training_local_only(label_idxs)
embedding1 = self.get_embedding(data_idxs, g_net)
embedding2 = self.get_embedding(data_idxs, l_net)
embedding = (embedding1 + embedding2) / 2
embedding = embedding.numpy()
chosen = self.furthest_first(embedding[:len(unlabel_idxs), :], embedding[len(unlabel_idxs):, :], n_query)
return unlabel_idxs[chosen]
# kmeans ++ initialization
def init_centers(X, K):
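    # k-means++ seeding: start from the point with the largest embedding norm,
    # then repeatedly sample the next center with probability proportional to
    # the squared distance from its nearest already-chosen center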
ind = np.argmax([np.linalg.norm(s, 2) for s in X])
mu = [X[ind]]
indsAll = [ind]
centInds = [0.] * len(X)
cent = 0
# print('#Samps\tTotal Distance')
while len(mu) < K:
if len(mu) == 1:
D2 = pairwise_distances(X, mu).ravel().astype(float)
else:
newD = pairwise_distances(X, [mu[-1]]).ravel().astype(float)
for i in range(len(X)):
if D2[i] > newD[i]:
centInds[i] = cent
D2[i] = newD[i]
# print(str(len(mu)) + '\t' + str(sum(D2)), flush=True)
if sum(D2) == 0.0: pdb.set_trace()
D2 = D2.ravel().astype(float)
Ddist = (D2 ** 2)/ sum(D2 ** 2)
customDist = stats.rv_discrete(name='custm', values=(np.arange(len(D2)), Ddist))
ind = customDist.rvs(size=1)[0]
while ind in indsAll: ind = customDist.rvs(size=1)[0]
mu.append(X[ind])
indsAll.append(ind)
cent += 1
return indsAll
class EnsLogitBadge(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
g_net = self.net
l_net = self.training_local_only(label_idxs)
gradEmbedding1 = self.get_grad_embedding(list(unlabel_idxs), net=g_net)
gradEmbedding2 = self.get_grad_embedding(list(unlabel_idxs), net=l_net)
gradEmbedding = (gradEmbedding1 + gradEmbedding2) / 2
gradEmbedding = gradEmbedding.numpy()
        chosen = init_centers(gradEmbedding, n_query)
unlabel_idxs = np.array(unlabel_idxs)
return unlabel_idxs[chosen]
| 4,828 | 30.769737 | 113 | py |
LoGo | LoGo-main/query_strategies/fal/logo.py | import copy
import math
import numpy as np
from copy import deepcopy
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
from ..strategy import Strategy
class LoGo(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
g_net = self.net
l_net = self.training_local_only(label_idxs)
# cluster with uncertain samples by local net
embedding = self.get_grad_embedding_maxInd(list(unlabel_idxs), net=l_net)
print("Macro Step: K-Means EM algorithm with local-only model")
kmeans = KMeans(n_clusters=n_query)
kmeans.fit(embedding)
cluster_pred = kmeans.labels_
cluster_dict = {i: [] for i in range(n_query)}
for u_idx, c in zip(unlabel_idxs, cluster_pred):
cluster_dict[c].append(u_idx)
print("Micro Step: 1 step of EM algorithm with global model")
# query with uncertain samples by global net via predefined cluster
query_idx = []
for c_i in cluster_dict.keys():
cluster_idxs = np.array(cluster_dict[c_i])
probs = self.predict_prob(cluster_idxs, g_net)
log_probs = torch.log(probs)
# inf to zero
log_probs[log_probs == float('-inf')] = 0
log_probs[log_probs == float('inf')] = 0
U = (probs*log_probs).sum(1)
U = U.numpy()
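            # U = sum(p * log p) is the negative entropy, so the ascending argsort
            # below puts the most uncertain (highest-entropy) sample first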
try:
chosen = np.argsort(U)[0]
query_idx.append(cluster_idxs[chosen])
except:
# IndexError: index 0 is out of bounds for axis 0 with size 0 with ConvergenceWarning
continue
query_idx = list(set(query_idx))
        # k-means sometimes yields fewer distinct centroids than requested due to convergence issues
if len(query_idx) != n_query:
            print('number of cluster centroids differs from the query budget')
num = math.ceil((n_query - len(query_idx)) / len(np.unique(cluster_pred)))
idx, skip = 0, []
query_idx = set(query_idx)
U_dict = {c_i: None for c_i in cluster_dict.keys()}
while len(query_idx) < n_query:
for c_i in cluster_dict.keys():
if c_i in skip: continue
cluster_idxs = np.array(cluster_dict[c_i])
if len(cluster_idxs) < idx+1:
skip.append(c_i)
else:
if U_dict[c_i] is None:
# store uncertainty
probs = self.predict_prob(cluster_idxs, g_net)
log_probs = torch.log(probs)
log_probs[log_probs == float('-inf')] = 0
log_probs[log_probs == float('inf')] = 0
U = (probs*log_probs).sum(1)
U = U.numpy()
U_dict[c_i] = deepcopy(U)
else:
U = U_dict[c_i]
chosen = np.argsort(U)[idx+1:idx+1+num]
try:
query_idx = query_idx.union(set(cluster_idxs[chosen]))
except TypeError:
query_idx = query_idx.union(set([cluster_idxs[chosen]]))
idx += num
query_idx = list(query_idx)[:n_query]
return query_idx | 3,718 | 37.340206 | 101 | py |
LoGo | LoGo-main/query_strategies/fal/ensemble_rank.py | import pdb
import copy
import numpy as np
from enum import unique
from scipy import stats
from sklearn.metrics import pairwise_distances
import torch
from ..strategy import Strategy
class EnsRankEntropy(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
g_net = self.net
probs = self.predict_prob(unlabel_idxs, g_net)
log_probs = torch.log(probs)
log_probs[log_probs == float("-inf")] = 0
log_probs[log_probs == float("inf")] = 0
U = (probs*log_probs).sum(1)
g_idxs = unlabel_idxs[U.sort()[1][:n_query]]
l_net = self.training_local_only(label_idxs)
probs = self.predict_prob(unlabel_idxs, l_net)
log_probs = torch.log(probs)
log_probs[log_probs == float("-inf")] = 0
log_probs[log_probs == float("inf")] = 0
U = (probs*log_probs).sum(1)
l_idxs = unlabel_idxs[U.sort()[1][:n_query]]
# rank ensemble
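        # reciprocal-rank fusion: each model contributes 1 / (const + rank) per sample,
        # so items ranked highly by both models accumulate the largest scores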
unique_idxs = {i: 0 for i in list(set(g_idxs).union(set(l_idxs)))}
print('Length of Union between global and local-only model: ', len(unique_idxs))
const = 20
for i, (g, l) in enumerate(zip(g_idxs, l_idxs)):
rank = 1 / (const + i)
unique_idxs[g] += rank
unique_idxs[l] += rank
selected_idxs = np.array([k for k, _ in sorted(unique_idxs.items(), key=lambda item: item[1], reverse=True)])
return selected_idxs[:n_query]
# kmeans ++ initialization
def init_centers(X, K):
ind = np.argmax([np.linalg.norm(s, 2) for s in X])
mu = [X[ind]]
indsAll = [ind]
centInds = [0.] * len(X)
cent = 0
# print('#Samps\tTotal Distance')
while len(mu) < K:
if len(mu) == 1:
D2 = pairwise_distances(X, mu).ravel().astype(float)
else:
newD = pairwise_distances(X, [mu[-1]]).ravel().astype(float)
for i in range(len(X)):
if D2[i] > newD[i]:
centInds[i] = cent
D2[i] = newD[i]
# print(str(len(mu)) + '\t' + str(sum(D2)), flush=True)
if sum(D2) == 0.0: pdb.set_trace()
D2 = D2.ravel().astype(float)
Ddist = (D2 ** 2)/ sum(D2 ** 2)
customDist = stats.rv_discrete(name='custm', values=(np.arange(len(D2)), Ddist))
ind = customDist.rvs(size=1)[0]
while ind in indsAll: ind = customDist.rvs(size=1)[0]
mu.append(X[ind])
indsAll.append(ind)
cent += 1
return indsAll
class EnsRankBadge(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
g_net = self.net
gradEmbedding = self.get_grad_embedding(list(unlabel_idxs), net=g_net)
gradEmbedding = gradEmbedding.numpy()
        chosen = init_centers(gradEmbedding, n_query)
g_idxs = np.array(unlabel_idxs)[chosen]
l_net = self.training_local_only(label_idxs)
gradEmbedding = self.get_grad_embedding(list(unlabel_idxs), net=l_net)
gradEmbedding = gradEmbedding.numpy()
        chosen = init_centers(gradEmbedding, n_query)
l_idxs = np.array(unlabel_idxs)[chosen]
# rank ensemble
unique_idxs = {i: 0 for i in list(set(g_idxs).union(set(l_idxs)))}
const = 20
for i, (g, l) in enumerate(zip(g_idxs, l_idxs)):
rank = 1 / (const + i)
unique_idxs[g] += rank
unique_idxs[l] += rank
selected_idxs = np.array([k for k, _ in sorted(unique_idxs.items(), key=lambda item: item[1], reverse=True)])
return selected_idxs[:n_query]
| 3,720 | 32.522523 | 117 | py |
LoGo | LoGo-main/query_strategies/fal/finetuning.py | import pdb
import copy
import numpy as np
from enum import unique
from scipy import stats
from copy import deepcopy
from sklearn.metrics import pairwise_distances
import torch
from ..strategy import Strategy
class FTEntropy(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
unlabel_idxs = np.array(unlabel_idxs)
local_net = self.training_local_only(label_idxs, finetune=True)
probs = self.predict_prob(unlabel_idxs, local_net)
log_probs = torch.log(probs)
log_probs[log_probs == float("-inf")] = 0
log_probs[log_probs == float("inf")] = 0
U = (probs*log_probs).sum(1)
return unlabel_idxs[U.sort()[1][:n_query]]
# kmeans ++ initialization
def init_centers(X, K):
ind = np.argmax([np.linalg.norm(s, 2) for s in X])
mu = [X[ind]]
indsAll = [ind]
centInds = [0.] * len(X)
cent = 0
# print('#Samps\tTotal Distance')
while len(mu) < K:
if len(mu) == 1:
D2 = pairwise_distances(X, mu).ravel().astype(float)
else:
newD = pairwise_distances(X, [mu[-1]]).ravel().astype(float)
for i in range(len(X)):
if D2[i] > newD[i]:
centInds[i] = cent
D2[i] = newD[i]
# print(str(len(mu)) + '\t' + str(sum(D2)), flush=True)
if sum(D2) == 0.0: pdb.set_trace()
D2 = D2.ravel().astype(float)
Ddist = (D2 ** 2)/ sum(D2 ** 2)
customDist = stats.rv_discrete(name='custm', values=(np.arange(len(D2)), Ddist))
ind = customDist.rvs(size=1)[0]
while ind in indsAll: ind = customDist.rvs(size=1)[0]
mu.append(X[ind])
indsAll.append(ind)
cent += 1
return indsAll
class FTBadge(Strategy):
def query(self, user_idx, label_idxs, unlabel_idxs, n_query=100):
net = self.training_local_only(label_idxs, finetune=True)
gradEmbedding = self.get_grad_embedding(list(unlabel_idxs), net=net)
gradEmbedding = gradEmbedding.numpy()
        chosen = init_centers(gradEmbedding, n_query)
unlabel_idxs = np.array(unlabel_idxs)
return unlabel_idxs[chosen] | 2,217 | 30.239437 | 88 | py |
ReChorus | ReChorus-master/src/main.py | # -*- coding: UTF-8 -*-
import os
import sys
import pickle
import logging
import argparse
import pandas as pd
import torch
from helpers import *
from models.general import *
from models.sequential import *
from models.developing import *
from utils import utils
def parse_global_args(parser):
parser.add_argument('--gpu', type=str, default='',
help='Set CUDA_VISIBLE_DEVICES, default for CPU only')
parser.add_argument('--verbose', type=int, default=logging.INFO,
help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, default='',
help='Logging file path')
parser.add_argument('--random_seed', type=int, default=0,
help='Random seed of numpy and pytorch')
parser.add_argument('--load', type=int, default=0,
help='Whether load model and continue to train')
parser.add_argument('--train', type=int, default=1,
help='To train the model or not.')
parser.add_argument('--regenerate', type=int, default=0,
help='Whether to regenerate intermediate files')
return parser
def main():
logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
exclude = ['check_epoch', 'log_file', 'model_path', 'path', 'pin_memory', 'load',
'regenerate', 'sep', 'train', 'verbose', 'metric', 'test_epoch', 'buffer']
logging.info(utils.format_arg_str(args, exclude_lst=exclude))
# Random seed
utils.init_seed(args.random_seed)
# GPU
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device('cpu')
if args.gpu != '' and torch.cuda.is_available():
args.device = torch.device('cuda')
logging.info('Device: {}'.format(args.device))
# Read data
corpus_path = os.path.join(args.path, args.dataset, model_name.reader + '.pkl')
if not args.regenerate and os.path.exists(corpus_path):
logging.info('Load corpus from {}'.format(corpus_path))
corpus = pickle.load(open(corpus_path, 'rb'))
else:
corpus = reader_name(args)
logging.info('Save corpus to {}'.format(corpus_path))
pickle.dump(corpus, open(corpus_path, 'wb'))
# Define model
model = model_name(args, corpus).to(args.device)
logging.info('#params: {}'.format(model.count_variables()))
logging.info(model)
# Run model
data_dict = dict()
for phase in ['train', 'dev', 'test']:
data_dict[phase] = model_name.Dataset(model, corpus, phase)
data_dict[phase].prepare()
runner = runner_name(args)
# logging.info('Test Before Training: ' + runner.print_res(data_dict['test']))
if args.load > 0:
model.load_model()
if args.train > 0:
runner.train(data_dict)
eval_res = runner.print_res(data_dict['test'])
logging.info(os.linesep + 'Test After Training: ' + eval_res)
# save_rec_results(data_dict['dev'], runner, 100)
model.actions_after_train()
logging.info(os.linesep + '-' * 45 + ' END: ' + utils.get_time() + ' ' + '-' * 45)
def save_rec_results(dataset, runner, topk):
result_path = os.path.join(args.path, args.dataset, 'rec-{}.csv'.format(init_args.model_name))
logging.info('Saving top-{} recommendation results to: {}'.format(topk, result_path))
predictions = runner.predict(dataset) # n_users, n_candidates
users, rec_items = list(), list()
for i in range(len(dataset)):
info = dataset[i]
users.append(info['user_id'])
item_scores = zip(info['item_id'], predictions[i])
sorted_lst = sorted(item_scores, key=lambda x: x[1], reverse=True)[:topk]
rec_items.append([x[0] for x in sorted_lst])
rec_df = pd.DataFrame(columns=['user_id', 'rec_items'])
rec_df['user_id'] = users
rec_df['rec_items'] = rec_items
rec_df.to_csv(result_path, sep=args.sep, index=False)
if __name__ == '__main__':
init_parser = argparse.ArgumentParser(description='Model')
init_parser.add_argument('--model_name', type=str, default='BPRMF', help='Choose a model to run.')
init_args, init_extras = init_parser.parse_known_args()
model_name = eval('{0}.{0}'.format(init_args.model_name))
reader_name = eval('{0}.{0}'.format(model_name.reader)) # model chooses the reader
runner_name = eval('{0}.{0}'.format(model_name.runner)) # model chooses the runner
# Args
parser = argparse.ArgumentParser(description='')
parser = parse_global_args(parser)
parser = reader_name.parse_data_args(parser)
parser = runner_name.parse_runner_args(parser)
parser = model_name.parse_model_args(parser)
args, extras = parser.parse_known_args()
# Logging configuration
log_args = [init_args.model_name, args.dataset, str(args.random_seed)]
for arg in ['lr', 'l2'] + model_name.extra_log_args:
log_args.append(arg + '=' + str(eval('args.' + arg)))
log_file_name = '__'.join(log_args).replace(' ', '__')
if args.log_file == '':
args.log_file = '../log/{}/{}.txt'.format(init_args.model_name, log_file_name)
if args.model_path == '':
args.model_path = '../model/{}/{}.pt'.format(init_args.model_name, log_file_name)
utils.check_dir(args.log_file)
logging.basicConfig(filename=args.log_file, level=args.verbose)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(init_args)
main()
| 5,462 | 39.768657 | 102 | py |
ReChorus | ReChorus-master/src/helpers/BaseRunner.py | # -*- coding: UTF-8 -*-
import os
import gc
import torch
import torch.nn as nn
import logging
import numpy as np
from time import time
from tqdm import tqdm
from torch.utils.data import DataLoader
from typing import Dict, List
from utils import utils
from models.BaseModel import BaseModel
class BaseRunner(object):
@staticmethod
def parse_runner_args(parser):
parser.add_argument('--epoch', type=int, default=200,
help='Number of epochs.')
parser.add_argument('--check_epoch', type=int, default=1,
help='Check some tensors every check_epoch.')
parser.add_argument('--test_epoch', type=int, default=-1,
help='Print test results every test_epoch (-1 means no print).')
parser.add_argument('--early_stop', type=int, default=10,
help='The number of epochs when dev results drop continuously.')
parser.add_argument('--lr', type=float, default=1e-3,
help='Learning rate.')
parser.add_argument('--l2', type=float, default=0,
help='Weight decay in optimizer.')
parser.add_argument('--batch_size', type=int, default=256,
help='Batch size during training.')
parser.add_argument('--eval_batch_size', type=int, default=256,
help='Batch size during testing.')
parser.add_argument('--optimizer', type=str, default='Adam',
help='optimizer: SGD, Adam, Adagrad, Adadelta')
parser.add_argument('--num_workers', type=int, default=5,
help='Number of processors when prepare batches in DataLoader')
parser.add_argument('--pin_memory', type=int, default=0,
help='pin_memory in DataLoader')
parser.add_argument('--topk', type=str, default='5,10,20,50',
help='The number of items recommended to each user.')
parser.add_argument('--metric', type=str, default='NDCG,HR',
help='metrics: NDCG, HR')
return parser
@staticmethod
def evaluate_method(predictions: np.ndarray, topk: list, metrics: list) -> Dict[str, float]:
"""
        :param predictions: (-1, n_candidates) shape, the first column is the score for the ground-truth item
:param topk: top-K value list
:param metrics: metric string list
:return: a result dict, the keys are metric@topk
"""
evaluations = dict()
sort_idx = (-predictions).argsort(axis=1)
gt_rank = np.argwhere(sort_idx == 0)[:, 1] + 1
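        # e.g., predictions [[0.2, 0.5, 0.1]] -> sort_idx [[1, 0, 2]]; the ground-truth
        # item (column 0) appears at position 1, so gt_rank = [2]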
for k in topk:
hit = (gt_rank <= k)
for metric in metrics:
key = '{}@{}'.format(metric, k)
if metric == 'HR':
evaluations[key] = hit.mean()
elif metric == 'NDCG':
evaluations[key] = (hit / np.log2(gt_rank + 1)).mean()
else:
raise ValueError('Undefined evaluation metric: {}.'.format(metric))
return evaluations
def __init__(self, args):
self.epoch = args.epoch
self.check_epoch = args.check_epoch
self.test_epoch = args.test_epoch
self.early_stop = args.early_stop
self.learning_rate = args.lr
self.batch_size = args.batch_size
self.eval_batch_size = args.eval_batch_size
self.l2 = args.l2
self.optimizer_name = args.optimizer
self.num_workers = args.num_workers
self.pin_memory = args.pin_memory
self.topk = [int(x) for x in args.topk.split(',')]
self.metrics = [m.strip().upper() for m in args.metric.split(',')]
self.main_metric = '{}@{}'.format(self.metrics[0], self.topk[0]) # early stop based on main_metric
self.time = None # will store [start_time, last_step_time]
def _check_time(self, start=False):
if self.time is None or start:
self.time = [time()] * 2
return self.time[0]
tmp_time = self.time[1]
self.time[1] = time()
return self.time[1] - tmp_time
def _build_optimizer(self, model):
logging.info('Optimizer: ' + self.optimizer_name)
optimizer = eval('torch.optim.{}'.format(self.optimizer_name))(
model.customize_parameters(), lr=self.learning_rate, weight_decay=self.l2)
return optimizer
def train(self, data_dict: Dict[str, BaseModel.Dataset]):
model = data_dict['train'].model
main_metric_results, dev_results = list(), list()
self._check_time(start=True)
try:
for epoch in range(self.epoch):
# Fit
self._check_time()
gc.collect()
torch.cuda.empty_cache()
loss = self.fit(data_dict['train'], epoch=epoch + 1)
training_time = self._check_time()
# Observe selected tensors
if len(model.check_list) > 0 and self.check_epoch > 0 and epoch % self.check_epoch == 0:
utils.check(model.check_list)
# Record dev results
dev_result = self.evaluate(data_dict['dev'], self.topk[:1], self.metrics)
dev_results.append(dev_result)
main_metric_results.append(dev_result[self.main_metric])
logging_str = 'Epoch {:<5} loss={:<.4f} [{:<3.1f} s] dev=({})'.format(
epoch + 1, loss, training_time, utils.format_metric(dev_result))
# Test
if self.test_epoch > 0 and epoch % self.test_epoch == 0:
test_result = self.evaluate(data_dict['test'], self.topk[:1], self.metrics)
logging_str += ' test=({})'.format(utils.format_metric(test_result))
testing_time = self._check_time()
logging_str += ' [{:<.1f} s]'.format(testing_time)
# Save model and early stop
if max(main_metric_results) == main_metric_results[-1] or \
(hasattr(model, 'stage') and model.stage == 1):
model.save_model()
logging_str += ' *'
logging.info(logging_str)
if self.early_stop > 0 and self.eval_termination(main_metric_results):
logging.info("Early stop at %d based on dev result." % (epoch + 1))
break
except KeyboardInterrupt:
logging.info("Early stop manually")
exit_here = input("Exit completely without evaluation? (y/n) (default n):")
if exit_here.lower().startswith('y'):
logging.info(os.linesep + '-' * 45 + ' END: ' + utils.get_time() + ' ' + '-' * 45)
exit(1)
# Find the best dev result across iterations
best_epoch = main_metric_results.index(max(main_metric_results))
logging.info(os.linesep + "Best Iter(dev)={:>5}\t dev=({}) [{:<.1f} s] ".format(
best_epoch + 1, utils.format_metric(dev_results[best_epoch]), self.time[1] - self.time[0]))
model.load_model()
def fit(self, dataset: BaseModel.Dataset, epoch=-1) -> float:
model = dataset.model
if model.optimizer is None:
model.optimizer = self._build_optimizer(model)
dataset.actions_before_epoch() # must sample before multi thread start
model.train()
loss_lst = list()
dl = DataLoader(dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers,
collate_fn=dataset.collate_batch, pin_memory=self.pin_memory)
for batch in tqdm(dl, leave=False, desc='Epoch {:<3}'.format(epoch), ncols=100, mininterval=1):
batch = utils.batch_to_gpu(batch, model.device)
# randomly shuffle the items to avoid models remembering the first item being the target
item_ids = batch['item_id']
# for each row (sample), get random indices and shuffle the original items
indices = torch.argsort(torch.rand(*item_ids.shape), dim=-1)
batch['item_id'] = item_ids[torch.arange(item_ids.shape[0]).unsqueeze(-1), indices]
model.optimizer.zero_grad()
out_dict = model(batch)
# shuffle the predictions back so that the prediction scores match the original order (first item is the target)
prediction = out_dict['prediction']
restored_prediction = torch.zeros(*prediction.shape).to(prediction.device)
# use the random indices to shuffle back
restored_prediction[torch.arange(item_ids.shape[0]).unsqueeze(-1), indices] = prediction
out_dict['prediction'] = restored_prediction
loss = model.loss(out_dict)
loss.backward()
model.optimizer.step()
loss_lst.append(loss.detach().cpu().data.numpy())
return np.mean(loss_lst).item()
def eval_termination(self, criterion: List[float]) -> bool:
        if len(criterion) > self.early_stop and utils.non_increasing(criterion[-self.early_stop:]):
return True
        elif len(criterion) - criterion.index(max(criterion)) > self.early_stop:
return True
return False
def evaluate(self, dataset: BaseModel.Dataset, topks: list, metrics: list) -> Dict[str, float]:
"""
Evaluate the results for an input dataset.
:return: result dict (key: metric@k)
"""
predictions = self.predict(dataset)
return self.evaluate_method(predictions, topks, metrics)
def predict(self, dataset: BaseModel.Dataset) -> np.ndarray:
"""
The returned prediction is a 2D-array, each row corresponds to all the candidates,
and the ground-truth item poses the first.
Example: ground-truth items: [1, 2], 2 negative items for each instance: [[3,4], [5,6]]
predictions like: [[1,3,4], [2,5,6]]
"""
dataset.model.eval()
predictions = list()
dl = DataLoader(dataset, batch_size=self.eval_batch_size, shuffle=False, num_workers=self.num_workers,
collate_fn=dataset.collate_batch, pin_memory=self.pin_memory)
for batch in tqdm(dl, leave=False, ncols=100, mininterval=1, desc='Predict'):
prediction = dataset.model(utils.batch_to_gpu(batch, dataset.model.device))['prediction']
predictions.extend(prediction.cpu().data.numpy())
predictions = np.array(predictions)
if dataset.model.test_all:
rows, cols = list(), list()
for i, u in enumerate(dataset.data['user_id']):
clicked_items = list(dataset.corpus.train_clicked_set[u] | dataset.corpus.residual_clicked_set[u])
idx = list(np.ones_like(clicked_items) * i)
rows.extend(idx)
cols.extend(clicked_items)
predictions[rows, cols] = -np.inf
return predictions
def print_res(self, dataset: BaseModel.Dataset) -> str:
"""
Construct the final result string before/after training
:return: test result string
"""
result_dict = self.evaluate(dataset, self.topk, self.metrics)
res_str = '(' + utils.format_metric(result_dict) + ')'
return res_str
| 11,452 | 46.131687 | 124 | py |
ReChorus | ReChorus-master/src/helpers/BUIRRunner.py | # -*- coding: UTF-8 -*-
import os
import gc
import torch
import torch.nn as nn
import logging
import numpy as np
from time import time
from tqdm import tqdm
from torch.utils.data import DataLoader
from utils import utils
from models.BaseModel import BaseModel
from helpers.BaseRunner import BaseRunner
class BUIRRunner(BaseRunner):
def fit(self, dataset: BaseModel.Dataset, epoch=-1) -> float:
model = dataset.model
if model.optimizer is None:
model.optimizer = self._build_optimizer(model)
dataset.actions_before_epoch() # must sample before multi thread start
model.train()
loss_lst = list()
dl = DataLoader(dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers,
collate_fn=dataset.collate_batch, pin_memory=self.pin_memory)
for batch in tqdm(dl, leave=False, desc='Epoch {:<3}'.format(epoch), ncols=100, mininterval=1):
batch = utils.batch_to_gpu(batch, model.device)
model.optimizer.zero_grad()
out_dict = model(batch)
loss = model.loss(out_dict)
loss.backward()
model.optimizer.step()
model._update_target()
loss_lst.append(loss.detach().cpu().data.numpy())
return np.mean(loss_lst).item()
| 1,327 | 33.051282 | 104 | py |
ReChorus | ReChorus-master/src/models/BaseModel.py | # -*- coding: UTF-8 -*-
import torch
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset as BaseDataset
from torch.nn.utils.rnn import pad_sequence
from typing import List
from utils import utils
from helpers.BaseReader import BaseReader
class BaseModel(nn.Module):
reader, runner = None, None # choose helpers in specific model classes
extra_log_args = []
@staticmethod
def parse_model_args(parser):
parser.add_argument('--model_path', type=str, default='',
help='Model save path.')
parser.add_argument('--buffer', type=int, default=1,
help='Whether to buffer feed dicts for dev/test')
return parser
@staticmethod
def init_weights(m):
if 'Linear' in str(type(m)):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
if m.bias is not None:
nn.init.normal_(m.bias, mean=0.0, std=0.01)
elif 'Embedding' in str(type(m)):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
def __init__(self, args, corpus: BaseReader):
super(BaseModel, self).__init__()
self.device = args.device
self.model_path = args.model_path
self.buffer = args.buffer
self.optimizer = None
self.check_list = list() # observe tensors in check_list every check_epoch
"""
Key Methods
"""
def _define_params(self):
pass
def forward(self, feed_dict: dict) -> dict:
"""
:param feed_dict: batch prepared in Dataset
:return: out_dict, including prediction with shape [batch_size, n_candidates]
"""
pass
def loss(self, out_dict: dict) -> torch.Tensor:
pass
"""
Auxiliary Methods
"""
def customize_parameters(self) -> list:
# customize optimizer settings for different parameters
weight_p, bias_p = [], []
for name, p in filter(lambda x: x[1].requires_grad, self.named_parameters()):
if 'bias' in name:
bias_p.append(p)
else:
weight_p.append(p)
optimize_dict = [{'params': weight_p}, {'params': bias_p, 'weight_decay': 0}]
return optimize_dict
def save_model(self, model_path=None):
if model_path is None:
model_path = self.model_path
utils.check_dir(model_path)
torch.save(self.state_dict(), model_path)
# logging.info('Save model to ' + model_path[:50] + '...')
def load_model(self, model_path=None):
if model_path is None:
model_path = self.model_path
self.load_state_dict(torch.load(model_path))
logging.info('Load model from ' + model_path)
def count_variables(self) -> int:
total_parameters = sum(p.numel() for p in self.parameters() if p.requires_grad)
return total_parameters
def actions_after_train(self): # e.g., save selected parameters
pass
"""
Define Dataset Class
"""
class Dataset(BaseDataset):
def __init__(self, model, corpus, phase: str):
self.model = model # model object reference
self.corpus = corpus # reader object reference
self.phase = phase # train / dev / test
self.buffer_dict = dict()
self.data = utils.df_to_dict(corpus.data_df[phase])
# ↑ DataFrame is not compatible with multi-thread operations
def __len__(self):
if type(self.data) == dict:
for key in self.data:
return len(self.data[key])
return len(self.data)
def __getitem__(self, index: int) -> dict:
if self.model.buffer and self.phase != 'train':
return self.buffer_dict[index]
return self._get_feed_dict(index)
# ! Key method to construct input data for a single instance
def _get_feed_dict(self, index: int) -> dict:
pass
# Called after initialization
def prepare(self):
if self.model.buffer and self.phase != 'train':
for i in tqdm(range(len(self)), leave=False, desc=('Prepare ' + self.phase)):
self.buffer_dict[i] = self._get_feed_dict(i)
# Called before each training epoch (only for the training dataset)
def actions_before_epoch(self):
pass
# Collate a batch according to the list of feed dicts
def collate_batch(self, feed_dicts: List[dict]) -> dict:
feed_dict = dict()
for key in feed_dicts[0]:
if isinstance(feed_dicts[0][key], np.ndarray):
tmp_list = [len(d[key]) for d in feed_dicts]
if any([tmp_list[0] != l for l in tmp_list]):
                        stack_val = np.array([d[key] for d in feed_dicts], dtype=object)
else:
stack_val = np.array([d[key] for d in feed_dicts])
else:
stack_val = np.array([d[key] for d in feed_dicts])
                if stack_val.dtype == object:  # inconsistent length (e.g., history)
feed_dict[key] = pad_sequence([torch.from_numpy(x) for x in stack_val], batch_first=True)
else:
feed_dict[key] = torch.from_numpy(stack_val)
feed_dict['batch_size'] = len(feed_dicts)
feed_dict['phase'] = self.phase
return feed_dict
class GeneralModel(BaseModel):
reader, runner = 'BaseReader', 'BaseRunner'
@staticmethod
def parse_model_args(parser):
parser.add_argument('--num_neg', type=int, default=1,
help='The number of negative items during training.')
parser.add_argument('--dropout', type=float, default=0,
help='Dropout probability for each deep layer')
parser.add_argument('--test_all', type=int, default=0,
help='Whether testing on all the items.')
return BaseModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.user_num = corpus.n_users
self.item_num = corpus.n_items
self.num_neg = args.num_neg
self.dropout = args.dropout
self.test_all = args.test_all
def loss(self, out_dict: dict) -> torch.Tensor:
"""
        BPR ranking loss generalized to multiple negative samples (weighted by a softmax over the negatives)
"Recurrent neural networks with top-k gains for session-based recommendations"
:param out_dict: contain prediction with [batch_size, -1], the first column for positive, the rest for negative
:return:
"""
predictions = out_dict['prediction']
pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
# neg_pred = (neg_pred * neg_softmax).sum(dim=1)
# loss = F.softplus(-(pos_pred - neg_pred)).mean()
# ↑ For numerical stability, use 'softplus(-x)' instead of '-log_sigmoid(x)'
return loss
class Dataset(BaseModel.Dataset):
def _get_feed_dict(self, index):
user_id, target_item = self.data['user_id'][index], self.data['item_id'][index]
if self.phase != 'train' and self.model.test_all:
neg_items = np.arange(1, self.corpus.n_items)
else:
neg_items = self.data['neg_items'][index]
item_ids = np.concatenate([[target_item], neg_items]).astype(int)
feed_dict = {
'user_id': user_id,
'item_id': item_ids
}
return feed_dict
# Sample negative items for all the instances
def actions_before_epoch(self):
neg_items = np.random.randint(1, self.corpus.n_items, size=(len(self), self.model.num_neg))
for i, u in enumerate(self.data['user_id']):
clicked_set = self.corpus.train_clicked_set[u] # neg items are possible to appear in dev/test set
# clicked_set = self.corpus.clicked_set[u] # neg items will not include dev/test set
for j in range(self.model.num_neg):
while neg_items[i][j] in clicked_set:
neg_items[i][j] = np.random.randint(1, self.corpus.n_items)
self.data['neg_items'] = neg_items
class SequentialModel(GeneralModel):
reader = 'SeqReader'
@staticmethod
def parse_model_args(parser):
parser.add_argument('--history_max', type=int, default=20,
help='Maximum length of history.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.history_max = args.history_max
class Dataset(GeneralModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
idx_select = np.array(self.data['position']) > 0 # history length must be non-zero
for key in self.data:
self.data[key] = np.array(self.data[key])[idx_select]
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
pos = self.data['position'][index]
user_seq = self.corpus.user_his[feed_dict['user_id']][:pos]
if self.model.history_max > 0:
user_seq = user_seq[-self.model.history_max:]
feed_dict['history_items'] = np.array([x[0] for x in user_seq])
feed_dict['history_times'] = np.array([x[1] for x in user_seq])
feed_dict['lengths'] = len(feed_dict['history_items'])
return feed_dict
| 9,962 | 39.173387 | 119 | py |
ReChorus | ReChorus-master/src/models/general/NeuMF.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" NeuMF
Reference:
"Neural Collaborative Filtering"
Xiangnan He et al., WWW'2017.
Reference code:
The authors' tensorflow implementation https://github.com/hexiangnan/neural_collaborative_filtering
CMD example:
python main.py --model_name NeuMF --emb_size 64 --layers '[64]' --lr 5e-4 --l2 1e-7 --dropout 0.2 \
--dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
from models.BaseModel import GeneralModel
class NeuMF(GeneralModel):
reader = 'BaseReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'layers']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--layers', type=str, default='[64]',
help="Size of each layer.")
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.layers = eval(args.layers)
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.mf_u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.mf_i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.mlp_u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.mlp_i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.mlp = nn.ModuleList([])
pre_size = 2 * self.emb_size
for i, layer_size in enumerate(self.layers):
self.mlp.append(nn.Linear(pre_size, layer_size))
pre_size = layer_size
self.dropout_layer = nn.Dropout(p=self.dropout)
self.prediction = nn.Linear(pre_size + self.emb_size, 1, bias=False)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id'] # [batch_size]
i_ids = feed_dict['item_id'] # [batch_size, -1]
u_ids = u_ids.unsqueeze(-1).repeat((1, i_ids.shape[1])) # [batch_size, -1]
mf_u_vectors = self.mf_u_embeddings(u_ids)
mf_i_vectors = self.mf_i_embeddings(i_ids)
mlp_u_vectors = self.mlp_u_embeddings(u_ids)
mlp_i_vectors = self.mlp_i_embeddings(i_ids)
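        # GMF branch: element-wise product of user/item embeddings;
        # MLP branch: concatenated embeddings passed through the deep layers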
mf_vector = mf_u_vectors * mf_i_vectors
mlp_vector = torch.cat([mlp_u_vectors, mlp_i_vectors], dim=-1)
for layer in self.mlp:
mlp_vector = layer(mlp_vector).relu()
mlp_vector = self.dropout_layer(mlp_vector)
output_vector = torch.cat([mf_vector, mlp_vector], dim=-1)
prediction = self.prediction(output_vector)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
| 2,848 | 36 | 103 | py |
ReChorus | ReChorus-master/src/models/general/BPRMF.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" BPRMF
Reference:
"Bayesian personalized ranking from implicit feedback"
Rendle et al., UAI'2009.
CMD example:
python main.py --model_name BPRMF --emb_size 64 --lr 1e-3 --l2 1e-6 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch.nn as nn
from models.BaseModel import GeneralModel
class BPRMF(GeneralModel):
reader = 'BaseReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id'] # [batch_size]
i_ids = feed_dict['item_id'] # [batch_size, -1]
cf_u_vectors = self.u_embeddings(u_ids)
cf_i_vectors = self.i_embeddings(i_ids)
prediction = (cf_u_vectors[:, None, :] * cf_i_vectors).sum(dim=-1) # [batch_size, -1]
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
| 1,534 | 30.326531 | 108 | py |
ReChorus | ReChorus-master/src/models/general/BUIR.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" BUIR
Reference:
"Bootstrapping User and Item Representations for One-Class Collaborative Filtering"
Lee et al., SIGIR'2021.
CMD example:
python main.py --model_name BUIR --emb_size 64 --lr 1e-3 --l2 1e-6 --dataset 'Grocery_and_Gourmet_Food'
"""
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from models.BaseModel import GeneralModel
class BUIR(GeneralModel):
reader = 'BaseReader'
runner = 'BUIRRunner'
extra_log_args = ['emb_size', 'momentum']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--momentum', type=float, default=0.995,
help='Momentum update.')
return GeneralModel.parse_model_args(parser)
@staticmethod
def init_weights(m):
if 'Linear' in str(type(m)):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.normal_(m.bias.data)
elif 'Embedding' in str(type(m)):
nn.init.xavier_normal_(m.weight.data)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.momentum = args.momentum
self._define_params()
self.apply(self.init_weights)
for param_o, param_t in zip(self.user_online.parameters(), self.user_target.parameters()):
param_t.data.copy_(param_o.data)
param_t.requires_grad = False
for param_o, param_t in zip(self.item_online.parameters(), self.item_target.parameters()):
param_t.data.copy_(param_o.data)
param_t.requires_grad = False
def _define_params(self):
self.user_online = nn.Embedding(self.user_num, self.emb_size)
self.user_target = nn.Embedding(self.user_num, self.emb_size)
self.item_online = nn.Embedding(self.item_num, self.emb_size)
self.item_target = nn.Embedding(self.item_num, self.emb_size)
self.predictor = nn.Linear(self.emb_size, self.emb_size)
self.bn = nn.BatchNorm1d(self.emb_size, eps=0, affine=False, track_running_stats=False)
# will be called by BUIRRunner
def _update_target(self):
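        # BYOL-style momentum (EMA) update: theta_target <- m * theta_target + (1 - m) * theta_online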
for param_o, param_t in zip(self.user_online.parameters(), self.user_target.parameters()):
param_t.data = param_t.data * self.momentum + param_o.data * (1. - self.momentum)
for param_o, param_t in zip(self.item_online.parameters(), self.item_target.parameters()):
param_t.data = param_t.data * self.momentum + param_o.data * (1. - self.momentum)
def forward(self, feed_dict):
self.check_list = []
user, items = feed_dict['user_id'], feed_dict['item_id']
# prediction = (self.item_online(items) * self.user_online(user)[:, None, :]).sum(-1)
prediction = (self.predictor(self.item_online(items)) * self.user_online(user)[:, None, :]).sum(dim=-1) + \
(self.predictor(self.user_online(user))[:, None, :] * self.item_online(items)).sum(dim=-1)
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
u_online = self.user_online(user)
u_online = self.predictor(u_online)
u_target = self.user_target(user)
i_online = self.item_online(items).squeeze(1)
i_online = self.predictor(i_online)
i_target = self.item_target(items).squeeze(1)
out_dict.update({
'u_online': u_online,
'u_target': u_target,
'i_online': i_online,
'i_target': i_target
})
return out_dict
def loss(self, output):
u_online, u_target = output['u_online'], output['u_target']
i_online, i_target = output['i_online'], output['i_target']
u_online = F.normalize(u_online, dim=-1)
u_target = F.normalize(u_target, dim=-1)
i_online = F.normalize(i_online, dim=-1)
i_target = F.normalize(i_target, dim=-1)
# Euclidean distance between normalized vectors can be replaced with their negative inner product
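        # for unit vectors u and v: ||u - v||^2 = 2 - 2 * (u . v)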
loss_ui = 2 - 2 * (u_online * i_target.detach()).sum(dim=-1)
loss_iu = 2 - 2 * (i_online * u_target.detach()).sum(dim=-1)
return (loss_ui + loss_iu).mean()
class Dataset(GeneralModel.Dataset):
# No need to sample negative items
def actions_before_epoch(self):
self.data['neg_items'] = [[] for _ in range(len(self))]
| 4,676 | 39.318966 | 115 | py |
ReChorus | ReChorus-master/src/models/general/CFKG.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" CFKG
Reference:
"Learning over Knowledge-Base Embeddings for Recommendation"
Yongfeng Zhang et al., SIGIR'2018.
Note:
In the built-in dataset, we have four kinds of relations: buy, category, complement, substitute, where 'buy' is
a special relation indexed by 0. And there are three kinds of nodes in KG: user, item, category, among which
users are placed ahead of other entities when indexing.
CMD example:
python main.py --model_name CFKG --emb_size 64 --margin 1 --include_attr 1 --lr 1e-4 --l2 1e-6 \
--dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import utils
from models.BaseModel import GeneralModel
from helpers.KGReader import KGReader
class CFKG(GeneralModel):
reader = 'KGReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'margin', 'include_attr']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--margin', type=float, default=0,
help='Margin in hinge loss.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus: KGReader):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.margin = args.margin
self.relation_num = corpus.n_relations
self.entity_num = corpus.n_entities
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.e_embeddings = nn.Embedding(self.user_num + self.entity_num, self.emb_size)
# ↑ user and entity embeddings, user first
self.r_embeddings = nn.Embedding(self.relation_num, self.emb_size)
# ↑ relation embedding: 0 is used for "buy" between users and items
self.loss_function = nn.MarginRankingLoss(margin=self.margin)
def forward(self, feed_dict):
self.check_list = []
head_ids = feed_dict['head_id'] # [batch_size, -1]
tail_ids = feed_dict['tail_id'] # [batch_size, -1]
relation_ids = feed_dict['relation_id'] # [batch_size, -1]
head_vectors = self.e_embeddings(head_ids)
tail_vectors = self.e_embeddings(tail_ids)
relation_vectors = self.r_embeddings(relation_ids)
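        # TransE-style score: -||h + r - t||_2^2, higher means a more plausible triple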
prediction = -((head_vectors + relation_vectors - tail_vectors)**2).sum(-1)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
def loss(self, out_dict):
predictions = out_dict['prediction']
batch_size = predictions.shape[0]
pos_pred, neg_pred = predictions[:, :2].flatten(), predictions[:, 2:].flatten()
target = torch.from_numpy(np.ones(batch_size * 2, dtype=np.float32)).to(self.device)
loss = self.loss_function(pos_pred, neg_pred, target)
return loss
class Dataset(GeneralModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
if self.phase == 'train':
interaction_df = pd.DataFrame({
'head': self.data['user_id'],
'tail': self.data['item_id'],
'relation': np.zeros_like(self.data['user_id']) # "buy" relation
})
self.data = utils.df_to_dict(pd.concat((self.corpus.relation_df, interaction_df), axis=0))
self.neg_heads = np.zeros(len(self), dtype=int)
self.neg_tails = np.zeros(len(self), dtype=int)
def _get_feed_dict(self, index):
if self.phase == 'train':
head, tail = self.data['head'][index], self.data['tail'][index]
relation = self.data['relation'][index]
head_id = np.array([head, head, head, self.neg_heads[index]])
tail_id = np.array([tail, tail, self.neg_tails[index], tail])
relation_id = np.array([relation] * 4)
                if relation > 0:  # head is not a user
                    head_id = head_id + self.corpus.n_users
                tail_id = tail_id + self.corpus.n_users  # tail must be a non-user entity
else:
target_item = self.data['item_id'][index]
if self.model.test_all:
neg_items = np.arange(1, self.corpus.n_items)
else:
neg_items = self.data['neg_items'][index]
tail_id = np.concatenate([[target_item], neg_items])
head_id = self.data['user_id'][index] * np.ones_like(tail_id)
relation_id = np.zeros_like(tail_id)
tail_id += self.corpus.n_users # tail must be a non-user entity
feed_dict = {'head_id': head_id, 'tail_id': tail_id, 'relation_id': relation_id}
return feed_dict
def actions_before_epoch(self):
for i in range(len(self)):
head, tail, relation = self.data['head'][i], self.data['tail'][i], self.data['relation'][i]
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
if relation == 0: # "buy" relation
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
while self.neg_tails[i] in self.corpus.train_clicked_set[head]:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while tail in self.corpus.train_clicked_set[self.neg_heads[i]]:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
else:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_entities)
while (head, relation, self.neg_tails[i]) in self.corpus.triplet_set:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_entities)
while (self.neg_heads[i], relation, tail) in self.corpus.triplet_set:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_entities)
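# A minimal standalone sketch (toy tensors, not model parameters) of the TransE-style
# score used in CFKG.forward: prediction = -||h + r - t||^2, so a triple fits well
# exactly when head + relation lands close to tail in the embedding space.
if __name__ == '__main__':
    h = torch.tensor([[0.1, 0.2]])
    r = torch.tensor([[0.3, 0.0]])
    t_good = torch.tensor([[0.4, 0.2]]) # equals h + r, so the score is 0
    t_bad = torch.tensor([[-1.0, 1.0]]) # far from h + r, so the score is -2.6
    for t in (t_good, t_bad):
        print((-((h + r - t) ** 2).sum(-1)).item())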
| 6,084 | 45.807692 | 115 | py |
ReChorus | ReChorus-master/src/models/general/DirectAU.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" DirectAU
Reference:
"Towards Representation Alignment and Uniformity in Collaborative Filtering"
Wang et al., KDD'2022.
CMD example:
python main.py --model_name DirectAU --dataset Grocery_and_Gourmet_Food \
--emb_size 64 --lr 1e-3 --l2 1e-6 --epoch 500 --gamma 0.3
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.BaseModel import GeneralModel
class DirectAU(GeneralModel):
reader = 'BaseReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'gamma']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--gamma', type=float, default=1,
help='Weight of the uniformity loss.')
return GeneralModel.parse_model_args(parser)
@staticmethod
def init_weights(m):
if 'Linear' in str(type(m)):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.normal_(m.bias.data)
elif 'Embedding' in str(type(m)):
nn.init.xavier_normal_(m.weight.data)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.gamma = args.gamma
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
@staticmethod
def alignment(x, y):
x, y = F.normalize(x, dim=-1), F.normalize(y, dim=-1)
return (x - y).norm(p=2, dim=1).pow(2).mean()
@staticmethod
def uniformity(x):
x = F.normalize(x, dim=-1)
return torch.pdist(x, p=2).pow(2).mul(-2).exp().mean().log()
def forward(self, feed_dict):
self.check_list = []
user, items = feed_dict['user_id'], feed_dict['item_id']
user_e = self.u_embeddings(user)
item_e = self.i_embeddings(items)
prediction = (user_e[:, None, :] * item_e).sum(dim=-1) # [batch_size, -1]
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
out_dict.update({
'user_e': user_e,
'item_e': item_e.squeeze(1)
})
return out_dict
def loss(self, output):
user_e, item_e = output['user_e'], output['item_e']
align = self.alignment(user_e, item_e)
uniform = (self.uniformity(user_e) + self.uniformity(item_e)) / 2
loss = align + self.gamma * uniform
return loss
class Dataset(GeneralModel.Dataset):
# No need to sample negative items
def actions_before_epoch(self):
self.data['neg_items'] = [[] for _ in range(len(self))]
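# A small sanity-check sketch, not part of the model: both static methods L2-normalize
# their inputs first, so alignment(x, x) is exactly 0, while uniformity is the log of
# the mean pairwise Gaussian potential and shrinks as points spread over the sphere.
if __name__ == '__main__':
    x = F.normalize(torch.randn(8, 4), dim=-1)
    print(DirectAU.alignment(x, x).item()) # 0.0 -- identical views are perfectly aligned
    print(DirectAU.uniformity(x).item()) # more negative = more uniform on the sphere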
| 2,990 | 30.484211 | 82 | py |
ReChorus | ReChorus-master/src/models/general/LightGCN.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" LightGCN
Reference:
"LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation"
He et al., SIGIR'2020.
CMD example:
python main.py --model_name LightGCN --emb_size 64 --n_layers 3 --lr 1e-3 --l2 1e-8 \
--dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
from models.BaseModel import GeneralModel
class LightGCN(GeneralModel):
reader = 'BaseReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'n_layers']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--n_layers', type=int, default=3,
help='Number of LightGCN layers.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.n_layers = args.n_layers
self.norm_adj = self.build_adjmat(corpus.n_users, corpus.n_items, corpus.train_clicked_set)
self._define_params()
self.apply(self.init_weights)
@staticmethod
def build_adjmat(user_count, item_count, train_mat, selfloop_flag=False):
R = sp.dok_matrix((user_count, item_count), dtype=np.float32)
for user in train_mat:
for item in train_mat[user]:
R[user, item] = 1
R = R.tolil()
adj_mat = sp.dok_matrix((user_count + item_count, user_count + item_count), dtype=np.float32)
adj_mat = adj_mat.tolil()
adj_mat[:user_count, user_count:] = R
adj_mat[user_count:, :user_count] = R.T
adj_mat = adj_mat.todok()
def normalized_adj_single(adj):
# D^-1/2 * A * D^-1/2
rowsum = np.array(adj.sum(1)) + 1e-10
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
bi_lap = d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)
return bi_lap.tocoo()
if selfloop_flag:
norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))
else:
norm_adj_mat = normalized_adj_single(adj_mat)
return norm_adj_mat.tocsr()
def _define_params(self):
self.encoder = LGCNEncoder(self.user_num, self.item_num, self.emb_size, self.norm_adj, self.n_layers)
def forward(self, feed_dict):
self.check_list = []
user, items = feed_dict['user_id'], feed_dict['item_id']
u_embed, i_embed = self.encoder(user, items)
prediction = (u_embed[:, None, :] * i_embed).sum(dim=-1)
out_dict = {'prediction': prediction}
return out_dict
class LGCNEncoder(nn.Module):
def __init__(self, user_count, item_count, emb_size, norm_adj, n_layers=3):
super(LGCNEncoder, self).__init__()
self.user_count = user_count
self.item_count = item_count
self.emb_size = emb_size
self.layers = [emb_size] * n_layers
self.norm_adj = norm_adj
self.embedding_dict = self._init_model()
self.sparse_norm_adj = self._convert_sp_mat_to_sp_tensor(self.norm_adj).cuda()
def _init_model(self):
initializer = nn.init.xavier_uniform_
embedding_dict = nn.ParameterDict({
'user_emb': nn.Parameter(initializer(torch.empty(self.user_count, self.emb_size))),
'item_emb': nn.Parameter(initializer(torch.empty(self.item_count, self.emb_size))),
})
return embedding_dict
@staticmethod
def _convert_sp_mat_to_sp_tensor(X):
coo = X.tocoo()
        i = torch.LongTensor(np.array([coo.row, coo.col]))
        v = torch.from_numpy(coo.data).float()
        return torch.sparse_coo_tensor(i, v, coo.shape)
def forward(self, users, items):
ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0)
all_embeddings = [ego_embeddings]
for k in range(len(self.layers)):
ego_embeddings = torch.sparse.mm(self.sparse_norm_adj, ego_embeddings)
all_embeddings += [ego_embeddings]
all_embeddings = torch.stack(all_embeddings, dim=1)
all_embeddings = torch.mean(all_embeddings, dim=1)
user_all_embeddings = all_embeddings[:self.user_count, :]
item_all_embeddings = all_embeddings[self.user_count:, :]
user_embeddings = user_all_embeddings[users, :]
item_embeddings = item_all_embeddings[items, :]
return user_embeddings, item_embeddings
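# A toy sketch of the symmetric normalization D^-1/2 * A * D^-1/2 performed in
# LightGCN.build_adjmat over the (user+item) x (user+item) bipartite graph; the
# 2-user / 2-item interaction dict below is invented for illustration.
if __name__ == '__main__':
    train_mat = {0: [0, 1], 1: [1]} # user -> clicked items
    norm_adj = LightGCN.build_adjmat(user_count=2, item_count=2, train_mat=train_mat)
    print(norm_adj.todense()) # user 0 <-> item 1 entry is 1/sqrt(2 * 2) = 0.5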
| 4,769 | 34.597015 | 109 | py |
ReChorus | ReChorus-master/src/models/general/POP.py | # -*- coding: UTF-8 -*-
import torch
import numpy as np
from models.BaseModel import GeneralModel
class POP(GeneralModel):
"""
Recommendation according to item's popularity.
Should run with --train 0
"""
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.popularity = np.zeros(corpus.n_items)
for i in corpus.data_df['train']['item_id'].values:
self.popularity[i] += 1
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
prediction = self.popularity[i_ids.cpu().data.numpy()]
prediction = torch.from_numpy(prediction).to(self.device)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
| 774 | 28.807692 | 75 | py |
ReChorus | ReChorus-master/src/models/developing/SRGNN.py | # -*- coding: UTF-8 -*-
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
import numpy as np
from models.BaseModel import SequentialModel
class SRGNN(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['num_layers']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--num_layers', type=int, default=1,
help='Number of self-attention layers.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.num_layers = args.num_layers
self._define_params()
std = 1.0 / np.sqrt(self.emb_size)
for weight in self.parameters():
weight.data.uniform_(-std, std)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size, padding_idx=0)
self.linear1 = nn.Linear(self.emb_size, self.emb_size, bias=True)
self.linear2 = nn.Linear(self.emb_size, self.emb_size, bias=True)
self.linear3 = nn.Linear(self.emb_size, 1, bias=False)
self.linear_transform = nn.Linear(self.emb_size * 2, self.emb_size, bias=True)
self.gnn = GNN(self.emb_size, self.num_layers)
def _get_slice(self, item_seq):
items, n_node, A, alias_inputs = [], [], [], []
max_n_node = item_seq.size(1)
item_seq = item_seq.cpu().numpy()
for u_input in item_seq:
node = np.unique(u_input)
items.append(node.tolist() + [0] * (max_n_node - len(node)))
u_A = np.zeros((max_n_node, max_n_node))
for i in np.arange(len(u_input) - 1):
if u_input[i + 1] == 0:
break
u = np.where(node == u_input[i])[0][0]
v = np.where(node == u_input[i + 1])[0][0]
u_A[u][v] = 1
u_sum_in = np.sum(u_A, 0)
u_sum_in[np.where(u_sum_in == 0)] = 1
u_A_in = np.divide(u_A, u_sum_in)
u_sum_out = np.sum(u_A, 1)
u_sum_out[np.where(u_sum_out == 0)] = 1
u_A_out = np.divide(u_A.transpose(), u_sum_out)
u_A = np.concatenate([u_A_in, u_A_out]).transpose()
A.append(u_A)
alias_inputs.append([np.where(node == i)[0][0] for i in u_input])
# The relative coordinates of the item node, shape of [batch_size, max_session_len]
alias_inputs = torch.LongTensor(alias_inputs).to(self.device)
# The connecting matrix, shape of [batch_size, max_session_len, 2 * max_session_len]
A = torch.FloatTensor(A).to(self.device)
# The unique item nodes, shape of [batch_size, max_session_len]
items = torch.LongTensor(items).to(self.device)
return alias_inputs, A, items
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
history = feed_dict['history_items'] # [batch_size, history_max]
lengths = feed_dict['lengths'] # [batch_size]
batch_size, seq_len = history.shape
valid_his = (history > 0).long()
alias_inputs, A, items = self._get_slice(history)
hidden = self.i_embeddings(items)
hidden = self.gnn(A, hidden)
alias_inputs = alias_inputs.unsqueeze(-1).expand(-1, -1, self.emb_size)
seq_hidden = torch.gather(hidden, dim=1, index=alias_inputs)
# fetch the last hidden state of last timestamp
ht = seq_hidden[torch.arange(batch_size), lengths - 1]
alpha = self.linear3((self.linear1(ht)[:, None, :] + self.linear2(seq_hidden)).sigmoid())
a = torch.sum(alpha * seq_hidden * valid_his[:, :, None].float(), 1)
his_vector = self.linear_transform(torch.cat([a, ht], dim=1))
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
return {'prediction': prediction.view(batch_size, -1)}
class GNN(nn.Module):
"""
Graph neural networks are well-suited for session-based recommendation,
    because they can automatically extract features of session graphs while taking rich node connections into account.
"""
def __init__(self, embedding_size, step=1):
super(GNN, self).__init__()
self.step = step
self.embedding_size = embedding_size
self.input_size = embedding_size * 2
self.gate_size = embedding_size * 3
self.w_ih = Parameter(torch.Tensor(self.gate_size, self.input_size))
self.w_hh = Parameter(torch.Tensor(self.gate_size, self.embedding_size))
self.b_ih = Parameter(torch.Tensor(self.gate_size))
self.b_hh = Parameter(torch.Tensor(self.gate_size))
self.b_iah = Parameter(torch.Tensor(self.embedding_size))
self.b_ioh = Parameter(torch.Tensor(self.embedding_size))
self.linear_edge_in = nn.Linear(self.embedding_size, self.embedding_size, bias=True)
self.linear_edge_out = nn.Linear(self.embedding_size, self.embedding_size, bias=True)
def gnn_cell(self, A, hidden):
"""Obtain latent vectors of nodes via graph neural networks.
Args:
            A (torch.FloatTensor): The connection matrix, shape of [batch_size, max_session_len, 2 * max_session_len]
            hidden (torch.FloatTensor): The item node embedding matrix, shape of
                [batch_size, max_session_len, embedding_size]
        Returns:
            torch.FloatTensor: Latent vectors of nodes, shape of [batch_size, max_session_len, embedding_size]
"""
input_in = torch.matmul(A[:, :, :A.size(1)], self.linear_edge_in(hidden)) + self.b_iah
input_out = torch.matmul(A[:, :, A.size(1): 2 * A.size(1)], self.linear_edge_out(hidden)) + self.b_ioh
# [batch_size, max_session_len, embedding_size * 2]
inputs = torch.cat([input_in, input_out], 2)
        # gi.size equals gh.size, shape of [batch_size, max_session_len, embedding_size * 3]
gi = F.linear(inputs, self.w_ih, self.b_ih)
gh = F.linear(hidden, self.w_hh, self.b_hh)
# (batch_size, max_session_len, embedding_size)
i_r, i_i, i_n = gi.chunk(3, 2)
h_r, h_i, h_n = gh.chunk(3, 2)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = (1 - inputgate) * hidden + inputgate * newgate
return hy
def forward(self, A, hidden):
for i in range(self.step):
hidden = self.gnn_cell(A, hidden)
return hidden
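# A standalone sketch (invented session) mirroring the edge-building loop inside
# SRGNN._get_slice: consecutive items form directed edges between unique nodes,
# which are later normalized into the in/out halves of the connection matrix A.
if __name__ == '__main__':
    max_n_node = 4
    u_input = np.array([3, 5, 3, 0]) # toy padded session: item 3 -> 5 -> 3
    node = np.unique(u_input) # [0, 3, 5]; padding 0 also becomes a node, as above
    u_A = np.zeros((max_n_node, max_n_node))
    for i in range(len(u_input) - 1):
        if u_input[i + 1] == 0:
            break
        u = np.where(node == u_input[i])[0][0]
        v = np.where(node == u_input[i + 1])[0][0]
        u_A[u][v] = 1
    print(u_A) # edges 3->5 and 5->3, i.e., ones at positions (1, 2) and (2, 1)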
| 6,782 | 43.045455 | 114 | py |
ReChorus | ReChorus-master/src/models/developing/S3Rec.py | # -*- coding: UTF-8 -*-
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
class S3Rec(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'mip_weight', 'sp_weight', 'mask_ratio', 'stage']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--mip_weight', type=float, default=0.2,
help='Coefficient of the MIP loss.')
parser.add_argument('--sp_weight', type=float, default=0.5,
help='Coefficient of the SP loss.')
parser.add_argument('--mask_ratio', type=float, default=0.2,
help='Proportion of masked positions in the sequence.')
parser.add_argument('--stage', type=int, default=1,
help='Stage of training: 1-pretrain, 2-finetune, default-from_scratch.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.mip_weight = args.mip_weight
self.sp_weight = args.sp_weight
self.mask_ratio = args.mask_ratio
self.stage = args.stage
self.max_his = args.history_max
self._define_params()
self.apply(self.init_weights)
# assert(self.stage in [1, 2])
self.pre_path = '../model/S3Rec/Pre__{}.pt'.format(corpus.dataset)
self.model_path = self.pre_path if self.stage == 1 else self.model_path
if self.stage == 2: # fine-tune
if os.path.exists(self.pre_path):
self.load_model(self.pre_path)
else:
logging.info('Train from scratch!')
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num + 1, self.emb_size, padding_idx=0)
self.encoder = BERT4RecEncoder(self.emb_size, self.max_his, num_layers=2, num_heads=2, dropout=0.2)
self.mip_norm = nn.Linear(self.emb_size, self.emb_size)
self.sp_norm = nn.Linear(self.emb_size, self.emb_size)
def _masked_item_prediction(self, seq_output, target_item_emb):
return (self.mip_norm(seq_output)[:, None, :] * target_item_emb).sum(-1).sigmoid().view(-1) # [B*L]
def _segment_prediction(self, context, segment_emb):
return (self.sp_norm(context) * segment_emb).sum(-1).sigmoid() # [B]
def forward(self, feed_dict):
self.check_list = []
if self.stage == 1 and feed_dict['phase'] == 'train':
mask_token = self.item_num
# MIP
mask_seq, seq_len = feed_dict['mask_seq'], feed_dict['seq_len']
seq_vectors = self.i_embeddings(mask_seq)
seq_output = self.encoder(seq_vectors, seq_len)
pos_vectors = self.i_embeddings(feed_dict['pos_item'])
neg_vectors = self.i_embeddings(feed_dict['neg_item'])
pos_score = self._masked_item_prediction(seq_output, pos_vectors)
neg_score = self._masked_item_prediction(seq_output, neg_vectors)
mip_distance = torch.sigmoid(pos_score - neg_score)
valid_mask = torch.arange(mask_seq.size(1)).to(self.device)[None, :] < seq_len[:, None]
mip_mask = (feed_dict['mask_seq'] == mask_token).float() * valid_mask.float()
# SP
seg_seq_vectors = self.i_embeddings(feed_dict['mask_seg_seq'])
pos_seg_vectors = self.i_embeddings(feed_dict['pos_seg'])
neg_seg_vectors = self.i_embeddings(feed_dict['neg_seg'])
segment_context = self.encoder(seg_seq_vectors, seq_len)
pos_segment_emb = self.encoder(pos_seg_vectors, seq_len)
neg_segment_emb = self.encoder(neg_seg_vectors, seq_len)
pos_segment_score = self._segment_prediction(segment_context, pos_segment_emb)
neg_segment_score = self._segment_prediction(segment_context, neg_segment_emb)
sp_distance = torch.sigmoid(pos_segment_score - neg_segment_score)
out_dict = {'mip_dis': mip_distance, 'mip_mask': mip_mask, 'sp_dis': sp_distance}
else:
i_ids = feed_dict['item_id'] # bsz, n_candidate
history = feed_dict['history_items'] # bsz, history_max
lengths = feed_dict['lengths'] # bsz
his_vectors = self.i_embeddings(history)
his_vector = self.encoder(his_vectors, lengths)
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
out_dict = {'prediction': prediction}
return out_dict
def loss(self, out_dict):
if self.stage == 1:
loss_fct = nn.BCELoss(reduction='none')
mip_dis, mip_mask = out_dict['mip_dis'], out_dict['mip_mask']
mip_loss = loss_fct(mip_dis, torch.ones_like(mip_dis, dtype=torch.float32))
mip_loss = torch.sum(mip_loss * mip_mask.flatten())
sp_dis = out_dict['sp_dis']
sp_loss = torch.sum(loss_fct(sp_dis, torch.ones_like(sp_dis, dtype=torch.float32)))
loss = self.mip_weight * mip_loss + self.sp_weight * sp_loss
else:
loss = super().loss(out_dict)
return loss
class Dataset(SequentialModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
self.pre_train = self.model.stage == 1 and self.phase == 'train'
if self.pre_train:
self.long_seq = list()
item_seq, seq_len = list(), list()
for seq in self.corpus.user_his.values():
instance = [x[0] for x in seq]
self.long_seq.extend(instance)
for i in range((len(instance) - 1) // self.model.max_his + 1):
start = i * self.model.max_his
end = (i + 1) * self.model.max_his
trunc_instance = instance[start: end]
item_seq.append(trunc_instance)
seq_len.append(len(trunc_instance))
self.data = {'item_seq': item_seq, 'seq_len': seq_len}
def actions_before_epoch(self):
if self.model.stage != 1:
super().actions_before_epoch()
def _neg_sample(self, item_set):
item = np.random.randint(1, self.corpus.n_items)
while item in item_set:
item = np.random.randint(1, self.corpus.n_items)
return item
def _get_mask_seq(self, seq):
mask_token = self.model.item_num # 0 is reserved for padding
# MIP
mask_seq, pos_item, neg_item = seq.copy(), seq.copy(), seq.copy()
for idx, item in enumerate(seq):
prob = np.random.random()
if prob < self.model.mask_ratio:
mask_seq[idx] = mask_token
neg_item[idx] = self._neg_sample(seq)
# SP
if len(seq) < 2:
mask_seg_seq, pos_seg, neg_seg = seq.copy(), seq.copy(), seq.copy()
else:
sample_len = np.random.randint(1, len(seq) // 2 + 1)
start_id = np.random.randint(0, len(seq) - sample_len)
neg_start_id = np.random.randint(0, len(self.long_seq) - sample_len)
pos_segment = seq[start_id:start_id + sample_len]
neg_segment = self.long_seq[neg_start_id:neg_start_id + sample_len]
mask_seg_seq = seq[:start_id] + [mask_token] * sample_len + seq[start_id + sample_len:]
pos_seg = [mask_token] * start_id + pos_segment + [mask_token] * (len(seq) - (start_id + sample_len))
neg_seg = [mask_token] * start_id + neg_segment + [mask_token] * (len(seq) - (start_id + sample_len))
return mask_seq, pos_item, neg_item, mask_seg_seq, pos_seg, neg_seg
def _get_feed_dict(self, index):
if self.pre_train:
item_seq = self.data['item_seq'][index]
mask_seq, pos_item, neg_item, mask_seg_seq, pos_seg, neg_seg = self._get_mask_seq(item_seq)
feed_dict = {
'mask_seq': np.array(mask_seq),
'pos_item': np.array(pos_item),
'neg_item': np.array(neg_item),
'mask_seg_seq': np.array(mask_seg_seq),
'pos_seg': np.array(pos_seg),
'neg_seg': np.array(neg_seg),
'seq_len': self.data['seq_len'][index]
}
else:
feed_dict = super()._get_feed_dict(index)
return feed_dict
""" Encoder Layer """
class BERT4RecEncoder(nn.Module):
def __init__(self, emb_size, max_his, num_layers=2, num_heads=2, dropout=0.2):
super().__init__()
self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
self.transformer_block = nn.ModuleList([
layers.TransformerLayer(d_model=emb_size, d_ff=emb_size, n_heads=num_heads)
for _ in range(num_layers)
])
self.layer_norm = nn.LayerNorm(emb_size)
self.dropout = nn.Dropout(dropout)
def forward(self, seq, lengths):
batch_size, seq_len = seq.size(0), seq.size(1)
len_range = torch.from_numpy(np.arange(seq_len)).to(seq.device)
valid_mask = len_range[None, :] < lengths[:, None]
# Position embedding
position = len_range[None, :] * valid_mask.long()
pos_vectors = self.p_embeddings(position)
seq = seq + pos_vectors
seq = self.dropout(self.layer_norm(seq))
# Self-attention
attn_mask = valid_mask.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
seq = block(seq, attn_mask)
seq = seq * valid_mask[:, :, None].float()
his_vector = seq[torch.arange(batch_size), lengths - 1]
return his_vector
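# A toy sketch (invented sequence and sizes) of the masking step performed in
# Dataset._get_mask_seq: each position is replaced by the mask token (= item_num)
# with probability mask_ratio, and only masked positions enter the MIP loss.
if __name__ == '__main__':
    np.random.seed(0)
    item_num, mask_ratio = 10, 0.6
    seq = [3, 7, 2, 9]
    mask_seq = [item_num if np.random.random() < mask_ratio else it for it in seq]
    print(mask_seq) # positions holding the value 10 are the masked ones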
| 10,204 | 46.465116 | 117 | py |
ReChorus | ReChorus-master/src/models/developing/FourierTA.py | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import numpy as np
from utils import layers
from models.BaseModel import SequentialModel
from helpers.KDAReader import KDAReader
class FourierTA(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['t_scalar']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--t_scalar', type=int, default=60,
help='Time interval scalar.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.freq_dim = args.emb_size
self.emb_size = args.emb_size
self.t_scalar = args.t_scalar
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.user_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.item_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.fourier_attn = FourierTemporalAttention(self.emb_size, self.freq_dim, self.device)
self.W1 = nn.Linear(self.emb_size, self.emb_size)
self.W2 = nn.Linear(self.emb_size, self.emb_size)
self.dropout_layer = nn.Dropout(self.dropout)
self.layer_norm = nn.LayerNorm(self.emb_size)
self.item_bias = nn.Embedding(self.item_num, 1)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id'] # B
i_ids = feed_dict['item_id'] # B * -1
history = feed_dict['history_items'] # B * H
delta_t_n = feed_dict['history_delta_t'].float() # B * H
batch_size, seq_len = history.shape
u_vectors = self.user_embeddings(u_ids)
i_vectors = self.item_embeddings(i_ids)
his_vectors = self.item_embeddings(history) # B * H * V
valid_mask = (history > 0).view(batch_size, 1, seq_len)
context = self.fourier_attn(his_vectors, delta_t_n, i_vectors, valid_mask) # B * -1 * V
residual = context
# feed forward
context = self.W1(context)
context = self.W2(context.relu())
# dropout, residual and layer_norm
context = self.dropout_layer(context)
context = self.layer_norm(residual + context)
# context = self.layer_norm(context)
i_bias = self.item_bias(i_ids).squeeze(-1)
prediction = ((u_vectors[:, None, :] + context) * i_vectors).sum(dim=-1)
prediction = prediction + i_bias
out_dict = {'prediction': prediction}
return out_dict
class Dataset(SequentialModel.Dataset):
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
delta_t = self.data['time'][index] - feed_dict['history_times']
feed_dict['history_delta_t'] = KDAReader.norm_time(delta_t, self.model.t_scalar)
return feed_dict
class FourierTemporalAttention(nn.Module):
def __init__(self, emb_size: int, freq_dim: int, device):
super().__init__()
self.d = emb_size
self.d_f = freq_dim
self.freq_real = nn.Parameter(torch.zeros(self.d_f))
self.freq_imag = nn.Parameter(torch.zeros(self.d_f))
self.A = nn.Linear(self.d, 10)
self.A_out = nn.Linear(10, 1, bias=False)
nn.init.normal_(self.freq_real.data, mean=0.0, std=0.01)
nn.init.normal_(self.freq_imag.data, mean=0.0, std=0.01)
freq = np.linspace(0, 1, self.d_f) / 2.
self.freqs = torch.from_numpy(np.concatenate((freq, -freq))).to(device).float()
def idft_decay(self, delta_t):
# create conjugate symmetric to ensure real number output
x_real = torch.cat([self.freq_real, self.freq_real], dim=-1)
x_imag = torch.cat([self.freq_imag, -self.freq_imag], dim=-1)
w = 2. * np.pi * self.freqs * delta_t.unsqueeze(-1) # B * H * n_freq
real_part = w.cos() * x_real[None, None, :] # B * H * n_freq
imag_part = w.sin() * x_imag[None, None, :]
decay = (real_part - imag_part).mean(dim=-1) / 2. # B * H
return decay.clamp(0, 1).float()
def forward(self, seq, delta_t_n, target, valid_mask):
query_vector = seq[:, None, :, :] * target[:, :, None, :]
attention = self.A_out(self.A(query_vector).tanh()).squeeze(-1) # B * -1 * H
# attention = torch.matmul(target, seq.transpose(-2, -1)) / self.d ** 0.5 # B * -1 * H
# shift masked softmax
attention = attention - attention.max()
attention = attention.masked_fill(valid_mask==0, -np.inf).softmax(dim=-1)
# temporal evolution
decay = self.idft_decay(delta_t_n).unsqueeze(1).masked_fill(valid_mask==0, 0.) # B * 1 * H
attention = attention * decay
# attentional aggregation of history items
context = torch.matmul(attention, seq) # B * -1 * V
return context
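# A quick CPU sketch (toy sizes) of the learned frequency-domain decay: the spectrum
# is made conjugate-symmetric so the inverse DFT in idft_decay is real-valued, and the
# result is clamped to [0, 1] so it can discount attention over time.
if __name__ == '__main__':
    attn = FourierTemporalAttention(emb_size=8, freq_dim=4, device=torch.device('cpu'))
    delta_t = torch.tensor([[0.0, 1.0, 5.0]]) # normalized time gaps, B=1, H=3
    print(attn.idft_decay(delta_t)) # shape [1, 3], each entry in [0, 1]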
| 5,011 | 40.421488 | 99 | py |
ReChorus | ReChorus-master/src/models/developing/CLRec.py | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
class CLRec(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['batch_size', 'temp']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--temp', type=float, default=0.2,
help='Temperature in contrastive loss.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.temp = args.temp
self.max_his = args.history_max
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.encoder = BERT4RecEncoder(self.emb_size, self.max_his, num_layers=2, num_heads=2)
self.contra_loss = ContraLoss(temperature=self.temp)
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # bsz, n_candidate
history = feed_dict['history_items'] # bsz, history_max
lengths = feed_dict['lengths'] # bsz
his_vectors = self.i_embeddings(history)
his_vector = self.encoder(his_vectors, lengths)
i_vectors = self.i_embeddings(i_ids)
# his_vector = F.normalize(his_vector, dim=-1)
# i_vectors = F.normalize(i_vectors, dim=-1)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
target_vector = i_vectors[:, 0, :]
features = torch.stack([his_vector, target_vector], dim=1) # bsz, 2, emb
features = F.normalize(features, dim=-1)
out_dict['features'] = features # bsz, 2, emb
return out_dict
def loss(self, out_dict):
return self.contra_loss(out_dict['features'])
class Dataset(SequentialModel.Dataset):
# No need to sample negative items
def actions_before_epoch(self):
self.data['neg_items'] = [[] for _ in range(len(self))]
""" Contrastive Loss """
class ContraLoss(nn.Module):
def __init__(self, temperature=0.2):
super(ContraLoss, self).__init__()
self.temperature = temperature
def forward(self, features, mask=None):
"""
Args:
features: hidden vector of shape [bsz, n_views, ...].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sequence j
has the same target item as sequence i. Can be asymmetric.
Returns:
A loss scalar.
"""
if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...], '
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size, device = features.shape[0], features.device
if mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
# compute logits
dot_contrast = torch.matmul(features[:, 0], features[:, 1].transpose(0, 1)) / self.temperature
# for numerical stability
logits_max, _ = torch.max(dot_contrast, dim=1, keepdim=True)
logits = dot_contrast - logits_max.detach() # bsz, bsz
# compute log_prob
exp_logits = torch.exp(logits)
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-10)
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1)
return -mean_log_prob_pos.mean()
""" Encoder Layer """
class BERT4RecEncoder(nn.Module):
def __init__(self, emb_size, max_his, num_layers=2, num_heads=2):
super().__init__()
self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
self.transformer_block = nn.ModuleList([
layers.TransformerLayer(d_model=emb_size, d_ff=emb_size, n_heads=num_heads)
for _ in range(num_layers)
])
def forward(self, seq, lengths):
batch_size, seq_len = seq.size(0), seq.size(1)
len_range = torch.from_numpy(np.arange(seq_len)).to(seq.device)
valid_mask = len_range[None, :] < lengths[:, None]
# Position embedding
position = len_range[None, :] * valid_mask.long()
pos_vectors = self.p_embeddings(position)
seq = seq + pos_vectors
# Self-attention
attn_mask = valid_mask.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
seq = block(seq, attn_mask)
seq = seq * valid_mask[:, :, None].float()
his_vector = seq[torch.arange(batch_size), lengths - 1]
return his_vector
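# A minimal usage sketch of ContraLoss with random features and invented sizes: each
# row carries two L2-normalized views (sequence vector, target item vector), and with
# the default identity mask the loss is InfoNCE over in-batch negatives.
if __name__ == '__main__':
    features = F.normalize(torch.randn(4, 2, 8), dim=-1) # bsz=4, n_views=2, emb=8
    print(ContraLoss(temperature=0.2)(features).item())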
| 5,081 | 35.826087 | 102 | py |
ReChorus | ReChorus-master/src/models/sequential/FPMC.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" FPMC
Reference:
"Factorizing Personalized Markov Chains for Next-Basket Recommendation"
Rendle et al., WWW'2010.
CMD example:
python main.py --model_name FPMC --emb_size 64 --lr 1e-3 --l2 1e-6 --history_max 20 \
--dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
from models.BaseModel import SequentialModel
class FPMC(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.ui_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.iu_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.li_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.il_embeddings = nn.Embedding(self.item_num, self.emb_size)
def forward(self, feed_dict):
self.check_list = []
u_id = feed_dict['user_id'] # [batch_size]
i_ids = feed_dict['item_id'] # [batch_size, -1]
li_id = feed_dict['last_item_id'] # [batch_size]
ui_vectors = self.ui_embeddings(u_id)
iu_vectors = self.iu_embeddings(i_ids)
li_vectors = self.li_embeddings(li_id)
il_vectors = self.il_embeddings(i_ids)
prediction = (ui_vectors[:, None, :] * iu_vectors).sum(-1) + (li_vectors[:, None, :] * il_vectors).sum(-1)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
class Dataset(SequentialModel.Dataset):
def _get_feed_dict(self, index):
user_id, target_item = self.data['user_id'][index], self.data['item_id'][index]
if self.phase != 'train' and self.model.test_all:
neg_items = np.arange(1, self.corpus.n_items)
else:
neg_items = self.data['neg_items'][index]
item_ids = np.concatenate([[target_item], neg_items]).astype(int)
pos = self.data['position'][index]
last_item_id = self.corpus.user_his[user_id][pos - 1][0]
feed_dict = {
'user_id': user_id,
'item_id': item_ids,
'last_item_id': last_item_id
}
return feed_dict
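# A standalone sketch of the FPMC score computed in forward: a matrix factorization
# term <V_u, V_i> plus a first-order Markov term <V_last, V_i>. For brevity the toy
# vectors below collapse the separate iu/il embedding tables into a single table.
if __name__ == '__main__':
    u, last = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
    cand = torch.tensor([[0.5, 0.5], [1.0, -1.0]]) # two candidate items
    print(u @ cand.T + last @ cand.T) # tensor([1., 0.]): candidate 0 ranks higher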
| 2,684 | 35.283784 | 114 | py |
ReChorus | ReChorus-master/src/models/sequential/SASRec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" SASRec
Reference:
"Self-attentive Sequential Recommendation"
    Kang et al., ICDM'2018.
Note:
When incorporating position embedding, we make the position index start from the most recent interaction.
CMD example:
python main.py --model_name SASRec --emb_size 64 --num_layers 1 --num_heads 1 --lr 1e-4 --l2 1e-6 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
class SASRec(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'num_layers', 'num_heads']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--num_layers', type=int, default=1,
help='Number of self-attention layers.')
parser.add_argument('--num_heads', type=int, default=4,
help='Number of attention heads.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.max_his = args.history_max
self.num_layers = args.num_layers
self.num_heads = args.num_heads
self.len_range = torch.from_numpy(np.arange(self.max_his)).to(self.device)
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.p_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
self.transformer_block = nn.ModuleList([
layers.TransformerLayer(d_model=self.emb_size, d_ff=self.emb_size, n_heads=self.num_heads,
dropout=self.dropout, kq_same=False)
for _ in range(self.num_layers)
])
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
history = feed_dict['history_items'] # [batch_size, history_max]
lengths = feed_dict['lengths'] # [batch_size]
batch_size, seq_len = history.shape
valid_his = (history > 0).long()
his_vectors = self.i_embeddings(history)
# Position embedding
# lengths: [4, 2, 5]
# position: [[4, 3, 2, 1, 0], [2, 1, 0, 0, 0], [5, 4, 3, 2, 1]]
position = (lengths[:, None] - self.len_range[None, :seq_len]) * valid_his
pos_vectors = self.p_embeddings(position)
his_vectors = his_vectors + pos_vectors
# Self-attention
        causality_mask = np.tril(np.ones((1, 1, seq_len, seq_len), dtype=np.int64))  # np.int was removed in NumPy 1.24
attn_mask = torch.from_numpy(causality_mask).to(self.device)
# attn_mask = valid_his.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
his_vectors = block(his_vectors, attn_mask)
his_vectors = his_vectors * valid_his[:, :, None].float()
his_vector = his_vectors[torch.arange(batch_size), lengths - 1, :]
# his_vector = his_vectors.sum(1) / lengths[:, None].float()
# ↑ average pooling is shown to be more effective than the most recent embedding
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
return {'prediction': prediction.view(batch_size, -1)}
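# A small sketch reproducing the position-index example in the comment inside forward:
# indices count down from the sequence length, so the most recent interaction gets
# position 1 and padded slots collapse to 0. The toy histories below are invented.
if __name__ == '__main__':
    lengths = torch.tensor([4, 2, 5])
    history = torch.tensor([[1, 2, 3, 4, 0], [7, 8, 0, 0, 0], [1, 2, 3, 4, 5]])
    len_range = torch.arange(history.shape[1])
    position = (lengths[:, None] - len_range[None, :]) * (history > 0).long()
    print(position) # [[4, 3, 2, 1, 0], [2, 1, 0, 0, 0], [5, 4, 3, 2, 1]]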
| 3,606 | 38.637363 | 109 | py |
ReChorus | ReChorus-master/src/models/sequential/Caser.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" Caser
Reference:
"Personalized Top-N Sequential Recommendation via Convolutional Sequence Embedding"
Jiaxi Tang et al., WSDM'2018.
Reference code:
https://github.com/graytowne/caser_pytorch
Note:
We use a maximum of L (instead of history_max) horizontal filters to prevent excessive CNN layers.
    Besides, to keep consistent with other sequential models, we do not use the paper's sliding window to generate
    training instances, and we set the parameter T to 1.
CMD example:
python main.py --model_name Caser --emb_size 64 --L 5 --num_horizon 64 --num_vertical 32 --lr 1e-3 --l2 1e-4 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
from torch import nn
import torch.nn.functional as F
from models.BaseModel import SequentialModel
class Caser(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'num_horizon', 'num_vertical', 'L']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--num_horizon', type=int, default=16,
help='Number of horizon convolution kernels.')
parser.add_argument('--num_vertical', type=int, default=8,
help='Number of vertical convolution kernels.')
parser.add_argument('--L', type=int, default=4,
help='Union window size.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.max_his = args.history_max
self.num_horizon = args.num_horizon
self.num_vertical = args.num_vertical
self.l = args.L
assert self.l <= self.max_his # use L instead of max_his to avoid excessive conv_h
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size, padding_idx=0)
lengths = [i + 1 for i in range(self.l)]
self.conv_h = nn.ModuleList(
[nn.Conv2d(in_channels=1, out_channels=self.num_horizon, kernel_size=(i, self.emb_size)) for i in lengths])
self.conv_v = nn.Conv2d(in_channels=1, out_channels=self.num_vertical, kernel_size=(self.max_his, 1))
self.fc_dim_h = self.num_horizon * len(lengths)
self.fc_dim_v = self.num_vertical * self.emb_size
fc_dim_in = self.fc_dim_v + self.fc_dim_h
self.fc = nn.Linear(fc_dim_in, self.emb_size)
self.out = nn.Linear(self.emb_size * 2, self.emb_size)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id']
i_ids = feed_dict['item_id'] # [batch_size, -1]
history = feed_dict['history_items'] # [batch_size, history_max]
batch_size, seq_len = history.shape
pad_len = self.max_his - seq_len
history = F.pad(history, [0, pad_len])
his_vectors = self.i_embeddings(history).unsqueeze(1) # [batch_size, 1, history_max, emb_size]
# Convolution Layers
out, out_h, out_v = None, None, None
# vertical conv layer
if self.num_vertical > 0:
out_v = self.conv_v(his_vectors)
out_v = out_v.view(-1, self.fc_dim_v) # prepare for fully connect
# horizontal conv layer
out_hs = list()
if self.num_horizon > 0:
for conv in self.conv_h:
conv_out = conv(his_vectors).squeeze(3).relu()
pool_out = F.max_pool1d(conv_out, conv_out.size(2)).squeeze(2)
out_hs.append(pool_out)
out_h = torch.cat(out_hs, 1) # prepare for fully connect
# Fully-connected Layers
user_vector = self.u_embeddings(u_ids)
z = self.fc(torch.cat([out_v, out_h], 1)).relu()
his_vector = self.out(torch.cat([z, user_vector], 1))
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
        return {'prediction': prediction.view(batch_size, -1)}
| 4,363 | 41.368932 | 119 | py |
ReChorus | ReChorus-master/src/models/sequential/SLRCPlus.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" SLRC+
Reference:
"Modeling Item-specific Temporal Dynamics of Repeat Consumption for Recommender Systems"
Chenyang Wang et al., TheWebConf'2019.
Reference code:
The authors' tensorflow implementation https://github.com/THUwangcy/SLRC
Note:
We generalize the original SLRC by also including mutual-excitation of relational history interactions.
    This makes SLRC+ a knowledge-aware model, and the original SLRC can be seen as the special case with only one
    relation, the one between an item and itself (i.e., repeat consumption).
CMD example:
python main.py --model_name SLRCPlus --emb_size 64 --lr 5e-4 --l2 1e-5 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import torch.distributions
import numpy as np
from models.BaseModel import SequentialModel
from helpers.KGReader import KGReader
class SLRCPlus(SequentialModel):
reader = 'KGReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--time_scalar', type=int, default=60 * 60 * 24 * 100,
help='Time scalar for time intervals.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus: KGReader):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.time_scalar = args.time_scalar
self.relation_num = len(corpus.item_relations) + 1
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.user_bias = nn.Embedding(self.user_num, 1)
self.item_bias = nn.Embedding(self.item_num, 1)
self.global_alpha = nn.Parameter(torch.tensor(0.))
self.alphas = nn.Embedding(self.item_num, self.relation_num)
self.pis = nn.Embedding(self.item_num, self.relation_num)
self.betas = nn.Embedding(self.item_num, self.relation_num)
self.sigmas = nn.Embedding(self.item_num, self.relation_num)
self.mus = nn.Embedding(self.item_num, self.relation_num)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id'] # [batch_size]
i_ids = feed_dict['item_id'] # [batch_size, -1]
r_intervals = feed_dict['relational_interval'] # [batch_size, -1, relation_num]
# Excitation
alphas = self.global_alpha + self.alphas(i_ids)
pis, mus = self.pis(i_ids) + 0.5, self.mus(i_ids) + 1
betas = (self.betas(i_ids) + 1).clamp(min=1e-10, max=10)
sigmas = (self.sigmas(i_ids) + 1).clamp(min=1e-10, max=10)
mask = (r_intervals >= 0).float()
delta_t = r_intervals * mask
norm_dist = torch.distributions.normal.Normal(mus, sigmas)
exp_dist = torch.distributions.exponential.Exponential(betas, validate_args=False)
decay = pis * exp_dist.log_prob(delta_t).exp() + (1 - pis) * norm_dist.log_prob(delta_t).exp()
excitation = (alphas * decay * mask).sum(-1) # [batch_size, -1]
# Base Intensity (MF)
u_bias = self.user_bias(u_ids)
i_bias = self.item_bias(i_ids).squeeze(-1)
cf_u_vectors = self.u_embeddings(u_ids)
cf_i_vectors = self.i_embeddings(i_ids)
base_intensity = (cf_u_vectors[:, None, :] * cf_i_vectors).sum(-1)
base_intensity = base_intensity + u_bias + i_bias
prediction = base_intensity + excitation
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
class Dataset(SequentialModel.Dataset):
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
user_id, time = self.data['user_id'][index], self.data['time'][index]
history_item, history_time = feed_dict['history_items'], feed_dict['history_times']
# Collect time information related to the target item:
# - re-consuming time gaps
# - time intervals w.r.t. recent relational interactions
relational_interval = list()
for i, target_item in enumerate(feed_dict['item_id']):
interval = np.ones(self.model.relation_num, dtype=float) * -1 # -1 if not existing
# the first dimension for re-consuming time gaps
for j in range(len(history_item))[::-1]:
if history_item[j] == target_item:
interval[0] = (time - history_time[j]) / self.model.time_scalar
break
# the rest for relational time intervals
for r_idx in range(1, self.model.relation_num):
for j in range(len(history_item))[::-1]:
if (history_item[j], r_idx, target_item) in self.corpus.triplet_set:
interval[r_idx] = (time - history_time[j]) / self.model.time_scalar
break
relational_interval.append(interval)
feed_dict['relational_interval'] = np.array(relational_interval, dtype=np.float32)
return feed_dict
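# A standalone sketch (toy parameters) of the excitation kernel evaluated in forward:
# a mixture of an exponential decay (strong right after consumption) and a normal
# bump around mu (delayed repurchase), following the SLRC formulation.
if __name__ == '__main__':
    pi, beta, mu, sigma = 0.6, 2.0, 1.0, 0.5
    exp_dist = torch.distributions.exponential.Exponential(torch.tensor(beta))
    norm_dist = torch.distributions.normal.Normal(torch.tensor(mu), torch.tensor(sigma))
    for dt in (0.05, 1.0, 3.0):
        t = torch.tensor(dt)
        decay = pi * exp_dist.log_prob(t).exp() + (1 - pi) * norm_dist.log_prob(t).exp()
        print(dt, round(decay.item(), 4))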
| 5,414 | 45.282051 | 111 | py |
ReChorus | ReChorus-master/src/models/sequential/NARM.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" NARM
Reference:
"Neural Attentive Session-based Recommendation"
Jing Li et al., CIKM'2017.
CMD example:
python main.py --model_name NARM --emb_size 64 --hidden_size 100 --attention_size 4 --lr 1e-3 --l2 1e-4 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
from models.BaseModel import SequentialModel
class NARM(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'hidden_size', 'attention_size']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--hidden_size', type=int, default=100,
help='Size of hidden vectors in GRU.')
parser.add_argument('--attention_size', type=int, default=50,
help='Size of attention hidden space.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.hidden_size = args.hidden_size
self.attention_size = args.attention_size
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.encoder_g = nn.GRU(input_size=self.emb_size, hidden_size=self.hidden_size, batch_first=True)
self.encoder_l = nn.GRU(input_size=self.emb_size, hidden_size=self.hidden_size, batch_first=True)
self.A1 = nn.Linear(self.hidden_size, self.attention_size, bias=False)
self.A2 = nn.Linear(self.hidden_size, self.attention_size, bias=False)
self.attention_out = nn.Linear(self.attention_size, 1, bias=False)
self.out = nn.Linear(2 * self.hidden_size, self.emb_size, bias=False)
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
history = feed_dict['history_items'] # [batch_size, history_max]
lengths = feed_dict['lengths'] # [batch_size]
# Embedding Layer
i_vectors = self.i_embeddings(i_ids)
his_vectors = self.i_embeddings(history)
# Encoding Layer
sort_his_lengths, sort_idx = torch.topk(lengths, k=len(lengths))
sort_his_vectors = his_vectors.index_select(dim=0, index=sort_idx)
history_packed = nn.utils.rnn.pack_padded_sequence(sort_his_vectors, sort_his_lengths.cpu(), batch_first=True)
_, hidden_g = self.encoder_g(history_packed, None)
output_l, hidden_l = self.encoder_l(history_packed, None)
output_l, _ = torch.nn.utils.rnn.pad_packed_sequence(output_l, batch_first=True)
unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
output_l = output_l.index_select(dim=0, index=unsort_idx) # [batch_size, history_max, emb_size]
hidden_g = hidden_g[-1].index_select(dim=0, index=unsort_idx) # [batch_size, emb_size]
# Attention Layer
attention_g = self.A1(hidden_g)
attention_l = self.A2(output_l)
attention_value = self.attention_out((attention_g[:, None, :] + attention_l).sigmoid())
mask = (history > 0).unsqueeze(-1)
attention_value = attention_value.masked_fill(mask == 0, 0)
c_l = (attention_value * output_l).sum(1)
# Prediction Layer
pred_vector = self.out(torch.cat((hidden_g, c_l), dim=1))
prediction = (pred_vector[:, None, :] * i_vectors).sum(dim=-1)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
| 3,776 | 43.435294 | 118 | py |
ReChorus | ReChorus-master/src/models/sequential/Chorus.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" Chorus
Reference:
"Make It a Chorus: Knowledge- and Time-aware Item Modeling for Sequential Recommendation"
Chenyang Wang et al., SIGIR'2020.
CMD example:
python main.py --model_name Chorus --emb_size 64 --margin 1 --lr 5e-4 --l2 1e-5 --epoch 50 --early_stop 0 \
--batch_size 512 --dataset 'Grocery_and_Gourmet_Food' --stage 1
python main.py --model_name Chorus --emb_size 64 --margin 1 --lr_scale 0.1 --lr 1e-3 --l2 0 \
--dataset 'Grocery_and_Gourmet_Food' --base_method 'BPR' --stage 2
"""
import os
import torch
import torch.nn as nn
import torch.distributions
import numpy as np
from utils import utils
from models.BaseModel import SequentialModel
class Chorus(SequentialModel):
reader = 'KGReader'
runner = 'BaseRunner'
extra_log_args = ['margin', 'lr_scale', 'stage']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--stage', type=int, default=2,
help='Stage of training: 1-KG_pretrain, 2-recommendation.')
parser.add_argument('--base_method', type=str, default='BPR',
help='Basic method to generate recommendations: BPR, GMF')
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--time_scalar', type=int, default=60 * 60 * 24 * 100,
help='Time scalar for time intervals.')
parser.add_argument('--category_col', type=str, default='i_category',
help='The name of category column in item_meta.csv.')
parser.add_argument('--lr_scale', type=float, default=0.1,
help='Scale the lr for parameters in pre-trained KG model.')
parser.add_argument('--margin', type=float, default=1,
help='Margin in hinge loss.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.margin = args.margin
self.stage = args.stage
self.kg_lr = args.lr_scale * args.lr
self.base_method = args.base_method
self.emb_size = args.emb_size
self.time_scalar = args.time_scalar
self.relations = corpus.item_relations
self.relation_num = len(corpus.item_relations) + 1
if args.category_col in corpus.item_meta_df.columns:
self.category_col = args.category_col
self.category_num = corpus.item_meta_df[self.category_col].max() + 1
else:
self.category_col, self.category_num = None, 1 # a virtual global category
self._define_params()
self.apply(self.init_weights)
assert self.stage in [1, 2]
self.pretrain_path = '../model/Chorus/KG__{}__emb_size={}__margin={}.pt' \
.format(corpus.dataset, self.emb_size, self.margin)
if self.stage == 1:
self.model_path = self.pretrain_path
if self.stage == 2:
if os.path.exists(self.pretrain_path):
self.load_model(self.pretrain_path)
else:
raise ValueError('Pre-trained KG model does not exist, please run with "--stage 1"')
self.relation_range = torch.from_numpy(np.arange(self.relation_num)).to(self.device)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.r_embeddings = nn.Embedding(self.relation_num, self.emb_size)
self.betas = nn.Embedding(self.category_num, self.relation_num)
self.mus = nn.Embedding(self.category_num, self.relation_num)
self.sigmas = nn.Embedding(self.category_num, self.relation_num)
self.prediction = nn.Linear(self.emb_size, 1, bias=False)
self.user_bias = nn.Embedding(self.user_num, 1)
self.item_bias = nn.Embedding(self.item_num, 1)
self.kg_loss = nn.MarginRankingLoss(margin=self.margin)
def forward(self, feed_dict):
self.check_list = []
if self.stage == 1 and feed_dict['phase'] == 'train':
prediction = self.kg_forward(feed_dict)
else:
prediction = self.rec_forward(feed_dict)
return {'prediction': prediction}
def kernel_functions(self, r_interval, betas, sigmas, mus):
"""
Define kernel function for each relation (exponential distribution by default)
:return [batch_size, -1, relation_num]
"""
decay_lst = list()
for r_idx in range(0, self.relation_num):
delta_t = r_interval[:, :, r_idx]
beta, sigma, mu = betas[:, :, r_idx], sigmas[:, :, r_idx], mus[:, :, r_idx]
if r_idx > 0 and 'complement' in self.relations[r_idx - 1]:
norm_dist = torch.distributions.normal.Normal(0, beta)
decay = norm_dist.log_prob(delta_t).exp()
elif r_idx > 0 and 'substitute' in self.relations[r_idx - 1]:
neg_norm_dist = torch.distributions.normal.Normal(0, beta)
norm_dist = torch.distributions.normal.Normal(mu, sigma)
decay = -neg_norm_dist.log_prob(delta_t).exp() + norm_dist.log_prob(delta_t).exp()
else: # exponential by default
exp_dist = torch.distributions.exponential.Exponential(beta, validate_args=False)
decay = exp_dist.log_prob(delta_t).exp()
decay_lst.append(decay.clamp(-1, 1))
return torch.stack(decay_lst, dim=2)
def rec_forward(self, feed_dict):
u_ids = feed_dict['user_id'] # [batch_size]
i_ids = feed_dict['item_id'] # [batch_size, -1]
c_ids = feed_dict['category_id'] # [batch_size, -1]
r_interval = feed_dict['relational_interval'] # [batch_size, -1, relation_num]
u_vectors = self.u_embeddings(u_ids)
i_vectors = self.i_embeddings(i_ids)
# Temporal Kernel Function
betas = (self.betas(c_ids) + 1).clamp(min=1e-10, max=10)
sigmas = (self.sigmas(c_ids) + 1).clamp(min=1e-10, max=10)
mus = self.mus(c_ids) + 1
mask = (r_interval >= 0).float() # mask positions where there is no corresponding relational history
temporal_decay = self.kernel_functions(r_interval * mask, betas, sigmas, mus)
temporal_decay = temporal_decay * mask # [batch_size, -1, relation_num]
# Dynamic Integrations
r_vectors = self.r_embeddings(self.relation_range)
ri_vectors = i_vectors[:, :, None, :] + r_vectors[None, None, :, :] # [batch_size, -1, relation_num, emb_size]
chorus_vectors = i_vectors + (temporal_decay[:, :, :, None] * ri_vectors).sum(2) # [batch_size, -1, emb_size]
# Prediction
if self.base_method.upper().strip() == 'GMF':
mf_vector = u_vectors[:, None, :] * chorus_vectors
prediction = self.prediction(mf_vector)
else:
u_bias = self.user_bias(u_ids)
i_bias = self.item_bias(i_ids).squeeze(-1)
prediction = (u_vectors[:, None, :] * chorus_vectors).sum(-1)
prediction = prediction + u_bias + i_bias
return prediction.view(feed_dict['batch_size'], -1)
def kg_forward(self, feed_dict):
head_ids = feed_dict['head_id'] # [batch_size, 4]
tail_ids = feed_dict['tail_id'] # [batch_size, 4]
relation_ids = feed_dict['relation_id'] # [batch_size, 4]
head_vectors = self.i_embeddings(head_ids)
tail_vectors = self.i_embeddings(tail_ids)
relation_vectors = self.r_embeddings(relation_ids)
# TransE
prediction = -((head_vectors + relation_vectors - tail_vectors) ** 2).sum(-1)
return prediction
def loss(self, out_dict):
if self.stage == 1:
predictions = out_dict['prediction']
batch_size = predictions.shape[0]
pos_pred, neg_pred = predictions[:, :2].flatten(), predictions[:, 2:].flatten()
target = torch.from_numpy(np.ones(batch_size * 2, dtype=np.float32)).to(self.device)
loss = self.kg_loss(pos_pred, neg_pred, target)
else:
loss = super().loss(out_dict)
return loss
def customize_parameters(self):
if self.stage == 2:
weight_p, kg_p, bias_p = [], [], []
for name, p in filter(lambda x: x[1].requires_grad, self.named_parameters()):
if 'bias' in name:
bias_p.append(p)
elif 'i_embeddings' in name or 'r_embeddings' in name:
kg_p.append(p)
else:
weight_p.append(p)
optimize_dict = [
{'params': weight_p},
{'params': kg_p, 'lr': self.kg_lr}, # scale down the lr of pretrained embeddings
{'params': bias_p, 'weight_decay': 0.0}
]
return optimize_dict
else:
return super().customize_parameters()
class Dataset(SequentialModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
self.kg_train = self.model.stage == 1 and self.phase == 'train'
if self.kg_train:
self.data = utils.df_to_dict(self.corpus.relation_df)
self.neg_heads = np.zeros(len(self), dtype=int)
self.neg_tails = np.zeros(len(self), dtype=int)
else:
col_name = self.model.category_col
items = self.corpus.item_meta_df['item_id']
categories = self.corpus.item_meta_df[col_name] if col_name is not None else np.zeros_like(items)
self.item2cate = dict(zip(items, categories))
def _get_feed_dict(self, index):
if self.kg_train:
head, tail = self.data['head'][index], self.data['tail'][index]
relation = self.data['relation'][index]
head_id = np.array([head, head, head, self.neg_heads[index]])
tail_id = np.array([tail, tail, self.neg_tails[index], tail])
relation_id = np.array([relation] * 4)
feed_dict = {'head_id': tail_id, 'tail_id': head_id, 'relation_id': relation_id}
            # ↑ the head and tail are reversed because the relations we want are is_complement_of and
            # is_substitute_of, which are the inverses of the original relations also_buy and also_view
else:
# Collect information related to the target item:
# - category id
# - time intervals w.r.t. recent relational interactions (-1 if not existing)
feed_dict = super()._get_feed_dict(index)
user_id, time = self.data['user_id'][index], self.data['time'][index]
history_item, history_time = feed_dict['history_items'], feed_dict['history_times']
category_id = [self.item2cate[x] for x in feed_dict['item_id']]
relational_interval = list()
for i, target_item in enumerate(feed_dict['item_id']):
interval = np.ones(self.model.relation_num, dtype=float) * -1
# relational intervals
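                # for each relation type, scan the history backwards and record the
                # normalized time gap to the most recent item linked to the target
                # by that relation (stays -1 if no such interaction exists)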
for r_idx in range(1, self.model.relation_num):
for j in range(len(history_item))[::-1]:
if (history_item[j], r_idx, target_item) in self.corpus.triplet_set:
interval[r_idx] = (time - history_time[j]) / self.model.time_scalar
break
relational_interval.append(interval)
feed_dict['category_id'] = np.array(category_id)
feed_dict['relational_interval'] = np.array(relational_interval, dtype=np.float32)
return feed_dict
def actions_before_epoch(self):
if self.kg_train: # sample negative heads and tails for the KG embedding task
for i in range(len(self)):
head, tail, relation = self.data['head'][i], self.data['tail'][i], self.data['relation'][i]
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
while (head, relation, self.neg_tails[i]) in self.corpus.triplet_set:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while (self.neg_heads[i], relation, tail) in self.corpus.triplet_set:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
else:
super().actions_before_epoch()
| 12,854 | 49.214844 | 119 | py |
ReChorus | ReChorus-master/src/models/sequential/ContraKDA.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" ContraKDA (KDA + ContraRec)
Reference:
"Toward Dynamic User Intention: Temporal Evolutionary Effects of Item Relations in Sequential Recommendation"
Chenyang Wang et al., TOIS'2021.
    "Sequential Recommendation with Multiple Contrast Signals"
Wang et al., TOIS'2022.
CMD example:
python main.py --model_name ContraKDA --emb_size 64 --include_attr 0 --freq_rand 0 --lr 1e-3 --l2 1e-6 \
--num_heads 4 --history_max 20 --contra_gamma 0.1 --ccc_temp 0.2 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
from utils import layers
from models.BaseModel import SequentialModel
from helpers.KDAReader import KDAReader
class ContraKDA(SequentialModel):
reader = 'KDAReader'
runner = 'BaseRunner'
extra_log_args = ['num_layers', 'num_heads', 'gamma', 'freq_rand', 'include_val', 'contra_gamma', 'ccc_temp']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--neg_head_p', type=float, default=0.5,
help='The probability of sampling negative head entity.')
parser.add_argument('--num_layers', type=int, default=1,
help='Number of self-attention layers.')
parser.add_argument('--num_heads', type=int, default=1,
help='Number of attention heads.')
parser.add_argument('--gamma', type=float, default=-1,
help='Coefficient of KG loss (-1 for auto-determine).')
parser.add_argument('--attention_size', type=int, default=10,
help='Size of attention hidden space.')
parser.add_argument('--pooling', type=str, default='average',
help='Method of pooling relational history embeddings: average, max, attention')
parser.add_argument('--include_val', type=int, default=1,
help='Whether include relation value in the relation representation')
parser.add_argument('--contra_gamma', type=float, default=0.1,
help='Coefficient of the contrastive loss.')
parser.add_argument('--ccc_temp', type=float, default=0.2,
help='Temperature in context-target contrastive loss.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.relation_num = corpus.n_relations
self.entity_num = corpus.n_entities
self.freq_x = corpus.freq_x
self.freq_dim = args.n_dft // 2 + 1
self.freq_rand = args.freq_rand
self.emb_size = args.emb_size
self.neg_head_p = args.neg_head_p
self.layer_num = args.num_layers
self.head_num = args.num_heads
self.attention_size = args.attention_size
self.pooling = args.pooling.lower()
self.include_val = args.include_val
self.gamma = args.gamma
if self.gamma < 0:
self.gamma = len(corpus.relation_df) / len(corpus.all_df)
self.contra_gamma = args.contra_gamma
self.ccc_temp = args.ccc_temp
self._define_params()
self.apply(self.init_weights)
if not self.freq_rand:
dft_freq_real = torch.tensor(np.real(self.freq_x)) # R * n_freq
dft_freq_imag = torch.tensor(np.imag(self.freq_x))
self.relational_dynamic_aggregation.freq_real.weight.data.copy_(dft_freq_real)
self.relational_dynamic_aggregation.freq_imag.weight.data.copy_(dft_freq_imag)
def _define_params(self):
self.user_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.entity_embeddings = nn.Embedding(self.entity_num + 1, self.emb_size)
self.relation_embeddings = nn.Embedding(self.relation_num, self.emb_size)
# First-level aggregation
self.relational_dynamic_aggregation = RelationalDynamicAggregation(
self.relation_num, self.freq_dim, self.relation_embeddings, self.include_val, self.device
)
# Second-level aggregation
self.attn_head = layers.MultiHeadAttention(self.emb_size, self.head_num, bias=False)
self.W1 = nn.Linear(self.emb_size, self.emb_size)
self.W2 = nn.Linear(self.emb_size, self.emb_size)
self.dropout_layer = nn.Dropout(self.dropout)
self.layer_norm = nn.LayerNorm(self.emb_size)
# Pooling
if self.pooling == 'attention':
self.A = nn.Linear(self.emb_size, self.attention_size)
self.A_out = nn.Linear(self.attention_size, 1, bias=False)
# Prediction
self.item_bias = nn.Embedding(self.item_num, 1)
""" ContraRec """
self.ccc_loss = ContraLoss(self.device, temperature=self.ccc_temp)
def forward(self, feed_dict):
self.check_list = []
_, prediction = self.rec_forward(feed_dict)
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
kg_prediction = self.kg_forward(feed_dict)
out_dict['kg_prediction'] = kg_prediction
""" ContraRec """
feed_dict['history_items'] = feed_dict['history_items_a']
his_a_vector, _ = self.rec_forward(feed_dict)
feed_dict['history_items'] = feed_dict['history_items_b']
his_b_vector, _ = self.rec_forward(feed_dict)
features = torch.stack([his_a_vector[:, 0], his_b_vector[:, 0]], dim=1) # bsz, 2, emb
features = F.normalize(features, dim=-1)
out_dict['features'] = features # bsz, 2, emb
out_dict['labels'] = feed_dict['item_id'][:, 0] # bsz
return out_dict
def rec_forward(self, feed_dict):
u_ids = feed_dict['user_id'] # B
i_ids = feed_dict['item_id'] # B * -1
v_ids = feed_dict['item_val'] # B * -1 * R
history = feed_dict['history_items'] # B * H
delta_t_n = feed_dict['history_delta_t'].float() # B * H
batch_size, seq_len = history.shape
u_vectors = self.user_embeddings(u_ids)
i_vectors = self.entity_embeddings(i_ids)
v_vectors = self.entity_embeddings(v_ids) # B * -1 * R * V
his_vectors = self.entity_embeddings(history) # B * H * V
"""
Relational Dynamic History Aggregation
"""
valid_mask = (history > 0).view(batch_size, 1, seq_len, 1)
context = self.relational_dynamic_aggregation(
his_vectors, delta_t_n, i_vectors, v_vectors, valid_mask) # B * -1 * R * V
"""
Multi-layer Self-attention
"""
for i in range(self.layer_num):
residual = context
# self-attention
context = self.attn_head(context, context, context)
# feed forward
context = self.W1(context)
context = self.W2(context.relu())
# dropout, residual and layer_norm
context = self.dropout_layer(context)
context = self.layer_norm(residual + context)
"""
Pooling Layer
"""
if self.pooling == 'attention':
query_vectors = context * u_vectors[:, None, None, :] # B * -1 * R * V
user_attention = self.A_out(self.A(query_vectors).tanh()).squeeze(-1) # B * -1 * R
user_attention = (user_attention - user_attention.max()).softmax(dim=-1)
his_vector = (context * user_attention[:, :, :, None]).sum(dim=-2) # B * -1 * V
elif self.pooling == 'max':
his_vector = context.max(dim=-2).values # B * -1 * V
else:
his_vector = context.mean(dim=-2) # B * -1 * V
"""
Prediction
"""
i_bias = self.item_bias(i_ids).squeeze(-1)
prediction = ((u_vectors[:, None, :] + his_vector) * i_vectors).sum(dim=-1)
prediction = prediction + i_bias
""" ContraRec """
return his_vector, prediction.view(feed_dict['batch_size'], -1)
def kg_forward(self, feed_dict):
head_ids = feed_dict['head_id'].long() # B * -1
tail_ids = feed_dict['tail_id'].long() # B * -1
value_ids = feed_dict['value_id'].long() # B
relation_ids = feed_dict['relation_id'].long() # B
head_vectors = self.entity_embeddings(head_ids)
tail_vectors = self.entity_embeddings(tail_ids)
value_vectors = self.entity_embeddings(value_ids)
relation_vectors = self.relation_embeddings(relation_ids)
# DistMult
if self.include_val:
prediction = (head_vectors * (relation_vectors + value_vectors)[:, None, :] * tail_vectors).sum(-1)
else:
prediction = (head_vectors * relation_vectors[:, None, :] * tail_vectors).sum(-1)
return prediction
def loss(self, out_dict):
predictions = out_dict['prediction']
pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
rec_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
predictions = out_dict['kg_prediction']
pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
kg_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
""" ContraRec """
ccc_loss = self.ccc_loss(out_dict['features'], labels=out_dict['labels'])
loss = rec_loss + self.gamma * kg_loss + self.contra_gamma * ccc_loss
return loss
class Dataset(SequentialModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
if self.phase == 'train':
self.kg_data, self.neg_heads, self.neg_tails = None, None, None
# Prepare item-to-value dict
item_val = self.corpus.item_meta_df.copy()
            item_val[self.corpus.item_relations] = 0  # natural item relations have no attribute value, use 0 as placeholder
for idx, r in enumerate(self.corpus.attr_relations):
base = self.corpus.n_items + np.sum(self.corpus.attr_max[:idx])
item_val[r] = item_val[r].apply(lambda x: x + base).astype(int)
            item_vals = item_val[self.corpus.relations].values  # this ensures the order is consistent with self.corpus.relations
self.item_val_dict = dict()
for item, vals in zip(item_val['item_id'].values, item_vals.tolist()):
                self.item_val_dict[item] = [0] + vals  # prepend 0 as the (empty) value of the virtual relation
""" ContraRec """
def reorder_op(self, seq):
ratio = np.random.beta(a=3, b=3)
select_len = int(len(seq) * ratio)
start = np.random.randint(0, len(seq) - select_len + 1)
idx_range = np.arange(len(seq))
np.random.shuffle(idx_range[start: start + select_len])
return seq[idx_range]
def mask_op(self, seq):
ratio = np.random.beta(a=3, b=3)
selected_len = int(len(seq) * ratio)
mask = np.full(len(seq), False)
mask[:selected_len] = True
np.random.shuffle(mask)
seq[mask] = self.model.entity_num
return seq
def augment(self, seq):
aug_seq = np.array(seq).copy()
if np.random.rand() > 0.5:
return self.mask_op(aug_seq)
else:
return self.reorder_op(aug_seq)
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
feed_dict['item_val'] = [self.item_val_dict[item] for item in feed_dict['item_id']]
delta_t = self.data['time'][index] - feed_dict['history_times']
feed_dict['history_delta_t'] = KDAReader.norm_time(delta_t, self.corpus.t_scalar)
if self.phase == 'train':
feed_dict['head_id'] = np.concatenate([[self.kg_data['head'][index]], self.neg_heads[index]])
feed_dict['tail_id'] = np.concatenate([[self.kg_data['tail'][index]], self.neg_tails[index]])
feed_dict['relation_id'] = self.kg_data['relation'][index]
feed_dict['value_id'] = self.kg_data['value'][index]
""" ContraRec """
history_items_a = self.augment(feed_dict['history_items'])
history_items_b = self.augment(feed_dict['history_items'])
feed_dict['history_items_a'] = history_items_a
feed_dict['history_items_b'] = history_items_b
return feed_dict
def generate_kg_data(self) -> pd.DataFrame:
rec_data_size = len(self)
replace = (rec_data_size > len(self.corpus.relation_df))
kg_data = self.corpus.relation_df.sample(n=rec_data_size, replace=replace).reset_index(drop=True)
kg_data['value'] = np.zeros(len(kg_data), dtype=int) # default for None
tail_select = kg_data['tail'].apply(lambda x: x < self.corpus.n_items)
item_item_df = kg_data[tail_select]
item_attr_df = kg_data.drop(item_item_df.index)
item_attr_df['value'] = item_attr_df['tail'].values
sample_tails = list() # sample items sharing the same attribute
for head, val in zip(item_attr_df['head'].values, item_attr_df['tail'].values):
share_attr_items = self.corpus.share_attr_dict[val]
tail_idx = np.random.randint(len(share_attr_items))
sample_tails.append(share_attr_items[tail_idx])
item_attr_df['tail'] = sample_tails
kg_data = pd.concat([item_item_df, item_attr_df], ignore_index=True)
return kg_data
def actions_before_epoch(self):
super().actions_before_epoch()
self.kg_data = self.generate_kg_data()
heads, tails = self.kg_data['head'].values, self.kg_data['tail'].values
relations, vals = self.kg_data['relation'].values, self.kg_data['value'].values
self.neg_heads = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
self.neg_tails = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
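            # Rejection sampling: resample the corrupted entity until the negative
            # triplet no longer appears in the true triplet set of the KG.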
for i in range(len(self.kg_data)):
                item_item_relation = (tails[i] < self.corpus.n_items)  # attribute values start at n_items
for j in range(self.model.num_neg):
if np.random.rand() < self.model.neg_head_p: # sample negative head
tail = tails[i] if item_item_relation else vals[i]
while (self.neg_heads[i][j], relations[i], tail) in self.corpus.triplet_set:
self.neg_heads[i][j] = np.random.randint(1, self.corpus.n_items)
self.neg_tails[i][j] = tails[i]
else: # sample negative tail
head = heads[i] if item_item_relation else self.neg_tails[i][j]
tail = self.neg_tails[i][j] if item_item_relation else vals[i]
while (head, relations[i], tail) in self.corpus.triplet_set:
self.neg_tails[i][j] = np.random.randint(1, self.corpus.n_items)
head = heads[i] if item_item_relation else self.neg_tails[i][j]
tail = self.neg_tails[i][j] if item_item_relation else vals[i]
self.neg_heads[i][j] = heads[i]
class RelationalDynamicAggregation(nn.Module):
def __init__(self, n_relation, n_freq, relation_embeddings, include_val, device):
super().__init__()
self.relation_embeddings = relation_embeddings
self.include_val = include_val
self.freq_real = nn.Embedding(n_relation, n_freq)
self.freq_imag = nn.Embedding(n_relation, n_freq)
freq = np.linspace(0, 1, n_freq) / 2.
self.freqs = torch.from_numpy(np.concatenate((freq, -freq))).to(device).float()
self.relation_range = torch.from_numpy(np.arange(n_relation)).to(device)
def idft_decay(self, delta_t):
real, imag = self.freq_real(self.relation_range), self.freq_imag(self.relation_range)
# create conjugate symmetric to ensure real number output
x_real = torch.cat([real, real], dim=-1)
x_imag = torch.cat([imag, -imag], dim=-1)
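        # Evaluate the inverse DFT at continuous time offsets: the decay curve is
        #   f(Δt) = mean_k [x_real_k * cos(2π f_k Δt) - x_imag_k * sin(2π f_k Δt)] / 2
        # and the conjugate-symmetric spectrum keeps the output real-valued.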
        w = 2. * np.pi * self.freqs * delta_t.unsqueeze(-1)  # B * H * 2n_freq
        real_part = w.cos()[:, :, None, :] * x_real[None, None, :, :]  # B * H * R * 2n_freq
        imag_part = w.sin()[:, :, None, :] * x_imag[None, None, :, :]
        decay = (real_part - imag_part).mean(dim=-1) / 2.  # B * H * R
return decay.float()
def forward(self, seq, delta_t_n, target, target_value, valid_mask):
r_vectors = self.relation_embeddings(self.relation_range) # R * V
if self.include_val:
rv_vectors = r_vectors[None, None, :, :] + target_value
ri_vectors = rv_vectors * target[:, :, None, :] # B * -1 * R * V
else:
ri_vectors = r_vectors[None, None, :, :] * target[:, :, None, :] # B * -1 * R * V
attention = (seq[:, None, :, None, :] * ri_vectors[:, :, None, :, :]).sum(-1) # B * -1 * H * R
# shift masked softmax
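        # subtract the global max for numerical stability, then set invalid (padded)
        # history positions to -inf so they receive zero attention weight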
attention = attention - attention.max()
attention = attention.masked_fill(valid_mask == 0, -np.inf).softmax(dim=-2)
# temporal evolution
        decay = self.idft_decay(delta_t_n).clamp(0, 1).unsqueeze(1).masked_fill(valid_mask == 0, 0.)  # B * 1 * H * R
attention = attention * decay
# attentional aggregation of history items
context = (seq[:, None, :, None, :] * attention[:, :, :, :, None]).sum(-3) # B * -1 * R * V
return context
""" Context-Context Contrastive Loss """
class ContraLoss(nn.Module):
def __init__(self, device, temperature=0.2):
super(ContraLoss, self).__init__()
self.device = device
self.temperature = temperature
def forward(self, features, labels=None):
"""
        If `labels` is None, the loss degenerates to the unsupervised InfoNCE loss
Args:
features: hidden vector of shape [bsz, n_views, dim].
labels: target item of shape [bsz].
Returns:
A loss scalar.
"""
batch_size = features.shape[0]
if labels is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(self.device)
else:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.transpose(0, 1)).float().to(self.device)
contrast_count = features.shape[1] # n_views
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0) # bsz * n_views, -1
# compute logits
        anchor_dot_contrast = torch.matmul(contrast_feature, contrast_feature.transpose(0, 1)) / self.temperature
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach() # bsz * n_views, bsz * n_views
# tile mask
mask = mask.repeat(contrast_count, contrast_count) # bsz * n_views, bsz * n_views
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask), 1,
torch.arange(mask.shape[0]).view(-1, 1).to(self.device), 0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-10)
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / (mask.sum(1) + 1e-10)
# loss
loss = - self.temperature * mean_log_prob_pos
        return loss.mean()
| 20,256 | 47.577938 | 116 | py |
ReChorus | ReChorus-master/src/models/sequential/TiMiRec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" TiMiRec
Reference:
"Target Interest Distillation for Multi-Interest Recommendation"
Wang et al., CIKM'2022.
CMD example:
python main.py --model_name TiMiRec --dataset Grocery_and_Gourmet_Food \
--emb_size 64 --lr 1e-4 --l2 1e-6 --history_max 20 --K 6 \
--add_pos 1 --add_trm 1 --stage pretrain
python main.py --model_name TiMiRec --dataset Grocery_and_Gourmet_Food \
--emb_size 64 --lr 1e-4 --l2 1e-6 --history_max 20 --K 6 \
--add_pos 1 --add_trm 1 --stage finetune --temp 1 --n_layers 1
"""
import os
import logging
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.BaseModel import SequentialModel
from utils import layers
class TiMiRec(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['emb_size', 'attn_size', 'K', 'temp', 'add_pos', 'add_trm', 'n_layers']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--attn_size', type=int, default=8,
help='Size of attention vectors.')
parser.add_argument('--K', type=int, default=2,
help='Number of hidden interests.')
parser.add_argument('--add_pos', type=int, default=1,
help='Whether add position embedding in extractor.')
parser.add_argument('--add_trm', type=int, default=1,
help='Whether add the transformer layer in extractor.')
parser.add_argument('--temp', type=float, default=1,
help='Temperature in knowledge distillation loss.')
parser.add_argument('--n_layers', type=int, default=1,
help='Number of the projection layer.')
parser.add_argument('--stage', type=str, default='finetune',
help='Training stage: pretrain / finetune.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.attn_size = args.attn_size
self.K = args.K
self.add_pos = args.add_pos
self.add_trm = args.add_trm
self.temp = args.temp
self.n_layers = args.n_layers
self.stage = args.stage
self.max_his = args.history_max
self._define_params()
self.apply(self.init_weights)
self.extractor_path = '../model/TiMiRec/Extractor__{}__{}__emb_size={}__K={}__add_pos={}__add_trm={}.pt' \
.format(corpus.dataset, args.random_seed, self.emb_size, self.K, self.add_pos, self.add_trm)
if self.stage == 'pretrain':
self.model_path = self.extractor_path
elif self.stage == 'finetune':
if os.path.exists(self.extractor_path):
self.load_model(self.extractor_path)
else:
logging.info('Train from scratch!')
else:
raise ValueError('Invalid stage: ' + self.stage)
def _define_params(self):
self.interest_extractor = MultiInterestExtractor(
self.K, self.item_num, self.emb_size, self.attn_size, self.max_his, self.add_pos, self.add_trm)
if self.stage == 'finetune':
self.interest_predictor = InterestPredictor(self.item_num, self.emb_size)
self.proj = nn.Sequential()
            for i in range(self.n_layers - 1):
self.proj.add_module('proj_' + str(i), nn.Linear(self.emb_size, self.emb_size))
self.proj.add_module('dropout_' + str(i), nn.Dropout(p=0.5))
self.proj.add_module('relu_' + str(i), nn.ReLU(inplace=True))
self.proj.add_module('proj_final', nn.Linear(self.emb_size, self.K))
def load_model(self, model_path=None):
if model_path is None:
model_path = self.model_path
model_dict = self.state_dict()
state_dict = torch.load(model_path)
exist_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
model_dict.update(exist_state_dict)
self.load_state_dict(model_dict)
logging.info('Load model from ' + model_path)
@staticmethod
def similarity(a, b): # cosine similarity
a = F.normalize(a, dim=-1)
b = F.normalize(b, dim=-1)
return (a * b).sum(dim=-1)
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # bsz, -1
history = feed_dict['history_items'] # bsz, max_his
lengths = feed_dict['lengths'] # bsz
batch_size, seq_len = history.shape
out_dict = dict()
if self.stage == 'pretrain': # pretrain extractor
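            # Pretraining trains the extractor alone: each training target is routed to
            # its best-matching interest; at evaluation, items are scored by the max over K.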
interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb
i_vectors = self.interest_extractor.i_embeddings(i_ids)
if feed_dict['phase'] == 'train':
target_vector = i_vectors[:, 0] # bsz, emb
target_intent = (interest_vectors * target_vector[:, None, :]).sum(-1) # bsz, K
idx_select = target_intent.max(-1)[1] # bsz
user_vector = interest_vectors[torch.arange(batch_size), idx_select, :] # bsz, emb
prediction = (user_vector[:, None, :] * i_vectors).sum(-1)
else:
prediction = (interest_vectors[:, None, :, :] * i_vectors[:, :, None, :]).sum(-1) # bsz, -1, K
prediction = prediction.max(-1)[0] # bsz, -1
else: # finetune
interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb
i_vectors = self.interest_extractor.i_embeddings(i_ids)
his_vector = self.interest_predictor(history, lengths)
pred_intent = self.proj(his_vector) # bsz, K
if feed_dict['phase'] == 'train':
target_vector = i_vectors[:, 0] # bsz, emb
target_intent = self.similarity(interest_vectors, target_vector.unsqueeze(1)) # bsz, K
out_dict['pred_intent'] = pred_intent
out_dict['target_intent'] = target_intent
self.check_list.append(('pred_intent', pred_intent.softmax(-1)))
self.check_list.append(('target_intent', target_intent.softmax(-1)))
user_vector = (interest_vectors * pred_intent.softmax(-1)[:, :, None]).sum(-2) # bsz, emb
prediction = (user_vector[:, None, :] * i_vectors).sum(-1)
out_dict['prediction'] = prediction.view(batch_size, -1)
return out_dict
def loss(self, out_dict: dict):
if self.stage == 'pretrain': # pretrain
loss = super().loss(out_dict)
else: # finetune
pred_intent = out_dict['pred_intent'] / self.temp
target_intent = out_dict['target_intent'].detach() / self.temp
kl_criterion = nn.KLDivLoss(reduction='batchmean')
loss = kl_criterion(F.log_softmax(pred_intent, dim=1), F.softmax(target_intent, dim=1))
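            # Scale the distillation term by temp^2 so its gradient magnitude stays
            # comparable to the recommendation loss (standard practice in knowledge
            # distillation, cf. Hinton et al., 2015).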
loss = super().loss(out_dict) + self.temp * self.temp * loss
return loss
class MultiInterestExtractor(nn.Module):
def __init__(self, k, item_num, emb_size, attn_size, max_his, add_pos, add_trm):
super(MultiInterestExtractor, self).__init__()
self.max_his = max_his
self.add_pos = add_pos
self.add_trm = add_trm
self.i_embeddings = nn.Embedding(item_num, emb_size)
if self.add_pos:
self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
self.W1 = nn.Linear(emb_size, attn_size)
self.W2 = nn.Linear(attn_size, k)
if self.add_trm:
self.transformer = layers.TransformerLayer(d_model=emb_size, d_ff=emb_size, n_heads=1, kq_same=False)
def forward(self, history, lengths):
batch_size, seq_len = history.shape
valid_his = (history > 0).long()
his_vectors = self.i_embeddings(history)
if self.add_pos:
len_range = torch.from_numpy(np.arange(self.max_his)).to(history.device)
position = (lengths[:, None] - len_range[None, :seq_len]) * valid_his
pos_vectors = self.p_embeddings(position)
his_vectors = his_vectors + pos_vectors
if self.add_trm:
attn_mask = valid_his.view(batch_size, 1, 1, seq_len)
his_vectors = self.transformer(his_vectors, attn_mask)
his_vectors = his_vectors * valid_his[:, :, None].float()
# Multi-Interest Extraction
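        # Additive attention with K output channels: each channel softmax-pools the
        # history into one interest vector, giving a K-vector user representation.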
attn_score = self.W2(self.W1(his_vectors).tanh()) # bsz, his_max, K
attn_score = attn_score.masked_fill(valid_his.unsqueeze(-1) == 0, -np.inf)
attn_score = attn_score.transpose(-1, -2) # bsz, K, his_max
attn_score = (attn_score - attn_score.max()).softmax(dim=-1)
attn_score = attn_score.masked_fill(torch.isnan(attn_score), 0)
interest_vectors = (his_vectors[:, None, :, :] * attn_score[:, :, :, None]).sum(-2) # bsz, K, emb
return interest_vectors
class InterestPredictor(nn.Module):
def __init__(self, item_num, emb_size):
super(InterestPredictor, self).__init__()
self.i_embeddings = nn.Embedding(item_num + 1, emb_size)
self.rnn = nn.GRU(input_size=emb_size, hidden_size=emb_size, batch_first=True)
def forward(self, history, lengths):
his_vectors = self.i_embeddings(history)
sort_lengths, sort_idx = torch.topk(lengths, k=len(lengths))
sort_seq = his_vectors.index_select(dim=0, index=sort_idx)
seq_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_seq, sort_lengths.cpu(), batch_first=True)
output, hidden = self.rnn(seq_packed, None)
unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
his_vector = hidden[-1].index_select(dim=0, index=unsort_idx)
return his_vector
| 10,124 | 45.875 | 114 | py |
ReChorus | ReChorus-master/src/models/sequential/ContraRec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" ContraRec
Reference:
"Sequential Recommendation with Multiple Contrast Signals"
Wang et al., TOIS'2022.
CMD example:
python main.py --model_name ContraRec --emb_size 64 --lr 1e-4 --l2 1e-6 --history_max 20 --encoder BERT4Rec \
--num_neg 1 --ctc_temp 1 --ccc_temp 0.2 --batch_size 4096 --gamma 1 --dataset Grocery_and_Gourmet_Food
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
class ContraRec(SequentialModel):
reader = 'SeqReader'
runner = 'BaseRunner'
extra_log_args = ['gamma', 'num_neg', 'batch_size', 'ctc_temp', 'ccc_temp', 'encoder']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--gamma', type=float, default=1,
help='Coefficient of the contrastive loss.')
parser.add_argument('--beta_a', type=int, default=3,
help='Parameter of the beta distribution for sampling.')
parser.add_argument('--beta_b', type=int, default=3,
help='Parameter of the beta distribution for sampling.')
parser.add_argument('--ctc_temp', type=float, default=1,
help='Temperature in context-target contrastive loss.')
parser.add_argument('--ccc_temp', type=float, default=0.2,
help='Temperature in context-context contrastive loss.')
parser.add_argument('--encoder', type=str, default='BERT4Rec',
help='Choose a sequence encoder: GRU4Rec, Caser, BERT4Rec.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
super().__init__(args, corpus)
self.emb_size = args.emb_size
self.max_his = args.history_max
self.gamma = args.gamma
self.beta_a = args.beta_a
self.beta_b = args.beta_b
self.ctc_temp = args.ctc_temp
self.ccc_temp = args.ccc_temp
self.encoder_name = args.encoder
self.mask_token = corpus.n_items
self._define_params()
self.apply(self.init_weights)
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num + 1, self.emb_size)
if self.encoder_name == 'GRU4Rec':
self.encoder = GRU4RecEncoder(self.emb_size, hidden_size=128)
elif self.encoder_name == 'Caser':
self.encoder = CaserEncoder(self.emb_size, self.max_his, num_horizon=16, num_vertical=8, l=5)
elif self.encoder_name == 'BERT4Rec':
self.encoder = BERT4RecEncoder(self.emb_size, self.max_his, num_layers=2, num_heads=2)
else:
raise ValueError('Invalid sequence encoder.')
self.ccc_loss = ContraLoss(self.device, temperature=self.ccc_temp)
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # bsz, n_candidate
history = feed_dict['history_items'] # bsz, history_max
lengths = feed_dict['lengths'] # bsz
his_vectors = self.i_embeddings(history)
his_vector = self.encoder(his_vectors, lengths)
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
history_a = feed_dict['history_items_a']
his_a_vectors = self.i_embeddings(history_a)
his_a_vector = self.encoder(his_a_vectors, lengths)
history_b = feed_dict['history_items_b']
his_b_vectors = self.i_embeddings(history_b)
his_b_vector = self.encoder(his_b_vectors, lengths)
features = torch.stack([his_a_vector, his_b_vector], dim=1) # bsz, 2, emb
features = F.normalize(features, dim=-1)
out_dict['features'] = features # bsz, 2, emb
out_dict['labels'] = i_ids[:, 0] # bsz
return out_dict
def loss(self, out_dict):
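        # Context-Target Contrastive (CTC) loss: a temperature-scaled softmax
        # cross-entropy over the candidate items, where column 0 is the positive.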
predictions = out_dict['prediction'] / self.ctc_temp
pre_softmax = (predictions - predictions.max()).softmax(dim=1)
ctc_loss = - self.ctc_temp * pre_softmax[:, 0].log().mean()
ccc_loss = self.ccc_loss(out_dict['features'], labels=out_dict['labels'])
loss = ctc_loss + self.gamma * ccc_loss
return loss
class Dataset(SequentialModel.Dataset):
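        # Two stochastic augmentations generate the contrastive views: mask_op
        # replaces a Beta-sampled fraction of items with the mask token, while
        # reorder_op shuffles a randomly located sub-sequence of Beta-sampled length.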
def reorder_op(self, seq):
ratio = np.random.beta(a=self.model.beta_a, b=self.model.beta_b)
select_len = int(len(seq) * ratio)
start = np.random.randint(0, len(seq) - select_len + 1)
idx_range = np.arange(len(seq))
np.random.shuffle(idx_range[start: start + select_len])
return seq[idx_range]
def mask_op(self, seq):
ratio = np.random.beta(a=self.model.beta_a, b=self.model.beta_b)
selected_len = int(len(seq) * ratio)
mask = np.full(len(seq), False)
mask[:selected_len] = True
np.random.shuffle(mask)
seq[mask] = self.model.mask_token
return seq
def augment(self, seq):
aug_seq = np.array(seq).copy()
if np.random.rand() > 0.5:
return self.mask_op(aug_seq)
else:
return self.reorder_op(aug_seq)
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
if self.phase == 'train':
history_items_a = self.augment(feed_dict['history_items'])
history_items_b = self.augment(feed_dict['history_items'])
feed_dict['history_items_a'] = history_items_a
feed_dict['history_items_b'] = history_items_b
return feed_dict
""" Context-Context Contrastive Loss """
class ContraLoss(nn.Module):
def __init__(self, device, temperature=0.2):
super(ContraLoss, self).__init__()
self.device = device
self.temperature = temperature
def forward(self, features, labels=None):
"""
        If `labels` is None, the loss degenerates to the unsupervised InfoNCE loss
Args:
features: hidden vector of shape [bsz, n_views, dim].
labels: target item of shape [bsz].
Returns:
A loss scalar.
"""
batch_size = features.shape[0]
if labels is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(self.device)
else:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.transpose(0, 1)).float().to(self.device)
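        # Positive pairs: the two augmented views of each sequence, plus any other
        # sequences in the batch whose target item is the same (supervised contrast).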
contrast_count = features.shape[1] # n_views
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0) # bsz * n_views, -1
# compute logits
        anchor_dot_contrast = torch.matmul(contrast_feature, contrast_feature.transpose(0, 1)) / self.temperature
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach() # bsz * n_views, bsz * n_views
# tile mask
mask = mask.repeat(contrast_count, contrast_count) # bsz * n_views, bsz * n_views
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask), 1,
torch.arange(mask.shape[0]).view(-1, 1).to(self.device), 0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-10)
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / (mask.sum(1) + 1e-10)
# loss
loss = - self.temperature * mean_log_prob_pos
return loss.mean()
""" Encoder Layers """
class GRU4RecEncoder(nn.Module):
def __init__(self, emb_size, hidden_size=128):
super().__init__()
self.rnn = nn.GRU(input_size=emb_size, hidden_size=hidden_size, batch_first=True)
self.out = nn.Linear(hidden_size, emb_size, bias=False)
def forward(self, seq, lengths):
# Sort and Pack
sort_lengths, sort_idx = torch.topk(lengths, k=len(lengths))
sort_seq = seq.index_select(dim=0, index=sort_idx)
seq_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_seq, sort_lengths.cpu(), batch_first=True)
# RNN
output, hidden = self.rnn(seq_packed, None)
# Unsort
sort_rnn_vector = self.out(hidden[-1])
unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
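        # topk with largest=False over sort_idx yields the inverse permutation,
        # undoing the length-descending sort that pack_padded_sequence requires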
rnn_vector = sort_rnn_vector.index_select(dim=0, index=unsort_idx)
return rnn_vector
class CaserEncoder(nn.Module):
def __init__(self, emb_size, max_his, num_horizon=16, num_vertical=8, l=5):
super().__init__()
self.max_his = max_his
lengths = [i + 1 for i in range(l)]
self.conv_h = nn.ModuleList(
[nn.Conv2d(in_channels=1, out_channels=num_horizon, kernel_size=(i, emb_size)) for i in lengths])
self.conv_v = nn.Conv2d(in_channels=1, out_channels=num_vertical, kernel_size=(max_his, 1))
self.fc_dim_h = num_horizon * len(lengths)
self.fc_dim_v = num_vertical * emb_size
fc_dim_in = self.fc_dim_v + self.fc_dim_h
self.fc = nn.Linear(fc_dim_in, emb_size)
def forward(self, seq, lengths):
batch_size, seq_len = seq.size(0), seq.size(1)
pad_len = self.max_his - seq_len
seq = F.pad(seq, [0, 0, 0, pad_len]).unsqueeze(1)
# Convolution Layers
out_v = self.conv_v(seq).view(-1, self.fc_dim_v)
out_hs = list()
for conv in self.conv_h:
conv_out = conv(seq).squeeze(3).relu()
pool_out = F.max_pool1d(conv_out, conv_out.size(2)).squeeze(2)
out_hs.append(pool_out)
out_h = torch.cat(out_hs, 1)
# Fully-connected Layers
his_vector = self.fc(torch.cat([out_v, out_h], 1))
return his_vector
class BERT4RecEncoder(nn.Module):
def __init__(self, emb_size, max_his, num_layers=2, num_heads=2):
super().__init__()
self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
self.transformer_block = nn.ModuleList([
layers.TransformerLayer(d_model=emb_size, d_ff=emb_size, n_heads=num_heads)
for _ in range(num_layers)
])
def forward(self, seq, lengths):
batch_size, seq_len = seq.size(0), seq.size(1)
len_range = torch.from_numpy(np.arange(seq_len)).to(seq.device)
valid_mask = len_range[None, :] < lengths[:, None]
# Position embedding
position = len_range[None, :] * valid_mask.long()
pos_vectors = self.p_embeddings(position)
seq = seq + pos_vectors
# Self-attention
attn_mask = valid_mask.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
seq = block(seq, attn_mask)
seq = seq * valid_mask[:, :, None].float()
his_vector = seq[torch.arange(batch_size), lengths - 1]
return his_vector
| 11,519 | 40.588448 | 113 | py |