code stringlengths 17 6.64M |
|---|
class ISAB(nn.Module):
    """Induced Set Attention Block.

    Attends a small learned set of inducing points over the input set and
    then lets the input attend back, reducing attention cost from O(n^2)
    to O(n * num_inds).
    """

    def __init__(self, dim_in, dim_out, num_heads, num_inds, ln=False):
        super().__init__()
        # Learned inducing points, shared across the batch.
        self.I = nn.Parameter(torch.Tensor(1, num_inds, dim_out))
        nn.init.xavier_uniform_(self.I)
        self.mab0 = MAB(dim_out, dim_in, dim_out, num_heads, ln=ln)
        self.mab1 = MAB(dim_in, dim_out, dim_out, num_heads, ln=ln)

    def forward(self, X):
        batch_size = X.size(0)
        # Inducing points summarize the input set ...
        induced = self.mab0(self.I.repeat(batch_size, 1, 1), X)
        # ... then every input element attends over that summary.
        return self.mab1(X, induced)
|
class PMA(nn.Module):
    """Pooling by Multihead Attention.

    Pools an input set into ``num_seeds`` output vectors by attending
    learned seed vectors over the set elements.
    """

    def __init__(self, dim, num_heads, num_seeds, ln=False):
        super().__init__()
        # Learned seed vectors, shared across the batch.
        self.S = nn.Parameter(torch.Tensor(1, num_seeds, dim))
        nn.init.xavier_uniform_(self.S)
        self.mab = MAB(dim, dim, dim, num_heads, ln=ln)

    def forward(self, X):
        seeds = self.S.repeat(X.size(0), 1, 1)
        return self.mab(seeds, X)
|
def generate_benchmark():
    """Sample a fixed benchmark of mixture-of-Gaussians batches and save it,
    together with the oracle average log-likelihood, to ``benchfile``.

    Relies on module-level globals: ``args``, ``mog``, ``B``, ``K``,
    ``N_min``, ``N_max`` and ``benchfile``.
    """
    if (not os.path.isdir('benchmark')):
        os.makedirs('benchmark')
    # Random set size for each benchmark batch.
    N_list = np.random.randint(N_min, N_max, args.num_bench)
    data = []
    ll = 0.0  # accumulated oracle log-likelihood across batches
    for N in tqdm(N_list):
        (X, labels, pi, params) = mog.sample(B, N, K, return_gt=True)
        # Likelihood under the ground-truth mixture parameters (the oracle).
        ll += mog.log_prob(X, pi, params).item()
        data.append(X)
    bench = [data, (ll / args.num_bench)]
    torch.save(bench, benchfile)
|
def train():
    """Train ``net`` on freshly sampled MoG sets, evaluating against the
    saved benchmark every ``args.test_freq`` steps.

    Relies on module-level globals: ``net``, ``args``, ``mog``, ``mvn``,
    ``B``, ``K``, ``N_min``, ``N_max``, ``save_dir`` and ``benchfile``.
    """
    if (not os.path.isdir(save_dir)):
        os.makedirs(save_dir)
    # Generate the fixed evaluation benchmark once, then reuse it.
    if (not os.path.isfile(benchfile)):
        generate_benchmark()
    bench = torch.load(benchfile)
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(args.run_name)
    logger.addHandler(logging.FileHandler(os.path.join(save_dir, (('train_' + time.strftime('%Y%m%d-%H%M')) + '.log')), mode='w'))
    logger.info((str(args) + '\n'))
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    tick = time.time()
    for t in range(1, (args.num_steps + 1)):
        # One-time learning-rate decay (x0.1) at the halfway point.
        if (t == int((0.5 * args.num_steps))):
            optimizer.param_groups[0]['lr'] *= 0.1
        net.train()
        optimizer.zero_grad()
        # Fresh random set size and batch every step.
        N = np.random.randint(N_min, N_max)
        X = mog.sample(B, N, K)
        # Maximize data log-likelihood under the predicted mixture params.
        ll = mog.log_prob(X, *mvn.parse(net(X)))
        loss = (- ll)
        loss.backward()
        optimizer.step()
        if ((t % args.test_freq) == 0):
            line = 'step {}, lr {:.3e}, '.format(t, optimizer.param_groups[0]['lr'])
            line += test(bench, verbose=False)
            line += ' ({:.3f} secs)'.format((time.time() - tick))
            tick = time.time()
            logger.info(line)
        # Periodic checkpoint.
        if ((t % args.save_freq) == 0):
            torch.save({'state_dict': net.state_dict()}, os.path.join(save_dir, 'model.tar'))
    # Final checkpoint after the last step. NOTE(review): nesting of this
    # duplicate save was ambiguous in the flattened source — TODO confirm.
    torch.save({'state_dict': net.state_dict()}, os.path.join(save_dir, 'model.tar'))
|
def test(bench, verbose=True):
    """Evaluate ``net`` on the fixed benchmark and return a summary line.

    ``bench`` is ``[data, oracle_ll]`` as produced by generate_benchmark().
    NOTE(review): no ``torch.no_grad()`` guard here — evaluation builds
    autograd graphs; presumably acceptable for this script, verify.
    """
    net.eval()
    (data, oracle_ll) = bench
    avg_ll = 0.0
    for X in data:
        X = X.cuda()
        avg_ll += mog.log_prob(X, *mvn.parse(net(X))).item()
    avg_ll /= len(data)
    line = 'test ll {:.4f} (oracle {:.4f})'.format(avg_ll, oracle_ll)
    if verbose:
        # NOTE(review): each verbose call adds another FileHandler to the
        # same logger, duplicating output on repeated calls.
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(args.run_name)
        logger.addHandler(logging.FileHandler(os.path.join(save_dir, 'test.log'), mode='w'))
        logger.info(line)
    return line
|
def plot():
    """Sample one random batch, cluster it with the trained ``net`` and
    visualize the resulting mixture assignments with matplotlib."""
    net.eval()
    X = mog.sample(B, np.random.randint(N_min, N_max), K)
    # Predicted mixture weights and component parameters.
    (pi, params) = mvn.parse(net(X))
    (ll, labels) = mog.log_prob(X, pi, params, return_labels=True)
    # One subplot per batch element, arranged in two rows.
    (fig, axes) = plt.subplots(2, (B // 2), figsize=(((7 * B) // 5), 5))
    mog.plot(X, labels, params, axes)
    plt.show()
|
class Logger():
    ' Writes results of training/testing '
    @classmethod
    def initialize(cls, args, training):
        """Set up the log directory, file + console logging and a
        TensorBoard writer; dumps all CLI args when training."""
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        # Test runs derive their log name from the loaded checkpoint file.
        logpath = (args.logpath if training else (('_TEST_' + args.load.split('/')[(- 1)].split('.')[0]) + logtime))
        if (logpath == ''):
            logpath = logtime
        # NOTE(review): ends in '.log' but is used as a directory name.
        cls.logpath = os.path.join('logs', (logpath + '.log'))
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)
        logging.basicConfig(filemode='w', filename=os.path.join(cls.logpath, 'log.txt'), level=logging.INFO, format='%(message)s', datefmt='%m-%d %H:%M:%S')
        # Mirror all log records to the console as well.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))
        if training:
            logging.info(':======== Convolutional Hough Matching Networks =========')
            for arg_key in args.__dict__:
                logging.info(('| %20s: %-24s' % (arg_key, str(args.__dict__[arg_key]))))
            logging.info(':========================================================\n')
    @classmethod
    def info(cls, msg):
        ' Writes message to .txt '
        logging.info(msg)
    @classmethod
    def save_model(cls, model, epoch, val_pck):
        """Persist current best weights and log the validation milestone."""
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'pck_best_model.pt'))
        cls.info(('Model saved @%d w/ val. PCK: %5.2f.\n' % (epoch, val_pck)))
|
class AverageMeter():
    ' Stores loss, evaluation results, selected layers '
    def __init__(self, benchamrk):
        """Constructor of AverageMeter.

        NOTE(review): ``benchamrk`` (sic) is accepted but never used here;
        only the 'pck' metric buffer is created.
        """
        self.buffer_keys = ['pck']
        self.buffer = {}
        for key in self.buffer_keys:
            self.buffer[key] = []
        self.loss_buffer = []
    def update(self, eval_result, loss=None):
        """Append per-sample metric lists from ``eval_result`` and,
        optionally, a scalar loss value."""
        for key in self.buffer_keys:
            # eval_result[key] is a list — extend, don't append.
            self.buffer[key] += eval_result[key]
        if (loss is not None):
            self.loss_buffer.append(loss)
    def write_result(self, split, epoch):
        """Log epoch-level averages of loss and all buffered metrics."""
        msg = ('\n*** %s ' % split)
        msg += ('[@Epoch %02d] ' % epoch)
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %5.2f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('%s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        msg += '***\n'
        Logger.info(msg)
    def write_process(self, batch_idx, datalen, epoch):
        """Log running (last + average) loss and average metrics for one
        training batch; metric averages are reported as percentages."""
        msg = ('[Epoch: %02d] ' % epoch)
        msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %5.2f ' % self.loss_buffer[(- 1)])
            msg += ('Avg Loss: %5.5f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('Avg %s: %5.2f ' % (key.upper(), ((sum(self.buffer[key]) / len(self.buffer[key])) * 100)))
        Logger.info(msg)
    def write_test_process(self, batch_idx, datalen):
        """Log running test metrics; 'pck' entries are stacked tensors with
        one value per threshold, reported individually."""
        msg = ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        for key in self.buffer_keys:
            if (key == 'pck'):
                pcks = (torch.stack(self.buffer[key]).mean(dim=0) * 100)
                val = ''
                for p in pcks:
                    val += ('%5.2f ' % p.item())
                msg += ('Avg %s: %s ' % (key.upper(), val))
            else:
                msg += ('Avg %s: %5.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        Logger.info(msg)
    def get_test_result(self):
        """Return per-threshold mean metrics (as percentages) keyed by
        metric name."""
        result = {}
        for key in self.buffer_keys:
            result[key] = (torch.stack(self.buffer[key]).mean(dim=0) * 100)
        return result
|
class CorrespondenceDataset(Dataset):
    ' Parent class of PFPascal, PFWillow, and SPair '
    def __init__(self, benchmark, datapath, thres, split):
        """CorrespondenceDataset constructor.

        ``benchmark``: one of 'pfwillow', 'pfpascal', 'spair'.
        ``thres``: PCK threshold type ('auto' picks the benchmark default).
        """
        super(CorrespondenceDataset, self).__init__()
        # Per-benchmark layout: (root dir, split file, image dir,
        # annotation dir, default PCK threshold type).
        self.metadata = {'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'), 'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'), 'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')}
        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        # Split-file location differs per benchmark.
        if (benchmark == 'pfpascal'):
            self.spt_path = os.path.join(base_path, (split + '_pairs.csv'))
        elif (benchmark == 'spair'):
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], (split + '.txt'))
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        if (benchmark == 'spair'):
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])
        # Keypoint tensors are padded to a fixed width for batching.
        self.max_pts = 40
        self.split = split
        self.img_size = Geometry.img_size
        self.benchmark = benchmark
        self.range_ts = torch.arange(self.max_pts)
        self.thres = (self.metadata[benchmark][4] if (thres == 'auto') else thres)
        # Standard ImageNet normalization after resizing to a square.
        self.transform = transforms.Compose([transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # Populated by subclasses when parsing the split/annotation files.
        self.train_data = []
        self.src_imnames = []
        self.trg_imnames = []
        self.cls = []
        self.cls_ids = []
        self.src_kps = []
        self.trg_kps = []
    def __len__(self):
        ' Returns the number of pairs '
        return len(self.train_data)
    def __getitem__(self, idx):
        """Construct and return one source/target pair with resized images
        and rescaled, padded keypoints."""
        batch = dict()
        batch['src_imname'] = self.src_imnames[idx]
        batch['trg_imname'] = self.trg_imnames[idx]
        batch['category_id'] = self.cls_ids[idx]
        batch['category'] = self.cls[batch['category_id']]
        src_pil = self.get_image(self.src_imnames, idx)
        trg_pil = self.get_image(self.trg_imnames, idx)
        # Original (pre-resize) sizes, needed to rescale keypoints.
        batch['src_imsize'] = src_pil.size
        batch['trg_imsize'] = trg_pil.size
        batch['src_img'] = self.transform(src_pil)
        batch['trg_img'] = self.transform(trg_pil)
        (batch['src_kps'], num_pts) = self.get_points(self.src_kps, idx, src_pil.size)
        (batch['trg_kps'], _) = self.get_points(self.trg_kps, idx, trg_pil.size)
        batch['n_pts'] = torch.tensor(num_pts)
        batch['datalen'] = len(self.train_data)
        return batch
    def get_image(self, imnames, idx):
        ' Reads PIL image from path '
        path = os.path.join(self.img_path, imnames[idx])
        return Image.open(path).convert('RGB')
    def get_pckthres(self, batch, imsize):
        """Compute the PCK threshold: max bbox side ('bbox') or max image
        side ('img'); raises for any other threshold type."""
        if (self.thres == 'bbox'):
            bbox = batch['trg_bbox'].clone()
            bbox_w = (bbox[2] - bbox[0])
            bbox_h = (bbox[3] - bbox[1])
            pckthres = torch.max(bbox_w, bbox_h)
        elif (self.thres == 'img'):
            imsize_t = batch['trg_img'].size()
            pckthres = torch.tensor(max(imsize_t[1], imsize_t[2]))
        else:
            raise Exception(('Invalid pck threshold type: %s' % self.thres))
        return pckthres.float()
    def get_points(self, pts_list, idx, org_imsize):
        """Rescale an image's (2, n) keypoints to the resized image and pad
        to ``max_pts`` columns with the sentinel value -2."""
        (xy, n_pts) = pts_list[idx].size()
        pad_pts = (torch.zeros((xy, (self.max_pts - n_pts))) - 2)
        x_crds = (pts_list[idx][0] * (self.img_size / org_imsize[0]))
        y_crds = (pts_list[idx][1] * (self.img_size / org_imsize[1]))
        kps = torch.cat([torch.stack([x_crds, y_crds]), pad_pts], dim=1)
        return (kps, n_pts)
|
def load_dataset(benchmark, datapath, thres, split='test'):
    """Instantiate the correspondence dataset registered for ``benchmark``.

    Raises ``Exception`` if the benchmark name is unknown.
    """
    registry = {
        'spair': spair.SPairDataset,
        'pfpascal': pfpascal.PFPascalDataset,
        'pfwillow': pfwillow.PFWillowDataset,
    }
    dataset_cls = registry.get(benchmark)
    if dataset_cls is None:
        raise Exception(('Invalid benchmark dataset %s.' % benchmark))
    return dataset_cls(benchmark, datapath, thres, split)
|
def download_from_google(token_id, filename):
    """Download ``<filename>.tar.gz`` from Google Drive by file id, extract
    it into ``filename`` and flatten the tarball's single top-level folder."""
    print(('Downloading %s ...' % os.path.basename(filename)))
    url = 'https://docs.google.com/uc?export=download'
    destination = (filename + '.tar.gz')
    session = requests.Session()
    # First request; large files return a confirmation page instead.
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Re-request with the confirmation token to get the real payload.
        params = {'id': token_id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
    save_response_content(response, destination)
    file = tarfile.open(destination, 'r:gz')
    print(('Extracting %s ...' % destination))
    file.extractall(filename)
    file.close()
    os.remove(destination)
    # The archive contains one top-level folder; move its contents up so
    # ``filename`` points directly at the dataset root.
    os.rename(filename, (filename + '_tmp'))
    os.rename(os.path.join((filename + '_tmp'), os.path.basename(filename)), filename)
    os.rmdir((filename + '_tmp'))
|
def get_confirm_token(response):
    """Return the value of Google Drive's download-warning cookie, or None
    when no confirmation is required."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
|
def save_response_content(response, destination):
    """Stream the HTTP response body to ``destination`` in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, 'wb') as out:
        # Empty chunks are keep-alive markers; skip them.
        for chunk in filter(None, response.iter_content(chunk_size)):
            out.write(chunk)
|
def download_dataset(datapath, benchmark):
    """Download and extract a correspondence benchmark from Google Drive
    unless it is already present under ``datapath``."""
    if not os.path.isdir(datapath):
        os.mkdir(datapath)
    # Google Drive file id and local folder name per benchmark.
    gdrive_files = {
        'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k'),
        'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'),
        'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'),
    }
    file_id, filename = gdrive_files[benchmark]
    target_dir = os.path.join(datapath, filename)
    if not os.path.isdir(target_dir):
        download_from_google(file_id, target_dir)
|
class SPairDataset(CorrespondenceDataset):
    def __init__(self, benchmark, datapath, thres, split):
        """SPair-71k dataset constructor: parses the split file and all
        per-pair JSON annotations (keypoints, bboxes, difficulty flags)."""
        super(SPairDataset, self).__init__(benchmark, datapath, thres, split)
        self.train_data = open(self.spt_path).read().split('\n')
        # Drop the trailing empty line produced by the final newline.
        self.train_data = self.train_data[:(len(self.train_data) - 1)]
        # Pair ids encode '<pairid>-<src>-<trg>:<category>'.
        self.src_imnames = list(map((lambda x: (x.split('-')[1] + '.jpg')), self.train_data))
        self.trg_imnames = list(map((lambda x: (x.split('-')[2].split(':')[0] + '.jpg')), self.train_data))
        self.seg_path = os.path.abspath(os.path.join(self.img_path, os.pardir, 'Segmentation'))
        # Category names are the image sub-directories, sorted for stable ids.
        self.cls = os.listdir(self.img_path)
        self.cls.sort()
        anntn_files = []
        for data_name in self.train_data:
            anntn_files.append(glob.glob(('%s/%s.json' % (self.ann_path, data_name)))[0])
        anntn_files = list(map((lambda x: json.load(open(x))), anntn_files))
        # Keypoints stored row-wise in JSON; transpose to (2, n).
        self.src_kps = list(map((lambda x: torch.tensor(x['src_kps']).t().float()), anntn_files))
        self.trg_kps = list(map((lambda x: torch.tensor(x['trg_kps']).t().float()), anntn_files))
        self.src_bbox = list(map((lambda x: torch.tensor(x['src_bndbox']).float()), anntn_files))
        self.trg_bbox = list(map((lambda x: torch.tensor(x['trg_bndbox']).float()), anntn_files))
        self.cls_ids = list(map((lambda x: self.cls.index(x['category'])), anntn_files))
        # Per-pair difficulty attributes used for stratified evaluation.
        self.vpvar = list(map((lambda x: torch.tensor(x['viewpoint_variation'])), anntn_files))
        self.scvar = list(map((lambda x: torch.tensor(x['scale_variation'])), anntn_files))
        self.trncn = list(map((lambda x: torch.tensor(x['truncation'])), anntn_files))
        self.occln = list(map((lambda x: torch.tensor(x['occlusion'])), anntn_files))
    def __getitem__(self, idx):
        """Extend the parent batch with masks, bboxes, PCK threshold and
        the SPair difficulty attributes."""
        sample = super(SPairDataset, self).__getitem__(idx)
        sample['src_mask'] = self.get_mask(sample, sample['src_imname'])
        sample['trg_mask'] = self.get_mask(sample, sample['trg_imname'])
        sample['src_bbox'] = self.get_bbox(self.src_bbox, idx, sample['src_imsize'])
        sample['trg_bbox'] = self.get_bbox(self.trg_bbox, idx, sample['trg_imsize'])
        sample['pckthres'] = self.get_pckthres(sample, sample['trg_imsize'])
        sample['vpvar'] = self.vpvar[idx]
        sample['scvar'] = self.scvar[idx]
        sample['trncn'] = self.trncn[idx]
        sample['occln'] = self.occln[idx]
        return sample
    def get_mask(self, sample, imname):
        """Load the PASCAL-style segmentation, binarize it to the sample's
        category (255 = object, 0 = background) and resize to img_size."""
        mask_path = os.path.join(self.seg_path, sample['category'], (imname.split('.')[0] + '.png'))
        tensor_mask = torch.tensor(np.array(Image.open(mask_path)))
        # PASCAL VOC class indices; segmentation labels are offset by +1
        # (0 is background).
        class_dict = {'aeroplane': 0, 'bicycle': 1, 'bird': 2, 'boat': 3, 'bottle': 4, 'bus': 5, 'car': 6, 'cat': 7, 'chair': 8, 'cow': 9, 'diningtable': 10, 'dog': 11, 'horse': 12, 'motorbike': 13, 'person': 14, 'pottedplant': 15, 'sheep': 16, 'sofa': 17, 'train': 18, 'tvmonitor': 19}
        class_id = (class_dict[sample['category']] + 1)
        # Order matters: zero out non-class pixels first, then mark class.
        tensor_mask[(tensor_mask != class_id)] = 0
        tensor_mask[(tensor_mask == class_id)] = 255
        tensor_mask = F.interpolate(tensor_mask.unsqueeze(0).unsqueeze(0).float(), size=(self.img_size, self.img_size), mode='bilinear', align_corners=True).int().squeeze()
        return tensor_mask
    def get_image(self, img_names, idx):
        """Read a PIL image; SPair stores images in per-category folders."""
        path = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        return Image.open(path).convert('RGB')
    def get_pckthres(self, sample, imsize):
        ' Compute PCK threshold '
        return super(SPairDataset, self).get_pckthres(sample, imsize)
    def get_points(self, pts_list, idx, imsize):
        ' Return key-points of an image '
        return super(SPairDataset, self).get_points(pts_list, idx, imsize)
    def match_idx(self, kps, n_pts):
        """Sample the nearest feature (receptive field) indices.

        NOTE(review): delegates to a parent ``match_idx`` that is not
        visible in this chunk's CorrespondenceDataset — verify it exists.
        """
        return super(SPairDataset, self).match_idx(kps, n_pts)
    def get_bbox(self, bbox_list, idx, imsize):
        """Rescale an (x1, y1, x2, y2) bbox from the original image size to
        the resized square image."""
        bbox = bbox_list[idx].clone()
        bbox[0::2] *= (self.img_size / imsize[0])
        bbox[1::2] *= (self.img_size / imsize[1])
        return bbox
|
def conv3x3(in_planes, out_planes, stride=1):
    """Grouped (groups=2) 3x3 convolution with 1-pixel padding and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=2,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """Grouped (groups=2) 1x1 convolution with no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        groups=2,
        bias=False,
    )
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 with residual), built from
    the grouped conv1x1/conv3x3 helpers (groups=2)."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        # Stride applied in the middle 3x3 conv (torchvision 'B' variant).
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, (planes * self.expansion))
        self.bn3 = nn.BatchNorm2d((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the residual when shape changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        # Residual addition before the final activation.
        out += identity
        out = self.relu(out)
        return out
|
class Backbone(nn.Module):
    """ResNet-style backbone with doubled, grouped (groups=2) channels so a
    source/target image pair can be processed in one forward pass; input has
    6 channels (two RGB images concatenated)."""
    def __init__(self, block, layers, zero_init_residual=False):
        super(Backbone, self).__init__()
        self.inplanes = 128
        # 6 input channels (src+trg RGB), twice the standard 64 filters.
        self.conv1 = nn.Conv2d(6, 128, kernel_size=7, stride=2, padding=3, groups=2, bias=False)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0])
        self.layer2 = self._make_layer(block, 256, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): in_features (512*expansion=2048) does not match
        # layer4's output (1024*expansion=4096); fc appears unused by the
        # feature-extraction pipeline — TODO confirm.
        self.fc = nn.Linear((512 * block.expansion), 1000)
        # Standard He initialization for convs, unit/zero for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last BN of each block so residuals start as identity.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` bottlenecks; the first may downsample/project."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
|
def resnet101(pretrained=False, **kwargs):
    """Construct the doubled-channel ResNet-101 backbone.

    Args:
        pretrained (bool): if True, loads ImageNet weights, duplicating each
            tensor along dim 0 so both conv groups start from the same
            pretrained filters.
    """
    model = Backbone(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # ``model_urls`` is a module-level table of torchvision weight URLs.
        weights = model_zoo.load_url(model_urls['resnet101'])
        for key in weights:
            # fc weights keep their original shape; everything else is
            # doubled to match the groups=2 backbone.
            if (key.split('.')[0] == 'fc'):
                weights[key] = weights[key].clone()
                continue
            weights[key] = torch.cat([weights[key].clone(), weights[key].clone()], dim=0)
        model.load_state_dict(weights)
    return model
|
class KernelGenerator():
    """Builds the parameter-sharing pattern for convolutional Hough matching
    kernels: 4D kernel positions (src_i, src_j, trg_i, trg_j) that should
    share a learnable weight are grouped under a common key."""

    def __init__(self, ksz, ktype):
        self.ksz = ksz
        self.idx4d = Geometry.init_idx4d(ksz)
        self.kernel = torch.zeros((ksz, ksz, ksz, ksz)).cuda()
        self.center = ((ksz // 2), (ksz // 2))
        self.ktype = ktype

    def quadrant(self, crd):
        """Return (horz, vert) signs of ``crd`` relative to the kernel
        center: -1 before the center, +1 past it, 0 on the center line.

        BUGFIX: both branches previously tested ``<`` (copy-paste), so the
        ``+1`` case was unreachable and coordinates past the center
        collapsed into the ``0`` bucket.
        """
        if (crd[0] < self.center[0]):
            horz_quad = (- 1)
        elif (crd[0] > self.center[0]):
            horz_quad = 1
        else:
            horz_quad = 0
        if (crd[1] < self.center[1]):
            vert_quad = (- 1)
        elif (crd[1] > self.center[1]):
            vert_quad = 1
        else:
            vert_quad = 0
        return (horz_quad, vert_quad)

    def generate(self):
        """Return the sharing map, or None for an unconstrained kernel."""
        return (None if (self.ktype == 'full') else self.generate_chm_kernel())

    def generate_chm_kernel(self):
        """Group all 4D positions by their sharing key -> list of flat
        (1-D) coordinates that share one parameter."""
        param_dict = {}
        for idx in self.idx4d:
            (src_i, src_j, trg_i, trg_j) = idx
            # Distances of tail/head from the kernel center, and the offset
            # between source and target positions.
            d_tail = Geometry.get_distance((src_i, src_j), self.center)
            d_head = Geometry.get_distance((trg_i, trg_j), self.center)
            d_off = Geometry.get_distance((src_i, src_j), (trg_i, trg_j))
            (horz_quad, vert_quad) = self.quadrant((src_j, src_i))
            src_crd = (src_i, src_j)
            trg_crd = (trg_i, trg_j)
            key = self.build_key(horz_quad, vert_quad, d_head, d_tail, src_crd, trg_crd, d_off)
            coord1d = Geometry.get_coord1d((src_i, src_j, trg_i, trg_j), self.ksz)
            if (param_dict.get(key) is None):
                param_dict[key] = []
            param_dict[key].append(coord1d)
        return param_dict

    def build_key(self, horz_quad, vert_quad, d_head, d_tail, src_crd, trg_crd, d_off):
        """Build the sharing key: 'iso' shares by offset distance only,
        'psi' additionally by the (order-free) center distances."""
        if (self.ktype == 'iso'):
            return ('%d' % d_off)
        elif (self.ktype == 'psi'):
            d_max = max(d_head, d_tail)
            d_min = min(d_head, d_tail)
            return ('%d_%d_%d' % (d_max, d_min, d_off))
        else:
            raise Exception('not implemented.')
|
class Correlation():
    """Correlation-tensor utilities for semantic matching."""

    @classmethod
    def mutual_nn_filter(cls, correlation_matrix, eps=1e-30):
        """Mutual nearest-neighbour filtering (Rocco et al., NeurIPS'18).

        Rescales each score by its row-max and column-max so that mutually
        best matches keep high values; ``eps`` avoids division by zero.
        """
        corr_src_max = torch.max(correlation_matrix, dim=2, keepdim=True)[0]
        corr_trg_max = torch.max(correlation_matrix, dim=1, keepdim=True)[0]
        corr_src_max[(corr_src_max == 0)] += eps
        corr_trg_max[(corr_trg_max == 0)] += eps
        corr_src = (correlation_matrix / corr_src_max)
        corr_trg = (correlation_matrix / corr_trg_max)
        return (correlation_matrix * (corr_src * corr_trg))

    @classmethod
    def build_correlation6d(cls, src_feat, trg_feat, scales, conv2ds):
        """Build a 6-dimensional multi-scale correlation tensor.

        Fixed: first parameter was named ``self`` despite the
        ``@classmethod`` decorator; renamed to ``cls`` (callers are
        unaffected — they invoke the bound method positionally).
        """
        (bsz, _, side, side) = src_feat.size()
        _src_feats = []
        _trg_feats = []
        # Resize features to each scale and reduce channels per-scale.
        # NOTE(review): ``resize`` is a module-level helper (presumably an
        # F.interpolate wrapper) — defined elsewhere in the project.
        for (scale, conv) in zip(scales, conv2ds):
            s = ((round((side * math.sqrt(scale))),) * 2)
            _src_feat = conv(resize(src_feat, s, mode='bilinear', align_corners=True))
            _trg_feat = conv(resize(trg_feat, s, mode='bilinear', align_corners=True))
            _src_feats.append(_src_feat)
            _trg_feats.append(_trg_feat)
        # Cosine-similarity correlation for every (src scale, trg scale).
        corr6d = []
        for src_feat in _src_feats:
            ch = src_feat.size(1)
            src_side = src_feat.size((- 1))
            src_feat = src_feat.view(bsz, ch, (- 1)).transpose(1, 2)
            src_norm = src_feat.norm(p=2, dim=2, keepdim=True)
            for trg_feat in _trg_feats:
                trg_side = trg_feat.size((- 1))
                trg_feat = trg_feat.view(bsz, ch, (- 1))
                trg_norm = trg_feat.norm(p=2, dim=1, keepdim=True)
                correlation = (torch.bmm(src_feat, trg_feat) / torch.bmm(src_norm, trg_norm))
                correlation = correlation.view(bsz, src_side, src_side, trg_side, trg_side).contiguous()
                corr6d.append(correlation)
        # Bring every scale pair back to the base spatial resolution.
        for (idx, correlation) in enumerate(corr6d):
            corr6d[idx] = Geometry.interpolate4d(correlation, [side, side])
        corr6d = torch.stack(corr6d).view(len(scales), len(scales), bsz, side, side, side, side).permute(2, 0, 1, 3, 4, 5, 6)
        # Keep only non-negative correlations.
        return corr6d.clamp(min=0)
|
class CHMLearner(nn.Module):
    """Applies 6D and 4D convolutional Hough matching to a multi-scale
    correlation tensor and returns a mutually-filtered 2D correlation."""
    def __init__(self, ktype, feat_dim):
        super(CHMLearner, self).__init__()
        self.scales = [0.5, 1, 2]
        # One channel-reducing conv per scale.
        self.conv2ds = nn.ModuleList([nn.Conv2d(feat_dim, (feat_dim // 4), kernel_size=3, padding=1, bias=False) for _ in self.scales])
        # CHM kernel sizes: scale axis and translation axes.
        ksz_translation = 5
        ksz_scale = 3
        self.chm6d = CHM6d(1, 1, ksz_scale, ksz_translation, ktype)
        self.chm4d = CHM4d(1, 1, ksz_translation, ktype, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
    def forward(self, src_feat, trg_feat):
        corr = Correlation.build_correlation6d(src_feat, trg_feat, self.scales, self.conv2ds).unsqueeze(1)
        # Repeated names in the unpack: later bindings win, which is fine
        # here since the paired dims (s,s), (h,h), (w,w) are equal.
        (bsz, ch, s, s, h, w, h, w) = corr.size()
        corr = self.chm6d(corr)
        corr = self.sigmoid(corr)
        # Collapse the scale-pair dimensions by max-pooling.
        corr = corr.view(bsz, (- 1), h, w, h, w).max(dim=1)[0]
        # Upsample the 4D correlation to twice the spatial resolution.
        corr = Geometry.interpolate4d(corr, [(h * 2), (w * 2)]).unsqueeze(1)
        corr = self.chm4d(corr).squeeze(1)
        corr = self.softplus(corr)
        # Flatten to (bsz, HW, HW) and apply mutual-NN filtering.
        corr = Correlation.mutual_nn_filter(corr.view(bsz, (corr.size((- 1)) ** 2), (corr.size((- 1)) ** 2)).contiguous())
        return corr
|
class CHMNet(nn.Module):
    """Convolutional Hough Matching network: grouped ResNet-101 backbone
    features feed a CHM learner that outputs a dense correlation matrix."""

    def __init__(self, ktype):
        super(CHMNet, self).__init__()
        self.backbone = backbone.resnet101(pretrained=True)
        self.learner = chmlearner.CHMLearner(ktype, feat_dim=1024)

    def forward(self, src_img, trg_img):
        (src_feat, trg_feat) = self.extract_features(src_img, trg_img)
        correlation = self.learner(src_feat, trg_feat)
        return correlation

    def extract_features(self, src_img, trg_img):
        """Run both images through the shared grouped backbone in a single
        channel-concatenated pass and split the layer-3 features."""
        feat = self.backbone.conv1.forward(torch.cat([src_img, trg_img], dim=1))
        feat = self.backbone.bn1.forward(feat)
        feat = self.backbone.relu.forward(feat)
        feat = self.backbone.maxpool.forward(feat)
        # NOTE(review): layer4 runs too but its output is discarded; only
        # the layer-3 split is returned (kept for behavioral parity).
        for idx in range(1, 5):
            feat = self.backbone.__getattr__(('layer%d' % idx))(feat)
            if (idx == 3):
                # groups=2 keeps source/target channels separate; split here.
                src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
                trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
        return (src_feat, trg_feat)

    def training_objective(self, prd_kps, trg_kps, npts):
        """Mean per-image L2 keypoint-transfer loss over the valid points.

        Fixed: first parameter was misleadingly named ``cls`` although this
        is an ordinary instance method (called as
        ``model.training_objective(...)``); renamed to ``self``.
        """
        l2dist = (prd_kps - trg_kps).pow(2).sum(dim=1)
        loss = []
        for (dist, npt) in zip(l2dist, npts):
            # Only the first npt keypoints are real; the rest are padding.
            loss.append(dist[:npt].mean())
        return torch.stack(loss).mean()
|
def test(model, dataloader):
    """Evaluate ``model`` over ``dataloader`` and return per-threshold
    average metrics (see AverageMeter.get_test_result).

    NOTE(review): no ``torch.no_grad()`` guard — evaluation builds autograd
    graphs; verify whether that is intended.
    """
    average_meter = AverageMeter(dataloader.dataset.benchmark)
    model.eval()
    for (idx, batch) in enumerate(dataloader):
        corr_matrix = model(batch['src_img'].cuda(), batch['trg_img'].cuda())
        # Transfer source keypoints through the predicted correlation.
        prd_kps = Geometry.transfer_kps(corr_matrix, batch['src_kps'].cuda(), batch['n_pts'].cuda(), normalized=False)
        eval_result = Evaluator.evaluate(Geometry.unnormalize_kps(prd_kps), batch)
        average_meter.update(eval_result)
        average_meter.write_test_process(idx, len(dataloader))
    return average_meter.get_test_result()
|
def train(epoch, model, dataloader, optimizer, training):
    """Run one epoch of training (``training=True``) or validation.

    Returns (average loss, average PCK) over the epoch.
    """
    (model.train() if training else model.eval())
    average_meter = AverageMeter(dataloader.dataset.benchmark)
    for (idx, batch) in enumerate(dataloader):
        corr_matrix = model(batch['src_img'].cuda(), batch['trg_img'].cuda())
        # Predicted target keypoints via correlation-based transfer.
        prd_trg_kps = Geometry.transfer_kps(corr_matrix, batch['src_kps'].cuda(), batch['n_pts'].cuda(), normalized=False)
        eval_result = Evaluator.evaluate(Geometry.unnormalize_kps(prd_trg_kps), batch)
        loss = model.training_objective(prd_trg_kps, Geometry.normalize_kps(batch['trg_kps'].cuda()), batch['n_pts'].cuda())
        # Loss is computed in both modes but only stepped when training.
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        average_meter.update(eval_result, loss.item())
        average_meter.write_process(idx, len(dataloader), epoch)
    average_meter.write_result(('Training' if training else 'Validation'), epoch)
    mean = (lambda x: (sum(x) / len(x)))
    avg_loss = mean(average_meter.loss_buffer)
    avg_pck = mean(average_meter.buffer['pck'])
    return (avg_loss, avg_pck)
|
class Logger():
    'Writes results of training/testing'
    @classmethod
    def initialize(cls, args):
        """Create the timestamped log directory, file + console logging and
        a TensorBoard writer; dumps all CLI args."""
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logpath = args.logpath
        # NOTE(review): ends in '.log' but is used as a directory name.
        cls.logpath = os.path.join('logs', ((logpath + logtime) + '.log'))
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)
        logging.basicConfig(filemode='w', filename=os.path.join(cls.logpath, 'log.txt'), level=logging.INFO, format='%(message)s', datefmt='%m-%d %H:%M:%S')
        # Mirror all log records to the console as well.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))
        logging.info('\n+=========== Dynamic Hyperpixel Flow ============+')
        for arg_key in args.__dict__:
            logging.info(('| %20s: %-24s |' % (arg_key, str(args.__dict__[arg_key]))))
        logging.info('+================================================+\n')
    @classmethod
    def info(cls, msg):
        'Writes message to .txt'
        logging.info(msg)
    @classmethod
    def save_model(cls, model, epoch, val_pck):
        """Persist current best weights and log the validation milestone."""
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'best_model.pt'))
        cls.info(('Model saved @%d w/ val. PCK: %5.2f.\n' % (epoch, val_pck)))
    @classmethod
    def visualize_selection(cls, catwise_sel):
        """Visualize (class-wise) layer selection frequency as a heatmap.

        NOTE(review): ``sort_ids`` is unbound for benchmarks outside the
        four handled cases — a NameError would follow; verify callers.
        """
        # Category display order per benchmark (pfpascal uses a hand-picked
        # ordering; others are identity).
        if (cls.benchmark == 'pfpascal'):
            sort_ids = [17, 8, 10, 19, 4, 15, 0, 3, 6, 5, 18, 13, 1, 14, 12, 2, 11, 7, 16, 9]
        elif (cls.benchmark == 'pfwillow'):
            sort_ids = np.arange(10)
        elif (cls.benchmark == 'caltech'):
            sort_ids = np.arange(101)
        elif (cls.benchmark == 'spair'):
            sort_ids = np.arange(18)
        # Average selection frequency per category.
        for key in catwise_sel:
            catwise_sel[key] = torch.stack(catwise_sel[key]).mean(dim=0).cpu().numpy()
        category = np.array(list(catwise_sel.keys()))[sort_ids]
        values = np.array(list(catwise_sel.values()))[sort_ids]
        cols = list(range(values.shape[1]))
        df = pd.DataFrame(values, index=category, columns=cols)
        plt.pcolor(df, vmin=0.0, vmax=1.0)
        plt.gca().set_aspect('equal')
        plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
        plt.xticks(np.arange(0.5, len(df.columns), 5), df.columns[::5])
        plt.tight_layout()
        plt.savefig(('%s/selected_layers.jpg' % cls.logpath))
|
class AverageMeter():
    'Stores loss, evaluation results, selected layers'
    def __init__(self, benchamrk):
        """Constructor of AverageMeter.

        NOTE(review): parameter name is misspelled ('benchamrk'); renaming
        would break keyword callers, so it is kept as-is.
        """
        # Caltech reports transfer accuracy + IoU; others report PCK.
        if (benchamrk == 'caltech'):
            self.buffer_keys = ['ltacc', 'iou']
        else:
            self.buffer_keys = ['pck']
        self.buffer = {}
        for key in self.buffer_keys:
            self.buffer[key] = []
        # Per-category layer-selection history.
        self.sel_buffer = {}
        self.loss_buffer = []
    def update(self, eval_result, layer_sel, category, loss=None):
        """Append per-sample metrics, per-category layer selections and,
        optionally, a scalar loss."""
        for key in self.buffer_keys:
            # eval_result[key] is a list — extend, don't append.
            self.buffer[key] += eval_result[key]
        for (sel, cls) in zip(layer_sel, category):
            if (self.sel_buffer.get(cls) is None):
                self.sel_buffer[cls] = []
            self.sel_buffer[cls] += [sel]
        if (loss is not None):
            self.loss_buffer.append(loss)
    def write_result(self, split, epoch=(- 1)):
        """Log epoch-level averages; epoch tag is omitted when epoch < 0."""
        msg = ('\n*** %s ' % split)
        msg += (('[@Epoch %02d] ' % epoch) if (epoch > (- 1)) else '')
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %5.2f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('%s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        msg += '***\n'
        Logger.info(msg)
    def write_process(self, batch_idx, datalen, epoch=(- 1)):
        """Log running (last + average) loss and average metrics for one
        batch; epoch tag is omitted when epoch < 0."""
        msg = (('[Epoch: %02d] ' % epoch) if (epoch > (- 1)) else '')
        msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %6.2f ' % self.loss_buffer[(- 1)])
            msg += ('Avg Loss: %6.5f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('Avg %s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        Logger.info(msg)
|
class SupervisionStrategy(ABC):
    """Abstract interface for supervision regimes.

    Concrete strategies decide which image pairs to train on, which
    correlation matrices to expose, and how the matching loss is computed.
    """

    @abstractmethod
    def get_image_pair(self, batch, *args):
        """Select the (src, trg) image tensors for this regime."""

    @abstractmethod
    def get_correlation(self, correlation_matrix):
        """Return the (detached) correlation matrices to evaluate."""

    @abstractmethod
    def compute_loss(self, correlation_matrix, *args):
        """Compute the training loss for this regime."""
|
class StrongSupStrategy(SupervisionStrategy):
    """Strong supervision: all batch pairs are semantically related and
    keypoint annotations drive a weighted cross-entropy matching loss."""

    def get_image_pair(self, batch, *args):
        """Return the (semantically related) src/trg pair for training."""
        return (batch['src_img'], batch['trg_img'])

    def get_correlation(self, correlation_matrix):
        """Return detached correlation matrices of ALL pairs in a batch."""
        return correlation_matrix.clone().detach()

    def compute_loss(self, correlation_matrix, *args):
        """Strongly-supervised matching loss L_{match}: weighted
        cross-entropy over matches plus a layer-selection term."""
        annotations, layer_sel, batch = args[0], args[1], args[2]
        cross_entropy = Objective.weighted_cross_entropy(
            correlation_matrix,
            annotations['easy_match'],
            annotations['hard_match'],
            batch,
        )
        selection = Objective.layer_selection_loss(layer_sel)
        return cross_entropy + selection
|
class WeakSupStrategy(SupervisionStrategy):
    """Weak supervision: manufactures negative pairs by rolling target
    images within the batch and trains with an entropy-ratio objective."""

    def get_image_pair(self, batch, *args):
        """Form positive (and, when training, negative) image pairs."""
        training = args[0]
        self.bsz = len(batch['src_img'])
        if not training:
            self.num_negatives = 0
            return (batch['src_img'], batch['trg_img'])
        # Pair each source with the next sample's target; keep only pairs
        # whose categories differ, so they are genuine negatives.
        shifted_idx = np.roll(np.arange(self.bsz), (- 1))
        trg_img_neg = batch['trg_img'][shifted_idx].clone()
        trg_cls_neg = batch['category_id'][shifted_idx].clone()
        neg_subidx = ((batch['category_id'] - trg_cls_neg) != 0)
        self.num_negatives = neg_subidx.sum()
        src_img = torch.cat([batch['src_img'], batch['src_img'][neg_subidx]], dim=0)
        trg_img = torch.cat([batch['trg_img'], trg_img_neg[neg_subidx]], dim=0)
        return (src_img, trg_img)

    def get_correlation(self, correlation_matrix):
        """Return detached correlation matrices of POSITIVE pairs only."""
        return correlation_matrix[:self.bsz].clone().detach()

    def compute_loss(self, correlation_matrix, *args):
        """Weakly-supervised matching loss L_{match}: positive/negative
        entropy ratio plus a layer-selection term."""
        layer_sel = args[1]
        positive = Objective.information_entropy(correlation_matrix[:self.bsz])
        if self.num_negatives > 0:
            negative = Objective.information_entropy(correlation_matrix[self.bsz:])
        else:
            negative = 1.0
        return (positive / negative) + Objective.layer_selection_loss(layer_sel)
|
def fix_randseed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and all GPUs) and
    force cudnn into deterministic mode for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
|
def mean(x):
    """Return the arithmetic mean of a list, or 0.0 for an empty list."""
    if not x:
        return 0.0
    return sum(x) / len(x)
|
def where(predicate):
    """Return a 1-D tensor of indices where the boolean nd-tensor is True;
    an empty predicate yields an empty tensor."""
    idx = predicate.nonzero()
    if len(idx) == 0:
        return idx
    return idx.t().squeeze(0)
|
class CorrespondenceDataset(Dataset):
    'Parent class of PFPascal, PFWillow, Caltech, and SPair'

    def __init__(self, benchmark, datapath, thres, device, split):
        'CorrespondenceDataset constructor'
        super(CorrespondenceDataset, self).__init__()
        # Per-benchmark metadata:
        # (directory name, split-file spec, image dir, annotation dir, default PCK threshold type)
        self.metadata = {'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'), 'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'), 'caltech': ('Caltech-101', 'test_pairs_caltech_with_category.csv', '101_ObjectCategories', '', ''), 'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')}
        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        # PF-PASCAL and SPair-71k ship per-split files; the others a single file.
        if (benchmark == 'pfpascal'):
            self.spt_path = os.path.join(base_path, (split + '_pairs.csv'))
        elif (benchmark == 'spair'):
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], (split + '.txt'))
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        # SPair-71k keeps its annotations per split.
        if (benchmark == 'spair'):
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])
        # Maximum keypoints per image; keypoint tensors are padded up to this count.
        if (benchmark == 'caltech'):
            self.max_pts = 400
        else:
            self.max_pts = 40
        self.split = split
        self.device = device
        self.imside = 240  # images are resized to (imside, imside)
        self.benchmark = benchmark
        self.range_ts = torch.arange(self.max_pts)
        # PCK threshold type: use the benchmark default when 'auto', else as given.
        self.thres = (self.metadata[benchmark][4] if (thres == 'auto') else thres)
        self.transform = transforms.Compose([transforms.Resize((self.imside, self.imside)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # The lists below are populated by subclasses.
        self.train_data = []
        self.src_imnames = []
        self.trg_imnames = []
        self.cls = []
        self.cls_ids = []
        self.src_kps = []
        self.trg_kps = []

    def __len__(self):
        'Returns the number of pairs'
        return len(self.train_data)

    def __getitem__(self, idx):
        'Constructs and return a batch'
        batch = dict()
        batch['src_imname'] = self.src_imnames[idx]
        batch['trg_imname'] = self.trg_imnames[idx]
        batch['category_id'] = self.cls_ids[idx]
        batch['category'] = self.cls[batch['category_id']]
        src_pil = self.get_image(self.src_imnames, idx)
        trg_pil = self.get_image(self.trg_imnames, idx)
        # Original (pre-resize) image sizes, needed to rescale keypoints below.
        batch['src_imsize'] = src_pil.size
        batch['trg_imsize'] = trg_pil.size
        batch['src_img'] = self.transform(src_pil).to(self.device)
        batch['trg_img'] = self.transform(trg_pil).to(self.device)
        (batch['src_kps'], num_pts) = self.get_points(self.src_kps, idx, src_pil.size)
        (batch['trg_kps'], _) = self.get_points(self.trg_kps, idx, trg_pil.size)
        batch['n_pts'] = torch.tensor(num_pts)
        batch['datalen'] = len(self.train_data)
        return batch

    def get_image(self, imnames, idx):
        'Reads PIL image from path'
        path = os.path.join(self.img_path, imnames[idx])
        return Image.open(path).convert('RGB')

    def get_pckthres(self, batch, imsize):
        'Computes PCK threshold'
        if (self.thres == 'bbox'):
            # Threshold = longer side of the target object's bounding box.
            bbox = batch['trg_bbox'].clone()
            bbox_w = (bbox[2] - bbox[0])
            bbox_h = (bbox[3] - bbox[1])
            pckthres = torch.max(bbox_w, bbox_h)
        elif (self.thres == 'img'):
            # Threshold = longer side of the (resized) target image.
            imsize_t = batch['trg_img'].size()
            pckthres = torch.tensor(max(imsize_t[1], imsize_t[2]))
        else:
            raise Exception(('Invalid pck threshold type: %s' % self.thres))
        return pckthres.float().to(self.device)

    def get_points(self, pts_list, idx, org_imsize):
        'Returns key-points of an image with size of (240,240)'
        (xy, n_pts) = pts_list[idx].size()
        # Pad to max_pts columns with -1 so batches collate to a fixed shape.
        pad_pts = (torch.zeros((xy, (self.max_pts - n_pts))) - 1)
        # Rescale coordinates from the original image size to (imside, imside).
        x_crds = (pts_list[idx][0] * (self.imside / org_imsize[0]))
        y_crds = (pts_list[idx][1] * (self.imside / org_imsize[1]))
        kps = torch.cat([torch.stack([x_crds, y_crds]), pad_pts], dim=1).to(self.device)
        return (kps, n_pts)

    def match_idx(self, kps, n_pts):
        'Samples the nearst feature (receptive field) indices'
        nearest_idx = find_knn(Geometry.rf_center, kps.t())
        # Shift indices of padded keypoints (position >= n_pts) down by one.
        # NOTE(review): padded keypoints sit at (-1, -1); this looks like it tags
        # them with an out-of-band index — confirm against downstream consumers.
        nearest_idx -= (self.range_ts >= n_pts).to(self.device).long()
        return nearest_idx
|
def find_knn(db_vectors, qr_vectors):
    """Find the nearest database vector (Euclidean) for each query vector.

    Args:
        db_vectors: (n_db, d) tensor of database vectors.
        qr_vectors: (n_qr, d) tensor of query vectors.

    Returns:
        (n_qr,) long tensor of indices into db_vectors.
    """
    # torch.cdist produces the (n_qr, n_db) distance matrix directly, avoiding
    # the two (n_db, n_qr, d) broadcast copies the previous repeat-based
    # implementation materialized.
    dist = torch.cdist(qr_vectors, db_vectors)
    (_, nearest_idx) = dist.min(dim=1)
    return nearest_idx
|
def load_dataset(benchmark, datapath, thres, device, split='test'):
    """Instantiate the requested correspondence benchmark dataset."""
    benchmarks = {'pfpascal': pfpascal.PFPascalDataset,
                  'pfwillow': pfwillow.PFWillowDataset,
                  'caltech': caltech.CaltechDataset,
                  'spair': spair.SPairDataset}
    dataset_cls = benchmarks.get(benchmark)
    if dataset_cls is None:
        raise Exception('Invalid benchmark dataset %s.' % benchmark)
    return dataset_cls(benchmark, datapath, thres, device, split)
|
def download_from_google(token_id, filename):
    """Download <filename>.tar.gz from Google drive, extract it, and leave the
    contents at <filename> (hoisting the tarball's single top-level directory)."""
    print(('Downloading %s ...' % os.path.basename(filename)))
    url = 'https://docs.google.com/uc?export=download'
    archive = filename + '.tar.gz'
    session = requests.Session()
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    # Large files require a second request carrying the confirm token.
    token = get_confirm_token(response)
    if token:
        response = session.get(url, params={'id': token_id, 'confirm': token}, stream=True)
    save_response_content(response, archive)
    print(('Extracting %s ...' % archive))
    with tarfile.open(archive, 'r:gz') as tar:
        tar.extractall(archive[:-len('.tar.gz')] if False else filename)
    os.remove(archive)
    # The archive contains one directory named like the target: move the
    # extraction dir aside, then pull that inner directory up into place.
    os.rename(filename, filename + '_tmp')
    os.rename(os.path.join(filename + '_tmp', os.path.basename(filename)), filename)
    os.rmdir(filename + '_tmp')
|
def get_confirm_token(response):
    """Return the value of Google's download-warning cookie, or None if absent."""
    return next((value for key, value in response.cookies.items()
                 if key.startswith('download_warning')), None)
|
def save_response_content(response, destination):
    """Stream the HTTP response body to the destination file."""
    chunk_size = 32768
    with open(destination, 'wb') as out:
        for chunk in response.iter_content(chunk_size):
            # Empty chunks are keep-alive packets; skip them.
            if chunk:
                out.write(chunk)
|
def download_dataset(datapath, benchmark):
    """Fetch the requested benchmark from Google drive unless it is already
    present under datapath."""
    if not os.path.isdir(datapath):
        os.mkdir(datapath)
    # (google drive file id, extracted directory name) per benchmark
    file_data = {'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'), 'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'), 'caltech': ('1IV0E5sJ6xSdDyIvVSTdZjPHELMwGzsMn', 'Caltech-101'), 'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k')}
    file_id, filename = file_data[benchmark]
    abs_filepath = os.path.join(datapath, filename)
    # Skip the (slow) download when the extracted directory already exists.
    if not os.path.isdir(abs_filepath):
        download_from_google(file_id, abs_filepath)
|
class SPairDataset(CorrespondenceDataset):
    'Inherits CorrespondenceDataset'

    def __init__(self, benchmark, datapath, thres, device, split):
        'SPair-71k dataset constructor'
        super(SPairDataset, self).__init__(benchmark, datapath, thres, device, split)
        # Split file: one pair id per line; drop the trailing empty line.
        self.train_data = open(self.spt_path).read().split('\n')
        self.train_data = self.train_data[:(len(self.train_data) - 1)]
        # Pair ids encode both image names: '<pairid>-<src>-<trg>:<category>'.
        self.src_imnames = list(map((lambda x: (x.split('-')[1] + '.jpg')), self.train_data))
        self.trg_imnames = list(map((lambda x: (x.split('-')[2].split(':')[0] + '.jpg')), self.train_data))
        # Category names = image sub-directories, sorted for stable class ids.
        self.cls = os.listdir(self.img_path)
        self.cls.sort()
        anntn_files = []
        for data_name in self.train_data:
            anntn_files.append(glob.glob(('%s/%s.json' % (self.ann_path, data_name)))[0])
        anntn_files = list(map((lambda x: json.load(open(x))), anntn_files))
        # Keypoints as (2, n_pts) float tensors; bounding boxes as (4,) floats.
        self.src_kps = list(map((lambda x: torch.tensor(x['src_kps']).t().float()), anntn_files))
        self.trg_kps = list(map((lambda x: torch.tensor(x['trg_kps']).t().float()), anntn_files))
        self.src_bbox = list(map((lambda x: torch.tensor(x['src_bndbox']).float()), anntn_files))
        self.trg_bbox = list(map((lambda x: torch.tensor(x['trg_bndbox']).float()), anntn_files))
        self.cls_ids = list(map((lambda x: self.cls.index(x['category'])), anntn_files))
        # SPair-71k difficulty annotations used for per-factor evaluation.
        self.vpvar = list(map((lambda x: torch.tensor(x['viewpoint_variation'])), anntn_files))
        self.scvar = list(map((lambda x: torch.tensor(x['scale_variation'])), anntn_files))
        self.trncn = list(map((lambda x: torch.tensor(x['truncation'])), anntn_files))
        self.occln = list(map((lambda x: torch.tensor(x['occlusion'])), anntn_files))

    def __getitem__(self, idx):
        'Constructs and return a batch for SPair-71k dataset'
        batch = super(SPairDataset, self).__getitem__(idx)
        batch['src_bbox'] = self.get_bbox(self.src_bbox, idx, batch['src_imsize'])
        batch['trg_bbox'] = self.get_bbox(self.trg_bbox, idx, batch['trg_imsize'])
        batch['pckthres'] = self.get_pckthres(batch, batch['trg_imsize'])
        # Nearest receptive-field indices for each keypoint.
        batch['src_kpidx'] = self.match_idx(batch['src_kps'], batch['n_pts'])
        batch['trg_kpidx'] = self.match_idx(batch['trg_kps'], batch['n_pts'])
        batch['vpvar'] = self.vpvar[idx]
        batch['scvar'] = self.scvar[idx]
        batch['trncn'] = self.trncn[idx]
        batch['occln'] = self.occln[idx]
        return batch

    def get_image(self, img_names, idx):
        'Returns image tensor'
        # SPair images are stored under per-category sub-directories.
        path = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        return Image.open(path).convert('RGB')

    def get_bbox(self, bbox_list, idx, imsize):
        'Returns object bounding-box'
        bbox = bbox_list[idx].clone()
        # Rescale (x0, y0, x1, y1) from the original image size to (imside, imside).
        bbox[0::2] *= (self.imside / imsize[0])
        bbox[1::2] *= (self.imside / imsize[1])
        return bbox.to(self.device)
|
class Correlation():
    """Utilities for building and filtering feature correlation matrices."""

    @classmethod
    def bmm_interp(cls, src_feat, trg_feat, interp_size):
        """Bilinearly interpolate both feature maps to interp_size, then return
        their batched (HW x HW) correlation via matrix multiplication."""
        src = F.interpolate(src_feat, interp_size, mode='bilinear', align_corners=True)
        trg = F.interpolate(trg_feat, interp_size, mode='bilinear', align_corners=True)
        src = src.view(src.size(0), src.size(1), -1).transpose(1, 2)
        trg = trg.view(trg.size(0), trg.size(1), -1)
        return torch.bmm(src, trg)

    @classmethod
    def mutual_nn_filter(cls, correlation_matrix):
        "Mutual nearest neighbor filtering (Rocco et al. NeurIPS'18)"
        max_src = torch.max(correlation_matrix, dim=2, keepdim=True)[0]
        max_trg = torch.max(correlation_matrix, dim=1, keepdim=True)[0]
        # Guard against division by zero for all-zero rows/columns.
        max_src[max_src == 0] += 1e-30
        max_trg[max_trg == 0] += 1e-30
        ratio_src = correlation_matrix / max_src
        ratio_trg = correlation_matrix / max_trg
        return correlation_matrix * (ratio_src * ratio_trg)
|
class Norm():
    'Vector normalization'

    @classmethod
    def feat_normalize(cls, x, interp_size):
        """Interpolate x to interp_size and return per-position SQUARED L2 norms,
        flattened to (batch, H*W). Note: despite the name no division happens
        here; callers combine these norms to normalize correlations later."""
        interp = F.interpolate(x, interp_size, mode='bilinear', align_corners=True)
        squared = (interp * interp).sum(1)
        return squared.view(interp.size(0), -1)

    @classmethod
    def l1normalize(cls, x):
        'L1-normalization'
        denom = torch.sum(x, dim=2, keepdim=True)
        denom[denom == 0] = 1.0  # leave all-zero rows untouched
        return x / denom

    @classmethod
    def unit_gaussian_normalize(cls, x):
        'Make each (row) distribution into unit gaussian'
        centered = x - x.mean(dim=2).unsqueeze(2).expand_as(x)
        # The std is treated as a constant: no gradient flows through it.
        with torch.no_grad():
            std = centered.std(dim=2)
            std[std == 0] = 1.0
        centered /= std.unsqueeze(2).expand_as(centered)
        return centered
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias; groups=2 keeps the two
    stacked image streams of the pair backbone separate."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1,
                     stride=stride, groups=2, bias=False)
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution with no bias; groups=2 keeps the two stacked image
    streams of the pair backbone separate."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, groups=2, bias=False)
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions with a residual
    connection (optionally downsampled to match shapes)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the input onto the residual path when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
|
class Backbone(nn.Module):
    """Siamese ResNet trunk for image pairs: the 6-channel input and groups=2
    convolutions keep the two stacked image streams separate throughout."""

    def __init__(self, block, layers, zero_init_residual=False):
        super(Backbone, self).__init__()
        self.inplanes = 128
        self.conv1 = nn.Conv2d(6, 128, kernel_size=7, stride=2, padding=3, groups=2, bias=False)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0])
        self.layer2 = self._make_layer(block, 256, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): fc input is sized for a single-stream ResNet (512*expansion)
        # while layer4 emits doubled channels; fc appears unused for matching — confirm.
        self.fc = nn.Linear(512 * block.expansion, 1000)
        self._init_weights(zero_init_residual)

    def _init_weights(self, zero_init_residual):
        # Kaiming-init convolutions, unit/zero-init batchnorms; optionally zero
        # each bottleneck's last BN so residual branches start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Residual projection when spatial or channel shapes change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion))
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)
|
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 pair Backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights, duplicating every
            non-fc tensor along dim 0 so both grouped streams share filters.
    """
    model = Backbone(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet50'])
        for key in weights:
            if key.split('.')[0] == 'fc':
                # fc stays single-stream; keep the original tensor.
                weights[key] = weights[key].clone()
            else:
                weights[key] = torch.cat([weights[key].clone(), weights[key].clone()], dim=0)
        model.load_state_dict(weights)
    return model
|
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 pair Backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights, duplicating every
            non-fc tensor along dim 0 so both grouped streams share filters.
    """
    model = Backbone(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet101'])
        for key in weights:
            if key.split('.')[0] == 'fc':
                # fc stays single-stream; keep the original tensor.
                weights[key] = weights[key].clone()
            else:
                weights[key] = torch.cat([weights[key].clone(), weights[key].clone()], dim=0)
        model.load_state_dict(weights)
    return model
|
class DynamicHPF():
    'Dynamic Hyperpixel Flow (DHPF)'

    def __init__(self, backbone, device, img_side=240):
        'Constructor for DHPF'
        super(DynamicHPF, self).__init__()
        # Frozen siamese backbone; in_channels lists the feature widths of the
        # candidate intermediate layers (stem + every bottleneck output).
        if (backbone == 'resnet50'):
            self.backbone = resnet.resnet50(pretrained=True).to(device)
            self.in_channels = [64, 256, 256, 256, 512, 512, 512, 512, 1024, 1024, 1024, 1024, 1024, 1024, 2048, 2048, 2048]
            nbottlenecks = [3, 4, 6, 3]
        elif (backbone == 'resnet101'):
            self.backbone = resnet.resnet101(pretrained=True).to(device)
            self.in_channels = [64, 256, 256, 256, 512, 512, 512, 512, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 2048, 2048, 2048]
            nbottlenecks = [3, 4, 23, 3]
        else:
            raise Exception(('Unavailable backbone: %s' % backbone))
        # Flattened (bottleneck-id, stage-id) schedule for walking the blocks.
        self.bottleneck_ids = reduce(add, list(map((lambda x: list(range(x))), nbottlenecks)))
        self.layer_ids = reduce(add, [([(i + 1)] * x) for (i, x) in enumerate(nbottlenecks)])
        self.backbone.eval()  # backbone is frozen; only the gating learner trains
        self.learner = gating.GumbelFeatureSelection(self.in_channels).to(device)
        self.relu = nn.ReLU()
        # Correlations are computed at 1/4 of the input resolution.
        self.upsample_size = ([int((img_side / 4))] * 2)
        Geometry.initialize(self.upsample_size, device)
        self.rhm = rhm.HoughMatching(Geometry.rfs, torch.tensor([img_side, img_side]).to(device))

    def __call__(self, *args, **kwargs):
        # Returns (correlation_matrix, layer_sel) for a batch of (src, trg)
        # images, re-weighting correlations by regularized Hough matching scores.
        src_img = args[0]
        trg_img = args[1]
        (correlation_matrix, layer_sel) = self.hyperimage_correlation(src_img, trg_img)
        # Geometric scores are constants w.r.t. the learner's gradient.
        with torch.no_grad():
            geometric_scores = torch.stack([self.rhm.run(c.clone().detach()) for c in correlation_matrix], dim=0)
        correlation_matrix *= geometric_scores
        return (correlation_matrix, layer_sel)

    def hyperimage_correlation(self, src_img, trg_img):
        'Dynamically construct hyperimages and compute their correlations'
        layer_sel = []
        (correlation, src_norm, trg_norm) = (0, 0, 0)
        # The siamese backbone consumes both images stacked along channels.
        pair_img = torch.cat([src_img, trg_img], dim=1)
        # Frozen stem: conv1 -> bn -> relu -> maxpool.
        with torch.no_grad():
            feat = self.backbone.conv1.forward(pair_img)
            feat = self.backbone.bn1.forward(feat)
            feat = self.backbone.relu.forward(feat)
            feat = self.backbone.maxpool.forward(feat)
            # Split the grouped (pair) channels back into per-image halves.
            src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
            trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
        # Ungated layer-0 correlation, kept as a fallback for samples whose
        # gating ends up selecting no layer at all.
        base_src_feat = self.learner.reduction_ffns[0](src_feat)
        base_trg_feat = self.learner.reduction_ffns[0](trg_feat)
        base_correlation = Correlation.bmm_interp(base_src_feat, base_trg_feat, self.upsample_size)
        base_src_norm = Norm.feat_normalize(base_src_feat, self.upsample_size)
        base_trg_norm = Norm.feat_normalize(base_trg_feat, self.upsample_size)
        # Gated layer-0 contribution (learner returns None features if unselected).
        (src_feat, trg_feat, lsel) = self.learner(0, src_feat, trg_feat)
        if ((src_feat is not None) and (trg_feat is not None)):
            correlation += Correlation.bmm_interp(src_feat, trg_feat, self.upsample_size)
            src_norm += Norm.feat_normalize(src_feat, self.upsample_size)
            trg_norm += Norm.feat_normalize(trg_feat, self.upsample_size)
        layer_sel.append(lsel)
        # Walk every bottleneck of every residual stage, accumulating the gated
        # correlation/norm contribution of each candidate layer.
        for (hid, (bid, lid)) in enumerate(zip(self.bottleneck_ids, self.layer_ids)):
            with torch.no_grad():
                res = feat
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv1.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn1.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv2.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn2.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv3.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn3.forward(feat)
                # First bottleneck of a stage downsamples the residual branch.
                if (bid == 0):
                    res = self.backbone.__getattr__(('layer%d' % lid))[bid].downsample.forward(res)
                feat += res
                src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
                trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
            # Feature taken PRE-ReLU; gated contribution of this layer.
            (src_feat, trg_feat, lsel) = self.learner((hid + 1), src_feat, trg_feat)
            if ((src_feat is not None) and (trg_feat is not None)):
                correlation += Correlation.bmm_interp(src_feat, trg_feat, self.upsample_size)
                src_norm += Norm.feat_normalize(src_feat, self.upsample_size)
                trg_norm += Norm.feat_normalize(trg_feat, self.upsample_size)
            layer_sel.append(lsel)
            with torch.no_grad():
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
        # (batch, n_layers) selection matrix.
        layer_sel = torch.stack(layer_sel).t()
        # Fallback: samples with an empty selection use the base (layer-0) terms.
        if ((layer_sel.sum(dim=1) == 0).sum() > 0):
            empty_sel = (layer_sel.sum(dim=1) == 0).nonzero().view((- 1)).long()
            if (src_img.size(0) == 1):
                correlation = base_correlation
                src_norm = base_src_norm
                trg_norm = base_trg_norm
            else:
                correlation[empty_sel] += base_correlation[empty_sel]
                src_norm[empty_sel] += base_src_norm[empty_sel]
                trg_norm[empty_sel] += base_trg_norm[empty_sel]
        # Avoid zero norms (hence NaNs) while gating is stochastic in training.
        if self.learner.training:
            src_norm[(src_norm == 0.0)] += 0.0001
            trg_norm[(trg_norm == 0.0)] += 0.0001
        src_norm = src_norm.pow(0.5).unsqueeze(2)
        trg_norm = trg_norm.pow(0.5).unsqueeze(1)
        # Cosine-style normalization, rectified and squared.
        correlation_ts = self.relu((correlation / (torch.bmm(src_norm, trg_norm) + 0.001))).pow(2)
        return (correlation_ts, layer_sel)

    def parameters(self):
        # Only the gating learner is trainable.
        return self.learner.parameters()

    def state_dict(self):
        return self.learner.state_dict()

    def load_state_dict(self, state_dict):
        self.learner.load_state_dict(state_dict)

    def eval(self):
        self.learner.eval()

    def train(self):
        self.learner.train()
|
class Objective():
    'Provides training objectives of DHPF'

    @classmethod
    def initialize(cls, target_rate, alpha):
        """Store the shared hyper-parameters used by all loss terms."""
        cls.softmax = torch.nn.Softmax(dim=1)
        cls.target_rate = target_rate
        cls.alpha = alpha
        cls.eps = 1e-30

    @classmethod
    def weighted_cross_entropy(cls, correlation_matrix, easy_match, hard_match, batch):
        'Computes sum of weighted cross-entropy values between ground-truth and prediction'
        loss_buf = correlation_matrix.new_zeros(correlation_matrix.size(0))
        correlation_matrix = Norm.unit_gaussian_normalize(correlation_matrix)
        per_sample = zip(correlation_matrix, batch['pckthres'], batch['n_pts'])
        for idx, (ct, thres, npt) in enumerate(per_sample):
            # Hard matches contribute with full weight.
            hard_src, hard_trg = hard_match['src'][idx], hard_match['trg'][idx]
            if len(hard_src) > 0:
                loss_buf[idx] += cls.cross_entropy(ct, hard_src, hard_trg).sum()
            # Easy matches are weighted by their normalized transfer error.
            easy_src, easy_trg = easy_match['src'][idx], easy_match['trg'][idx]
            if len(easy_src) > 0:
                smooth_weight = (easy_match['dist'][idx] / (thres * cls.alpha)).pow(2)
                loss_buf[idx] += (smooth_weight * cls.cross_entropy(ct, easy_src, easy_trg)).sum()
            loss_buf[idx] /= npt
        return torch.mean(loss_buf)

    @classmethod
    def cross_entropy(cls, correlation_matrix, src_match, trg_match):
        'Cross-entropy between predicted pdf and ground-truth pdf (one-hot vector)'
        pdf = cls.softmax(correlation_matrix.index_select(0, src_match))
        gt_prob = pdf[range(len(trg_match)), trg_match]
        return -torch.log(gt_prob + cls.eps)

    @classmethod
    def information_entropy(cls, correlation_matrix, rescale_factor=4):
        'Computes information entropy of all candidate matches'
        bsz = correlation_matrix.size(0)
        correlation_matrix = Correlation.mutual_nn_filter(correlation_matrix)
        side = int(math.sqrt(correlation_matrix.size(1)))
        new_side = side // rescale_factor
        # Two views of the correlation: target->source and source->target.
        trg2src_dist = correlation_matrix.view(bsz, -1, side, side)
        src2trg_dist = correlation_matrix.view(bsz, side, side, -1).permute(0, 3, 1, 2)
        trg2src_dist = F.interpolate(trg2src_dist, [new_side, new_side], mode='bilinear', align_corners=True)
        src2trg_dist = F.interpolate(src2trg_dist, [new_side, new_side], mode='bilinear', align_corners=True)
        src_pdf = Norm.l1normalize(trg2src_dist.view(bsz, -1, new_side * new_side))
        trg_pdf = Norm.l1normalize(src2trg_dist.view(bsz, -1, new_side * new_side))
        # Clamp zeros so log2 stays finite.
        src_pdf[src_pdf == 0.0] = cls.eps
        trg_pdf[trg_pdf == 0.0] = cls.eps
        src_ent = (-(src_pdf * torch.log2(src_pdf)).sum(dim=2)).view(bsz, -1)
        trg_ent = (-(trg_pdf * torch.log2(trg_pdf)).sum(dim=2)).view(bsz, -1)
        score_net = (src_ent + trg_ent).mean(dim=1) / 2
        return score_net.mean()

    @classmethod
    def layer_selection_loss(cls, layer_sel):
        'Encourages model to select each layer at a certain rate'
        deviation = layer_sel.mean(dim=0) - cls.target_rate
        return deviation.pow(2).sum()
|
def test(model, dataloader):
    """Evaluate DHPF on the given dataloader, logging per-batch progress,
    the layer-selection visualization, and the final benchmark result."""
    meter = AverageMeter(dataloader.dataset.benchmark)
    n_batches = len(dataloader)
    for step, batch in enumerate(dataloader):
        corr, layer_sel = model(batch['src_img'], batch['trg_img'])
        # Transfer source keypoints through the predicted correlation.
        pred_kps = Geometry.transfer_kps(corr, batch['src_kps'], batch['n_pts'])
        result = Evaluator.evaluate(pred_kps, batch)
        meter.update(result, layer_sel.detach(), batch['category'])
        meter.write_process(step, n_batches)
    Logger.visualize_selection(meter.sel_buffer)
    meter.write_result('Test')
|
def train(epoch, model, dataloader, strategy, optimizer, training):
    """Run one DHPF training (or validation) epoch; returns (avg_loss, avg_pck)."""
    model.train() if training else model.eval()
    meter = AverageMeter(dataloader.dataset.benchmark)
    for step, batch in enumerate(dataloader):
        # The supervision strategy decides how pairs are formed (e.g. negatives).
        src_img, trg_img = strategy.get_image_pair(batch, training)
        corr, layer_sel = model(src_img, trg_img)
        pred_kps = Geometry.transfer_kps(strategy.get_correlation(corr),
                                         batch['src_kps'], batch['n_pts'])
        result = Evaluator.evaluate(pred_kps, batch)
        loss = strategy.compute_loss(corr, result, layer_sel, batch)
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        meter.update(result, layer_sel.detach(), batch['category'], loss.item())
        meter.write_process(step, len(dataloader), epoch)
    meter.write_result('Training' if training else 'Validation', epoch)
    return (utils.mean(meter.loss_buffer), utils.mean(meter.buffer['pck']))
|
def parse_layers(layer_ids):
    """Render a list of layer ids as a string like '(2,17,21)'."""
    return '(' + ','.join('%d' % lid for lid in layer_ids) + ')'
|
def find_topk(membuf, kval):
    """Sort (score, layers) entries by score, best first (sorts membuf IN PLACE),
    and return the top kval entries."""
    membuf.sort(key=lambda entry: entry[0], reverse=True)
    return membuf[:kval]
|
def log_evaluation(layers, score, elapsed):
    """Log one beam-search evaluation: layer set, PCK score, elapsed seconds."""
    # Lazy %-args: formatting only happens if the record is actually emitted.
    logging.info('%20s: %4.2f %% %5.1f sec', layers, score, elapsed)
|
def log_selected(depth, membuf_topk):
    """Log the layer sets surviving the beam at the given search depth."""
    logging.info(' ===================== Depth %d =====================', depth)
    for score, layers in membuf_topk:
        logging.info('%20s: %4.2f %%', layers, score)
    logging.info(' ====================================================')
|
def beamsearch_hp(datapath, benchmark, backbone, thres, alpha, logpath, candidate_base, candidate_layers, beamsize, maxdepth):
    'Implementation of beam search for hyperpixel layers'
    # Each candidate is a set (list) of backbone layer ids, scored on the
    # validation split; the beam keeps the top-`beamsize` sets per depth.
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    model = hpflow.HyperpixelFlow(backbone, '0', benchmark, device)
    download.download_dataset(os.path.abspath(datapath), benchmark)
    dset = download.load_dataset(benchmark, datapath, thres, device, 'val')
    dataloader = DataLoader(dset, batch_size=1, num_workers=0)
    # Depth 0: score every base candidate on its own.
    membuf_cand = []
    for base in candidate_base:
        start = time.time()
        hyperpixel = parse_layers(base)
        score = evaluate.run(datapath, benchmark, backbone, thres, alpha, hyperpixel, logpath, True, model, dataloader)
        log_evaluation(base, score, (time.time() - start))
        membuf_cand.append((score, base))
    membuf_topk = find_topk(membuf_cand, beamsize)
    # Track the best (score, layers) seen across all depths.
    (score_sel, layer_sel) = find_topk(membuf_cand, 1)[0]
    log_selected(0, membuf_topk)
    # Beam expansion: grow each surviving layer set by one extra layer per depth.
    for depth in range(1, maxdepth):
        membuf_cand = []
        for (_, test_layer) in membuf_topk:
            for cand_layer in candidate_layers:
                # Only consider layers deeper than the set's shallowest layer.
                if ((cand_layer not in test_layer) and (cand_layer > min(test_layer))):
                    start = time.time()
                    test_layers = sorted((test_layer + [cand_layer]))
                    # Skip combinations already evaluated at this depth.
                    # NOTE(review): 'break' abandons ALL remaining cand_layers for
                    # this beam entry on the first duplicate; 'continue' may be
                    # the intended behavior — confirm before relying on coverage.
                    if (test_layers in list(map((lambda x: x[1]), membuf_cand))):
                        break
                    hyperpixel = parse_layers(test_layers)
                    score = evaluate.run(datapath, benchmark, backbone, thres, alpha, hyperpixel, logpath, True, model, dataloader)
                    log_evaluation(test_layers, score, (time.time() - start))
                    membuf_cand.append((score, test_layers))
        membuf_topk = find_topk(membuf_cand, beamsize)
        (score_tmp, layer_tmp) = find_topk(membuf_cand, 1)[0]
        # Keep the global best across depths (deeper is not always better).
        if (score_tmp > score_sel):
            layer_sel = layer_tmp
            score_sel = score_tmp
        log_selected(depth, membuf_topk)
    logging.info(('\nBest layers, score: %s %5.3f' % (layer_sel, score_sel)))
    return layer_sel
|
class CorrespondenceDataset(Dataset):
    'Parent class of PFPascal, PFWillow, Caltech, and SPair'

    def __init__(self, benchmark, datapath, thres, device, split):
        'CorrespondenceDataset constructor'
        super(CorrespondenceDataset, self).__init__()
        # Per-benchmark metadata:
        # (directory name, split-file spec, image dir, annotation dir, default PCK threshold type)
        self.metadata = {'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'), 'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'), 'caltech': ('Caltech-101', 'test_pairs_caltech_with_category.csv', '101_ObjectCategories', '', ''), 'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')}
        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        # PF-PASCAL and SPair-71k ship per-split files; the others a single file.
        if (benchmark == 'pfpascal'):
            self.spt_path = os.path.join(base_path, (split + '_pairs.csv'))
        elif (benchmark == 'spair'):
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], (split + '.txt'))
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        # SPair-71k keeps its annotations per split.
        if (benchmark == 'spair'):
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])
        # PCK threshold type: use the benchmark default when 'auto', else as given.
        self.thres = (self.metadata[benchmark][4] if (thres == 'auto') else thres)
        self.transform = Normalize(['src_img', 'trg_img'])
        self.device = device
        self.split = split
        # The lists below are populated by subclasses.
        self.src_imnames = []
        self.trg_imnames = []
        self.train_data = []
        self.src_kps = []
        self.trg_kps = []
        self.cls_ids = []
        self.cls = []

    def __len__(self):
        'Returns the number of pairs'
        return len(self.train_data)

    def __getitem__(self, idx):
        'Construct and return a batch'
        sample = dict()
        sample['src_imname'] = self.src_imnames[idx]
        sample['trg_imname'] = self.trg_imnames[idx]
        sample['pair_classid'] = self.cls_ids[idx]
        sample['pair_class'] = self.cls[sample['pair_classid']]
        sample['src_img'] = self.get_image(self.src_imnames, idx)
        sample['trg_img'] = self.get_image(self.trg_imnames, idx)
        sample['src_kps'] = self.get_points(self.src_kps, idx).to(self.device)
        sample['trg_kps'] = self.get_points(self.trg_kps, idx).to(self.device)
        sample['datalen'] = len(self.train_data)
        # Normalize the image entries in place (see Normalize).
        if self.transform:
            sample = self.transform(sample)
        sample['src_img'] = sample['src_img'].to(self.device)
        sample['trg_img'] = sample['trg_img'].to(self.device)
        return sample

    def get_image(self, img_names, idx):
        'Return image tensor'
        img_name = os.path.join(self.img_path, img_names[idx])
        image = self.get_imarr(img_name)
        # HWC uint8 numpy -> CHW float tensor.
        image = torch.tensor(image.transpose(2, 0, 1).astype(np.float32))
        return image

    def get_pckthres(self, sample):
        'Compute PCK threshold'
        if (self.thres == 'bbox'):
            # Threshold = longer side of the target object's bounding box.
            trg_bbox = sample['trg_bbox']
            return torch.max((trg_bbox[2] - trg_bbox[0]), (trg_bbox[3] - trg_bbox[1]))
        elif (self.thres == 'img'):
            # Threshold = longer side of the target image.
            return torch.tensor(max(sample['trg_img'].size(1), sample['trg_img'].size(2)))
        else:
            raise Exception(('Invalid pck evaluation level: %s' % self.thres))

    def get_points(self, pts, idx):
        'Return key-points of an image'
        return pts[idx]

    def get_imarr(self, path):
        'Read a single image file as numpy array from path'
        return np.array(Image.open(path).convert('RGB'))
|
class UnNormalize():
    """Undo ImageNet-style channel normalization for visualization."""

    def __init__(self):
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]

    def __call__(self, image):
        """Return an unnormalized copy of the image; the input stays untouched."""
        restored = image.clone()
        for channel, mean, std in zip(restored, self.mean, self.std):
            channel.mul_(std).add_(mean)
        return restored
|
class Normalize():
    """Apply ImageNet-style channel normalization to selected sample entries."""

    def __init__(self, image_keys, norm_range=True):
        self.image_keys = image_keys  # keys in the sample dict holding image tensors
        self.norm_range = norm_range  # True when the input is in [0, 255]
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        """Normalize each image entry of the sample dict and return the dict."""
        for key in self.image_keys:
            image = sample[key]
            if self.norm_range:
                image /= 255.0  # bring [0, 255] pixel values into [0, 1] first
            sample[key] = self.normalize(image)
        return sample
|
def load_dataset(benchmark, datapath, thres, device, split='test'):
    """Instantiate the requested correspondence benchmark dataset."""
    registry = {'pfpascal': pfpascal.PFPascalDataset,
                'pfwillow': pfwillow.PFWillowDataset,
                'caltech': caltech.CaltechDataset,
                'spair': spair.SPairDataset}
    chosen = registry.get(benchmark)
    if chosen is None:
        raise Exception(('Invalid benchmark dataset %s.' % benchmark))
    return chosen(benchmark, datapath, thres, device, split)
|
def download_from_google(token_id, filename):
    """Fetch a Google-Drive-hosted tarball and unpack it into *filename*."""
    print(('Downloading %s ...' % os.path.basename(filename)))
    url = 'https://docs.google.com/uc?export=download'
    archive = (filename + '.tar.gz')
    sess = requests.Session()
    resp = sess.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    confirm = get_confirm_token(resp)
    if confirm:
        # Large files require a second request carrying the confirm cookie.
        resp = sess.get(url, params={'id': token_id, 'confirm': confirm}, stream=True)
    save_response_content(resp, archive)
    tar = tarfile.open(archive, 'r:gz')
    print(('Extracting %s ...' % archive))
    tar.extractall(filename)
    tar.close()
    os.remove(archive)
    # The tarball holds a single top-level directory; hoist its contents up to *filename*.
    tmp_dir = (filename + '_tmp')
    os.rename(filename, tmp_dir)
    os.rename(os.path.join(tmp_dir, os.path.basename(filename)), filename)
    os.rmdir(tmp_dir)
|
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, or None if absent."""
    matches = [value for (key, value) in response.cookies.items() if key.startswith('download_warning')]
    return (matches[0] if matches else None)
|
def save_response_content(response, destination):
    """Stream the HTTP response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, 'wb') as out:
        for chunk in response.iter_content(chunk_size):
            if not chunk:
                continue  # skip keep-alive (empty) chunks
            out.write(chunk)
|
def download_dataset(datapath, benchmark):
    """Download the requested benchmark from Google Drive unless already present."""
    if (not os.path.isdir(datapath)):
        os.mkdir(datapath)
    # benchmark key -> (Google Drive file id, extracted directory name)
    file_data = {'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'), 'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'), 'caltech': ('1IV0E5sJ6xSdDyIvVSTdZjPHELMwGzsMn', 'Caltech-101'), 'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k')}
    (file_id, dirname) = file_data[benchmark]
    target = os.path.join(datapath, dirname)
    if (not os.path.isdir(target)):
        download_from_google(file_id, target)
|
class SPairDataset(CorrespondenceDataset):
    """SPair-71k correspondence dataset (inherits CorrespondenceDataset)."""
    def __init__(self, benchmark, datapath, thres, device, split):
        """Read the split list and all per-pair JSON annotations."""
        super(SPairDataset, self).__init__(benchmark, datapath, thres, device, split)
        pairs = open(self.spt_path).read().split('\n')
        self.train_data = pairs[:(len(pairs) - 1)]  # drop trailing empty entry
        self.src_imnames = [(pair.split('-')[1] + '.jpg') for pair in self.train_data]
        self.trg_imnames = [(pair.split('-')[2].split(':')[0] + '.jpg') for pair in self.train_data]
        self.cls = sorted(os.listdir(self.img_path))
        anntn_files = [glob.glob(('%s/%s.json' % (self.ann_path, data_name)))[0] for data_name in self.train_data]
        anntns = [json.load(open(path)) for path in anntn_files]
        self.src_kps = [torch.tensor(a['src_kps']) for a in anntns]
        self.trg_kps = [torch.tensor(a['trg_kps']) for a in anntns]
        self.src_bbox = [torch.tensor(a['src_bndbox']) for a in anntns]
        self.trg_bbox = [torch.tensor(a['trg_bndbox']) for a in anntns]
        self.cls_ids = [self.cls.index(a['category']) for a in anntns]
        self.vpvar = [torch.tensor(a['viewpoint_variation']) for a in anntns]
        self.scvar = [torch.tensor(a['scale_variation']) for a in anntns]
        self.trncn = [torch.tensor(a['truncation']) for a in anntns]
        self.occln = [torch.tensor(a['occlusion']) for a in anntns]
    def __getitem__(self, idx):
        """Parent batch plus SPair-specific fields (bboxes, variation scores)."""
        sample = super(SPairDataset, self).__getitem__(idx)
        sample['src_bbox'] = self.src_bbox[idx].to(self.device)
        sample['trg_bbox'] = self.trg_bbox[idx].to(self.device)
        sample['pckthres'] = self.get_pckthres(sample).to(self.device)
        sample['vpvar'] = self.vpvar[idx]
        sample['scvar'] = self.scvar[idx]
        sample['trncn'] = self.trncn[idx]
        sample['occln'] = self.occln[idx]
        return sample
    def get_image(self, img_names, idx):
        """Images live under per-class sub-directories, unlike the parent layout."""
        path = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        arr = self.get_imarr(path)
        return torch.tensor(arr.transpose(2, 0, 1).astype(np.float32))
    def get_pckthres(self, sample):
        """Delegate threshold computation to the parent class."""
        return super(SPairDataset, self).get_pckthres(sample)
    def get_points(self, pts, idx):
        """Parent's key-points, transposed to (2, N)."""
        return super(SPairDataset, self).get_points(pts, idx).t()
|
def run(datapath, benchmark, backbone, thres, alpha, hyperpixel, logpath, beamsearch, model=None, dataloader=None, visualize=False):
    """Run the Hyperpixel Flow framework over one benchmark split.

    In beam-search mode the caller owns logging/model selection and the mean
    PCK (in %) is returned; otherwise results are logged and None is returned.
    """
    if (not os.path.isdir('logs')):
        os.mkdir('logs')
    if (not beamsearch):
        cur_datetime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logfile = os.path.join('logs', ((logpath + cur_datetime) + '.log'))
        util.init_logger(logfile)
        # NOTE(review): relies on a module-level `args` object — confirm it exists here.
        util.log_args(args)
        if visualize:
            os.mkdir((logfile + 'vis'))
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    # Lazily download/build the dataset unless the caller supplied a dataloader.
    if (dataloader is None):
        download.download_dataset(os.path.abspath(datapath), benchmark)
        split = ('val' if beamsearch else 'test')
        dset = download.load_dataset(benchmark, datapath, thres, device, split)
        dataloader = DataLoader(dset, batch_size=1, num_workers=0)
    if (model is None):
        model = hpflow.HyperpixelFlow(backbone, hyperpixel, benchmark, device)
    else:
        # Reuse the caller's model but switch which feature layers are extracted.
        model.hyperpixel_ids = util.parse_hyperpixel(hyperpixel)
    evaluator = evaluation.Evaluator(benchmark, device)
    for (idx, data) in enumerate(dataloader):
        # Resize both images so their longest side is bounded; kps are rescaled too.
        (data['src_img'], data['src_kps'], data['src_intratio']) = util.resize(data['src_img'], data['src_kps'][0])
        (data['trg_img'], data['trg_kps'], data['trg_intratio']) = util.resize(data['trg_img'], data['trg_kps'][0])
        data['alpha'] = alpha
        with torch.no_grad():
            (confidence_ts, src_box, trg_box) = model(data['src_img'], data['trg_img'])
        prd_kps = geometry.predict_kps(src_box, trg_box, data['src_kps'], confidence_ts)
        evaluator.evaluate(prd_kps, data)
        if (not beamsearch):
            evaluator.log_result(idx, data=data)
            if visualize:
                vispath = os.path.join((logfile + 'vis'), ('%03d_%s_%s' % (idx, data['src_imname'][0], data['trg_imname'][0])))
                util.visualize_prediction(data['src_kps'].t().cpu(), prd_kps.t().cpu(), data['src_img'], data['trg_img'], vispath)
    if beamsearch:
        # Beam search scores a hyperpixel configuration by mean PCK (percent).
        return ((sum(evaluator.eval_buf['pck']) / len(evaluator.eval_buf['pck'])) * 100.0)
    else:
        evaluator.log_result(len(dset), data=None, average=True)
|
class Evaluator():
    """Evaluate and log correspondence metrics: PCK (PF/SPair) or LT-ACC & IoU (Caltech)."""
    def __init__(self, benchmark, device):
        """Select the per-benchmark result buffer and eval/log callables.

        Parameters
        ----------
        benchmark: str  one of 'pfwillow', 'pfpascal', 'spair', 'caltech'
        device: torch.device  device used for metric computation
        """
        self.eval_buf = {'pfwillow': {'pck': [], 'cls_pck': dict()}, 'pfpascal': {'pck': [], 'cls_pck': dict()}, 'spair': {'pck': [], 'cls_pck': dict()}, 'caltech': {'ltacc': [], 'iou': []}}
        self.eval_funct = {'pfwillow': self.eval_pck, 'pfpascal': self.eval_pck, 'spair': self.eval_pck, 'caltech': self.eval_caltech}
        self.log_funct = {'pfwillow': self.log_pck, 'pfpascal': self.log_pck, 'spair': self.log_pck, 'caltech': self.log_caltech}
        self.eval_buf = self.eval_buf[benchmark]
        self.eval_funct = self.eval_funct[benchmark]
        self.log_funct = self.log_funct[benchmark]
        self.benchmark = benchmark
        self.device = device
    def evaluate(self, prd_kps, data):
        'Compute desired evaluation metric'
        return self.eval_funct(prd_kps, data)
    def log_result(self, idx, data, average=False):
        'Print results: PCK, or LT-ACC & IoU '
        return self.log_funct(idx, data, average)
    def eval_pck(self, prd_kps, data):
        'Compute percentage of correct key-points (PCK) based on prediction'
        pckthres = (data['pckthres'][0] * data['trg_intratio'])
        # Fix: move targets to the evaluator's device instead of hard-coding .cuda(),
        # so evaluation also works on CPU-only machines.
        ncorrt = correct_kps(data['trg_kps'].to(self.device), prd_kps, pckthres, data['alpha'])
        pair_pck = (int(ncorrt) / int(data['trg_kps'].size(1)))
        self.eval_buf['pck'].append(pair_pck)
        # Also accumulate per-class PCK for the final breakdown.
        if (self.eval_buf['cls_pck'].get(data['pair_class'][0]) is None):
            self.eval_buf['cls_pck'][data['pair_class'][0]] = []
        self.eval_buf['cls_pck'][data['pair_class'][0]].append(pair_pck)
    def log_pck(self, idx, data, average):
        'Log percentage of correct key-points (PCK)'
        if average:
            pck = (sum(self.eval_buf['pck']) / len(self.eval_buf['pck']))
            for cls in self.eval_buf['cls_pck']:
                cls_avg = (sum(self.eval_buf['cls_pck'][cls]) / len(self.eval_buf['cls_pck'][cls]))
                logging.info(('%15s: %3.3f' % (cls, cls_avg)))
            logging.info((' * Average: %3.3f' % pck))
            return pck
        logging.info(('[%5d/%5d]: \t [Pair PCK: %3.3f]\t[Average: %3.3f] %s' % ((idx + 1), data['datalen'], self.eval_buf['pck'][idx], (sum(self.eval_buf['pck']) / len(self.eval_buf['pck'])), data['pair_class'][0])))
        return None
    def eval_caltech(self, prd_kps, data):
        'Compute LT-ACC and IoU based on transferred points'
        imsize = list(data['trg_img'].size())[1:]
        # Points are rasterized to binary masks before comparing regions.
        (trg_xstr, trg_ystr) = pts2ptstr(data['trg_kps'])
        trg_mask = ptstr2mask(trg_xstr, trg_ystr, imsize[0], imsize[1])
        (prd_xstr, pred_ystr) = pts2ptstr(prd_kps)
        prd_mask = ptstr2mask(prd_xstr, pred_ystr, imsize[0], imsize[1])
        lt_acc = label_transfer_accuracy(prd_mask, trg_mask)
        iou = intersection_over_union(prd_mask, trg_mask)
        self.eval_buf['ltacc'].append(lt_acc)
        self.eval_buf['iou'].append(iou)
    def log_caltech(self, idx, data, average):
        'Log Caltech-101 dataset evaluation metrics: LT-ACC and IoU'
        if average:
            lt_acc = (sum(self.eval_buf['ltacc']) / len(self.eval_buf['ltacc']))
            segiou = (sum(self.eval_buf['iou']) / len(self.eval_buf['iou']))
            logging.info((' * Average LT-ACC: %3.2f' % lt_acc))
            logging.info((' * Average IoU: %3.2f' % segiou))
            return (lt_acc, segiou)
        logging.info(('[%5d/%5d]: \t [LT-ACC/IoU: %5.2f/%.2f]\t[Average: %5.2f/%.2f]' % ((idx + 1), data['datalen'], self.eval_buf['ltacc'][idx], self.eval_buf['iou'][idx], (sum(self.eval_buf['ltacc']) / len(self.eval_buf['ltacc'])), (sum(self.eval_buf['iou']) / len(self.eval_buf['iou'])))))
        return None
|
def correct_kps(trg_kps, prd_kps, pckthres, alpha=0.1):
    """Count predicted key-points within alpha * pckthres of their targets.

    trg_kps / prd_kps are (2, N) coordinate tensors; returns a scalar tensor.
    """
    diff = (trg_kps - prd_kps)
    l2dist = torch.sqrt(torch.sum((diff * diff), 0))
    tolerance = (pckthres.expand_as(l2dist).float() * alpha)
    return torch.sum(torch.le(l2dist, tolerance))
|
def pts2ptstr(pts):
'Convert tensor of points to string'
x_str = str(list(pts[0].cpu().numpy()))
x_str = x_str[1:(len(x_str) - 1)]
y_str = str(list(pts[1].cpu().numpy()))
y_str = y_str[1:(len(y_str) - 1)]
return (x_str, y_str)
|
def pts2mask(x_pts, y_pts, shape):
    """Rasterize the polygon spanned by the given xy-points into a binary mask.

    Fix: `np.bool` (alias of the builtin) was removed in NumPy >= 1.24;
    use the builtin `bool` dtype instead.
    """
    (x_idx, y_idx) = draw.polygon(x_pts, y_pts, shape)
    mask = np.zeros(shape, dtype=bool)
    mask[(x_idx, y_idx)] = True
    return mask
|
def ptstr2mask(x_str, y_str, out_h, out_w):
    """Convert comma-separated xy-point strings to a (1, 1, H, W) float mask tensor."""
    x_pts = np.fromstring(x_str, sep=',')
    y_pts = np.fromstring(y_str, sep=',')
    # pts2mask expects (row, col) ordering, hence y before x.
    mask_np = pts2mask(y_pts, x_pts, [out_h, out_w]).astype(np.float32)
    return torch.tensor(mask_np).unsqueeze(0).unsqueeze(0).float()
|
def intersection_over_union(mask1, mask2):
    """IoU between two soft masks, weighted by mask2's per-position mass."""
    m1 = mask1.gt(0.5)
    m2 = mask2.gt(0.5)
    rel_part_weight = (torch.sum(torch.sum(m2.float(), 2, True), 3, True) / torch.sum(m2.float()))
    inter = torch.sum(torch.sum((m1 & m2).float(), 2, True), 3, True)
    union = torch.sum(torch.sum((m1 | m2).float(), 2, True), 3, True)
    part_iou = (inter / union)
    return torch.sum((rel_part_weight * part_iou)).item()
|
def label_transfer_accuracy(mask1, mask2):
    """Fraction of pixels whose binarized labels agree (background included)."""
    agreement = (mask1.gt(0.5) == mask2.gt(0.5))
    return torch.mean(agreement.double()).item()
|
def init_logger(logfile):
    """Configure root logging to write both to *logfile* and to the console."""
    logging.basicConfig(filemode='w', filename=logfile, level=logging.INFO, format='%(message)s', datefmt='%m-%d %H:%M:%S')
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter('%(message)s'))
    console.setLevel(logging.INFO)
    logging.getLogger('').addHandler(console)
|
def log_args(args):
    """Pretty-print every program argument at INFO level."""
    logging.info('\n+========== Hyperpixel Flow Arguments ===========+')
    for (name, value) in args.__dict__.items():
        logging.info(('| %20s: %-24s |' % (name, str(value))))
    logging.info('+================================================+\n')
|
def resize(img, kps, side_thres=300):
    """Shrink a (1, 3, H, W) image so its longest side is at most *side_thres*.

    Key-points are rescaled by the same ratio; returns (img (3,h,w), kps, ratio).
    """
    imsize = torch.tensor(img.size()).float()
    kps = kps.float()
    longest = torch.max(imsize)
    inter_ratio = 1.0
    if (longest > side_thres):
        inter_ratio = (side_thres / longest)
        new_h = int((imsize[2] * inter_ratio))
        new_w = int((imsize[3] * inter_ratio))
        img = F.interpolate(img, size=(new_h, new_w), mode='bilinear', align_corners=False)
        kps *= inter_ratio
    return (img.squeeze(0), kps, inter_ratio)
|
def where(predicate):
    """Indices at which *predicate* is true, as a flat index tensor."""
    idx = predicate.nonzero()
    if (len(idx) == 0):
        return idx  # empty (0, 1) tensor, matching nonzero()'s shape
    return idx.t().squeeze(0)
|
def intersect1d(tensor1, tensor2):
    """Common values of two 1-D tensors (each assumed duplicate-free)."""
    merged = torch.cat((tensor1, tensor2), dim=0).sort()[0]
    # After sorting, a value present in both tensors appears twice in a row.
    duplicate_mask = (merged[1:] == merged[:(- 1)]).data
    return merged[:(- 1)][duplicate_mask]
|
def parse_hyperpixel(hyperpixel_ids):
    """Extract layer indices from a string like '(2, 17, 21)' into a list of ints."""
    return [int(token) for token in re.findall('\\d+', hyperpixel_ids)]
|
def visualize_prediction(src_kps, prd_kps, src_img, trg_img, vispath, relaxation=2000):
    """TPS-warp the source image onto the target via predicted correspondences
    and save a side-by-side (warped | target) composite to *vispath*."""
    src_size = src_img.size()[1:][::(- 1)]  # (W, H) ordering for PIL
    trg_size = trg_img.size()[1:][::(- 1)]
    tps = geometry.ImageTPS(src_kps, prd_kps, src_size, trg_size, relaxation)
    warped = ff.to_pil_image(tps(unnorm(src_img.cpu())))
    target = ff.to_pil_image(unnorm(trg_img.cpu()))
    canvas = Image.new('RGB', ((trg_size[0] * 2), trg_size[1]))
    canvas.paste(warped, (0, 0))
    canvas.paste(target, (trg_size[0], 0))
    canvas.save(vispath)
|
class MyDataset(Dataset):
    """Minimal paired (input, target) dataset backed by in-memory sequences."""
    def __init__(self, X, y):
        self.data = X
        self.target = y
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return (self.data[index], self.target[index])
|
def lossFunctionKLD(mu, logvar):
    """KL divergence of N(mu, exp(logvar)) from the standard normal prior (summed).

    Parameters
    ----------
    mu: Tensor
        Latent-space mean of the encoder distribution.
    logvar: Tensor
        Latent-space log variance of the encoder distribution.
    """
    per_element = (((1 + logvar) - mu.pow(2)) - logvar.exp())
    return ((- 0.5) * torch.sum(per_element))
|
def recoLossGaussian(predicted_s, x, gaussian_noise_std, data_std):
    """Gaussian-noise reconstruction loss: MSE scaled by normalized noise variance.

    Parameters
    ----------
    predicted_s: Tensor  signal predicted by the decoder
    x: Tensor            noisy observation
    gaussian_noise_std: float  std of the Gaussian noise model
    data_std: float      std of train+val data (used for normalization)
    """
    mse = torch.mean(((predicted_s - x) ** 2))
    noise_var_scale = (2.0 * ((gaussian_noise_std / data_std) ** 2))
    return (mse / noise_var_scale)
|
def recoLoss(predicted_s, x, data_mean, data_std, noiseModel):
    """Negative mean log-likelihood of the observations under *noiseModel*.

    Both the predicted signal and the observation are first denormalized
    (multiplied by data_std and shifted by data_mean) before the noise model
    evaluates per-pixel likelihoods.
    """
    signal = ((predicted_s * data_std) + data_mean)
    observation = ((x * data_std) + data_mean)
    signal = signal.permute(1, 0, 2, 3)
    observation = observation.permute(1, 0, 2, 3)[(0, ...)]
    likelihoods = noiseModel.likelihood(observation, signal)
    return (- torch.mean(torch.log(likelihoods)))
|
def loss_fn(predicted_s, x, mu, logvar, gaussian_noise_std, data_mean, data_std, noiseModel):
    """DivNoising loss: (reconstruction term, KL term normalized per pixel).

    A general noise-model reconstruction loss is used when *noiseModel* is
    provided; otherwise the Gaussian reconstruction loss is used.
    """
    kl = lossFunctionKLD(mu, logvar)
    if (noiseModel is None):
        reco = recoLossGaussian(predicted_s, x, gaussian_noise_std, data_std)
    else:
        reco = recoLoss(predicted_s, x, data_mean, data_std, noiseModel)
    return (reco, (kl / float(x.numel())))
|
def create_dataloaders(x_train_tensor, x_val_tensor, batch_size):
    """Build shuffled train/val loaders where each sample is its own target
    (autoencoding setup)."""
    loaders = []
    for tensor in (x_train_tensor, x_val_tensor):
        dataset = dataLoader.MyDataset(tensor, tensor)
        loaders.append(DataLoader(dataset, batch_size=batch_size, shuffle=True))
    return (loaders[0], loaders[1])
|
def create_model_and_train(basedir, data_mean, data_std, gaussian_noise_std, noise_model, n_depth, max_epochs, logger, checkpoint_callback, train_loader, val_loader, kl_annealing, weights_summary):
    """Train one VAELightning model from scratch; return True if the posterior collapsed."""
    # Clear artifacts from any previous (possibly collapsed) run in basedir.
    for stale in glob.glob((basedir + '/*')):
        os.remove(stale)
    vae = lightningmodel.VAELightning(data_mean=data_mean, data_std=data_std, gaussian_noise_std=gaussian_noise_std, noise_model=noise_model, n_depth=n_depth, kl_annealing=kl_annealing)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=1e-06, patience=100, verbose=True, mode='min')
    common = dict(max_epochs=max_epochs, logger=logger, callbacks=[early_stop, checkpoint_callback], weights_summary=weights_summary)
    if torch.cuda.is_available():
        trainer = pl.Trainer(gpus=1, **common)
    else:
        trainer = pl.Trainer(**common)
    trainer.fit(vae, train_loader, val_loader)
    return vae.collapse
|
def train_network(x_train_tensor, x_val_tensor, batch_size, data_mean, data_std, gaussian_noise_std, noise_model, n_depth, max_epochs, model_name, basedir, log_info=False):
    """Train DivNoising, retrying on posterior collapse (up to 20 attempts,
    then indefinitely with KL annealing enabled)."""
    (train_loader, val_loader) = create_dataloaders(x_train_tensor, x_val_tensor, batch_size)
    if (not os.path.exists(basedir)):
        os.makedirs(basedir)
    checkpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=basedir, filename=(model_name + '_best'), save_last=True, save_top_k=1, mode='min')
    checkpoint_callback.CHECKPOINT_NAME_LAST = (model_name + '_last')
    logger = TensorBoardLogger(basedir, name='', version='', default_hp_metric=False)
    weights_summary = ('top' if log_info else None)
    if (not log_info):
        pl.utilities.distributed.log.setLevel(logging.ERROR)
    collapse_flag = True
    attempts = 0
    while (collapse_flag and (attempts < 20)):
        collapse_flag = create_model_and_train(basedir, data_mean, data_std, gaussian_noise_std, noise_model, n_depth, max_epochs, logger, checkpoint_callback, train_loader, val_loader, kl_annealing=False, weights_summary=weights_summary)
        if collapse_flag:
            attempts += 1
    if collapse_flag:
        print('Posterior collapse limit reached, attempting training with KL annealing turned on!')
        while collapse_flag:
            collapse_flag = create_model_and_train(basedir, data_mean, data_std, gaussian_noise_std, noise_model, n_depth, max_epochs, logger, checkpoint_callback, train_loader, val_loader, kl_annealing=True, weights_summary=weights_summary)
|
def PickleMapName(name):
    """Translate a renamed function/module name via the module-level rename table.

    Lets old pickles load after functions or modules were renamed.
    """
    if (name not in renametable):
        return name
    return renametable[name]
|
def mapped_load_global(self):
    """pickle GLOBAL-opcode handler that rewrites module/class names through
    PickleMapName before resolving them (Python-2 dispatch-table hook)."""
    module = PickleMapName(self.readline()[:(- 1)])
    name = PickleMapName(self.readline()[:(- 1)])
    print('Finding ', module, name)
    self.append(self.find_class(module, name))
|
class MyUnpickler(pickle.Unpickler):
    """Unpickler that maps stale module/class names through PickleMapName."""
    def find_class(self, module, name):
        mapped_module = PickleMapName(module)
        mapped_name = PickleMapName(name)
        return pickle.Unpickler.find_class(self, mapped_module, mapped_name)
|
def UnPickleTM(file):
    """Unpickle a legacy TensorMol dictionary and strip stale entries.

    Eventually we need to figure out how the mechanics of dispatch tables
    changed between Python 2.7 and 3; since this is only a compatibility
    hack, the two load paths are kept separate.

    Parameters
    ----------
    file: str
        Path to the pickle file.

    Returns
    -------
    dict with obsolete callable/path entries removed.
    """
    # 'with' guarantees the handle is closed even if unpickling raises
    # (the original leaked the file object on error).
    if (sys.version_info[0] < 3):
        with open(file, 'rb') as f:
            unpickler = pickle.Unpickler(f)
            # Route the GLOBAL opcode through the rename table (py2 only).
            unpickler.dispatch[pickle.GLOBAL] = mapped_load_global
            tmp = unpickler.load()
    else:
        with open(file, 'rb') as f:
            tmp = MyUnpickler(f, encoding='latin1').load()
    # Drop entries that reference code objects / paths from older versions.
    stale_keys = ('evaluate', 'MolInstance_fc_sqdiff_BP', 'Eval_BPForceSingle', 'TFMolManage', 'TFManage', 'Prepare', 'load', 'Load', 'TensorMol.TFMolManage.path', 'TensorMol.TFMolManage.Load', 'TensorMol.TFMolManage.Prepare', 'TensorMol.TFInstance', 'TensorMol.TFInstance.train_dir', 'TensorMol.TFMolInstance.train_dir', 'TensorMol.TFInstance.chk_file', 'TensorMol.TFMolInstance.chk_file', 'save', 'Save', 'Trainable', 'TFMolManage.Trainable', '__init__')
    for key in stale_keys:
        tmp.pop(key, None)
    return tmp
|
class MorseModel(ForceHolder):
    """Simple Morse (Porter-Karplus-style) model of three atoms, used as a
    training example."""
    def __init__(self, natom_=3):
        ForceHolder.__init__(self, natom_)
        self.lat_pl = None
        self.Prepare()
    def PorterKarplus(self, x_pl):
        """Sum of three pairwise Morse terms over the atom pairs (0,1), (1,2), (0,2)."""
        x1 = (x_pl[0] - x_pl[1])
        x2 = (x_pl[2] - x_pl[1])
        x12 = (x_pl[0] - x_pl[2])
        r1 = tf.norm(x1)
        r2 = tf.norm(x2)
        r12 = tf.norm(x12)
        v1 = (0.7 * tf.pow((1.0 - tf.exp((- (r1 - 0.7)))), 2.0))
        v2 = (0.7 * tf.pow((1.0 - tf.exp((- (r2 - 0.7)))), 2.0))
        v3 = (0.7 * tf.pow((1.0 - tf.exp((- (r12 - 0.7)))), 2.0))
        return ((v1 + v2) + v3)
    def Prepare(self):
        """Build the TF1 graph (energy, force = -dE/dx) and start a session."""
        self.x_pl = tf.placeholder(tf.float64, shape=tuple([self.natom, 3]))
        self.Energy = self.PorterKarplus(self.x_pl)
        # Reuse the energy node instead of building the PorterKarplus
        # subgraph a second time (same values, half the graph).
        self.Force = tf.gradients(((- 1.0) * self.Energy), self.x_pl)
        init = tf.global_variables_initializer()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.sess.run(init)
    def __call__(self, x_):
        """Evaluate the model.

        Args:
            x_: the coordinates on which to evaluate the force.
        Returns:
            the Energy and Force (Eh and Eh/ang.).
        """
        (e, f) = self.sess.run([self.Energy, self.Force], feed_dict={self.x_pl: x_})
        return (e, f[0])
|
class QuantumElectrostatic(ForceHolder):
    """Huckel-like model (something like BeH2): four valence charges are
    exchanged between atoms interacting through a screened Coulomb term."""
    def __init__(self, natom_=3):
        ForceHolder.__init__(self, natom_)
        self.Prepare()
    def HuckelBeH2(self, x_pl):
        """Diagonalize the screened-Coulomb Huckel matrix; return
        (Energy, Dipole, [q1, q2, q3])."""
        # Pairwise distance matrix; the tiny constant keeps sqrt differentiable.
        sq = tf.reduce_sum((x_pl * x_pl), 1)
        sq = tf.reshape(sq, [(- 1), 1])
        dists = tf.sqrt((((sq - (2 * tf.matmul(x_pl, tf.transpose(x_pl)))) + tf.transpose(sq)) + tf.cast(1e-26, tf.float64)))
        hmat = tf.diag(self.en0s)
        coupling = tf.matrix_band_part(((- 1.0) / tf.pow((dists + ((0.5 * 0.5) * 0.5)), (1.0 / 3.0))), 0, (- 1))
        hmat += (coupling + tf.transpose(coupling))
        (eigvals, eigvecs) = tf.self_adjoint_eig(hmat)
        # Indices of the two lowest eigenvalues (doubly occupied orbitals).
        occ = tf.nn.top_k(((- 1.0) * eigvals), 2, sorted=True).indices
        Energy = (eigvals[occ[0]] + eigvals[occ[1]])
        q1 = (((- 1.0) + (eigvecs[(occ[0], 0)] * eigvecs[(occ[0], 0)])) + (eigvecs[(occ[1], 0)] * eigvecs[(occ[1], 0)]))
        q2 = (((- 0.5) + (eigvecs[(occ[0], 1)] * eigvecs[(occ[0], 1)])) + (eigvecs[(occ[1], 1)] * eigvecs[(occ[1], 1)]))
        q3 = (((- 0.5) + (eigvecs[(occ[0], 2)] * eigvecs[(occ[0], 2)])) + (eigvecs[(occ[1], 2)] * eigvecs[(occ[1], 2)]))
        Dipole = ((((q1 * x_pl[0]) + (q2 * x_pl[1])) + (q3 * x_pl[2])) / 3.0)
        return (Energy, Dipole, [q1, q2, q3])
    def Prepare(self):
        """Build the TF1 graph (energy, dipole, charges, force) and a session."""
        self.en0s = tf.constant([(- 1.1), (- 0.5), (- 0.5)], dtype=tf.float64)
        self.x_pl = tf.placeholder(tf.float64, shape=tuple([self.natom, 3]))
        (self.Energy, self.Dipole, self.Charges) = self.HuckelBeH2(self.x_pl)
        self.Force = tf.gradients(((- 1.0) * self.Energy), self.x_pl)
        init = tf.global_variables_initializer()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.sess.run(init)
    def __call__(self, x_):
        """Evaluate the model.

        Args:
            x_: the coordinates on which to evaluate the force.
        Returns:
            the Energy, Force (Eh and Eh/ang.), Dipole and per-atom Charges.
        """
        (e, f, d, q) = self.sess.run([self.Energy, self.Force, self.Dipole, self.Charges], feed_dict={self.x_pl: x_})
        return (e, f[0], d, q)
|
class ExtendedHuckel(ForceHolder):
    """Huckel model variant (shares the BeH2 graph with QuantumElectrostatic)."""
    def __init__(self, natom_=3):
        """Fix: `natom_` was referenced but never accepted as a parameter, so
        construction always raised NameError; it now defaults to 3 like the
        sibling model classes."""
        ForceHolder.__init__(self, natom_)
        self.Prepare()
    def HuckelBeH2(self, x_pl):
        """Diagonalize the screened-Coulomb Huckel matrix; return
        (Energy, Dipole, [q1, q2, q3])."""
        r = tf.reduce_sum((x_pl * x_pl), 1)
        r = tf.reshape(r, [(- 1), 1])
        # Pairwise distances; the tiny constant keeps the sqrt differentiable.
        D = tf.sqrt((((r - (2 * tf.matmul(x_pl, tf.transpose(x_pl)))) + tf.transpose(r)) + tf.cast(1e-26, tf.float64)))
        emat = tf.diag(self.en0s)
        J = tf.matrix_band_part(((- 1.0) / tf.pow((D + ((0.5 * 0.5) * 0.5)), (1.0 / 3.0))), 0, (- 1))
        emat += (J + tf.transpose(J))
        (e, v) = tf.self_adjoint_eig(emat)
        # Indices of the two lowest eigenvalues (doubly occupied orbitals).
        popd = tf.nn.top_k(((- 1.0) * e), 2, sorted=True).indices
        Energy = (e[popd[0]] + e[popd[1]])
        q1 = (((- 1.0) + (v[(popd[0], 0)] * v[(popd[0], 0)])) + (v[(popd[1], 0)] * v[(popd[1], 0)]))
        q2 = (((- 0.5) + (v[(popd[0], 1)] * v[(popd[0], 1)])) + (v[(popd[1], 1)] * v[(popd[1], 1)]))
        q3 = (((- 0.5) + (v[(popd[0], 2)] * v[(popd[0], 2)])) + (v[(popd[1], 2)] * v[(popd[1], 2)]))
        Dipole = ((((q1 * x_pl[0]) + (q2 * x_pl[1])) + (q3 * x_pl[2])) / 3.0)
        return (Energy, Dipole, [q1, q2, q3])
    def Prepare(self):
        """Build the TF1 graph (energy, dipole, charges, force) and a session."""
        self.en0s = tf.constant([(- 1.1), (- 0.5), (- 0.5)], dtype=tf.float64)
        self.x_pl = tf.placeholder(tf.float64, shape=tuple([self.natom, 3]))
        (self.Energy, self.Dipole, self.Charges) = self.HuckelBeH2(self.x_pl)
        self.Force = tf.gradients(((- 1.0) * self.Energy), self.x_pl)
        init = tf.global_variables_initializer()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.sess.run(init)
    def __call__(self, x_):
        """Evaluate the model.

        Args:
            x_: the coordinates on which to evaluate the force.
        Returns:
            the Energy, Force (Eh and Eh/ang.), Dipole and per-atom Charges.
        """
        (e, f, d, q) = self.sess.run([self.Energy, self.Force, self.Dipole, self.Charges], feed_dict={self.x_pl: x_})
        return (e, f[0], d, q)
|
class TMIPIManger():
    """i-PI-style socket client: receives atomic positions from a server and
    replies with energies/forces computed by `EnergyForceField`."""
    def __init__(self, EnergyForceField=None, TCP_IP='localhost', TCP_PORT=31415):
        # EnergyForceField: callable mapping positions -> (energy, force).
        self.EnergyForceField = EnergyForceField
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # True once forces have been computed and are awaiting a GETFORCE request.
        self.hasdata = False
        try:
            self.s.connect((TCP_IP, TCP_PORT))
            print('Connect to server with address:', ((TCP_IP + ' ') + str(TCP_PORT)))
        except:
            # NOTE(review): bare except hides the real connection error and leaves
            # the client in a dead state — consider narrowing and re-raising.
            print('Fail connect to server with address: ', ((TCP_IP + ' ') + str(TCP_PORT)))
    def md_run(self):
        """Main message loop handling STATUS / POSDATA / GETFORCE messages.

        NOTE(review): under Python 3 recv() returns bytes, so the str
        comparisons and str sendall payloads below look Python-2 specific —
        confirm the intended interpreter. `HDRLEN`, `BOHRPERA` and
        `JOULEPERHARTREE` are module-level constants not visible here.
        """
        while True:
            data = self.s.recv(HDRLEN)
            if (data.strip() == 'STATUS'):
                if self.hasdata:
                    print('client has data.')
                    self.s.sendall('HAVEDATA ')
                else:
                    print('client is ready to get position from server')
                    self.s.sendall('READY ')
            elif (data.strip() == 'POSDATA'):
                print('server is sending positon.')
                # Cell matrix (3x3 float64) and its inverse, unit-converted
                # from Bohr to Angstrom (and back for the inverse).
                buf_ = self.s.recv((9 * 8))
                cellh = (np.fromstring(buf_, np.float64) / BOHRPERA)
                buf_ = self.s.recv((9 * 8))
                cellih = (np.fromstring(buf_, np.float64) * BOHRPERA)
                buf_ = self.s.recv(4)
                natom = np.fromstring(buf_, np.int32)[0]
                buf_ = self.s.recv(((3 * natom) * 8))
                position = (np.fromstring(buf_, np.float64) / BOHRPERA).reshape(((- 1), 3))
                print('cellh:', cellh, ' cellih:', cellih, ' natom:', natom)
                print('position:', position)
                print('now is running the client to calculate force...')
                (energy, force) = self.EnergyForceField(position)
                # Convert forces back to the server's units.
                force = ((force / JOULEPERHARTREE) / BOHRPERA)
                vir = np.zeros((3, 3))
                self.hasdata = True
            elif (data.strip() == 'GETFORCE'):
                # NOTE(review): energy/force/natom/vir are only bound by a prior
                # POSDATA message; a GETFORCE arriving first would raise — the
                # protocol presumably guarantees the ordering.
                print('server is ready to get force from client')
                self.s.sendall('FORCEREADY ')
                self.s.sendall(np.float64(energy))
                self.s.sendall(np.int32(natom))
                self.s.sendall(force)
                self.s.sendall(vir)
                self.s.sendall(np.int32(7))
                self.s.sendall('nothing')
                self.hasdata = False
            else:
                raise Exception('wrong message from server')
|
class NN_MBE():
    """Neural-network many-body-expansion energy: one TFMolManage per MBE order."""
    def __init__(self, tfm_=None):
        """tfm_: optional dict mapping MBE order -> saved TFMolManage name."""
        self.nn_mbe = dict()
        # Idiom fix: identity comparison with None instead of `!=`.
        if (tfm_ is not None):
            for order in tfm_:
                print(tfm_[order])
                self.nn_mbe[order] = TFMolManage(tfm_[order], None, False)
        return
    def NN_Energy(self, mol):
        """Sum per-order network energies over the molecule's MBE terms and
        store energy/forces on *mol* (side effect; returns None)."""
        mol.Generate_All_MBE_term(atom_group=3, cutoff=6, center_atom=0)
        nn_energy = 0.0
        for i in range(1, (mol.mbe_order + 1)):
            nn_energy += self.nn_mbe[i].Eval_Mol(mol)
        mol.Set_MBE_Force()
        mol.nn_energy = nn_energy
        print('coords of mol:', mol.coords)
        print('force of mol:', mol.properties['mbe_deri'])
        print('energy of mol:', nn_energy)
        return
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.