code stringlengths 17 6.64M |
|---|
def logmeanexp(x, dim=0):
    """Numerically stable log of the mean of exp(x) along `dim`."""
    count = x.shape[dim]
    return x.logsumexp(dim) - math.log(count)
|
def stack(x, num_samples=None, dim=0):
    """Replicate `x` `num_samples` times along a new dimension `dim`.

    Returns `x` unchanged when `num_samples` is None.
    """
    if num_samples is None:
        return x
    return torch.stack([x] * num_samples, dim=dim)
|
class Logger():
    ' Writes results of training/testing '
    @classmethod
    def initialize(cls, args, training):
        """Create the log directory and configure file/console logging and TensorBoard.

        Args:
            args: parsed arguments; must provide `logpath`, `benchmark` and,
                at test time, `load` (checkpoint path used to name the log dir).
            training: True for a training run, False for a test run.
        """
        # Timestamp suffix, e.g. '_0131_235959'.
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        # Test runs derive the log name from the loaded checkpoint's base name.
        logpath = (args.logpath if training else (('_TEST_' + args.load.split('/')[(- 1)].split('.')[0]) + logtime))
        if (logpath == ''):
            logpath = logtime
        # NOTE(review): despite the '.log' suffix this path is used as a
        # *directory* (makedirs below) that holds 'log.txt' and TensorBoard runs.
        cls.logpath = os.path.join('logs', (logpath + '.log'))
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)
        # File logging into <logpath>/log.txt.
        logging.basicConfig(filemode='w', filename=os.path.join(cls.logpath, 'log.txt'), level=logging.INFO, format='%(message)s', datefmt='%m-%d %H:%M:%S')
        # Mirror every log record to the console as well.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        # TensorBoard writer under <logpath>/tbd/runs.
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))
        if training:
            # Dump all hyper-parameters at the top of a training log.
            logging.info(':======== Convolutional Hough Matching Networks =========')
            for arg_key in args.__dict__:
                logging.info(('| %20s: %-24s' % (arg_key, str(args.__dict__[arg_key]))))
            logging.info(':========================================================\n')
    @classmethod
    def info(cls, msg):
        ' Writes message to .txt '
        logging.info(msg)
    @classmethod
    def save_model(cls, model, epoch, val_pck):
        # Persist the current best model (by validation PCK) into the log directory.
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'pck_best_model.pt'))
        cls.info(('Model saved @%d w/ val. PCK: %5.2f.\n' % (epoch, val_pck)))
|
class AverageMeter():
    """Stores losses and evaluation results and writes running averages to the log."""

    def __init__(self, benchmark):
        """Initialize empty buffers.

        Fix: the parameter was misspelled `benchamrk`; visible callers pass it
        positionally, so the rename is backward-compatible. It is accepted for
        interface compatibility but not stored (original behavior).
        """
        self.buffer_keys = ['pck']  # evaluation metrics collected from eval results
        self.buffer = {key: [] for key in self.buffer_keys}
        self.loss_buffer = []       # one scalar loss per update() call (when given)

    def update(self, eval_result, loss=None):
        """Append one batch of evaluation results (and optionally its loss)."""
        for key in self.buffer_keys:
            # eval_result[key] is a list of per-pair results; extend the buffer.
            self.buffer[key] += eval_result[key]
        if loss is not None:
            self.loss_buffer.append(loss)

    def write_result(self, split, epoch):
        """Log epoch-level averages for `split` (e.g. 'Training'/'Validation')."""
        msg = ('\n*** %s ' % split)
        msg += ('[@Epoch %02d] ' % epoch)
        if len(self.loss_buffer) > 0:
            msg += ('Loss: %5.2f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('%s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        msg += '***\n'
        Logger.info(msg)

    def write_process(self, batch_idx, datalen, epoch):
        """Log per-batch progress during training/validation."""
        msg = ('[Epoch: %02d] ' % epoch)
        msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        if len(self.loss_buffer) > 0:
            msg += ('Loss: %5.2f ' % self.loss_buffer[-1])
            msg += ('Avg Loss: %5.5f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            # Running average reported in percent.
            msg += ('Avg %s: %5.2f ' % (key.upper(), ((sum(self.buffer[key]) / len(self.buffer[key])) * 100)))
        Logger.info(msg)

    def write_test_process(self, batch_idx, datalen):
        """Log per-batch progress during testing (PCK averaged per threshold)."""
        msg = ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        for key in self.buffer_keys:
            if key == 'pck':
                # PCK entries are tensors (one value per alpha threshold);
                # average element-wise across the buffer and report in percent.
                pcks = (torch.stack(self.buffer[key]).mean(dim=0) * 100)
                val = ''
                for p in pcks:
                    val += ('%5.2f ' % p.item())
                msg += ('Avg %s: %s ' % (key.upper(), val))
            else:
                msg += ('Avg %s: %5.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        Logger.info(msg)

    def get_test_result(self):
        """Return {metric: mean tensor * 100} over all buffered results."""
        result = {}
        for key in self.buffer_keys:
            result[key] = (torch.stack(self.buffer[key]).mean(dim=0) * 100)
        return result
|
class CorrespondenceDataset(Dataset):
    ' Parent class of PFPascal, PFWillow, and SPair '
    def __init__(self, benchmark, datapath, thres, split):
        """CorrespondenceDataset constructor.

        Args:
            benchmark: one of 'pfwillow', 'pfpascal', 'spair'.
            datapath: root directory containing the benchmark folders.
            thres: PCK threshold type ('auto' picks the benchmark default).
            split: dataset split name (e.g. 'trn', 'val', 'test').
        """
        super(CorrespondenceDataset, self).__init__()
        # Per-benchmark metadata:
        # (directory, split-file spec, image dir, annotation dir, default PCK threshold type)
        self.metadata = {'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'), 'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'), 'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')}
        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        # Split file location differs per benchmark.
        if (benchmark == 'pfpascal'):
            self.spt_path = os.path.join(base_path, (split + '_pairs.csv'))
        elif (benchmark == 'spair'):
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], (split + '.txt'))
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        # SPair keeps per-split annotation subdirectories.
        if (benchmark == 'spair'):
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])
        # Key-points are padded to a fixed count so batches can be collated.
        self.max_pts = 40
        self.split = split
        self.img_size = Geometry.img_size
        self.benchmark = benchmark
        self.range_ts = torch.arange(self.max_pts)
        self.thres = (self.metadata[benchmark][4] if (thres == 'auto') else thres)
        # Resize + ImageNet normalization.
        self.transform = transforms.Compose([transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # Filled by subclasses when parsing the split file / annotations.
        self.train_data = []
        self.src_imnames = []
        self.trg_imnames = []
        self.cls = []
        self.cls_ids = []
        self.src_kps = []
        self.trg_kps = []
    def __len__(self):
        ' Returns the number of pairs '
        return len(self.train_data)
    def __getitem__(self, idx):
        """Construct and return one source/target pair sample as a dict."""
        batch = dict()
        batch['src_imname'] = self.src_imnames[idx]
        batch['trg_imname'] = self.trg_imnames[idx]
        batch['category_id'] = self.cls_ids[idx]
        batch['category'] = self.cls[batch['category_id']]
        src_pil = self.get_image(self.src_imnames, idx)
        trg_pil = self.get_image(self.trg_imnames, idx)
        # Original PIL sizes (width, height) kept for rescaling annotations.
        batch['src_imsize'] = src_pil.size
        batch['trg_imsize'] = trg_pil.size
        batch['src_img'] = self.transform(src_pil)
        batch['trg_img'] = self.transform(trg_pil)
        # Key-points rescaled to the resized image; padded to max_pts.
        (batch['src_kps'], num_pts) = self.get_points(self.src_kps, idx, src_pil.size)
        (batch['trg_kps'], _) = self.get_points(self.trg_kps, idx, trg_pil.size)
        batch['n_pts'] = torch.tensor(num_pts)
        batch['datalen'] = len(self.train_data)
        return batch
    def get_image(self, imnames, idx):
        ' Reads PIL image from path '
        path = os.path.join(self.img_path, imnames[idx])
        return Image.open(path).convert('RGB')
    def get_pckthres(self, batch, imsize):
        """Compute the PCK threshold: max bbox side ('bbox') or max image side ('img')."""
        if (self.thres == 'bbox'):
            bbox = batch['trg_bbox'].clone()
            bbox_w = (bbox[2] - bbox[0])
            bbox_h = (bbox[3] - bbox[1])
            pckthres = torch.max(bbox_w, bbox_h)
        elif (self.thres == 'img'):
            imsize_t = batch['trg_img'].size()
            pckthres = torch.tensor(max(imsize_t[1], imsize_t[2]))
        else:
            raise Exception(('Invalid pck threshold type: %s' % self.thres))
        return pckthres.float()
    def get_points(self, pts_list, idx, org_imsize):
        """Return (padded key-points, n_pts); pads with -2 up to max_pts columns."""
        (xy, n_pts) = pts_list[idx].size()
        # Padding value -2 marks invalid (absent) key-points.
        pad_pts = (torch.zeros((xy, (self.max_pts - n_pts))) - 2)
        # Rescale from the original image size to the resized square image.
        x_crds = (pts_list[idx][0] * (self.img_size / org_imsize[0]))
        y_crds = (pts_list[idx][1] * (self.img_size / org_imsize[1]))
        kps = torch.cat([torch.stack([x_crds, y_crds]), pad_pts], dim=1)
        return (kps, n_pts)
|
def load_dataset(benchmark, datapath, thres, split='test'):
    """Instantiate the correspondence dataset matching `benchmark`."""
    dataset_classes = {
        'spair': spair.SPairDataset,
        'pfpascal': pfpascal.PFPascalDataset,
        'pfwillow': pfwillow.PFWillowDataset,
    }
    dataset_cls = dataset_classes.get(benchmark)
    if dataset_cls is None:
        raise Exception('Invalid benchmark dataset %s.' % benchmark)
    return dataset_cls(benchmark, datapath, thres, split)
|
def download_from_google(token_id, filename):
    """Download `<filename>.tar.gz` from Google Drive and extract it to `filename`.

    Args:
        token_id: Google Drive file id.
        filename: destination directory path (without the .tar.gz suffix).
    """
    print(('Downloading %s ...' % os.path.basename(filename)))
    url = 'https://docs.google.com/uc?export=download'
    destination = (filename + '.tar.gz')
    session = requests.Session()
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    # Large files require confirming Drive's 'download anyway' warning via a cookie token.
    token = get_confirm_token(response)
    if token:
        params = {'id': token_id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
    save_response_content(response, destination)
    file = tarfile.open(destination, 'r:gz')
    print(('Extracting %s ...' % destination))
    file.extractall(filename)
    file.close()
    os.remove(destination)
    # NOTE(review): the rename dance below strips one directory level —
    # presumably the tarball contains a single top-level folder named like
    # `filename`; confirm against the actual archives.
    os.rename(filename, (filename + '_tmp'))
    os.rename(os.path.join((filename + '_tmp'), os.path.basename(filename)), filename)
    os.rmdir((filename + '_tmp'))
|
def get_confirm_token(response):
    """Return the value of Google Drive's 'download_warning' cookie, or None."""
    matches = (val for key, val in response.cookies.items()
               if key.startswith('download_warning'))
    return next(matches, None)
|
def save_response_content(response, destination):
    """Stream the HTTP response body to `destination` in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, 'wb') as out:
        for chunk in response.iter_content(chunk_size):
            # Skip keep-alive chunks (empty byte strings).
            if chunk:
                out.write(chunk)
|
def download_dataset(datapath, benchmark):
    """Download and unpack a correspondence benchmark unless already present."""
    file_data = {
        'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k'),
        'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'),
        'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'),
    }
    if not os.path.isdir(datapath):
        os.mkdir(datapath)
    file_id, filename = file_data[benchmark]
    target = os.path.join(datapath, filename)
    if not os.path.isdir(target):
        download_from_google(file_id, target)
|
class SPairDataset(CorrespondenceDataset):
    """SPair-71k benchmark dataset (pairs, key-points, bboxes, masks, difficulty tags)."""
    def __init__(self, benchmark, datapath, thres, split):
        ' SPair-71k dataset constructor '
        super(SPairDataset, self).__init__(benchmark, datapath, thres, split)
        # Split file lists one pair per line; drop the trailing empty line.
        self.train_data = open(self.spt_path).read().split('\n')
        self.train_data = self.train_data[:(len(self.train_data) - 1)]
        # Pair id format encodes both image names: ...-<src>-<trg>:<...>
        self.src_imnames = list(map((lambda x: (x.split('-')[1] + '.jpg')), self.train_data))
        self.trg_imnames = list(map((lambda x: (x.split('-')[2].split(':')[0] + '.jpg')), self.train_data))
        self.seg_path = os.path.abspath(os.path.join(self.img_path, os.pardir, 'Segmentation'))
        # Category names are the image subdirectory names, sorted for stable ids.
        self.cls = os.listdir(self.img_path)
        self.cls.sort()
        # One JSON annotation file per pair.
        anntn_files = []
        for data_name in self.train_data:
            anntn_files.append(glob.glob(('%s/%s.json' % (self.ann_path, data_name)))[0])
        anntn_files = list(map((lambda x: json.load(open(x))), anntn_files))
        # Key-points stored as (2, n_pts) float tensors.
        self.src_kps = list(map((lambda x: torch.tensor(x['src_kps']).t().float()), anntn_files))
        self.trg_kps = list(map((lambda x: torch.tensor(x['trg_kps']).t().float()), anntn_files))
        self.src_bbox = list(map((lambda x: torch.tensor(x['src_bndbox']).float()), anntn_files))
        self.trg_bbox = list(map((lambda x: torch.tensor(x['trg_bndbox']).float()), anntn_files))
        self.cls_ids = list(map((lambda x: self.cls.index(x['category'])), anntn_files))
        # Per-pair difficulty indicators used for fine-grained evaluation.
        self.vpvar = list(map((lambda x: torch.tensor(x['viewpoint_variation'])), anntn_files))
        self.scvar = list(map((lambda x: torch.tensor(x['scale_variation'])), anntn_files))
        self.trncn = list(map((lambda x: torch.tensor(x['truncation'])), anntn_files))
        self.occln = list(map((lambda x: torch.tensor(x['occlusion'])), anntn_files))
    def __getitem__(self, idx):
        """Construct and return a batch for SPair-71k (adds masks, bboxes, difficulty)."""
        sample = super(SPairDataset, self).__getitem__(idx)
        sample['src_mask'] = self.get_mask(sample, sample['src_imname'])
        sample['trg_mask'] = self.get_mask(sample, sample['trg_imname'])
        sample['src_bbox'] = self.get_bbox(self.src_bbox, idx, sample['src_imsize'])
        sample['trg_bbox'] = self.get_bbox(self.trg_bbox, idx, sample['trg_imsize'])
        sample['pckthres'] = self.get_pckthres(sample, sample['trg_imsize'])
        sample['vpvar'] = self.vpvar[idx]
        sample['scvar'] = self.scvar[idx]
        sample['trncn'] = self.trncn[idx]
        sample['occln'] = self.occln[idx]
        return sample
    def get_mask(self, sample, imname):
        """Return a binary (0/255) object mask resized to (img_size, img_size)."""
        mask_path = os.path.join(self.seg_path, sample['category'], (imname.split('.')[0] + '.png'))
        tensor_mask = torch.tensor(np.array(Image.open(mask_path)))
        # PASCAL VOC class ids; segmentation pixel value is class id + 1.
        class_dict = {'aeroplane': 0, 'bicycle': 1, 'bird': 2, 'boat': 3, 'bottle': 4, 'bus': 5, 'car': 6, 'cat': 7, 'chair': 8, 'cow': 9, 'diningtable': 10, 'dog': 11, 'horse': 12, 'motorbike': 13, 'person': 14, 'pottedplant': 15, 'sheep': 16, 'sofa': 17, 'train': 18, 'tvmonitor': 19}
        class_id = (class_dict[sample['category']] + 1)
        # Keep only the sample's category; binarize to 0/255.
        tensor_mask[(tensor_mask != class_id)] = 0
        tensor_mask[(tensor_mask == class_id)] = 255
        tensor_mask = F.interpolate(tensor_mask.unsqueeze(0).unsqueeze(0).float(), size=(self.img_size, self.img_size), mode='bilinear', align_corners=True).int().squeeze()
        return tensor_mask
    def get_image(self, img_names, idx):
        """Return the PIL image; SPair nests images in per-category subdirectories."""
        path = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        return Image.open(path).convert('RGB')
    def get_pckthres(self, sample, imsize):
        ' Compute PCK threshold '
        return super(SPairDataset, self).get_pckthres(sample, imsize)
    def get_points(self, pts_list, idx, imsize):
        ' Return key-points of an image '
        return super(SPairDataset, self).get_points(pts_list, idx, imsize)
    def match_idx(self, kps, n_pts):
        # NOTE(review): delegates to the parent, but no `match_idx` is visible
        # on CorrespondenceDataset in this file — confirm it exists elsewhere,
        # otherwise this raises AttributeError when called.
        ' Sample the nearst feature (receptive field) indices '
        return super(SPairDataset, self).match_idx(kps, n_pts)
    def get_bbox(self, bbox_list, idx, imsize):
        """Return the bounding box rescaled to the resized square image."""
        bbox = bbox_list[idx].clone()
        # Even indices are x coords, odd indices are y coords.
        bbox[0::2] *= (self.img_size / imsize[0])
        bbox[1::2] *= (self.img_size / imsize[1])
        return bbox
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 grouped (groups=2) convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=2,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 grouped (groups=2) convolution with no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        groups=2,
        bias=False,
    )
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, plus residual."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction.
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution; may downsample via `stride`.
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion back to planes * expansion channels.
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Residual branch: identity, or a projection when shapes differ.
        residual = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += residual
        return self.relu(out)
|
class Backbone(nn.Module):
    """Channel-doubled ResNet backbone using grouped (groups=2) convolutions.

    The stem takes 6 input channels and all widths are twice the standard
    ResNet widths (128 vs 64, ...) — presumably so a concatenated
    source/target image pair flows through two parallel streams; confirm
    against the caller. No `forward` is defined here; callers drive the
    layers manually.
    """
    def __init__(self, block, layers, zero_init_residual=False):
        super(Backbone, self).__init__()
        self.inplanes = 128
        # Stem: 6-channel input, grouped so the two halves stay separate.
        self.conv1 = nn.Conv2d(6, 128, kernel_size=7, stride=2, padding=3, groups=2, bias=False)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0])
        self.layer2 = self._make_layer(block, 256, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # fc kept with the standard ResNet shape (512 * expansion -> 1000) so
        # pretrained fc weights load unchanged; it is not used in this file.
        self.fc = nn.Linear((512 * block.expansion), 1000)
        # Standard He initialization for convs, unit/zero for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Optionally zero the last BN of each block so it starts as identity.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` bottlenecks; first one may downsample/project the residual."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        # Subsequent blocks keep the expanded channel count.
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
|
def resnet101(pretrained=False, **kwargs):
    """Construct the channel-doubled ResNet-101 backbone.

    Args:
        pretrained (bool): if True, load ImageNet weights; each tensor is
            duplicated along dim 0 to fill the doubled (groups=2) channels.
    """
    model = Backbone(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet101'])
        for key in weights:
            # The fc layer keeps its original shape, so its weights load as-is.
            if (key.split('.')[0] == 'fc'):
                weights[key] = weights[key].clone()
                continue
            # Duplicate along the leading (output) dimension for the doubled widths.
            weights[key] = torch.cat([weights[key].clone(), weights[key].clone()], dim=0)
        model.load_state_dict(weights)
    return model
|
class KernelGenerator():
    """Builds the parameter-sharing pattern of a CHM kernel of size `ksz`."""

    def __init__(self, ksz, ktype):
        self.ksz = ksz
        # All 4D coordinates within the (ksz, ksz, ksz, ksz) kernel.
        self.idx4d = Geometry.init_idx4d(ksz)
        self.kernel = torch.zeros((ksz, ksz, ksz, ksz)).cuda()
        self.center = ((ksz // 2), (ksz // 2))
        self.ktype = ktype

    def quadrant(self, crd):
        """Return (horz, vert) quadrant signs of `crd` relative to the kernel center.

        Each component is -1 (before center), +1 (after center) or 0 (on center).
        Fix: the second branch previously repeated the '<' comparison, which
        made the +1 case unreachable; it must test '>'.
        """
        if crd[0] < self.center[0]:
            horz_quad = -1
        elif crd[0] > self.center[0]:
            horz_quad = 1
        else:
            horz_quad = 0
        if crd[1] < self.center[1]:
            vert_quad = -1
        elif crd[1] > self.center[1]:
            vert_quad = 1
        else:
            vert_quad = 0
        return (horz_quad, vert_quad)

    def generate(self):
        """Return the sharing dict, or None for a 'full' (unshared) kernel."""
        return (None if (self.ktype == 'full') else self.generate_chm_kernel())

    def generate_chm_kernel(self):
        """Group 4D kernel coordinates by their geometry-derived sharing key."""
        param_dict = {}
        for idx in self.idx4d:
            (src_i, src_j, trg_i, trg_j) = idx
            # Distances of the source/target ends from the kernel center,
            # and the source-target offset length.
            d_tail = Geometry.get_distance((src_i, src_j), self.center)
            d_head = Geometry.get_distance((trg_i, trg_j), self.center)
            d_off = Geometry.get_distance((src_i, src_j), (trg_i, trg_j))
            (horz_quad, vert_quad) = self.quadrant((src_j, src_i))
            src_crd = (src_i, src_j)
            trg_crd = (trg_i, trg_j)
            key = self.build_key(horz_quad, vert_quad, d_head, d_tail, src_crd, trg_crd, d_off)
            coord1d = Geometry.get_coord1d((src_i, src_j, trg_i, trg_j), self.ksz)
            # All coordinates mapped to the same key share one parameter.
            if param_dict.get(key) is None:
                param_dict[key] = []
            param_dict[key].append(coord1d)
        return param_dict

    def build_key(self, horz_quad, vert_quad, d_head, d_tail, src_crd, trg_crd, d_off):
        """Return the sharing key for the kernel type ('iso' or 'psi')."""
        if self.ktype == 'iso':
            # Isotropic: share by offset length only.
            return ('%d' % d_off)
        elif self.ktype == 'psi':
            # Position-sensitive isotropic: share by center-distance pair + offset.
            d_max = max(d_head, d_tail)
            d_min = min(d_head, d_tail)
            return ('%d_%d_%d' % (d_max, d_min, d_off))
        else:
            raise Exception('not implemented.')
|
class Correlation():
    """Helpers for building and filtering dense correlation tensors."""

    @classmethod
    def mutual_nn_filter(cls, correlation_matrix, eps=1e-30):
        """Mutual nearest neighbor filtering (Rocco et al. NeurIPS'18).

        Args:
            correlation_matrix: (bsz, n_src, n_trg) correlation scores.
            eps: guards against division by zero on all-zero rows/columns.
        """
        corr_src_max = torch.max(correlation_matrix, dim=2, keepdim=True)[0]
        corr_trg_max = torch.max(correlation_matrix, dim=1, keepdim=True)[0]
        corr_src_max[(corr_src_max == 0)] += eps
        corr_trg_max[(corr_trg_max == 0)] += eps
        # Normalize each score by the row/column maxima; their product is 1
        # only for mutually-best matches, down-weighting everything else.
        corr_src = (correlation_matrix / corr_src_max)
        corr_trg = (correlation_matrix / corr_trg_max)
        return (correlation_matrix * (corr_src * corr_trg))

    @classmethod
    def build_correlation6d(cls, src_feat, trg_feat, scales, conv2ds):
        """Build a 6-dimensional multi-scale correlation tensor.

        Fix: the first parameter of this @classmethod was named `self`;
        renamed to `cls` (internal only — callers are unaffected).

        Returns a tensor of shape (bsz, n_scales, n_scales, side, side, side, side),
        clamped to non-negative values.
        """
        (bsz, _, side, side) = src_feat.size()
        # Re-encode both feature maps at every scale with the per-scale convs.
        _src_feats = []
        _trg_feats = []
        for (scale, conv) in zip(scales, conv2ds):
            s = ((round((side * math.sqrt(scale))),) * 2)
            # NOTE(review): `resize` is presumably an interpolation helper
            # (F.interpolate wrapper) defined elsewhere — confirm.
            _src_feat = conv(resize(src_feat, s, mode='bilinear', align_corners=True))
            _trg_feat = conv(resize(trg_feat, s, mode='bilinear', align_corners=True))
            _src_feats.append(_src_feat)
            _trg_feats.append(_trg_feat)
        # Cosine-similarity correlation for every (src scale, trg scale) pair.
        corr6d = []
        for src_feat in _src_feats:
            ch = src_feat.size(1)
            src_side = src_feat.size((- 1))
            src_feat = src_feat.view(bsz, ch, (- 1)).transpose(1, 2)
            src_norm = src_feat.norm(p=2, dim=2, keepdim=True)
            for trg_feat in _trg_feats:
                trg_side = trg_feat.size((- 1))
                trg_feat = trg_feat.view(bsz, ch, (- 1))
                trg_norm = trg_feat.norm(p=2, dim=1, keepdim=True)
                correlation = (torch.bmm(src_feat, trg_feat) / torch.bmm(src_norm, trg_norm))
                correlation = correlation.view(bsz, src_side, src_side, trg_side, trg_side).contiguous()
                corr6d.append(correlation)
        # Bring every scale pair back to the common spatial resolution.
        for (idx, correlation) in enumerate(corr6d):
            corr6d[idx] = Geometry.interpolate4d(correlation, [side, side])
        corr6d = torch.stack(corr6d).view(len(scales), len(scales), bsz, side, side, side, side).permute(2, 0, 1, 3, 4, 5, 6)
        return corr6d.clamp(min=0)
|
class CHMLearner(nn.Module):
    """Learns 6D/4D convolutional Hough matching over multi-scale correlations."""
    def __init__(self, ktype, feat_dim):
        super(CHMLearner, self).__init__()
        # Feature pyramid scales used when building the 6D correlation.
        self.scales = [0.5, 1, 2]
        # One channel-reducing 3x3 conv per scale (feat_dim -> feat_dim // 4).
        self.conv2ds = nn.ModuleList([nn.Conv2d(feat_dim, (feat_dim // 4), kernel_size=3, padding=1, bias=False) for _ in self.scales])
        # Kernel sizes of the CHM layers (translation / scale axes).
        ksz_translation = 5
        ksz_scale = 3
        self.chm6d = CHM6d(1, 1, ksz_scale, ksz_translation, ktype)
        self.chm4d = CHM4d(1, 1, ksz_translation, ktype, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
    def forward(self, src_feat, trg_feat):
        """Return a mutually-filtered (bsz, hw, hw) correlation matrix."""
        corr = Correlation.build_correlation6d(src_feat, trg_feat, self.scales, self.conv2ds).unsqueeze(1)
        (bsz, ch, s, s, h, w, h, w) = corr.size()
        # 6D CHM over (scale, scale, h, w, h, w), then collapse scale axes by max.
        corr = self.chm6d(corr)
        corr = self.sigmoid(corr)
        corr = corr.view(bsz, (- 1), h, w, h, w).max(dim=1)[0]
        # Upsample the 4D correlation 2x in every spatial dimension.
        corr = Geometry.interpolate4d(corr, [(h * 2), (w * 2)]).unsqueeze(1)
        corr = self.chm4d(corr).squeeze(1)
        corr = self.softplus(corr)
        # Flatten to (bsz, hw, hw) and apply mutual nearest-neighbor filtering.
        corr = Correlation.mutual_nn_filter(corr.view(bsz, (corr.size((- 1)) ** 2), (corr.size((- 1)) ** 2)).contiguous())
        return corr
|
class CHMNet(nn.Module):
    """Convolutional Hough Matching network: shared backbone + CHM learner."""

    def __init__(self, ktype):
        super(CHMNet, self).__init__()
        # Channel-doubled ResNet-101 processing both images jointly (groups=2).
        self.backbone = backbone.resnet101(pretrained=True)
        self.learner = chmlearner.CHMLearner(ktype, feat_dim=1024)

    def forward(self, src_img, trg_img):
        """Return the dense correlation matrix between source and target images."""
        (src_feat, trg_feat) = self.extract_features(src_img, trg_img)
        correlation = self.learner(src_feat, trg_feat)
        return correlation

    def extract_features(self, src_img, trg_img):
        """Run both images through the grouped backbone; take layer3 features.

        The images are concatenated along channels; grouped convolutions keep
        the two streams separate, so the first half of the channels belongs to
        the source image and the second half to the target.
        """
        feat = self.backbone.conv1.forward(torch.cat([src_img, trg_img], dim=1))
        feat = self.backbone.bn1.forward(feat)
        feat = self.backbone.relu.forward(feat)
        feat = self.backbone.maxpool.forward(feat)
        for idx in range(1, 5):
            feat = self.backbone.__getattr__(('layer%d' % idx))(feat)
            if (idx == 3):
                # Split the layer3 output into per-image feature maps.
                src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
                trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
        return (src_feat, trg_feat)

    def training_objective(self, prd_kps, trg_kps, npts):
        """Mean squared distance between predicted and ground-truth key-points.

        Fix: the first parameter was named `cls`, although this is invoked as
        an instance method (`model.training_objective(...)`); renamed to `self`.

        Args:
            prd_kps: (bsz, 2, max_pts) predicted key-points.
            trg_kps: (bsz, 2, max_pts) ground-truth key-points.
            npts: number of valid key-points per sample.
        """
        l2dist = (prd_kps - trg_kps).pow(2).sum(dim=1)
        loss = []
        for (dist, npt) in zip(l2dist, npts):
            loss.append(dist[:npt].mean())  # only the valid (non-padded) points
        return torch.stack(loss).mean()
|
def test(model, dataloader):
    """Evaluate `model` over `dataloader` and return averaged test metrics (PCK)."""
    average_meter = AverageMeter(dataloader.dataset.benchmark)
    model.eval()
    # NOTE(review): no torch.no_grad() here — gradients appear unused at test
    # time; consider wrapping the loop to save memory (confirm before changing).
    for (idx, batch) in enumerate(dataloader):
        # Dense correlation between source and target images.
        corr_matrix = model(batch['src_img'].cuda(), batch['trg_img'].cuda())
        # Transfer source key-points through the correlation to predict targets.
        prd_kps = Geometry.transfer_kps(corr_matrix, batch['src_kps'].cuda(), batch['n_pts'].cuda(), normalized=False)
        eval_result = Evaluator.evaluate(Geometry.unnormalize_kps(prd_kps), batch)
        average_meter.update(eval_result)
        average_meter.write_test_process(idx, len(dataloader))
    return average_meter.get_test_result()
|
def train(epoch, model, dataloader, optimizer, training):
    """Run one epoch of training (or validation when `training` is False).

    Returns:
        (avg_loss, avg_pck) over the epoch.
    """
    # Toggle train/eval mode (expression used as a statement).
    (model.train() if training else model.eval())
    average_meter = AverageMeter(dataloader.dataset.benchmark)
    for (idx, batch) in enumerate(dataloader):
        corr_matrix = model(batch['src_img'].cuda(), batch['trg_img'].cuda())
        # Predict target key-points by transferring the source key-points.
        prd_trg_kps = Geometry.transfer_kps(corr_matrix, batch['src_kps'].cuda(), batch['n_pts'].cuda(), normalized=False)
        eval_result = Evaluator.evaluate(Geometry.unnormalize_kps(prd_trg_kps), batch)
        # Loss compares predictions against normalized ground-truth key-points.
        loss = model.training_objective(prd_trg_kps, Geometry.normalize_kps(batch['trg_kps'].cuda()), batch['n_pts'].cuda())
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        average_meter.update(eval_result, loss.item())
        average_meter.write_process(idx, len(dataloader), epoch)
    average_meter.write_result(('Training' if training else 'Validation'), epoch)
    mean = (lambda x: (sum(x) / len(x)))
    avg_loss = mean(average_meter.loss_buffer)
    avg_pck = mean(average_meter.buffer['pck'])
    return (avg_loss, avg_pck)
|
def getSeries(df, value):
    """Return the 'close' column for rows whose Name equals `value`, as ndarray."""
    selected = df.loc[df.Name == value, 'close']
    return np.asarray(selected)
|
def Jitter(X, sigma=0.5):
    """Data augmentation: add zero-mean Gaussian noise with std `sigma` to X."""
    noise = np.random.normal(loc=0, scale=sigma, size=X.shape)
    return X + noise
|
def augment(df, n):
    """Produce `n` independently jittered copies of `df`, stacked horizontally."""
    jittered = [np.asarray(df.apply(Jitter, axis=1)) for _ in range(n)]
    return np.hstack(jittered)
|
def scale(path):
    """Read a CSV (first column as index) and z-score standardize every column."""
    df = pd.read_csv(path, index_col=0)
    standardized = StandardScaler().fit_transform(df.values)
    return pd.DataFrame(standardized, columns=df.columns)
|
class UmapKmeans():
    """UMAP dimensionality reduction followed by KMeans clustering."""
    def __init__(self, n_clusters, umap_dim=2, umap_neighbors=10, umap_min_distance=float(0), umap_metric='euclidean', random_state=0):
        self.n_clusters = n_clusters
        # Manifold learner: embeds the input into `umap_dim` dimensions.
        self.manifold_in_embedding = umap.UMAP(random_state=random_state, metric=umap_metric, n_components=umap_dim, n_neighbors=umap_neighbors, min_dist=umap_min_distance)
        # NOTE(review): KMeans' `n_jobs` parameter was removed in scikit-learn 1.0 —
        # confirm the pinned sklearn version still accepts it.
        self.cluster_manifold = KMeans(n_clusters=n_clusters, random_state=random_state, n_jobs=(- 1))
        self.hle = None  # cached UMAP embedding of the most recently fitted data
    def fit(self, hl):
        # Embed, then cluster the embedding.
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)
    def predict(self, hl):
        # Project new data into the fitted manifold before predicting clusters.
        manifold = self.manifold_in_embedding.transform(hl)
        y_pred = self.cluster_manifold.predict(manifold)
        return np.asarray(y_pred)
    def fit_predict(self, hl):
        # Fit manifold + clusters, then label the training embedding itself.
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)
        y_pred = self.cluster_manifold.predict(self.hle)
        return np.asarray(y_pred)
|
def add_noise(x, noise_factor):
    """Add scaled standard-normal noise to `x` and clip the result to [0, 1]."""
    noise = np.random.normal(loc=0.0, scale=1.0, size=x.shape)
    noisy = x + noise_factor * noise
    return np.clip(noisy, 0.0, 1.0)
|
class AutoEncoder():
    """Standard feed-forward autoencoder.

    Parameters
    ----------
    input_dim: int
        Number of dimensions of the input.
    latent_dim: int
        Number of dimensions of the learned embedding.
    architecture: list or None
        Hidden architecture of the networks. Defaults to the n2d default
        [500, 500, 2000], i.e. the encoder is
        [input_dim, 500, 500, 2000, latent_dim] and the decoder is
        [latent_dim, 2000, 500, 500, input_dim].
        Fix: previously a mutable default argument ([500, 500, 2000]);
        None now stands in for that same default.
    act: string
        Activation function. Defaults to 'relu'.
    x_lambda: callable
        Transformation applied to the input before fitting (e.g. noising);
        defaults to the identity.
    """

    def __init__(self, input_dim, latent_dim, architecture=None, act='relu', x_lambda=(lambda x: x)):
        # Avoid sharing a mutable default list across instances.
        if architecture is None:
            architecture = [500, 500, 2000]
        shape = ([input_dim] + architecture) + [latent_dim]
        self.x_lambda = x_lambda
        self.dims = shape
        self.act = act
        self.x = Input(shape=(self.dims[0],), name='input')
        self.h = self.x
        n_stacks = (len(self.dims) - 1)
        # Encoder stack: all hidden layers except the final embedding layer.
        for i in range((n_stacks - 1)):
            self.h = Dense(self.dims[(i + 1)], activation=self.act, name=('encoder_%d' % i))(self.h)
        # Linear embedding layer (no activation).
        self.encoder = Dense(self.dims[(- 1)], name=('encoder_%d' % (n_stacks - 1)))(self.h)
        # Decoder mirrors the encoder back to the input dimension.
        self.decoded = Dense(self.dims[(- 2)], name='decoder', activation=self.act)(self.encoder)
        for i in range((n_stacks - 2), 0, (- 1)):
            self.decoded = Dense(self.dims[i], activation=self.act, name=('decoder_%d' % i))(self.decoded)
        self.decoded = Dense(self.dims[0], name='decoder_0')(self.decoded)
        self.Model = Model(inputs=self.x, outputs=self.decoded)
        # Re-bind self.encoder from the embedding tensor to an encoder Model.
        self.encoder = Model(inputs=self.x, outputs=self.encoder)

    def fit(self, x, batch_size, epochs, loss, optimizer, weights, verbose, weight_id, patience):
        """Train the autoencoder (or load pretrained weights).

        Parameters
        ----------
        x: array-like
            The data to fit; `x_lambda(x)` is the model input, `x` the target.
        batch_size: int
            Batch size.
        epochs: int
            Number of training epochs.
        loss: string or function
            Loss function (e.g. 'mse').
        optimizer: string or function
            Optimizer (e.g. 'adam').
        weights: string or None
            If given, path to pretrained weights to load instead of training.
        verbose: int
            Keras verbosity level.
        weight_id: string or None
            If given, path where weights are checkpointed/saved.
        patience: int or None
            If given, early-stopping patience on the training loss.
        """
        if weights is None:
            self.Model.compile(loss=loss, optimizer=optimizer)
            if weight_id is not None:
                # Checkpoint the best model; optionally stop early on plateau.
                if patience is not None:
                    callbacks = [EarlyStopping(monitor='loss', patience=patience), ModelCheckpoint(filepath=weight_id, monitor='loss', save_best_only=True)]
                else:
                    callbacks = [ModelCheckpoint(filepath=weight_id, monitor='loss', save_best_only=True)]
                self.Model.fit(self.x_lambda(x), x, batch_size=batch_size, epochs=epochs, callbacks=callbacks, verbose=verbose)
                self.Model.save_weights(weight_id)
            elif patience is not None:
                callbacks = [EarlyStopping(monitor='loss', patience=patience)]
                self.Model.fit(self.x_lambda(x), x, batch_size=batch_size, epochs=epochs, callbacks=callbacks, verbose=verbose)
            else:
                self.Model.fit(self.x_lambda(x), x, batch_size=batch_size, epochs=epochs, verbose=verbose)
        else:
            # Skip training entirely and use the pretrained weights.
            self.Model.load_weights(weights)
|
class UmapGMM():
    """UMAP dimensionality reduction followed by Gaussian-mixture clustering.

    Parameters
    ----------
    n_clusters: int
        Number of clusters (mixture components).
    umap_dim: int
        Number of dimensions to find with UMAP. Defaults to 2.
    umap_neighbors: int
        Number of nearest neighbors for UMAP. Defaults to 10.
    umap_min_distance: float
        Minimum distance for UMAP; smaller means tighter clusters. Defaults to 0.
    umap_metric: string or function
        Distance metric for UMAP. Defaults to euclidean.
    random_state: int
        Random seed shared by UMAP and the mixture model.
    """
    def __init__(self, n_clusters, umap_dim=2, umap_neighbors=10, umap_min_distance=float(0), umap_metric='euclidean', random_state=0):
        self.n_clusters = n_clusters
        # Manifold learner: embeds the input into `umap_dim` dimensions.
        self.manifold_in_embedding = umap.UMAP(random_state=random_state, metric=umap_metric, n_components=umap_dim, n_neighbors=umap_neighbors, min_dist=umap_min_distance)
        # Full-covariance GMM clusters the embedded points.
        self.cluster_manifold = mixture.GaussianMixture(covariance_type='full', n_components=n_clusters, random_state=random_state)
        self.hle = None  # cached UMAP embedding of the most recently fitted data
    def fit(self, hl):
        # Embed, then fit the mixture on the embedding.
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)
    def predict(self, hl):
        # Hard labels: argmax of per-component posterior probabilities.
        manifold = self.manifold_in_embedding.transform(hl)
        y_prob = self.cluster_manifold.predict_proba(manifold)
        y_pred = y_prob.argmax(1)
        return np.asarray(y_pred)
    def predict_proba(self, hl):
        # Soft assignments: full posterior probability matrix.
        manifold = self.manifold_in_embedding.transform(hl)
        y_prob = self.cluster_manifold.predict_proba(manifold)
        return np.asarray(y_prob)
    def fit_predict(self, hl):
        # Fit manifold + mixture, then label the training embedding itself.
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)
        y_prob = self.cluster_manifold.predict_proba(self.hle)
        y_pred = y_prob.argmax(1)
        return np.asarray(y_pred)
|
def best_cluster_fit(y_true, y_pred):
    """Map predicted cluster labels onto true labels via optimal (Hungarian) assignment.

    Fix: `la.linear_assignment` (sklearn.utils.linear_assignment_) was removed
    from scikit-learn; replaced with scipy.optimize.linear_sum_assignment,
    repacked into the same (k, 2) pair-array shape the callers iterate.

    Returns:
        (best_fit, ind, w): remapped predictions, assignment pairs, and the
        contingency matrix w[pred, true].
    """
    from scipy.optimize import linear_sum_assignment

    y_true = y_true.astype(np.int64)
    D = (max(y_pred.max(), y_true.max()) + 1)
    # Contingency matrix: w[i, j] counts samples predicted i with true label j.
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[(y_pred[i], y_true[i])] += 1
    # Maximize matches by minimizing (max - w); repack as pairs.
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    ind = np.transpose(np.asarray([row_ind, col_ind]))
    best_fit = []
    for i in range(y_pred.size):
        for j in range(len(ind)):
            if (ind[j][0] == y_pred[i]):
                best_fit.append(ind[j][1])
    return (best_fit, ind, w)
|
def cluster_acc(y_true, y_pred):
    """Clustering accuracy under the best one-to-one cluster-to-label mapping."""
    _, ind, w = best_cluster_fit(y_true, y_pred)
    matched = sum(w[(i, j)] for (i, j) in ind)
    return (matched * 1.0) / y_pred.size
|
def plot(x, y, plot_id, names=None, n_clusters=10):
    """Scatter-plot up to 5000 2-D embedded points, colored by cluster label.

    Args:
        x: array-like of shape (n, >=2); columns 0 and 1 are plotted.
        y: cluster labels aligned with `x`.
        plot_id: plot title.
        names: optional mapping from label values to display names.
        n_clusters: number of clusters (controls palette size / legend columns).
    """
    # Cap at 5000 points to keep plotting responsive.
    viz_df = pd.DataFrame(data=x[:5000])
    viz_df['Label'] = y[:5000]
    if (names is not None):
        viz_df['Label'] = viz_df['Label'].map(names)
    plt.subplots(figsize=(8, 5))
    sns.scatterplot(x=0, y=1, hue='Label', legend='full', hue_order=sorted(viz_df['Label'].unique()), palette=sns.color_palette('hls', n_colors=n_clusters), alpha=0.5, data=viz_df)
    # Legend laid out horizontally above the axes.
    l = plt.legend(bbox_to_anchor=((- 0.1), 1.0, 1.1, 0.5), loc='lower left', markerfirst=True, mode='expand', borderaxespad=0, ncol=(n_clusters + 1), handletextpad=0.01)
    l.texts[0].set_text('')  # drop the automatic 'Label' legend title entry
    plt.ylabel('')
    plt.xlabel('')
    plt.tight_layout()
    plt.title(plot_id, pad=40)
|
class n2d():
    """n2d: (Not Too) Deep clustering.

    Couples an autoencoder with a manifold learner + clusterer (e.g.
    ``UmapGMM``): data is first embedded by the autoencoder's encoder and the
    manifold learner is then fit/applied on that embedding.

    Parameters
    ----------
    autoencoder : object
        Autoencoder instance exposing a ``fit`` method and an ``encoder``
        sub-model with a ``predict`` method.
    manifold_learner : object
        Manifold learner / clusterer exposing ``fit``, ``predict``,
        ``predict_proba`` and ``fit_predict``, plus the attributes
        ``cluster_manifold``, ``manifold_in_embedding`` and ``hle``.
    """
    def __init__(self, autoencoder, manifold_learner):
        self.autoencoder = autoencoder
        self.manifold_learner = manifold_learner
        # convenience aliases into the two components
        self.encoder = self.autoencoder.encoder
        self.clusterer = self.manifold_learner.cluster_manifold
        self.manifolder = self.manifold_learner.manifold_in_embedding
        # populated by predict / predict_proba / fit_predict
        self.preds = None
        self.probs = None
        self.hle = None
    def fit(self, x, batch_size=256, epochs=1000, loss='mse', optimizer='adam', weights=None, verbose=1, weight_id=None, patience=None):
        """Train the autoencoder on ``x``, then fit the manifold learner on the
        resulting embedding.

        Parameters
        ----------
        x : array-like
            input data
        batch_size : int
            training batch size
        epochs : int
            number of training epochs
        loss : str or callable
            autoencoder loss (defaults to mse)
        optimizer : str or callable
            optimizer (defaults to adam)
        weights : str or None
            path to pretrained autoencoder weights, if any
        verbose : int
            autoencoder training verbosity
        weight_id : str or None
            where to save the trained weights
        patience : int or None
            early-stopping patience; None disables early stopping
        """
        self.autoencoder.fit(x=x, batch_size=batch_size, epochs=epochs, loss=loss, optimizer=optimizer, weights=weights, verbose=verbose, weight_id=weight_id, patience=patience)
        hl = self.encoder.predict(x)
        self.manifold_learner.fit(hl)
    def predict(self, x):
        """Embed ``x`` and return hard cluster assignments (also stored in
        ``self.preds``; the manifold embedding is cached in ``self.hle``)."""
        hl = self.encoder.predict(x)
        self.preds = self.manifold_learner.predict(hl)
        self.hle = self.manifold_learner.hle
        return self.preds
    def predict_proba(self, x):
        """Embed ``x`` and return soft cluster-assignment probabilities."""
        hl = self.encoder.predict(x)
        self.probs = self.manifold_learner.predict_proba(hl)
        self.hle = self.manifold_learner.hle
        return self.probs
    def fit_predict(self, x, batch_size=256, epochs=1000, loss='mse', optimizer='adam', weights=None, verbose=1, weight_id=None, patience=None):
        """Train the autoencoder, fit the manifold learner and return cluster
        assignments in one call (same arguments as ``fit``)."""
        self.autoencoder.fit(x=x, batch_size=batch_size, epochs=epochs, loss=loss, optimizer=optimizer, weights=weights, verbose=verbose, weight_id=weight_id, patience=patience)
        hl = self.encoder.predict(x)
        self.preds = self.manifold_learner.fit_predict(hl)
        self.hle = self.manifold_learner.hle
        return self.preds
    def assess(self, y):
        """Score the stored predictions against true labels ``y``; returns
        (accuracy, NMI, ARI), each rounded to 5 decimals."""
        y = np.asarray(y)
        acc = np.round(cluster_acc(y, self.preds), 5)
        nmi = np.round(metrics.normalized_mutual_info_score(y, self.preds), 5)
        ari = np.round(metrics.adjusted_rand_score(y, self.preds), 5)
        return (acc, nmi, ari)
    def visualize(self, y, names, n_clusters=10):
        """Plot the learned embedding coloured by true labels and by
        predicted clusters.

        Parameters
        ----------
        y : array-like
            true numeric labels
        names : dict or None
            mapping from numeric label to display name
        n_clusters : int
            number of clusters (controls the plot palette size)
        """
        y = np.asarray(y)
        y_pred = np.asarray(self.preds)
        hle = self.hle
        plot(hle, y, 'n2d', names, n_clusters=n_clusters)
        # re-map predicted cluster ids to best match the true labels for display
        (y_pred_viz, _, _) = best_cluster_fit(y, y_pred)
        plot(hle, y_pred_viz, 'n2d-predicted', names, n_clusters=n_clusters)
|
def save_n2d(obj, encoder_id, manifold_id):
    """Save a fitted n2d object to disk.

    The encoder is written via its own ``save`` method and the manifold
    learner/clusterer is pickled to a separate file.

    Parameters
    ----------
    obj : n2d
        the fitted n2d object
    encoder_id : str
        path to save the encoder under
    manifold_id : str
        path to save the manifold learner under
    """
    obj.encoder.save(encoder_id)
    # context manager ensures the file handle is closed even if pickling
    # fails (the original left the handle from open(...) unclosed)
    with open(manifold_id, 'wb') as manifold_file:
        pickle.dump(obj.manifold_learner, manifold_file)
|
def load_n2d(encoder_id, manifold_id):
    """Load a fitted n2d object from disk.

    Note: the returned object cannot be trained further; only ``.predict``
    (and ``.predict_proba``) behave correctly.

    Parameters
    ----------
    encoder_id : str
        path the encoder was saved under
    manifold_id : str
        path the manifold learner/clusterer was saved under
    """
    # context manager closes the pickle file handle deterministically
    with open(manifold_id, 'rb') as manifold_file:
        man = pickle.load(manifold_file)
    # Build the object without running __init__: the constructor expects a
    # full autoencoder (it dereferences autoencoder.encoder), so the previous
    # n2d(10, man) call raised AttributeError on the placeholder int.
    out = n2d.__new__(n2d)
    out.autoencoder = None
    out.manifold_learner = man
    out.clusterer = man.cluster_manifold
    out.manifolder = man.manifold_in_embedding
    out.encoder = load_model(encoder_id, compile=False)
    out.preds = None
    out.probs = None
    out.hle = None
    return out
|
class manifold_cluster_generator(N2D.UmapGMM):
    """Generic manifold-learner + clusterer combination.

    Pairs an arbitrary manifold learner (e.g. UMAP) with an arbitrary
    clusterer, falling back to the N2D.UmapGMM implementation whenever the
    clusterer supports ``predict_proba`` (i.e. is probabilistic).

    Parameters
    ----------
    manifold_class : class
        manifold learner class (instantiated with **manifold_args)
    manifold_args : dict
        keyword arguments for the manifold learner
    cluster_class : class
        clusterer class (instantiated with **cluster_args)
    cluster_args : dict
        keyword arguments for the clusterer
    """
    def __init__(self, manifold_class, manifold_args, cluster_class, cluster_args):
        self.manifold_in_embedding = manifold_class(**manifold_args)
        self.cluster_manifold = cluster_class(**cluster_args)
        # probabilistic clusterers (e.g. GMM) expose a callable predict_proba
        proba = getattr(self.cluster_manifold, 'predict_proba', None)
        self.proba = callable(proba)
        # manifold embedding of the most recent fit, set by fit_predict
        self.hle = None
    def fit(self, hl):
        """Fit manifold learner and clusterer on the embedding ``hl``."""
        super().fit(hl)
    def predict(self, hl):
        """Return hard cluster assignments for embedding ``hl``."""
        if self.proba:
            # BUG FIX: result was previously computed but not returned
            return super().predict(hl)
        manifold = self.manifold_in_embedding.transform(hl)
        y_pred = self.cluster_manifold.predict(manifold)
        return np.asarray(y_pred)
    def fit_predict(self, hl):
        """Fit on ``hl`` and return hard cluster assignments."""
        if self.proba:
            # BUG FIX: result was previously computed but not returned
            return super().fit_predict(hl)
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        y_pred = self.cluster_manifold.fit_predict(self.hle)
        return np.asarray(y_pred)
    def predict_proba(self, hl):
        """Return soft assignments, or warn if the clusterer cannot."""
        if self.proba:
            # BUG FIX: result was previously computed but not returned
            return super().predict_proba(hl)
        print('Your clusterer cannot predict probabilities')
|
class autoencoder_generator(N2D.AutoEncoder):
    """Wrap a pre-built Keras tensor triple as an N2D-compatible autoencoder.

    ``model_levels`` is (input_tensor, embedding_tensor, output_tensor): the
    full model maps input -> output, the encoder maps input -> embedding.
    ``x_lambda`` optionally pre-transforms the input data (identity by default).
    """
    def __init__(self, model_levels=(), x_lambda=(lambda x: x)):
        (inputs, embedding, outputs) = (model_levels[0], model_levels[1], model_levels[2])
        self.Model = Model(inputs, outputs)
        self.encoder = Model(inputs, embedding)
        self.x_lambda = x_lambda
    def fit(self, x, batch_size, epochs, loss, optimizer, weights, verbose, weight_id, patience):
        """Delegate training to the parent N2D AutoEncoder implementation."""
        super().fit(x, batch_size, epochs, loss, optimizer,
                    weights, verbose, weight_id, patience)
|
class SetTransformer(nn.Module):
    """Set Transformer: permutation-invariant ISAB encoder followed by a
    dropout + PMA attention-pooling decoder mapping a set of
    ``dim_input``-dim elements to ``num_outputs`` x ``dim_output`` values."""
    def __init__(self, dim_input=3, num_outputs=1, dim_output=40, num_inds=32, dim_hidden=128, num_heads=4, ln=False):
        super(SetTransformer, self).__init__()
        self.enc = nn.Sequential(ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln), ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln))
        self.dec = nn.Sequential(nn.Dropout(), PMA(dim_hidden, num_heads, num_outputs, ln=ln), nn.Dropout(), nn.Linear(dim_hidden, dim_output))
    def forward(self, X):
        # NOTE(review): bare .squeeze() drops EVERY singleton dim, including
        # the batch dim when batch size is 1 -- confirm callers expect this
        return self.dec(self.enc(X)).squeeze()
|
def gen_data(batch_size, max_length=10, test=False):
    """Generate a random max-of-set regression batch.

    Every set in the batch shares one random length drawn from
    [1, max_length]; elements are integers in [1, 100) and the target is the
    per-set maximum. The ``test`` flag is accepted for API compatibility but
    is unused.

    Returns (x, y) with shapes (batch_size, length, 1) and (batch_size, 1).
    """
    set_size = np.random.randint(1, (max_length + 1))
    values = np.random.randint(1, 100, (batch_size, set_size))
    targets = np.max(values, axis=1)
    # append trailing feature / target axes expected by the models
    return (values[:, :, None], targets[:, None])
|
class SmallDeepSet(nn.Module):
    """DeepSets baseline for the max-of-set task: per-element MLP encoder,
    permutation-invariant pooling over the set dimension, then an MLP head.

    pool: one of 'max', 'mean', 'sum'; any other value skips pooling
    (preserved from the original fall-through behaviour).
    """
    def __init__(self, pool='max'):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(in_features=1, out_features=64), nn.ReLU(), nn.Linear(in_features=64, out_features=64), nn.ReLU(), nn.Linear(in_features=64, out_features=64), nn.ReLU(), nn.Linear(in_features=64, out_features=64))
        self.dec = nn.Sequential(nn.Linear(in_features=64, out_features=64), nn.ReLU(), nn.Linear(in_features=64, out_features=1))
        self.pool = pool
    def forward(self, x):
        feats = self.enc(x)
        # reduce over the set dimension according to the configured pool
        if self.pool == 'mean':
            feats = feats.mean(dim=1)
        elif self.pool == 'sum':
            feats = feats.sum(dim=1)
        elif self.pool == 'max':
            feats = feats.max(dim=1).values
        return self.dec(feats)
|
class SmallSetTransformer(nn.Module):
    """Compact Set Transformer for the max-of-set regression task: two SAB
    self-attention blocks, then PMA pooling to a single scalar per set."""
    def __init__(self):
        super().__init__()
        self.enc = nn.Sequential(SAB(dim_in=1, dim_out=64, num_heads=4), SAB(dim_in=64, dim_out=64, num_heads=4))
        self.dec = nn.Sequential(PMA(dim=64, num_heads=4, num_seeds=1), nn.Linear(in_features=64, out_features=1))
    def forward(self, x):
        x = self.enc(x)
        x = self.dec(x)
        # drop only the trailing singleton feature dim: (B, 1, 1) -> (B, 1)
        return x.squeeze((- 1))
|
def train(model):
    """Train ``model`` on the max-of-set task for 500 steps on GPU.

    Uses Adam (lr 1e-4) with L1 loss on freshly generated batches of 1024
    sets per step; returns the per-step loss history.
    """
    model = model.cuda()
    adam = torch.optim.Adam(model.parameters(), lr=0.0001)
    l1 = nn.L1Loss().cuda()
    history = []
    for _ in range(500):
        # fresh random batch every step
        inputs, targets = gen_data(batch_size=(2 ** 10), max_length=10)
        inputs = torch.from_numpy(inputs).float().cuda()
        targets = torch.from_numpy(targets).float().cuda()
        step_loss = l1(model(inputs), targets)
        adam.zero_grad()
        step_loss.backward()
        adam.step()
        history.append(step_loss.item())
    return history
|
class MultivariateNormal(object):
    """Abstract interface for a (batched) multivariate-normal family used by
    MixtureOfMVNs; concrete subclasses implement sampling and likelihood."""
    def __init__(self, dim):
        # dimensionality of each observation
        self.dim = dim
    def sample(self, B, K, labels):
        """Sample B sets from K components according to per-point labels;
        returns (X, params)."""
        raise NotImplementedError
    def log_prob(self, X, params):
        """Per-point, per-component log-density of X under params (the
        component axis is last, as consumed by MixtureOfMVNs.log_prob)."""
        raise NotImplementedError
    def stats(self):
        """Return component statistics (means / covariances) for plotting."""
        raise NotImplementedError
    def parse(self, raw):
        """Parse raw network output into distribution parameters."""
        raise NotImplementedError
|
class MixtureOfMVNs(object):
    """Mixture of multivariate normals built on a ``MultivariateNormal``
    component implementation: sampling, log-likelihood and plotting."""
    def __init__(self, mvn):
        self.mvn = mvn
    def sample(self, B, N, K, return_gt=False):
        """Sample B sets of N points from K-component mixtures.

        Mixture weights come from a flat Dirichlet per set; component
        sampling is delegated to the underlying mvn. With ``return_gt``,
        also returns labels, weights and component parameters.
        """
        device = ('cpu' if (not torch.cuda.is_available()) else torch.cuda.current_device())
        pi = Dirichlet(torch.ones(K)).sample(torch.Size([B])).to(device)
        labels = Categorical(probs=pi).sample(torch.Size([N])).to(device)
        # Categorical samples come out (N, B); reorder to (B, N)
        labels = labels.transpose(0, 1).contiguous()
        (X, params) = self.mvn.sample(B, K, labels)
        if return_gt:
            return (X, labels, pi, params)
        else:
            return X
    def log_prob(self, X, pi, params, return_labels=False):
        """Mean mixture log-likelihood of X; optionally also the argmax
        component label per point."""
        ll = self.mvn.log_prob(X, params)
        # add log mixture weights; 1e-10 guards against log(0)
        ll = (ll + (pi + 1e-10).log().unsqueeze((- 2)))
        if return_labels:
            labels = ll.argmax((- 1))
            return (ll.logsumexp((- 1)).mean(), labels)
        else:
            return ll.logsumexp((- 1)).mean()
    def plot(self, X, labels, params, axes):
        """Scatter each set on its axis with fitted component ellipses."""
        (mu, cov) = self.mvn.stats(params)
        for (i, ax) in enumerate(axes.flatten()):
            scatter_mog(X[i].cpu().data.numpy(), labels[i].cpu().data.numpy(), mu[i].cpu().data.numpy(), cov[i].cpu().data.numpy(), ax=ax)
            ax.set_xticks([])
            ax.set_yticks([])
        plt.subplots_adjust(hspace=0.1, wspace=0.1)
    def parse(self, raw):
        """Delegate parsing of raw network output into (pi, params)."""
        return self.mvn.parse(raw)
|
class DeepSet(nn.Module):
    """DeepSets network: per-element MLP encoder, mean pooling over the set
    dimension, and an MLP decoder producing num_outputs x dim_output values
    per set."""
    def __init__(self, dim_input, num_outputs, dim_output, dim_hidden=128):
        super(DeepSet, self).__init__()
        self.num_outputs = num_outputs
        self.dim_output = dim_output
        self.enc = nn.Sequential(
            nn.Linear(dim_input, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden))
        self.dec = nn.Sequential(
            nn.Linear(dim_hidden, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden), nn.ReLU(),
            nn.Linear(dim_hidden, (num_outputs * dim_output)))
    def forward(self, X):
        # mean over the set dimension makes the network permutation-invariant
        pooled = self.enc(X).mean((- 2))
        decoded = self.dec(pooled)
        return decoded.reshape((- 1), self.num_outputs, self.dim_output)
|
class SetTransformer(nn.Module):
    """Set Transformer for amortized set-level inference: ISAB encoder, then
    PMA pooling to ``num_outputs`` vectors refined by two SAB blocks and a
    final linear projection to ``dim_output``."""
    def __init__(self, dim_input, num_outputs, dim_output, num_inds=32, dim_hidden=128, num_heads=4, ln=False):
        super(SetTransformer, self).__init__()
        self.enc = nn.Sequential(ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln), ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln))
        self.dec = nn.Sequential(PMA(dim_hidden, num_heads, num_outputs, ln=ln), SAB(dim_hidden, dim_hidden, num_heads, ln=ln), SAB(dim_hidden, dim_hidden, num_heads, ln=ln), nn.Linear(dim_hidden, dim_output))
    def forward(self, X):
        # output shape: (batch, num_outputs, dim_output)
        return self.dec(self.enc(X))
|
class MAB(nn.Module):
    """Multihead Attention Block (Set Transformer building block).

    Computes multihead attention of Q over (K, V) with a residual
    connection, followed by a row-wise feed-forward layer; both stages use
    optional LayerNorm (the ``ln`` flag).
    """
    def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
        super(MAB, self).__init__()
        self.dim_V = dim_V
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_Q, dim_V)
        self.fc_k = nn.Linear(dim_K, dim_V)
        self.fc_v = nn.Linear(dim_K, dim_V)
        # LayerNorms only exist when ln=True; forward() probes via getattr
        if ln:
            self.ln0 = nn.LayerNorm(dim_V)
            self.ln1 = nn.LayerNorm(dim_V)
        self.fc_o = nn.Linear(dim_V, dim_V)
    def forward(self, Q, K):
        Q = self.fc_q(Q)
        (K, V) = (self.fc_k(K), self.fc_v(K))
        # split heads along the feature dim and stack them onto the batch dim
        dim_split = (self.dim_V // self.num_heads)
        Q_ = torch.cat(Q.split(dim_split, 2), 0)
        K_ = torch.cat(K.split(dim_split, 2), 0)
        V_ = torch.cat(V.split(dim_split, 2), 0)
        # scaled dot-product attention (scaled by sqrt of the FULL dim_V,
        # not the per-head dim)
        A = torch.softmax((Q_.bmm(K_.transpose(1, 2)) / math.sqrt(self.dim_V)), 2)
        # residual add, then re-assemble heads back onto the feature dim
        O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
        O = (O if (getattr(self, 'ln0', None) is None) else self.ln0(O))
        # position-wise feed-forward with residual connection
        O = (O + F.relu(self.fc_o(O)))
        O = (O if (getattr(self, 'ln1', None) is None) else self.ln1(O))
        return O
|
class SAB(nn.Module):
    """Set Attention Block: self-attention of a set, i.e. MAB(X, X)."""
    def __init__(self, dim_in, dim_out, num_heads, ln=False):
        super(SAB, self).__init__()
        self.mab = MAB(dim_in, dim_in, dim_out, num_heads, ln=ln)
    def forward(self, X):
        return self.mab(X, X)
|
class ISAB(nn.Module):
    """Induced Set Attention Block: attention routed through ``num_inds``
    learned inducing points instead of full pairwise self-attention."""
    def __init__(self, dim_in, dim_out, num_heads, num_inds, ln=False):
        super(ISAB, self).__init__()
        # learned inducing points, broadcast over the batch in forward()
        self.I = nn.Parameter(torch.Tensor(1, num_inds, dim_out))
        nn.init.xavier_uniform_(self.I)
        self.mab0 = MAB(dim_out, dim_in, dim_out, num_heads, ln=ln)
        self.mab1 = MAB(dim_in, dim_out, dim_out, num_heads, ln=ln)
    def forward(self, X):
        # inducing points attend to the input set, then the set attends to H
        H = self.mab0(self.I.repeat(X.size(0), 1, 1), X)
        return self.mab1(X, H)
|
class PMA(nn.Module):
    """Pooling by Multihead Attention: ``num_seeds`` learned seed vectors
    attend to the set, pooling it to a fixed-size output."""
    def __init__(self, dim, num_heads, num_seeds, ln=False):
        super(PMA, self).__init__()
        # learned seed vectors, broadcast over the batch in forward()
        self.S = nn.Parameter(torch.Tensor(1, num_seeds, dim))
        nn.init.xavier_uniform_(self.S)
        self.mab = MAB(dim, dim, dim, num_heads, ln=ln)
    def forward(self, X):
        return self.mab(self.S.repeat(X.size(0), 1, 1), X)
|
def generate_benchmark():
    """Sample a fixed evaluation benchmark of mixture-of-Gaussians sets and
    save it, together with the oracle log-likelihood, to ``benchfile``.

    NOTE(review): relies on module-level globals (args, N_min, N_max, B, K,
    mog, benchfile) defined elsewhere in the file.
    """
    if (not os.path.isdir('benchmark')):
        os.makedirs('benchmark')
    # one random set size per benchmark batch
    N_list = np.random.randint(N_min, N_max, args.num_bench)
    data = []
    ll = 0.0
    for N in tqdm(N_list):
        (X, labels, pi, params) = mog.sample(B, N, K, return_gt=True)
        # accumulate ground-truth (oracle) log-likelihood for comparison
        ll += mog.log_prob(X, pi, params).item()
        data.append(X)
    bench = [data, (ll / args.num_bench)]
    torch.save(bench, benchfile)
|
def train():
    """Train ``net`` to predict mixture parameters by maximizing the mixture
    log-likelihood of freshly sampled sets; logs and checkpoints periodically.

    NOTE(review): driven by module-level globals (args, net, mog, mvn, B, K,
    N_min, N_max, save_dir, benchfile, optim) defined elsewhere in the file.
    """
    if (not os.path.isdir(save_dir)):
        os.makedirs(save_dir)
    # lazily create the fixed evaluation benchmark on first run
    if (not os.path.isfile(benchfile)):
        generate_benchmark()
    bench = torch.load(benchfile)
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(args.run_name)
    logger.addHandler(logging.FileHandler(os.path.join(save_dir, (('train_' + time.strftime('%Y%m%d-%H%M')) + '.log')), mode='w'))
    logger.info((str(args) + '\n'))
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    tick = time.time()
    for t in range(1, (args.num_steps + 1)):
        # decay the learning rate 10x once, at the halfway point
        if (t == int((0.5 * args.num_steps))):
            optimizer.param_groups[0]['lr'] *= 0.1
        net.train()
        optimizer.zero_grad()
        # each step trains on a batch of sets with a fresh random size
        N = np.random.randint(N_min, N_max)
        X = mog.sample(B, N, K)
        ll = mog.log_prob(X, *mvn.parse(net(X)))
        # maximize log-likelihood == minimize its negation
        loss = (- ll)
        loss.backward()
        optimizer.step()
        if ((t % args.test_freq) == 0):
            line = 'step {}, lr {:.3e}, '.format(t, optimizer.param_groups[0]['lr'])
            line += test(bench, verbose=False)
            line += ' ({:.3f} secs)'.format((time.time() - tick))
            tick = time.time()
            logger.info(line)
        if ((t % args.save_freq) == 0):
            torch.save({'state_dict': net.state_dict()}, os.path.join(save_dir, 'model.tar'))
    # final checkpoint after the last step (also saved when num_steps is not
    # a multiple of save_freq)
    torch.save({'state_dict': net.state_dict()}, os.path.join(save_dir, 'model.tar'))
|
def test(bench, verbose=True):
    """Evaluate ``net`` on a pre-generated benchmark and return a summary
    line comparing the model's average log-likelihood to the oracle value.

    NOTE(review): uses globals (net, mog, mvn, args, save_dir); every verbose
    call adds another FileHandler to the same logger, so repeated calls write
    duplicate lines -- consider hoisting the handler setup.
    """
    net.eval()
    (data, oracle_ll) = bench
    avg_ll = 0.0
    for X in data:
        X = X.cuda()
        avg_ll += mog.log_prob(X, *mvn.parse(net(X))).item()
    avg_ll /= len(data)
    line = 'test ll {:.4f} (oracle {:.4f})'.format(avg_ll, oracle_ll)
    if verbose:
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(args.run_name)
        logger.addHandler(logging.FileHandler(os.path.join(save_dir, 'test.log'), mode='w'))
        logger.info(line)
    return line
|
def plot():
    """Sample one batch of sets, run ``net`` and plot the predicted clusters.

    NOTE(review): uses globals (net, mog, mvn, B, K, N_min, N_max).
    """
    net.eval()
    X = mog.sample(B, np.random.randint(N_min, N_max), K)
    (pi, params) = mvn.parse(net(X))
    # labels = per-point argmax component under the predicted mixture
    (ll, labels) = mog.log_prob(X, pi, params, return_labels=True)
    (fig, axes) = plt.subplots(2, (B // 2), figsize=(((7 * B) // 5), 5))
    mog.plot(X, labels, params, axes)
    plt.show()
|
class Logger():
    'Writes results of training/testing'
    @classmethod
    def initialize(cls, args):
        """Create the log directory, configure file + console logging and the
        TensorBoard writer, then dump all CLI arguments."""
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logpath = args.logpath
        # NOTE(review): cls.logpath is a *directory* whose name ends in '.log'
        cls.logpath = os.path.join('logs', ((logpath + logtime) + '.log'))
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)
        logging.basicConfig(filemode='w', filename=os.path.join(cls.logpath, 'log.txt'), level=logging.INFO, format='%(message)s', datefmt='%m-%d %H:%M:%S')
        # mirror the file log to the console
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))
        logging.info('\n+=========== Dynamic Hyperpixel Flow ============+')
        for arg_key in args.__dict__:
            logging.info(('| %20s: %-24s |' % (arg_key, str(args.__dict__[arg_key]))))
        logging.info('+================================================+\n')
    @classmethod
    def info(cls, msg):
        'Writes message to .txt'
        logging.info(msg)
    @classmethod
    def save_model(cls, model, epoch, val_pck):
        """Checkpoint the model, overwriting the previous best."""
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'best_model.pt'))
        cls.info(('Model saved @%d w/ val. PCK: %5.2f.\n' % (epoch, val_pck)))
    @classmethod
    def visualize_selection(cls, catwise_sel):
        'Visualize (class-wise) layer selection frequency'
        # fixed category display order per benchmark
        if (cls.benchmark == 'pfpascal'):
            sort_ids = [17, 8, 10, 19, 4, 15, 0, 3, 6, 5, 18, 13, 1, 14, 12, 2, 11, 7, 16, 9]
        elif (cls.benchmark == 'pfwillow'):
            sort_ids = np.arange(10)
        elif (cls.benchmark == 'caltech'):
            sort_ids = np.arange(101)
        elif (cls.benchmark == 'spair'):
            sort_ids = np.arange(18)
        # NOTE(review): an unrecognized benchmark leaves sort_ids undefined
        # and raises NameError below
        for key in catwise_sel:
            # average the per-pair selection vectors for this category
            catwise_sel[key] = torch.stack(catwise_sel[key]).mean(dim=0).cpu().numpy()
        category = np.array(list(catwise_sel.keys()))[sort_ids]
        values = np.array(list(catwise_sel.values()))[sort_ids]
        cols = list(range(values.shape[1]))
        df = pd.DataFrame(values, index=category, columns=cols)
        plt.pcolor(df, vmin=0.0, vmax=1.0)
        plt.gca().set_aspect('equal')
        plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
        plt.xticks(np.arange(0.5, len(df.columns), 5), df.columns[::5])
        plt.tight_layout()
        plt.savefig(('%s/selected_layers.jpg' % cls.logpath))
|
class AverageMeter():
    'Stores loss, evaluation results, selected layers'
    def __init__(self, benchamrk):
        """Constructor of AverageMeter.

        benchamrk : str (sic -- parameter-name typo kept for compatibility)
            benchmark name; 'caltech' tracks LT-ACC/IoU, all others track PCK
        """
        if (benchamrk == 'caltech'):
            self.buffer_keys = ['ltacc', 'iou']
        else:
            self.buffer_keys = ['pck']
        # per-metric evaluation buffers
        self.buffer = {}
        for key in self.buffer_keys:
            self.buffer[key] = []
        # per-category layer-selection buffer
        self.sel_buffer = {}
        self.loss_buffer = []
    def update(self, eval_result, layer_sel, category, loss=None):
        """Accumulate one batch of evaluation results and layer selections."""
        for key in self.buffer_keys:
            self.buffer[key] += eval_result[key]
        for (sel, cls) in zip(layer_sel, category):
            if (self.sel_buffer.get(cls) is None):
                self.sel_buffer[cls] = []
            self.sel_buffer[cls] += [sel]
        if (loss is not None):
            self.loss_buffer.append(loss)
    def write_result(self, split, epoch=(- 1)):
        """Log averaged loss and metrics for an entire split."""
        msg = ('\n*** %s ' % split)
        msg += (('[@Epoch %02d] ' % epoch) if (epoch > (- 1)) else '')
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %5.2f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('%s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        msg += '***\n'
        Logger.info(msg)
    def write_process(self, batch_idx, datalen, epoch=(- 1)):
        """Log the latest loss and running averages for the current batch."""
        msg = (('[Epoch: %02d] ' % epoch) if (epoch > (- 1)) else '')
        msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        if (len(self.loss_buffer) > 0):
            msg += ('Loss: %6.2f ' % self.loss_buffer[(- 1)])
            msg += ('Avg Loss: %6.5f ' % (sum(self.loss_buffer) / len(self.loss_buffer)))
        for key in self.buffer_keys:
            msg += ('Avg %s: %6.2f ' % (key.upper(), (sum(self.buffer[key]) / len(self.buffer[key]))))
        Logger.info(msg)
|
class SupervisionStrategy(ABC):
    """Abstract supervision strategy: defines how training pairs are formed,
    which correlation matrices are exposed, and how the loss is computed."""
    @abstractmethod
    def get_image_pair(self, batch, *args):
        """Return the (src, trg) image tensors to forward for this strategy."""
        pass
    @abstractmethod
    def get_correlation(self, correlation_matrix):
        """Return the (detached) correlation matrices used downstream."""
        pass
    @abstractmethod
    def compute_loss(self, correlation_matrix, *args):
        """Compute the training loss for this supervision regime."""
        pass
|
class StrongSupStrategy(SupervisionStrategy):
    """Strongly-supervised training: uses ground-truth keypoint matches."""
    def get_image_pair(self, batch, *args):
        'Returns (semantically related) pairs for strongly-supervised training'
        return (batch['src_img'], batch['trg_img'])
    def get_correlation(self, correlation_matrix):
        "Returns correlation matrices of 'ALL PAIRS' in a batch"
        return correlation_matrix.clone().detach()
    def compute_loss(self, correlation_matrix, *args):
        'Strongly-supervised matching loss (L_{match})'
        # args layout: (match dict, layer selection, batch)
        easy_match = args[0]['easy_match']
        hard_match = args[0]['hard_match']
        layer_sel = args[1]
        batch = args[2]
        loss_cre = Objective.weighted_cross_entropy(correlation_matrix, easy_match, hard_match, batch)
        loss_sel = Objective.layer_selection_loss(layer_sel)
        loss_net = (loss_cre + loss_sel)
        return loss_net
|
class WeakSupStrategy(SupervisionStrategy):
    """Weakly-supervised training: only image-level (category) labels."""
    def get_image_pair(self, batch, *args):
        'Forms positive/negative image pairs for weakly-supervised training'
        training = args[0]
        self.bsz = len(batch['src_img'])
        if training:
            # pair each source with the next sample's target to form
            # candidate negatives, keeping only cross-category pairs
            shifted_idx = np.roll(np.arange(self.bsz), (- 1))
            trg_img_neg = batch['trg_img'][shifted_idx].clone()
            trg_cls_neg = batch['category_id'][shifted_idx].clone()
            neg_subidx = ((batch['category_id'] - trg_cls_neg) != 0)
            # negatives are appended after the bsz positive pairs
            src_img = torch.cat([batch['src_img'], batch['src_img'][neg_subidx]], dim=0)
            trg_img = torch.cat([batch['trg_img'], trg_img_neg[neg_subidx]], dim=0)
            self.num_negatives = neg_subidx.sum()
        else:
            (src_img, trg_img) = (batch['src_img'], batch['trg_img'])
            self.num_negatives = 0
        return (src_img, trg_img)
    def get_correlation(self, correlation_matrix):
        "Returns correlation matrices of 'POSITIVE PAIRS' in a batch"
        return correlation_matrix[:self.bsz].clone().detach()
    def compute_loss(self, correlation_matrix, *args):
        'Weakly-supervised matching loss (L_{match})'
        layer_sel = args[1]
        # minimize entropy on positives relative to negatives (ratio form);
        # with no negatives the denominator degrades to 1.0
        loss_pos = Objective.information_entropy(correlation_matrix[:self.bsz])
        loss_neg = (Objective.information_entropy(correlation_matrix[self.bsz:]) if (self.num_negatives > 0) else 1.0)
        loss_sel = Objective.layer_selection_loss(layer_sel)
        loss_net = ((loss_pos / loss_neg) + loss_sel)
        return loss_net
|
def fix_randseed(seed):
    'Fixes random seed for reproducibility'
    # seed every RNG the project touches: python, numpy and torch (cpu + cuda)
    seeders = (random.seed, np.random.seed, torch.manual_seed,
               torch.cuda.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
    # trade cudnn autotuning for deterministic kernel selection
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
|
def mean(x):
    'Computes average of a list'
    # empty input yields 0.0 instead of raising ZeroDivisionError
    if len(x) == 0:
        return 0.0
    return sum(x) / len(x)
|
def where(predicate):
    'Predicate must be a condition on nd-tensor'
    # indices of nonzero entries; when nonempty, transpose-and-squeeze turns
    # the (n, 1) index column of a 1-d predicate into a flat (n,) tensor.
    # An empty result is returned untouched, mirroring the original.
    matching_indices = predicate.nonzero()
    if len(matching_indices) == 0:
        return matching_indices
    return matching_indices.t().squeeze(0)
|
class CorrespondenceDataset(Dataset):
    'Parent class of PFPascal, PFWillow, Caltech, and SPair'
    def __init__(self, benchmark, datapath, thres, device, split):
        'CorrespondenceDataset constructor'
        super(CorrespondenceDataset, self).__init__()
        # benchmark -> (base dir, split-file spec, image dir, annotation dir,
        # default PCK threshold type)
        self.metadata = {'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'), 'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'), 'caltech': ('Caltech-101', 'test_pairs_caltech_with_category.csv', '101_ObjectCategories', '', ''), 'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')}
        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        # per-benchmark split-file layout
        if (benchmark == 'pfpascal'):
            self.spt_path = os.path.join(base_path, (split + '_pairs.csv'))
        elif (benchmark == 'spair'):
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], (split + '.txt'))
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        # SPair keeps annotations in per-split subdirectories
        if (benchmark == 'spair'):
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])
        # maximum keypoints per sample; samples are padded up to this count
        if (benchmark == 'caltech'):
            self.max_pts = 400
        else:
            self.max_pts = 40
        self.split = split
        self.device = device
        # all images (and keypoints) are resized to imside x imside
        self.imside = 240
        self.benchmark = benchmark
        self.range_ts = torch.arange(self.max_pts)
        self.thres = (self.metadata[benchmark][4] if (thres == 'auto') else thres)
        self.transform = transforms.Compose([transforms.Resize((self.imside, self.imside)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # populated by the benchmark-specific subclasses
        self.train_data = []
        self.src_imnames = []
        self.trg_imnames = []
        self.cls = []
        self.cls_ids = []
        self.src_kps = []
        self.trg_kps = []
    def __len__(self):
        'Returns the number of pairs'
        return len(self.train_data)
    def __getitem__(self, idx):
        'Constructs and return a batch'
        batch = dict()
        batch['src_imname'] = self.src_imnames[idx]
        batch['trg_imname'] = self.trg_imnames[idx]
        batch['category_id'] = self.cls_ids[idx]
        batch['category'] = self.cls[batch['category_id']]
        src_pil = self.get_image(self.src_imnames, idx)
        trg_pil = self.get_image(self.trg_imnames, idx)
        # original (pre-resize) image sizes, needed to rescale keypoints
        batch['src_imsize'] = src_pil.size
        batch['trg_imsize'] = trg_pil.size
        batch['src_img'] = self.transform(src_pil).to(self.device)
        batch['trg_img'] = self.transform(trg_pil).to(self.device)
        (batch['src_kps'], num_pts) = self.get_points(self.src_kps, idx, src_pil.size)
        (batch['trg_kps'], _) = self.get_points(self.trg_kps, idx, trg_pil.size)
        batch['n_pts'] = torch.tensor(num_pts)
        batch['datalen'] = len(self.train_data)
        return batch
    def get_image(self, imnames, idx):
        'Reads PIL image from path'
        path = os.path.join(self.img_path, imnames[idx])
        return Image.open(path).convert('RGB')
    def get_pckthres(self, batch, imsize):
        'Computes PCK threshold'
        if (self.thres == 'bbox'):
            # threshold = longer side of the target object's bounding box
            bbox = batch['trg_bbox'].clone()
            bbox_w = (bbox[2] - bbox[0])
            bbox_h = (bbox[3] - bbox[1])
            pckthres = torch.max(bbox_w, bbox_h)
        elif (self.thres == 'img'):
            # threshold = longer side of the target image tensor
            imsize_t = batch['trg_img'].size()
            pckthres = torch.tensor(max(imsize_t[1], imsize_t[2]))
        else:
            raise Exception(('Invalid pck threshold type: %s' % self.thres))
        return pckthres.float().to(self.device)
    def get_points(self, pts_list, idx, org_imsize):
        'Returns key-points of an image with size of (240,240)'
        (xy, n_pts) = pts_list[idx].size()
        # pad with -1 columns up to max_pts so samples are batchable
        pad_pts = (torch.zeros((xy, (self.max_pts - n_pts))) - 1)
        # rescale from original image coordinates to the imside x imside frame
        x_crds = (pts_list[idx][0] * (self.imside / org_imsize[0]))
        y_crds = (pts_list[idx][1] * (self.imside / org_imsize[1]))
        kps = torch.cat([torch.stack([x_crds, y_crds]), pad_pts], dim=1).to(self.device)
        return (kps, n_pts)
    def match_idx(self, kps, n_pts):
        'Samples the nearst feature (receptive field) indices'
        nearest_idx = find_knn(Geometry.rf_center, kps.t())
        # entries at positions >= n_pts correspond to padded keypoints;
        # their index is shifted down by 1 to mark them
        nearest_idx -= (self.range_ts >= n_pts).to(self.device).long()
        return nearest_idx
|
def find_knn(db_vectors, qr_vectors):
    'Finds K-nearest neighbors (Euclidean distance)'
    # pairwise differences via broadcasting: (n_db, 1, d) - (1, n_qr, d)
    diff = (db_vectors.unsqueeze(1) - qr_vectors.unsqueeze(0))
    # euclidean distance matrix, transposed to (n_qr, n_db)
    dist = diff.pow(2).sum(2).pow(0.5).t()
    # index of the closest database vector for each query
    return dist.min(dim=1)[1]
|
def load_dataset(benchmark, datapath, thres, device, split='test'):
    'Instantiates desired correspondence dataset'
    # registry of supported benchmarks -> dataset class
    correspondence_benchmark = {'pfpascal': pfpascal.PFPascalDataset, 'pfwillow': pfwillow.PFWillowDataset, 'caltech': caltech.CaltechDataset, 'spair': spair.SPairDataset}
    dataset = correspondence_benchmark.get(benchmark)
    if (dataset is None):
        raise Exception(('Invalid benchmark dataset %s.' % benchmark))
    return dataset(benchmark, datapath, thres, device, split)
|
def download_from_google(token_id, filename):
    """Download a .tar.gz from Google Drive and extract it to ``filename``.

    Handles the Drive large-file confirmation token, extracts the archive,
    removes the tarball and hoists the archive's single top-level folder so
    its contents live directly under ``filename``.

    Parameters
    ----------
    token_id : str
        Google Drive file id
    filename : str
        destination directory path (also names the expected archive root)
    """
    print(('Downloading %s ...' % os.path.basename(filename)))
    url = 'https://docs.google.com/uc?export=download'
    destination = (filename + '.tar.gz')
    session = requests.Session()
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    # large files require a second request carrying the confirmation token
    token = get_confirm_token(response)
    if token:
        params = {'id': token_id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
    save_response_content(response, destination)
    print(('Extracting %s ...' % destination))
    # context manager closes the archive even if extraction raises
    # (the original leaked the handle on error).
    # NOTE(review): extractall on a downloaded archive is vulnerable to path
    # traversal for untrusted sources -- these are fixed project archives.
    with tarfile.open(destination, 'r:gz') as archive:
        archive.extractall(filename)
    os.remove(destination)
    # hoist the archive's top-level folder: filename/<basename>/* -> filename/*
    os.rename(filename, (filename + '_tmp'))
    os.rename(os.path.join((filename + '_tmp'), os.path.basename(filename)), filename)
    os.rmdir((filename + '_tmp'))
|
def get_confirm_token(response):
    'Retrieves confirm token'
    # Google Drive flags large downloads with a 'download_warning*' cookie;
    # return its value, or None when no such cookie exists
    return next((value for (key, value) in response.cookies.items()
                 if key.startswith('download_warning')), None)
|
def save_response_content(response, destination):
    'Saves the response to the destination'
    chunk_size = 32768
    with open(destination, 'wb') as out_file:
        # skip keep-alive chunks (empty byte strings)
        for piece in response.iter_content(chunk_size):
            if piece:
                out_file.write(piece)
|
def download_dataset(datapath, benchmark):
    'Downloads semantic correspondence benchmark dataset from Google drive'
    if (not os.path.isdir(datapath)):
        os.mkdir(datapath)
    # benchmark -> (Google Drive file id, extracted directory name)
    file_data = {'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'), 'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'), 'caltech': ('1IV0E5sJ6xSdDyIvVSTdZjPHELMwGzsMn', 'Caltech-101'), 'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k')}
    (file_id, filename) = file_data[benchmark]
    abs_filepath = os.path.join(datapath, filename)
    # skip the download when the dataset directory already exists
    if (not os.path.isdir(abs_filepath)):
        download_from_google(file_id, abs_filepath)
|
class SPairDataset(CorrespondenceDataset):
    'Inherits CorrespondenceDataset'
    def __init__(self, benchmark, datapath, thres, device, split):
        'SPair-71k dataset constructor'
        super(SPairDataset, self).__init__(benchmark, datapath, thres, device, split)
        self.train_data = open(self.spt_path).read().split('\n')
        # drop the trailing empty entry left by the file's final newline
        self.train_data = self.train_data[:(len(self.train_data) - 1)]
        # pair ids are hyphen-separated: field 1 = source image stem,
        # field 2 (before ':') = target image stem
        self.src_imnames = list(map((lambda x: (x.split('-')[1] + '.jpg')), self.train_data))
        self.trg_imnames = list(map((lambda x: (x.split('-')[2].split(':')[0] + '.jpg')), self.train_data))
        # category names = sorted image subdirectories
        self.cls = os.listdir(self.img_path)
        self.cls.sort()
        anntn_files = []
        for data_name in self.train_data:
            anntn_files.append(glob.glob(('%s/%s.json' % (self.ann_path, data_name)))[0])
        anntn_files = list(map((lambda x: json.load(open(x))), anntn_files))
        # keypoints are stored (n, 2); transpose to (2, n) as the parent expects
        self.src_kps = list(map((lambda x: torch.tensor(x['src_kps']).t().float()), anntn_files))
        self.trg_kps = list(map((lambda x: torch.tensor(x['trg_kps']).t().float()), anntn_files))
        self.src_bbox = list(map((lambda x: torch.tensor(x['src_bndbox']).float()), anntn_files))
        self.trg_bbox = list(map((lambda x: torch.tensor(x['trg_bndbox']).float()), anntn_files))
        self.cls_ids = list(map((lambda x: self.cls.index(x['category'])), anntn_files))
        # per-pair difficulty annotations (viewpoint/scale variation,
        # truncation, occlusion)
        self.vpvar = list(map((lambda x: torch.tensor(x['viewpoint_variation'])), anntn_files))
        self.scvar = list(map((lambda x: torch.tensor(x['scale_variation'])), anntn_files))
        self.trncn = list(map((lambda x: torch.tensor(x['truncation'])), anntn_files))
        self.occln = list(map((lambda x: torch.tensor(x['occlusion'])), anntn_files))
    def __getitem__(self, idx):
        'Constructs and return a batch for SPair-71k dataset'
        batch = super(SPairDataset, self).__getitem__(idx)
        batch['src_bbox'] = self.get_bbox(self.src_bbox, idx, batch['src_imsize'])
        batch['trg_bbox'] = self.get_bbox(self.trg_bbox, idx, batch['trg_imsize'])
        batch['pckthres'] = self.get_pckthres(batch, batch['trg_imsize'])
        batch['src_kpidx'] = self.match_idx(batch['src_kps'], batch['n_pts'])
        batch['trg_kpidx'] = self.match_idx(batch['trg_kps'], batch['n_pts'])
        batch['vpvar'] = self.vpvar[idx]
        batch['scvar'] = self.scvar[idx]
        batch['trncn'] = self.trncn[idx]
        batch['occln'] = self.occln[idx]
        return batch
    def get_image(self, img_names, idx):
        'Returns image tensor'
        # SPair images live under per-category subdirectories
        path = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        return Image.open(path).convert('RGB')
    def get_bbox(self, bbox_list, idx, imsize):
        'Returns object bounding-box'
        bbox = bbox_list[idx].clone()
        # rescale (x0, y0, x1, y1) to the resized imside x imside frame
        bbox[0::2] *= (self.imside / imsize[0])
        bbox[1::2] *= (self.imside / imsize[1])
        return bbox.to(self.device)
|
class Correlation():
    """Helpers for building and filtering dense correlation matrices."""
    @classmethod
    def bmm_interp(cls, src_feat, trg_feat, interp_size):
        'Performs batch-wise matrix-multiplication after interpolation'
        src = F.interpolate(src_feat, interp_size, mode='bilinear', align_corners=True)
        trg = F.interpolate(trg_feat, interp_size, mode='bilinear', align_corners=True)
        # flatten spatial dims: (B, C, HW) -> correlate as (B, HW, C) x (B, C, HW)
        src = src.view(src.size(0), src.size(1), (- 1)).transpose(1, 2)
        trg = trg.view(trg.size(0), trg.size(1), (- 1))
        return torch.bmm(src, trg)
    @classmethod
    def mutual_nn_filter(cls, correlation_matrix):
        "Mutual nearest neighbor filtering (Rocco et al. NeurIPS'18)"
        # per-row / per-column maxima (eps keeps all-zero slices divisible)
        src_max = correlation_matrix.max(dim=2, keepdim=True)[0]
        trg_max = correlation_matrix.max(dim=1, keepdim=True)[0]
        src_max[(src_max == 0)] += 1e-30
        trg_max[(trg_max == 0)] += 1e-30
        # rescale scores relative to each side's best match and combine
        ratio_src = (correlation_matrix / src_max)
        ratio_trg = (correlation_matrix / trg_max)
        return (correlation_matrix * (ratio_src * ratio_trg))
|
class Norm():
    'Vector normalization'
    @classmethod
    def feat_normalize(cls, x, interp_size):
        'L2-normalizes given 2D feature map after interpolation'
        # NOTE(review): despite the name/docstring, this returns the
        # per-location *squared L2 norm* map of shape (B, H*W); it does not
        # divide the features by anything -- verify against callers
        x = F.interpolate(x, interp_size, mode='bilinear', align_corners=True)
        return x.pow(2).sum(1).view(x.size(0), (- 1))
    @classmethod
    def l1normalize(cls, x):
        'L1-normalization'
        # rows summing to zero are left unchanged (sum replaced by 1 -> no 0/0)
        vector_sum = torch.sum(x, dim=2, keepdim=True)
        vector_sum[(vector_sum == 0)] = 1.0
        return (x / vector_sum)
    @classmethod
    def unit_gaussian_normalize(cls, x):
        'Make each (row) distribution into unit gaussian'
        # center each row, then divide by a std computed under no_grad
        # (gradients flow through the centering but not through the std)
        correlation_matrix = (x - x.mean(dim=2).unsqueeze(2).expand_as(x))
        with torch.no_grad():
            standard_deviation = correlation_matrix.std(dim=2)
            # constant rows (std == 0) are left un-scaled
            standard_deviation[(standard_deviation == 0)] = 1.0
        correlation_matrix /= standard_deviation.unsqueeze(2).expand_as(correlation_matrix)
        return correlation_matrix
|
def conv3x3(in_planes, out_planes, stride=1):
    '3x3 convolution with padding'
    # groups=2: channels are processed as two independent halves (this
    # backbone stacks two images channel-wise, cf. the 6-channel stem conv)
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, groups=2, bias=False)
|
def conv1x1(in_planes, out_planes, stride=1):
    '1x1 convolution'
    # groups=2 keeps the two channel streams separate, matching conv3x3
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, groups=2, bias=False)
|
class Bottleneck(nn.Module):
    """ResNet-style bottleneck residual block (1x1 reduce -> 3x3 -> 1x1
    expand) built from the grouped conv helpers."""
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        # stride (if any) is applied in the middle 3x3 conv
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, (planes * self.expansion))
        self.bn3 = nn.BatchNorm2d((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        # optional projection matching the identity to the residual's shape
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        # residual addition, then the final activation
        out += identity
        out = self.relu(out)
        return out
|
class Backbone(nn.Module):
    """Two-branch ('siamese') ResNet trunk built from grouped convolutions.

    Source and target images are stacked into a single 6-channel input; with
    groups=2 everywhere, one forward pass processes both images in parallel
    channel groups. No forward() is defined in this class — DynamicHPF calls
    the sub-modules (conv1, bn1, ..., layer1..layer4) directly to tap
    intermediate features. NOTE(review): avgpool/fc appear unused by the code
    in this file; fc's shape matches the torchvision checkpoint, presumably
    kept so load_state_dict succeeds — confirm before removing.
    """
    def __init__(self, block, layers, zero_init_residual=False):
        super(Backbone, self).__init__()
        # Doubled channel widths relative to standard ResNet (two groups).
        self.inplanes = 128
        # 6 input channels: concatenated src+trg RGB, split into 2 groups.
        self.conv1 = nn.Conv2d(6, 128, kernel_size=7, stride=2, padding=3, groups=2, bias=False)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0])
        self.layer2 = self._make_layer(block, 256, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), 1000)
        # Standard He init for convs, unit/zero affine for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last BN of each block so residual branches start as identity.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` bottlenecks; the first may downsample/project the shortcut."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)  # subsequent blocks take the expanded width
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
|
def resnet50(pretrained=False, **kwargs):
    """Construct the two-branch ResNet-50 backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights; every tensor except
            `fc.*` is duplicated along dim 0 so both channel groups start from
            the same pretrained filters.
    """
    model = Backbone(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    weights = model_zoo.load_url(model_urls['resnet50'])
    for key in weights:
        tensor = weights[key].clone()
        if key.split('.')[0] == 'fc':
            # fc keeps its original shape (the model's fc matches the checkpoint).
            weights[key] = tensor
        else:
            weights[key] = torch.cat([tensor, tensor.clone()], dim=0)
    model.load_state_dict(weights)
    return model
|
def resnet101(pretrained=False, **kwargs):
    """Construct the two-branch ResNet-101 backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights; every tensor except
            `fc.*` is duplicated along dim 0 so both channel groups start from
            the same pretrained filters.
    """
    model = Backbone(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    weights = model_zoo.load_url(model_urls['resnet101'])
    for key in weights:
        tensor = weights[key].clone()
        if key.split('.')[0] == 'fc':
            # fc keeps its original shape (the model's fc matches the checkpoint).
            weights[key] = tensor
        else:
            weights[key] = torch.cat([tensor, tensor.clone()], dim=0)
    model.load_state_dict(weights)
    return model
|
class DynamicHPF():
    """Dynamic Hyperpixel Flow (DHPF).

    Wraps a frozen two-branch ResNet backbone plus a trainable Gumbel
    layer-gating module (`self.learner`): for an image pair it dynamically
    selects backbone layers, accumulates a hyperimage correlation, and
    re-scores it with regularized Hough matching. Only the learner holds
    trainable parameters (see parameters()/state_dict()).
    """
    def __init__(self, backbone, device, img_side=240):
        """Constructor for DHPF.

        Args:
            backbone: 'resnet50' or 'resnet101'; anything else raises.
            device: torch device for backbone, learner and Hough matcher.
            img_side: input image side; correlations live on an
                (img_side/4) x (img_side/4) grid.
        """
        super(DynamicHPF, self).__init__()
        if (backbone == 'resnet50'):
            self.backbone = resnet.resnet50(pretrained=True).to(device)
            # Per-candidate-layer channel widths: conv1 stem + each bottleneck output.
            self.in_channels = [64, 256, 256, 256, 512, 512, 512, 512, 1024, 1024, 1024, 1024, 1024, 1024, 2048, 2048, 2048]
            nbottlenecks = [3, 4, 6, 3]
        elif (backbone == 'resnet101'):
            self.backbone = resnet.resnet101(pretrained=True).to(device)
            self.in_channels = [64, 256, 256, 256, 512, 512, 512, 512, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 2048, 2048, 2048]
            nbottlenecks = [3, 4, 23, 3]
        else:
            raise Exception(('Unavailable backbone: %s' % backbone))
        # Flattened walk over all residual blocks: per-block index within its
        # layer, and the 1-based layer index it belongs to.
        self.bottleneck_ids = reduce(add, list(map((lambda x: list(range(x))), nbottlenecks)))
        self.layer_ids = reduce(add, [([(i + 1)] * x) for (i, x) in enumerate(nbottlenecks)])
        self.backbone.eval()  # backbone stays frozen; only the gating learner trains
        self.learner = gating.GumbelFeatureSelection(self.in_channels).to(device)
        self.relu = nn.ReLU()
        # Common spatial size all layer correlations are interpolated to.
        self.upsample_size = ([int((img_side / 4))] * 2)
        Geometry.initialize(self.upsample_size, device)
        self.rhm = rhm.HoughMatching(Geometry.rfs, torch.tensor([img_side, img_side]).to(device))
    def __call__(self, *args, **kwargs):
        # args = (src_img, trg_img); returns (correlation_matrix, layer_sel).
        src_img = args[0]
        trg_img = args[1]
        (correlation_matrix, layer_sel) = self.hyperimage_correlation(src_img, trg_img)
        with torch.no_grad():
            # Geometric re-scoring via Hough matching; detached so no gradients
            # flow through the voting.
            geometric_scores = torch.stack([self.rhm.run(c.clone().detach()) for c in correlation_matrix], dim=0)
        correlation_matrix *= geometric_scores
        return (correlation_matrix, layer_sel)
    def hyperimage_correlation(self, src_img, trg_img):
        'Dynamically construct hyperimages and compute their correlations'
        layer_sel = []
        # Running sums over the selected layers (start as scalar zeros).
        (correlation, src_norm, trg_norm) = (0, 0, 0)
        # Both images ride through the backbone as one 6-channel tensor.
        pair_img = torch.cat([src_img, trg_img], dim=1)
        with torch.no_grad():
            # Frozen stem: conv1 -> bn1 -> relu -> maxpool.
            feat = self.backbone.conv1.forward(pair_img)
            feat = self.backbone.bn1.forward(feat)
            feat = self.backbone.relu.forward(feat)
            feat = self.backbone.maxpool.forward(feat)
            # Split the grouped channels back into src / trg halves.
            src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
            trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
        # Layer-0 "base" correlation kept as a fallback when no layer is selected.
        base_src_feat = self.learner.reduction_ffns[0](src_feat)
        base_trg_feat = self.learner.reduction_ffns[0](trg_feat)
        base_correlation = Correlation.bmm_interp(base_src_feat, base_trg_feat, self.upsample_size)
        base_src_norm = Norm.feat_normalize(base_src_feat, self.upsample_size)
        base_trg_norm = Norm.feat_normalize(base_trg_feat, self.upsample_size)
        # Gated contribution of the stem (layer id 0): None when not selected.
        (src_feat, trg_feat, lsel) = self.learner(0, src_feat, trg_feat)
        if ((src_feat is not None) and (trg_feat is not None)):
            correlation += Correlation.bmm_interp(src_feat, trg_feat, self.upsample_size)
            src_norm += Norm.feat_normalize(src_feat, self.upsample_size)
            trg_norm += Norm.feat_normalize(trg_feat, self.upsample_size)
        layer_sel.append(lsel)
        # Walk every bottleneck of every residual layer.
        for (hid, (bid, lid)) in enumerate(zip(self.bottleneck_ids, self.layer_ids)):
            with torch.no_grad():
                res = feat
                # Manually unrolled bottleneck (conv1..bn3) so features can be
                # tapped BEFORE the final relu, matching hyperpixel extraction.
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv1.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn1.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv2.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn2.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].conv3.forward(feat)
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].bn3.forward(feat)
                if (bid == 0):
                    # First block of each layer projects the shortcut.
                    res = self.backbone.__getattr__(('layer%d' % lid))[bid].downsample.forward(res)
                feat += res
                src_feat = feat.narrow(1, 0, (feat.size(1) // 2)).clone()
                trg_feat = feat.narrow(1, (feat.size(1) // 2), (feat.size(1) // 2)).clone()
            # Gated contribution of hyperlayer hid+1.
            (src_feat, trg_feat, lsel) = self.learner((hid + 1), src_feat, trg_feat)
            if ((src_feat is not None) and (trg_feat is not None)):
                correlation += Correlation.bmm_interp(src_feat, trg_feat, self.upsample_size)
                src_norm += Norm.feat_normalize(src_feat, self.upsample_size)
                trg_norm += Norm.feat_normalize(trg_feat, self.upsample_size)
            layer_sel.append(lsel)
            with torch.no_grad():
                # Post-shortcut relu feeds the next bottleneck.
                feat = self.backbone.__getattr__(('layer%d' % lid))[bid].relu.forward(feat)
        # (batch, n_layers) selection mask.
        layer_sel = torch.stack(layer_sel).t()
        # Fallback: samples for which NO layer was selected use the base layer.
        if ((layer_sel.sum(dim=1) == 0).sum() > 0):
            empty_sel = (layer_sel.sum(dim=1) == 0).nonzero().view((- 1)).long()
            if (src_img.size(0) == 1):
                correlation = base_correlation
                src_norm = base_src_norm
                trg_norm = base_trg_norm
            else:
                correlation[empty_sel] += base_correlation[empty_sel]
                src_norm[empty_sel] += base_src_norm[empty_sel]
                trg_norm[empty_sel] += base_trg_norm[empty_sel]
        if self.learner.training:
            # Keep the denominator finite under Gumbel sampling.
            src_norm[(src_norm == 0.0)] += 0.0001
            trg_norm[(trg_norm == 0.0)] += 0.0001
        src_norm = src_norm.pow(0.5).unsqueeze(2)
        trg_norm = trg_norm.pow(0.5).unsqueeze(1)
        # Cosine-style normalization, rectification, squaring (appearance score).
        correlation_ts = self.relu((correlation / (torch.bmm(src_norm, trg_norm) + 0.001))).pow(2)
        return (correlation_ts, layer_sel)
    def parameters(self):
        # Only the gating learner is trainable.
        return self.learner.parameters()
    def state_dict(self):
        return self.learner.state_dict()
    def load_state_dict(self, state_dict):
        self.learner.load_state_dict(state_dict)
    def eval(self):
        self.learner.eval()
    def train(self):
        self.learner.train()
|
class Objective():
    """Training objectives for DHPF: weighted keypoint cross-entropy,
    match-distribution entropy, and layer-selection-rate regularization.
    Call initialize() once before using the other classmethods."""

    @classmethod
    def initialize(cls, target_rate, alpha):
        """Set shared state: softmax over candidate matches, target per-layer
        selection rate, and the PCK smoothing factor alpha."""
        cls.softmax = torch.nn.Softmax(dim=1)
        cls.target_rate = target_rate
        cls.alpha = alpha
        cls.eps = 1e-30

    @classmethod
    def weighted_cross_entropy(cls, correlation_matrix, easy_match, hard_match, batch):
        """Batch-mean of per-pair keypoint cross-entropy; easy matches are
        weighted by their squared normalized keypoint distance."""
        per_pair = correlation_matrix.new_zeros(correlation_matrix.size(0))
        normalized = Norm.unit_gaussian_normalize(correlation_matrix)
        for sample_idx, (corr, thres, npt) in enumerate(zip(normalized, batch['pckthres'], batch['n_pts'])):
            hard_src = hard_match['src'][sample_idx]
            if len(hard_src) > 0:
                hard_ce = cls.cross_entropy(corr, hard_src, hard_match['trg'][sample_idx])
                per_pair[sample_idx] += hard_ce.sum()
            easy_src = easy_match['src'][sample_idx]
            if len(easy_src) > 0:
                easy_ce = cls.cross_entropy(corr, easy_src, easy_match['trg'][sample_idx])
                # Down-weight matches that are already close to ground truth.
                weight = (easy_match['dist'][sample_idx] / (thres * cls.alpha)).pow(2)
                per_pair[sample_idx] += (weight * easy_ce).sum()
            per_pair[sample_idx] /= npt  # normalize by keypoint count
        return per_pair.mean()

    @classmethod
    def cross_entropy(cls, correlation_matrix, src_match, trg_match):
        """Cross-entropy between the predicted match distribution (softmax over
        target candidates) and the one-hot ground-truth match."""
        pdf = cls.softmax(correlation_matrix.index_select(0, src_match))
        gt_prob = pdf[range(len(trg_match)), trg_match]
        return -torch.log(gt_prob + cls.eps)

    @classmethod
    def information_entropy(cls, correlation_matrix, rescale_factor=4):
        """Mean Shannon entropy of src->trg and trg->src match distributions
        (computed on a rescale_factor-downsampled grid)."""
        bsz = correlation_matrix.size(0)
        filtered = Correlation.mutual_nn_filter(correlation_matrix)
        side = int(math.sqrt(filtered.size(1)))
        new_side = side // rescale_factor
        # Two views of the same 4D correlation, downsampled on the "match" axes.
        trg2src = F.interpolate(filtered.view(bsz, -1, side, side),
                                [new_side, new_side], mode='bilinear', align_corners=True)
        src2trg = F.interpolate(filtered.view(bsz, side, side, -1).permute(0, 3, 1, 2),
                                [new_side, new_side], mode='bilinear', align_corners=True)
        src_pdf = Norm.l1normalize(trg2src.view(bsz, -1, new_side * new_side))
        trg_pdf = Norm.l1normalize(src2trg.view(bsz, -1, new_side * new_side))
        # Clamp exact zeros so log2 stays finite.
        src_pdf[src_pdf == 0.0] = cls.eps
        trg_pdf[trg_pdf == 0.0] = cls.eps
        src_ent = (-(src_pdf * torch.log2(src_pdf)).sum(dim=2)).view(bsz, -1)
        trg_ent = (-(trg_pdf * torch.log2(trg_pdf)).sum(dim=2)).view(bsz, -1)
        per_sample = (src_ent + trg_ent).mean(dim=1) / 2
        return per_sample.mean()

    @classmethod
    def layer_selection_loss(cls, layer_sel):
        """Penalize deviation of the mean per-layer selection rate from target_rate."""
        return (layer_sel.mean(dim=0) - cls.target_rate).pow(2).sum()
|
def test(model, dataloader):
    """Evaluate DHPF over a dataloader, logging per-batch progress, the layer
    selection summary, and the final result."""
    meter = AverageMeter(dataloader.dataset.benchmark)
    for step, batch in enumerate(dataloader):
        corr, layer_sel = model(batch['src_img'], batch['trg_img'])
        pred_kps = Geometry.transfer_kps(corr, batch['src_kps'], batch['n_pts'])
        result = Evaluator.evaluate(pred_kps, batch)
        meter.update(result, layer_sel.detach(), batch['category'])
        meter.write_process(step, len(dataloader))
    Logger.visualize_selection(meter.sel_buffer)
    meter.write_result('Test')
|
def train(epoch, model, dataloader, strategy, optimizer, training):
    """Run one epoch of DHPF training (validation when `training` is False).

    Returns (average loss, average PCK) over the epoch.
    """
    model.train() if training else model.eval()
    meter = AverageMeter(dataloader.dataset.benchmark)
    for step, batch in enumerate(dataloader):
        src_img, trg_img = strategy.get_image_pair(batch, training)
        corr, layer_sel = model(src_img, trg_img)
        pred_kps = Geometry.transfer_kps(strategy.get_correlation(corr), batch['src_kps'], batch['n_pts'])
        result = Evaluator.evaluate(pred_kps, batch)
        loss = strategy.compute_loss(corr, result, layer_sel, batch)
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        meter.update(result, layer_sel.detach(), batch['category'], loss.item())
        meter.write_process(step, len(dataloader), epoch)
    meter.write_result('Training' if training else 'Validation', epoch)
    return utils.mean(meter.loss_buffer), utils.mean(meter.buffer['pck'])
|
def align_and_split(files):
    """Motion-correct each movie with MotionCor2, then split the aligned stack
    into even/odd frame-sum images saved under data_path/even and data_path/odd.

    NOTE(review): depends on notebook-level globals (`data_path`,
    `pixel_spacing`) and on IPython's `{f}` variable expansion inside the
    shell call — this only runs inside an IPython/Jupyter session.
    """
    for f in tqdm(files):
        # MotionCor2 writes the aligned stack into tmp/ and appends to motioncor2.log.
        get_ipython().system('/notebooks/MotionCor2_1.3.0-Cuda101 -InMrc {f} -OutMrc tmp/aligned.mrc -Patch 5 5 5 -OutStack 1 >> motioncor2.log')
        aligned_stack = mrcfile.open(glob('tmp/*_Stk.mrc')[0], permissive=True)
        # Even- and odd-indexed frame sums form the independent noisy pair.
        save_mrc(join(data_path, 'even', basename(f)), np.sum(aligned_stack.data[::2], axis=0), pixel_spacing)
        save_mrc(join(data_path, 'odd', basename(f)), np.sum(aligned_stack.data[1::2], axis=0), pixel_spacing)
        # Clear tmp so the next movie's *_Stk.mrc glob is unambiguous.
        remove_files('tmp', extension='.mrc')
|
def copy_etomo_files(src, name, target):
    """Copy the etomo alignment/reconstruction files for tilt-series `name`
    from `src` into `target` (the optional `<name>local.xf` only if present)."""
    if exists(join(src, name + 'local.xf')):
        cp(join(src, name + 'local.xf'), target)
    required = [
        name + '.xf',
        'eraser.com',
        'ctfcorrection.com',
        'tilt.com',
        'newst.com',
        name + '.xtilt',
        name + '.tlt',
        name + '.defocus',
        'rotation.xf',
    ]
    for fname in required:
        cp(join(src, fname), target)
|
def augment(x, y):
    """Randomly rotate each volume pair by k*90 degrees about axes (0, 2);
    with probability 0.5 the roles of x and y are also swapped per sample.
    Returns new arrays (inputs are not modified)."""
    # One rotation count per sample, drawn up front (preserves RNG order).
    rot_k = np.random.randint(0, 4, x.shape[0])
    X = x.copy()
    Y = y.copy()
    for i in range(X.shape[0]):
        swap = np.random.rand() >= 0.5
        src, trg = (y, x) if swap else (x, y)
        X[i] = np.rot90(src[i], k=rot_k[i], axes=(0, 2))
        Y[i] = np.rot90(trg[i], k=rot_k[i], axes=(0, 2))
    return (X, Y)
|
class CryoDataWrapper(Sequence):
    """Keras Sequence yielding shuffled, randomly augmented (X, Y) batches
    (90-degree rotations and pair swaps, see augment())."""

    def __init__(self, X, Y, batch_size):
        self.X = X
        self.Y = Y
        self.batch_size = batch_size
        self.perm = np.random.permutation(len(self.X))

    def __len__(self):
        # Number of batches per epoch, counting the final partial batch.
        return int(np.ceil(len(self.X) / float(self.batch_size)))

    def on_epoch_end(self):
        # Reshuffle sample order between epochs.
        self.perm = np.random.permutation(len(self.X))

    def __getitem__(self, i):
        sel = self.perm[slice(i * self.batch_size, (i + 1) * self.batch_size)]
        return self.__augment__(self.X[sel], self.Y[sel])

    def __augment__(self, x, y):
        return augment(x, y)
|
class CryoCARE(CARE):
    # csbdeep CARE subclass whose train() feeds batches through
    # CryoDataWrapper (shuffling + rotation/swap augmentation) instead of
    # CARE's default data pipeline.
    def train(self, X, Y, validation_data, epochs=None, steps_per_epoch=None):
        'Train the neural network with the given data.\n Parameters\n ----------\n X : :class:`numpy.ndarray`\n Array of source images.\n Y : :class:`numpy.ndarray`\n Array of target images.\n validation_data : tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)\n Tuple of arrays for source and target validation images.\n epochs : int\n Optional argument to use instead of the value from ``config``.\n steps_per_epoch : int\n Optional argument to use instead of the value from ``config``.\n Returns\n -------\n ``History`` object\n See `Keras training history <https://keras.io/models/model/#fit>`_.\n '
        # Expression-style validation (csbdeep idiom): _raise fires unless
        # validation_data is a 2-element list/tuple.
        ((isinstance(validation_data, (list, tuple)) and (len(validation_data) == 2)) or _raise(ValueError('validation_data must be a pair of numpy arrays')))
        (n_train, n_val) = (len(X), len(validation_data[0]))
        frac_val = ((1.0 * n_val) / (n_train + n_val))
        frac_warn = 0.05
        if (frac_val < frac_warn):
            warnings.warn(('small number of validation images (only %.1f%% of all images)' % (100 * frac_val)))
        axes = axes_check_and_normalize(('S' + self.config.axes), X.ndim)
        ax = axes_dict(axes)
        # Every spatial axis must be divisible by the network's downsampling factor.
        for (a, div_by) in zip(axes, self._axes_div_by(axes)):
            n = X.shape[ax[a]]
            if ((n % div_by) != 0):
                raise ValueError(('training images must be evenly divisible by %d along axis %s (which has incompatible size %d)' % (div_by, a, n)))
        # Fall back to config values when not given explicitly.
        if (epochs is None):
            epochs = self.config.train_epochs
        if (steps_per_epoch is None):
            steps_per_epoch = self.config.train_steps_per_epoch
        if (not self._model_prepared):
            self.prepare_for_training()
        # Shuffled, augmented batch generator (see CryoDataWrapper / augment).
        training_data = CryoDataWrapper(X, Y, self.config.train_batch_size)
        history = self.keras_model.fit_generator(generator=training_data, validation_data=validation_data, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=self.callbacks, verbose=1)
        if (self.basedir is not None):
            self.keras_model.save_weights(str((self.logdir / 'weights_last.h5')))
            if (self.config.train_checkpoint is not None):
                print()
                # Reload the best checkpoint and drop the temporary weights file.
                self._find_and_load_weights(self.config.train_checkpoint)
                try:
                    (self.logdir / 'weights_now.h5').unlink()
                except FileNotFoundError:
                    pass
        return history
|
@contextmanager
def cd(newdir):
    """Temporarily switch the working directory to `newdir` (user-expanded),
    always restoring the previous directory on exit."""
    previous = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        os.chdir(previous)
|
def save_mrc(path, data, pixel_spacing):
    """Write `data` to a new mrc file at `path`, then stamp an isotropic pixel
    spacing (Angstrom) into its header via IMOD's `alterheader` command.

    Raises CalledProcessError if `alterheader` fails.
    """
    mrc = mrcfile.open(path, mode='w+')
    mrc.set_data(data)
    mrc.close()
    spacing = '{0},{0},{0}'.format(pixel_spacing)
    completed = subprocess.run(['alterheader', '-del', spacing, path],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    completed.check_returncode()
|
def remove_files(dir, extension='.mrc'):
    """Delete every file in `dir` whose name ends with `extension`
    (default ``'.mrc'``)."""
    for stale in glob(join(dir, '*' + extension)):
        os.remove(stale)
|
def modify_newst(path, bin_factor):
    """Modify the bin-factor of a given newst.com file.

    Note: this overwrites the file in place and echoes every line it writes.

    Parameters
    ----------
    path : str
        Path to the newst.com file.
    bin_factor : int
        The new bin-factor.
    """
    # Context managers guarantee the handles are closed even on error
    # (the original used bare open()/close()).
    with open(path, 'r') as f:
        content = [l.strip() for l in f]
    # First line containing 'BinByFactor' gets the new value.
    bin_fac_idx = next(i for i, s in enumerate(content) if 'BinByFactor' in s)
    content[bin_fac_idx] = 'BinByFactor ' + str(bin_factor)
    with open(path, 'w') as f:
        for l in content:
            f.write('%s\n' % l)
            print(l)  # echo the rewritten script, matching original behavior
|
def modify_ctfcorrection(path, bin_factor, pixel_spacing):
    """Update the PixelSize entry of a given ctfcorrection.com file for a new bin-factor.

    Note: this overwrites the file in place and echoes every line it writes.

    Parameters
    ----------
    path : str
        Path to the ctfcorrection.com file.
    bin_factor : int
        The new bin-factor.
    pixel_spacing : float
        The pixel-spacing of the input tilt-angles in Angstrom.
    """
    # Context managers guarantee the handles are closed even on error
    # (the original used bare open()/close()).
    with open(path, 'r') as f:
        content = [l.strip() for l in f]
    # PixelSize scales with binning; round to 3 decimals as before.
    ps_idx = next(i for i, s in enumerate(content) if 'PixelSize' in s)
    content[ps_idx] = 'PixelSize ' + str(np.round(bin_factor * pixel_spacing, decimals=3))
    with open(path, 'w') as f:
        for l in content:
            f.write('%s\n' % l)
            print(l)  # echo the rewritten script, matching original behavior
|
def modify_tilt(path, bin_factor, exclude_angles=None):
    """Modify the bin-factor and exclude-angles of a given tilt.com file.

    Note: this overwrites the file in place and echoes every line it writes.
    When no 'EXCLUDELIST2 ' line exists, the LAST line of the script is
    replaced by the exclude list (preserved original behavior).

    Parameters
    ----------
    path : str
        Path to the tilt.com file.
    bin_factor : int
        The new bin-factor.
    exclude_angles : List[int] or None
        Tilt-angle indices to exclude during reconstruction. Default: none.
    """
    # None default instead of a mutable [] default (shared-state pitfall);
    # behavior is unchanged for callers.
    if exclude_angles is None:
        exclude_angles = []
    with open(path, 'r') as f:
        content = [l.strip() for l in f]
    # Force CPU reconstruction, inserted just before the final line.
    if 'UseGPU 0' not in content:
        content.insert(len(content) - 1, 'UseGPU 0')
    binned_idx = next(i for i, s in enumerate(content) if 'IMAGEBINNED' in s)
    content[binned_idx] = 'IMAGEBINNED ' + str(bin_factor)
    if len(exclude_angles) > 0:
        matches = [i for i, s in enumerate(content) if 'EXCLUDELIST2 ' in s]
        # Reuse the existing entry, else overwrite the last line (as before).
        exclude_idx = matches[0] if matches else len(content) - 1
        content[exclude_idx] = 'EXCLUDELIST2 ' + str(exclude_angles)[1:(- 1)]
    with open(path, 'w') as f:
        for l in content:
            f.write('%s\n' % l)
            print(l)  # echo the rewritten script, matching original behavior
|
def modify_com_scripts(path, bin_factor, pixel_spacing, exclude_angles=None):
    """Modify the bin-factor and exclude-angles of the newst.com,
    ctfcorrection.com and tilt.com scripts under `path`.

    Note: this overwrites the files in place.

    Parameters
    ----------
    path : str
        Path to the parent directory of the scripts.
    bin_factor : int
        The new bin-factor.
    pixel_spacing : float
        The pixel-spacing of the input tilt-angles in Angstrom.
    exclude_angles : List[int] or None
        Tilt-angles to exclude during reconstruction. Default: none.
    """
    # None default instead of a mutable [] default (shared-state pitfall).
    if exclude_angles is None:
        exclude_angles = []
    print("Modified 'newst.com' file:")
    modify_newst(join(path, 'newst.com'), bin_factor)
    print('')
    print('------------------------------------------------------------------------')
    print('')
    print("Modified 'ctfcorrection.com' file:")
    modify_ctfcorrection(join(path, 'ctfcorrection.com'), bin_factor, pixel_spacing)
    print('')
    print('------------------------------------------------------------------------')
    print('')
    print("Modified 'tilt.com' file:")
    modify_tilt(join(path, 'tilt.com'), bin_factor, exclude_angles)
|
def reconstruct_tomo(path, name, dfix, init, volt=300, rotate_X=True):
    """Reconstruct a tomogram with IMOD com-scripts, applying mtffilter after
    CTF correction. A log (``<name>_reconstruction.log``) is written in the
    reconstruction directory.

    Parameters
    ----------
    path : str
        Path to the reconstruction-directory.
    name : str
        Name of the tomogram (the prefix).
    dfix : float
        dfixed parameter of mtffilter: fixed dose for each image of the input
        file, in electrons/square Angstrom.
    init : float
        initial parameter of mtffilter: dose applied before any of the images
        in the input file were taken.
    volt : int
        volt parameter of mtffilter. Microscope voltage in kV; must be either
        200 or 300. Default: ``300``
    rotate_X : bool
        If the reconstructed tomogram should be rotated 90 degrees about X.
        Default: ``True``
    """
    def _run(cmd, log):
        # Echo the command, run it with output mirrored to the log, fail fast.
        print(' '.join(cmd))
        subprocess.run(cmd, stdout=log, stderr=log).check_returncode()

    with cd(path):
        mrc_files = sorted(glob('*.mrc'))
        with open(name + '_reconstruction.log', 'a') as log:
            # Stack the per-tilt images into one tilt-series.
            _run(['newstack'] + mrc_files + [name + '.st'], log)
            # Remove x-rays / hot pixels.
            _run(['submfg', 'eraser.com'], log)
            mv(name + '.st', name + '_orig.st')
            mv(name + '_fixed.st', name + '.st')
            # Align, CTF-correct, dose-filter, then reconstruct.
            _run(['submfg', 'newst.com'], log)
            _run(['submfg', 'ctfcorrection.com'], log)
            _run(['mtffilter', '-dfixed', str(dfix), '-initial', str(init),
                  '-volt', str(volt), name + '_ctfcorr.ali', name + '.ali'], log)
            _run(['submfg', 'tilt.com'], log)
            if rotate_X:
                _run(['trimvol', '-rx', name + '_full.rec', name + '.rec'], log)
                # trimvol leaves the unrotated volume behind; drop it.
                os.remove(name + '_full.rec')
            else:
                print('mv {0}_full.rec {0}.rec'.format(name))
                # BUGFIX: mv renames _full.rec away, so it must NOT be removed
                # afterwards (the original unconditionally removed it and would
                # raise FileNotFoundError on this branch).
                mv(name + '_full.rec', name + '.rec')
        # Restore the original stack and clean up intermediates.
        os.remove(name + '.st')
        mv(name + '_orig.st', name + '.st')
        os.remove(name + '.ali')
        os.remove(name + '_ctfcorr.ali')
        # Suffix the tomogram with its directory name (even/odd split id).
        split_name = os.path.basename(os.path.normpath(path))
        mv(name + '.rec', name + '_' + split_name + '.rec')
|
def parse_layers(layer_ids):
    """Format a list of integer layer ids as '(id,id,...)'; empty input yields '()'."""
    joined = ','.join('%d' % layer_id for layer_id in layer_ids)
    return '(' + joined + ')'
|
def find_topk(membuf, kval):
    """Sort `membuf` in place by score (first tuple element, descending) and
    return its first `kval` entries."""
    membuf.sort(key=lambda entry: entry[0], reverse=True)
    return membuf[:kval]
|
def log_evaluation(layers, score, elapsed):
    """Log one layer-combination evaluation: combination, score (%), wall time (s)."""
    message = '%20s: %4.2f %% %5.1f sec' % (layers, score, elapsed)
    logging.info(message)
|
def log_selected(depth, membuf_topk):
    """Log the top layer combinations kept at a given search depth."""
    header = ' ===================== Depth %d =====================' % depth
    logging.info(header)
    for entry_score, entry_layers in membuf_topk:
        logging.info('%20s: %4.2f %%' % (entry_layers, entry_score))
    logging.info(' ====================================================')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.