file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/lib/utils/tracker.py | Python | import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from numba import jit
import copy
class Tracker(object):
  """Frame-to-frame data-association tracker for CenterTrack detections.

  Tracks and detections are plain dicts carrying at least 'bbox'
  ([x1, y1, x2, y2]), 'score', 'class', 'ct' (center point) and, for new
  detections, 'tracking' (a predicted center offset). Association is done
  on center distances with either greedy or Hungarian matching.
  """
  def __init__(self, opt):
    # opt: parsed options; fields used here: new_thresh, hungarian,
    # public_det, max_age.
    self.opt = opt
    self.reset()

  def init_track(self, results):
    """Seed the track list from first-frame detections above new_thresh."""
    for item in results:
      if item['score'] > self.opt.new_thresh:
        self.id_count += 1
        # active and age are never used in the paper
        item['active'] = 1
        item['age'] = 1
        item['tracking_id'] = self.id_count
        if not ('ct' in item):
          # Derive the center from the box when the detector did not supply it.
          bbox = item['bbox']
          item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
        self.tracks.append(item)

  def reset(self):
    """Clear all tracks and restart id numbering."""
    self.id_count = 0
    self.tracks = []

  def step(self, results, public_det=None):
    """Associate this frame's detections with existing tracks.

    results: list of detection dicts for the current frame.
    public_det: optional provided detections (public-detection protocol);
      only consulted when opt.public_det is set.
    Returns the updated track list (also stored in self.tracks).
    """
    N = len(results)
    M = len(self.tracks)
    # Detection centers shifted by the predicted 'tracking' offset —
    # presumably pointing back toward each object's previous-frame
    # position so they can be compared against track centers directly.
    dets = np.array(
      [det['ct'] + det['tracking'] for det in results], np.float32) # N x 2
    track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \
      (track['bbox'][3] - track['bbox'][1])) \
      for track in self.tracks], np.float32) # M
    track_cat = np.array([track['class'] for track in self.tracks], np.int32) # M
    item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \
      (item['bbox'][3] - item['bbox'][1])) \
      for item in results], np.float32) # N
    item_cat = np.array([item['class'] for item in results], np.int32) # N
    tracks = np.array(
      [pre_det['ct'] for pre_det in self.tracks], np.float32) # M x 2
    # Squared Euclidean distance between every detection and every track.
    dist = (((tracks.reshape(1, -1, 2) - \
      dets.reshape(-1, 1, 2)) ** 2).sum(axis=2)) # N x M
    # A pair is invalid when the (squared) distance exceeds either box area
    # or the classes disagree; such pairs get a prohibitive cost.
    invalid = ((dist > track_size.reshape(1, M)) + \
      (dist > item_size.reshape(N, 1)) + \
      (item_cat.reshape(N, 1) != track_cat.reshape(1, M))) > 0
    dist = dist + invalid * 1e18
    if self.opt.hungarian:
      # NOTE(review): item_score is computed but never used below.
      item_score = np.array([item['score'] for item in results], np.float32) # N
      dist[dist > 1e18] = 1e18
      matched_indices = linear_assignment(dist)
    else:
      # deepcopy: greedy_assignment mutates the cost matrix in place.
      matched_indices = greedy_assignment(copy.deepcopy(dist))
    unmatched_dets = [d for d in range(dets.shape[0]) \
      if not (d in matched_indices[:, 0])]
    unmatched_tracks = [d for d in range(tracks.shape[0]) \
      if not (d in matched_indices[:, 1])]
    if self.opt.hungarian:
      # Hungarian matches everything it can; discard pairs whose cost shows
      # they were only matched through the invalid-pair penalty.
      matches = []
      for m in matched_indices:
        if dist[m[0], m[1]] > 1e16:
          unmatched_dets.append(m[0])
          unmatched_tracks.append(m[1])
        else:
          matches.append(m)
      matches = np.array(matches).reshape(-1, 2)
    else:
      matches = matched_indices
    ret = []
    # Matched detections inherit the track id and extend the active streak.
    for m in matches:
      track = results[m[0]]
      track['tracking_id'] = self.tracks[m[1]]['tracking_id']
      track['age'] = 1
      track['active'] = self.tracks[m[1]]['active'] + 1
      ret.append(track)
    if self.opt.public_det and len(unmatched_dets) > 0:
      # Public detection: only create tracks from provided detections
      pub_dets = np.array([d['ct'] for d in public_det], np.float32)
      dist3 = ((dets.reshape(-1, 1, 2) - pub_dets.reshape(1, -1, 2)) ** 2).sum(
        axis=2)
      # Block detections that were already matched to a track.
      matched_dets = [d for d in range(dets.shape[0]) \
        if not (d in unmatched_dets)]
      dist3[matched_dets] = 1e18
      for j in range(len(pub_dets)):
        i = dist3[:, j].argmin()
        if dist3[i, j] < item_size[i]:
          dist3[i, :] = 1e18
          track = results[i]
          if track['score'] > self.opt.new_thresh:
            self.id_count += 1
            track['tracking_id'] = self.id_count
            track['age'] = 1
            track['active'] = 1
            ret.append(track)
    else:
      # Private detection: create tracks for all un-matched detections
      for i in unmatched_dets:
        track = results[i]
        if track['score'] > self.opt.new_thresh:
          self.id_count += 1
          track['tracking_id'] = self.id_count
          track['age'] = 1
          track['active'] = 1
          ret.append(track)
    # Keep unmatched tracks alive (inactive) until they exceed max_age.
    for i in unmatched_tracks:
      track = self.tracks[i]
      if track['age'] < self.opt.max_age:
        track['age'] += 1
        track['active'] = 0
        bbox = track['bbox']
        ct = track['ct']
        # v is a zero motion model: the box is carried over unchanged.
        v = [0, 0]
        track['bbox'] = [
          bbox[0] + v[0], bbox[1] + v[1],
          bbox[2] + v[0], bbox[3] + v[1]]
        track['ct'] = [ct[0] + v[0], ct[1] + v[1]]
        ret.append(track)
    self.tracks = ret
    return ret
def greedy_assignment(dist):
  """Greedily pair rows (detections) with columns (tracks) of a cost matrix.

  Scans rows in order; each row claims its cheapest still-free column when
  that cost is below 1e16. Claimed columns are disabled by writing 1e18
  into them, so `dist` is mutated in place. Returns an int32 array of
  (row, col) pairs with shape (K, 2).
  """
  pairs = []
  num_dets, num_tracks = dist.shape
  if num_tracks == 0:
    return np.array(pairs, np.int32).reshape(-1, 2)
  for det_idx in range(num_dets):
    track_idx = dist[det_idx].argmin()
    if dist[det_idx][track_idx] < 1e16:
      # Claim this column so later rows cannot take it.
      dist[:, track_idx] = 1e18
      pairs.append([det_idx, track_idx])
  return np.array(pairs, np.int32).reshape(-1, 2)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/utils/utils.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
class AverageMeter(object):
  """Computes and stores the average and current value"""
  def __init__(self):
    # Start with all statistics at zero.
    self.reset()
  def reset(self):
    # val: most recent value; avg: running mean; sum/count: accumulators.
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0
  def update(self, val, n=1):
    """Record a value observed n times and refresh the running average."""
    self.val = val
    self.sum += val * n
    self.count += n
    if self.count > 0:
      self.avg = self.sum / self.count | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/main.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from model.model import create_model, load_model, save_model
from model.data_parallel import DataParallel
from logger import Logger
from dataset.dataset_factory import get_dataset
from trainer import Trainer
def get_optimizer(opt, model):
  """Build the optimizer selected by opt.optim for `model`.

  Supports 'adam' (lr only) and 'sgd' (momentum 0.9, weight decay 1e-4);
  any other value triggers an assertion failure.
  """
  params = model.parameters()
  if opt.optim == 'adam':
    return torch.optim.Adam(params, opt.lr)
  if opt.optim == 'sgd':
    print('Using SGD')
    return torch.optim.SGD(
      params, opt.lr, momentum=0.9, weight_decay=0.0001)
  assert 0, opt.optim
def main(opt):
  """Train (or, with opt.test, evaluate) a CenterTrack model.

  Builds the dataset/model/optimizer from `opt`, optionally resumes from
  opt.load_model, then runs the epoch loop with periodic validation,
  checkpointing and step-wise learning-rate decay.
  """
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  Dataset = get_dataset(opt.dataset)
  # Dataset metadata (heads, shapes) is folded back into opt.
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  if not opt.not_set_cuda_env:
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  logger = Logger(opt)

  print('Creating model...')
  model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
  optimizer = get_optimizer(opt, model)
  start_epoch = 0
  if opt.load_model != '':
    # Resuming also restores the optimizer state and the epoch counter.
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, opt, optimizer)

  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

  # The val loader is only built when validation will actually happen.
  if opt.val_intervals < opt.num_epochs or opt.test:
    print('Setting up validation data...')
    val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'), batch_size=1, shuffle=False, num_workers=1,
      pin_memory=True)

    if opt.test:
      # Evaluation-only mode: run one validation pass and exit.
      _, preds = trainer.val(0, val_loader)
      val_loader.dataset.run_eval(preds, opt.save_dir)
      return

  print('Setting up train data...')
  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'), batch_size=opt.batch_size, shuffle=True,
      num_workers=opt.num_workers, pin_memory=True, drop_last=True
  )

  print('Starting training...')
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    # save_all keeps one checkpoint per epoch; otherwise 'last' is reused.
    mark = epoch if opt.save_all else 'last'
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
        if opt.eval_val:
          val_loader.dataset.run_eval(preds, opt.save_dir)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.save_point:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                 epoch, model, optimizer)
    if epoch in opt.lr_step:
      # Multiply the base lr by 0.1 for each step boundary passed.
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
        param_group['lr'] = lr
  logger.close()
# CLI entry point: parse command-line options and launch training.
if __name__ == '__main__':
  opt = opts().parse()
  main(opt)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/test.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import numpy as np
import time
from progress.bar import Bar
import torch
import copy
from opts import opts
from logger import Logger
from utils.utils import AverageMeter
from dataset.dataset_factory import dataset_factory
from detector import Detector
class PrefetchDataset(torch.utils.data.Dataset):
  """Wraps an evaluation dataset so DataLoader workers pre-process images.

  Each item is (img_id, ret) where ret holds the raw image plus the
  pre-processed tensors and metadata for every test scale.
  """
  def __init__(self, opt, dataset, pre_process_func):
    self.images = dataset.images
    self.load_image_func = dataset.coco.loadImgs
    self.img_dir = dataset.img_dir
    self.pre_process_func = pre_process_func
    self.get_default_calib = dataset.get_default_calib
    self.opt = opt

  def __getitem__(self, index):
    """Load image `index`, pre-process it at every test scale.

    Returns (img_id, ret); ret['is_first_frame'] / ret['video_id'] are set
    on the first frame of a video so the caller can reset tracking.
    """
    img_id = self.images[index]
    img_info = self.load_image_func(ids=[img_id])[0]
    img_path = os.path.join(self.img_dir, img_info['file_name'])
    image = cv2.imread(img_path)
    images, meta = {}, {}
    # BUG FIX: was `opt.test_scales`, which relied on a module-level `opt`
    # only defined under __main__; use the options stored on the instance.
    for scale in self.opt.test_scales:
      input_meta = {}
      # Fall back to a default calibration when the annotation has none.
      calib = img_info['calib'] if 'calib' in img_info \
        else self.get_default_calib(image.shape[1], image.shape[0])
      input_meta['calib'] = calib
      images[scale], meta[scale] = self.pre_process_func(
        image, scale, input_meta)
    ret = {'images': images, 'image': image, 'meta': meta}
    if 'frame_id' in img_info and img_info['frame_id'] == 1:
      ret['is_first_frame'] = 1
      ret['video_id'] = img_info['video_id']
    return img_id, ret

  def __len__(self):
    return len(self.images)
def prefetch_test(opt):
  """Run evaluation with DataLoader-based image prefetching.

  Iterates the test split, feeds frames to the Detector, optionally seeds
  tracking / public detections from a results json (opt.load_results),
  collects per-image results and finally calls the dataset's evaluator.
  """
  if not opt.not_set_cuda_env:
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  Dataset = dataset_factory[opt.test_dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  Logger(opt)

  split = 'val' if not opt.trainval else 'test'
  dataset = Dataset(opt, split)
  detector = Detector(opt)

  if opt.load_results != '':
    # Pre-computed detections keyed by image id (as strings).
    load_results = json.load(open(opt.load_results, 'r'))
    for img_id in load_results:
      for k in range(len(load_results[img_id])):
        # Mask out categories the user asked to ignore (scores set to -1).
        if load_results[img_id][k]['class'] - 1 in opt.ignore_loaded_cats:
          load_results[img_id][k]['score'] = -1
  else:
    load_results = {}

  data_loader = torch.utils.data.DataLoader(
    PrefetchDataset(opt, dataset, detector.pre_process),
    batch_size=1, shuffle=False, num_workers=1, pin_memory=True)

  results = {}
  num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'track']
  avg_time_stats = {t: AverageMeter() for t in time_stats}
  if opt.use_loaded_results:
    # Skip inference entirely: copy loaded results and run only evaluation.
    for img_id in data_loader.dataset.images:
      results[img_id] = load_results['{}'.format(img_id)]
    num_iters = 0
  for ind, (img_id, pre_processed_images) in enumerate(data_loader):
    if ind >= num_iters:
      break
    if opt.tracking and ('is_first_frame' in pre_processed_images):
      # First frame of a video: initialize tracking from loaded detections
      # when available, otherwise start empty.
      if '{}'.format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
        pre_processed_images['meta']['pre_dets'] = \
          load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
      else:
        print()
        print('No pre_dets for', int(img_id.numpy().astype(np.int32)[0]),
          '. Use empty initialization.')
        pre_processed_images['meta']['pre_dets'] = []
      detector.reset_tracking()
      print('Start tracking video', int(pre_processed_images['video_id']))
    if opt.public_det:
      # Public-detection protocol: provide the current frame's detections.
      if '{}'.format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
        pre_processed_images['meta']['cur_dets'] = \
          load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
      else:
        print('No cur_dets for', int(img_id.numpy().astype(np.int32)[0]))
        pre_processed_images['meta']['cur_dets'] = []

    ret = detector.run(pre_processed_images)
    results[int(img_id.numpy().astype(np.int32)[0])] = ret['results']

    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
      ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
        t, tm = avg_time_stats[t])
    if opt.print_iter > 0:
      if ind % opt.print_iter == 0:
        print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
    else:
      bar.next()
  bar.finish()
  if opt.save_results:
    print('saving results to', opt.save_dir + '/save_results_{}{}.json'.format(
      opt.test_dataset, opt.dataset_version))
    # deepcopy so _to_list's in-place numpy->list conversion does not
    # alter the results passed to run_eval below.
    json.dump(_to_list(copy.deepcopy(results)),
              open(opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version), 'w'))
  dataset.run_eval(results, opt.save_dir)
def test(opt):
  """Run evaluation without prefetching: images are read in the main loop.

  Same flow as prefetch_test but the Detector is given the image path and
  does its own loading/pre-processing.
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

  Dataset = dataset_factory[opt.test_dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  Logger(opt)

  split = 'val' if not opt.trainval else 'test'
  dataset = Dataset(opt, split)
  detector = Detector(opt)

  if opt.load_results != '': # load results in json
    load_results = json.load(open(opt.load_results, 'r'))

  results = {}
  num_iters = len(dataset) if opt.num_iters < 0 else opt.num_iters
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
  avg_time_stats = {t: AverageMeter() for t in time_stats}
  for ind in range(num_iters):
    img_id = dataset.images[ind]
    img_info = dataset.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(dataset.img_dir, img_info['file_name'])
    input_meta = {}
    if 'calib' in img_info:
      input_meta['calib'] = img_info['calib']
    if (opt.tracking and ('frame_id' in img_info) and img_info['frame_id'] == 1):
      detector.reset_tracking()
      # NOTE(review): load_results is unbound here when opt.tracking is on
      # but opt.load_results == '' — verify callers always pass a results
      # file in tracking mode.
      input_meta['pre_dets'] = load_results[img_id]

    ret = detector.run(img_path, input_meta)
    results[img_id] = ret['results']

    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
      ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
    bar.next()
  bar.finish()
  if opt.save_results:
    print('saving results to', opt.save_dir + '/save_results_{}{}.json'.format(
      opt.test_dataset, opt.dataset_version))
    json.dump(_to_list(copy.deepcopy(results)),
              open(opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version), 'w'))
  dataset.run_eval(results, opt.save_dir)
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results
# CLI entry point: choose the plain or prefetching evaluation path.
if __name__ == '__main__':
  opt = opts().parse()
  if opt.not_prefetch_test:
    test(opt)
  else:
    prefetch_test(opt)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/_init_paths.py | Python | import os.path as osp
import sys
def add_path(path):
  """Prepend `path` to sys.path unless it is already present."""
  if path in sys.path:
    return
  sys.path.insert(0, path)
# Directory containing this script (src/tools).
this_dir = osp.dirname(__file__)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '../lib')
add_path(lib_path)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/annot_bbox.py | Python | import os
import sys
import json
import cv2
import argparse
import numpy as np
# Accepted image file extensions (lower-cased before comparison).
image_ext = ['jpg', 'jpeg', 'png', 'webp']
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', default='')
parser.add_argument('--save_path', default='')
# Flush the annotation json to disk after this many newly labeled images.
MAX_CACHE = 20
# Category names; ids are assigned as index + 1.
CAT_NAMES = ['cat']
def _sort_expt(pts):
t, l, b, r = 0, 0, 0, 0
for i in range(4):
if pts[i][0] < pts[l][0]:
l = i
if pts[i][1] < pts[t][1]:
t = i
if pts[i][0] > pts[r][0]:
r = i
if pts[i][1] > pts[b][1]:
b = i
ret = [pts[t], pts[l], pts[b], pts[r]]
return ret
def _expt2bbox(expt):
expt = np.array(expt, dtype=np.int32)
bbox = [int(expt[:, 0].min()), int(expt[:, 1].min()),
int(expt[:, 0].max()), int(expt[:, 1].max())]
return bbox
def save_txt(txt_name, pts_cls):
  """Write one annotation per line: 8 extreme-point coordinates then class.

  pts_cls entries are [p0, p1, p2, p3, class_id] with each point [x, y].
  """
  rows = [
    np.array(entry[:4], dtype=np.int32).reshape(8).tolist() + [entry[4]]
    for entry in pts_cls
  ]
  np.savetxt(txt_name, np.array(rows, dtype=np.int32), fmt='%d')
def click(event, x, y, flags, param):
  """OpenCV mouse callback: collect 4 clicks, then commit one box.

  Relies on module-level state: `img` (the displayed image, drawn on in
  place), `cls` (current class id), `pts` (clicks so far) and `expt_cls`
  (finished annotations). `bboxes` is declared global but never assigned
  here.
  """
  global expt_cls, bboxes, pts
  if event == cv2.EVENT_LBUTTONDOWN:
    pts.append([x, y])
    # Mark the click on the preview image.
    cv2.circle(img, (x, y), 5, (255, 0, 255), -1)
    if len(pts) == 4:
      # Four extreme points collected: store them plus the class id and
      # draw the enclosing rectangle.
      expt = _sort_expt(pts)
      bbox = _expt2bbox(expt)
      expt_cls.append(expt + [cls])
      cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
        (255, 0, 255), 2, cv2.LINE_AA)
      pts = []
# Interactive annotation loop.
# Keys: 'd' (100) = save current image's boxes and advance;
#       'a' (97)  = go back one image; Esc (27) = save json and quit.
if __name__ == '__main__':
  cat_info = []
  for i, cat in enumerate(CAT_NAMES):
    cat_info.append({'name': cat, 'id': i + 1})
  args = parser.parse_args()
  if args.save_path == '':
    args.save_path = os.path.join(args.image_path, '..', 'click_annotation')
  if not os.path.exists(args.save_path):
    os.mkdir(args.save_path)
  ann_path = os.path.join(args.save_path, 'annotations.json')
  # Resume an existing annotation file when present.
  if os.path.exists(ann_path):
    anns = json.load(open(ann_path, 'r'))
  else:
    anns = {'annotations': [], 'images': [], 'categories': cat_info}
  assert os.path.exists(args.image_path)
  ls = os.listdir(args.image_path)
  image_names = []
  for file_name in sorted(ls):
    ext = file_name[file_name.rfind('.') + 1:].lower()
    if (ext in image_ext):
      image_names.append(file_name)
  i = 0
  cls = 1
  cached = 0
  while i < len(image_names):
    image_name = image_names[i]
    txt_name = os.path.join(
      args.save_path, image_name[:image_name.rfind('.')] + '.txt')
    # Skip images that already have annotations.
    if os.path.exists(txt_name) or image_name in anns:
      i = i + 1
      continue
    image_path = os.path.join(args.image_path, image_name)
    img = cv2.imread(image_path)
    cv2.namedWindow(image_name)
    cv2.setMouseCallback(image_name, click)
    expt_cls, pts = [], []
    while True:
      cv2.imshow(image_name, img)
      key = cv2.waitKey(1)
      if key == 100:
        # 'd': persist this image's annotations and move on.
        i = i + 1
        save_txt(txt_name, expt_cls)
        image_id = len(anns['images'])
        image_info = {'file_name': image_name, 'id': image_id}
        anns['images'].append(image_info)
        for ann in expt_cls:
          ann_id = len(anns['annotations'])
          # BUG FIX: the key was misspelled 'categoty_id'; COCO tooling
          # expects 'category_id'.
          ann_dict = {'image_id': image_id, 'id': ann_id, 'category_id': ann[4],
                      'bbox': _expt2bbox(ann[:4]), 'extreme_points': ann[:4]}
          anns['annotations'].append(ann_dict)
        cached = cached + 1
        print('saved to ', txt_name)
        if cached > MAX_CACHE:
          print('Saving json', ann_path)
          json.dump(anns, open(ann_path, 'w'))
          cached = 0
        break
      elif key == 97:
        # 'a': step back to the previous image.
        i = i - 1
        break
      elif key == 27:
        # Esc: flush everything and exit.
        json.dump(anns, open(ann_path, 'w'))
        sys.exit(0)
  cv2.destroyAllWindows()
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/convert_crowdhuman_to_coco.py | Python | import os
import numpy as np
import json
import cv2
# Root of the CrowdHuman download; annotations are written alongside it.
DATA_PATH = '../../data/crowdhuman/'
OUT_PATH = DATA_PATH + 'annotations/'
SPLITS = ['val', 'train']
DEBUG = False
def load_func(fpath):
  """Load an .odgt annotation file: one JSON record per line."""
  print('fpath', fpath)
  assert os.path.exists(fpath)
  with open(fpath, 'r') as fid:
    raw_lines = fid.readlines()
  return [json.loads(raw.strip('\n')) for raw in raw_lines]
# Convert CrowdHuman .odgt annotations into COCO-format json per split.
if __name__ == '__main__':
  # BUG FIX: `os.exists` does not exist (AttributeError); the correct
  # call is os.path.exists.
  if not os.path.exists(OUT_PATH):
    os.mkdir(OUT_PATH)
  for split in SPLITS:
    data_path = DATA_PATH + split
    out_path = OUT_PATH + '{}.json'.format(split)
    out = {'images': [], 'annotations': [],
           'categories': [{'id': 1, 'name': 'person'}]}
    ann_path = DATA_PATH + '/annotation_{}.odgt'.format(split)
    anns_data = load_func(ann_path)
    image_cnt = 0
    ann_cnt = 0
    video_cnt = 0
    for ann_data in anns_data:
      image_cnt += 1
      image_info = {'file_name': '{}.jpg'.format(ann_data['ID']),
                    'id': image_cnt}
      out['images'].append(image_info)
      if split != 'test':
        anns = ann_data['gtboxes']
        for i in range(len(anns)):
          ann_cnt += 1
          # Full box is used as 'bbox'; the visible box is kept separately.
          # Boxes explicitly marked ignore become crowd regions.
          ann = {'id': ann_cnt,
                 'category_id': 1,
                 'image_id': image_cnt,
                 'bbox_vis': anns[i]['vbox'],
                 'bbox': anns[i]['fbox'],
                 'iscrowd': 1 if 'extra' in anns[i] and \
                                 'ignore' in anns[i]['extra'] and \
                                 anns[i]['extra']['ignore'] == 1 else 0}
          out['annotations'].append(ann)
    print('loaded {} for {} images and {} samples'.format(
      split, len(out['images']), len(out['annotations'])))
    json.dump(out, open(out_path, 'w'))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/convert_kittitrack_to_coco.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import json
import numpy as np
import os
import cv2
# Root of the KITTI tracking download.
DATA_PATH = '../../data/kitti_tracking/'
SPLITS = ['train_half', 'val_half', 'train', 'test']
# Sequence indices available per split (KITTI has 21 train / 29 test videos).
VIDEO_SETS = {'train': range(21), 'test': range(29),
  'train_half': range(21), 'val_half': range(21)}
# Also write per-sequence half-split label files next to the originals.
CREATE_HALF_LABEL = True
DEBUG = False
'''
#Values Name Description
----------------------------------------------------------------------------
1 frame Frame within the sequence where the object appearers
1 track id Unique tracking id of this object within this sequence
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Integer (0,1,2) indicating the level of truncation.
Note that this is in contrast to the object detection
benchmark where truncation is a float in [0,1].
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
'''
def project_to_image(pts_3d, P):
  """Project n x 3 camera-frame points to pixel coordinates.

  P is a 3 x 4 projection matrix; returns an n x 2 array after the
  perspective divide.
  """
  n = pts_3d.shape[0]
  ones = np.ones((n, 1), dtype=np.float32)
  homo = np.concatenate([pts_3d, ones], axis=1)  # n x 4 homogeneous
  projected = homo.dot(P.T)                      # n x 3
  return projected[:, :2] / projected[:, 2:]
def read_clib(calib_path):
  """Read the P2 (left color camera) 3x4 projection matrix.

  The matrix is parsed from the third line (index 2) of a KITTI
  calibration file, skipping the leading name token. Returns None when
  the file has fewer than three lines (matching the original's implicit
  None return).
  """
  # Use a context manager so the handle is closed on every path
  # (the original leaked the open file).
  with open(calib_path, 'r') as f:
    for i, line in enumerate(f):
      if i == 2:
        calib = np.array(line.strip().split(' ')[1:], dtype=np.float32)
        return calib.reshape(3, 4)
  return None
def _bbox_to_coco_bbox(bbox):
return [(bbox[0]), (bbox[1]),
(bbox[2] - bbox[0]), (bbox[3] - bbox[1])]
# KITTI object categories; ids are assigned as index + 1.
cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
  'Tram', 'Misc', 'DontCare']
cat_ids = {cat: i + 1 for i, cat in enumerate(cats)}
# 'Person' labels map to the same id as 'Person_sitting'.
cat_ids['Person'] = cat_ids['Person_sitting']
cat_info = []
for i, cat in enumerate(cats):
  cat_info.append({'name': cat, 'id': i + 1})
# Convert KITTI tracking labels to COCO-style tracking json, optionally
# splitting each training video in half (first half = train_half, second
# half = val_half) and writing matching half-split label_02 files.
if __name__ == '__main__':
  for split in SPLITS:
    ann_dir = DATA_PATH + '/label_02/'
    ret = {'images': [], 'annotations': [], "categories": cat_info,
           'videos': []}
    num_images = 0
    for i in VIDEO_SETS[split]:
      # Image ids continue across videos; remember the offset for this one.
      image_id_base = num_images
      video_name = '{:04d}'.format(i)
      ret['videos'].append({'id': i + 1, 'file_name': video_name})
      # NOTE(review): ann_dir is reused here as the 'train'/'test' folder
      # selector, shadowing the label path assigned above — confirm intended.
      ann_dir = 'train' if not ('test' in split) else split
      video_path = DATA_PATH + \
        '/data_tracking_image_2/{}ing/image_02/{}'.format(ann_dir, video_name)
      calib_path = DATA_PATH + 'data_tracking_calib/{}ing/calib/'.format(ann_dir) \
        + '{}.txt'.format(video_name)
      calib = read_clib(calib_path)
      image_files = sorted(os.listdir(video_path))
      num_images_video = len(image_files)
      if CREATE_HALF_LABEL and 'half' in split:
        # First half of frames for train_half, second half for val_half.
        image_range = [0, num_images_video // 2 - 1] if split == 'train_half' else \
          [num_images_video // 2, num_images_video - 1]
      else:
        image_range = [0, num_images_video - 1]
      print('num_frames', video_name, image_range[1] - image_range[0] + 1)
      for j, image_name in enumerate(image_files):
        if (j < image_range[0] or j > image_range[1]):
          continue
        num_images += 1
        image_info = {'file_name': '{}/{:06d}.png'.format(video_name, j),
                      'id': num_images,
                      'calib': calib.tolist(),
                      'video_id': i + 1,
                      'frame_id': j + 1 - image_range[0]}
        ret['images'].append(image_info)
      if split == 'test':
        continue
      # 0 -1 DontCare -1 -1 -10.000000 219.310000 188.490000 245.500000 218.560000 -1000.000000 -1000.000000 -1000.000000 -10.000000 -1.000000 -1.000000 -1.000000
      ann_path = DATA_PATH + 'label_02/{}.txt'.format(video_name)
      # NOTE(review): this handle (and label_out_file below) is never
      # explicitly closed.
      anns = open(ann_path, 'r')
      if CREATE_HALF_LABEL and 'half' in split:
        label_out_folder = DATA_PATH + 'label_02_{}/'.format(split)
        label_out_path = label_out_folder + '{}.txt'.format(video_name)
        if not os.path.exists(label_out_folder):
          os.mkdir(label_out_folder)
        label_out_file = open(label_out_path, 'w')
      for ann_ind, txt in enumerate(anns):
        # KITTI label line: frame, track_id, type, truncated, occluded,
        # alpha, bbox(4), dim(3), location(3), rotation_y.
        tmp = txt[:-1].split(' ')
        frame_id = int(tmp[0])
        track_id = int(tmp[1])
        cat_id = cat_ids[tmp[2]]
        truncated = int(float(tmp[3]))
        occluded = int(tmp[4])
        alpha = float(tmp[5])
        bbox = [float(tmp[6]), float(tmp[7]), float(tmp[8]), float(tmp[9])]
        dim = [float(tmp[10]), float(tmp[11]), float(tmp[12])]
        location = [float(tmp[13]), float(tmp[14]), float(tmp[15])]
        rotation_y = float(tmp[16])
        # Amodal center: the 3D box center (bottom center raised by half
        # the height) projected into the image.
        amodel_center = project_to_image(
          np.array([location[0], location[1] - dim[0] / 2, location[2]],
            np.float32).reshape(1, 3), calib)[0].tolist()
        ann = {'image_id': frame_id + 1 - image_range[0] + image_id_base,
               'id': int(len(ret['annotations']) + 1),
               'category_id': cat_id,
               'dim': dim,
               'bbox': _bbox_to_coco_bbox(bbox),
               'depth': location[2],
               'alpha': alpha,
               'truncated': truncated,
               'occluded': occluded,
               'location': location,
               'rotation_y': rotation_y,
               'amodel_center': amodel_center,
               'track_id': track_id + 1}
        if CREATE_HALF_LABEL and 'half' in split:
          # Drop labels outside this half; re-base frame ids and mirror
          # the raw line into the half-split label file.
          if (frame_id < image_range[0] or frame_id > image_range[1]):
            continue
          out_frame_id = frame_id - image_range[0]
          label_out_file.write('{} {}'.format(
            out_frame_id, txt[txt.find(' ') + 1:]))
        ret['annotations'].append(ann)
    print("# images: ", len(ret['images']))
    print("# annotations: ", len(ret['annotations']))
    out_dir = '{}/annotations/'.format(DATA_PATH)
    if not os.path.exists(out_dir):
      os.mkdir(out_dir)
    out_path = '{}/annotations/tracking_{}.json'.format(
      DATA_PATH, split)
    json.dump(ret, open(out_path, 'w'))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/convert_mot_det_to_results.py | Python | import json
import numpy as np
import os
from collections import defaultdict
# Which MOT17 split's provided detections to convert into results json.
split = 'val_half'
DET_PATH = '../../data/mot17/'
ANN_PATH = '../../data/mot17/annotations/{}.json'.format(split)
OUT_DIR = '../../data/mot17/results/'
OUT_PATH = OUT_DIR + '{}_det.json'.format(split)
# Convert MOT17 public detection files (det.txt) into the per-image
# results-json format used by the tracker's public-detection mode.
if __name__ == '__main__':
  if not os.path.exists(OUT_DIR):
    os.mkdir(OUT_DIR)
  seqs = [s for s in os.listdir(DET_PATH) if '_det' in s]
  data = json.load(open(ANN_PATH, 'r'))
  images = data['images']
  image_to_anns = defaultdict(list)
  # BUG FIX: IS_THIRD_PARTY was referenced below but never defined,
  # raising NameError on the first sequence. Default to False, i.e.
  # parse the official det.txt files shipped with the dataset.
  IS_THIRD_PARTY = False
  for seq in sorted(seqs):
    print('seq', seq)
    seq_path = '{}/{}/'.format(DET_PATH, seq)
    if split == 'val_half':
      ann_path = seq_path + 'det/det_val_half.txt'
      train_ann_path = seq_path + 'det/det_train_half.txt'
      # val_half frame numbers restart at 1; offset them by the number of
      # frames in the train half so file names line up with the images.
      train_anns = np.loadtxt(train_ann_path, dtype=np.float32, delimiter=',')
      frame_base = int(train_anns[:, 0].max())
    else:
      ann_path = seq_path + 'det/det.txt'
      frame_base = 0
    if not IS_THIRD_PARTY:
      anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')
      for i in range(len(anns)):
        frame_id = int(anns[i][0])
        file_name = '{}/img1/{:06d}.jpg'.format(seq, frame_id + frame_base)
        bbox = (anns[i][2:6]).tolist()
        score = 1 # float(anns[i][8])
        image_to_anns[file_name].append(bbox + [score])

  results = {}
  for image_info in images:
    image_id = image_info['id']
    file_name = image_info['file_name']
    dets = image_to_anns[file_name]
    results[image_id] = []
    for det in dets:
      # Detections are stored as [x, y, w, h]; convert to corner format
      # and precompute the center point expected by the tracker.
      bbox = [float(det[0]), float(det[1]), \
              float(det[0] + det[2]), float(det[1] + det[3])]
      ct = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
      results[image_id].append(
        {'bbox': bbox, 'score': float(det[4]), 'class': 1, 'ct': ct})
  out_path = OUT_PATH
  json.dump(results, open(out_path, 'w'))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/convert_mot_to_coco.py | Python | import os
import numpy as np
import json
import cv2
# Use the same script for MOT16
# DATA_PATH = '../../data/mot16/'
DATA_PATH = '../../data/mot17/'
OUT_PATH = DATA_PATH + 'annotations/'
SPLITS = ['train_half', 'val_half', 'train', 'test']
# Split each training video: first half -> *_half train, second -> val.
HALF_VIDEO = True
# Also emit half-split gt.txt / det.txt files next to the originals.
CREATE_SPLITTED_ANN = True
CREATE_SPLITTED_DET = True
# Convert MOT ground truth into COCO-style tracking json per split.
if __name__ == '__main__':
  for split in SPLITS:
    # Half splits are carved out of the train folder.
    data_path = DATA_PATH + (split if not HALF_VIDEO else 'train')
    out_path = OUT_PATH + '{}.json'.format(split)
    # NOTE(review): 'pedestrain' is a typo for 'pedestrian' but it is a
    # stored label consumed downstream — confirm before renaming.
    out = {'images': [], 'annotations': [],
           'categories': [{'id': 1, 'name': 'pedestrain'}],
           'videos': []}
    seqs = os.listdir(data_path)
    image_cnt = 0
    ann_cnt = 0
    video_cnt = 0
    for seq in sorted(seqs):
      if '.DS_Store' in seq:
        continue
      # MOT17 triples every sequence per detector; keep only FRCNN copies.
      if 'mot17' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)):
        continue
      video_cnt += 1
      out['videos'].append({
        'id': video_cnt,
        'file_name': seq})
      seq_path = '{}/{}/'.format(data_path, seq)
      img_path = seq_path + 'img1/'
      ann_path = seq_path + 'gt/gt.txt'
      images = os.listdir(img_path)
      num_images = len([image for image in images if 'jpg' in image])
      if HALF_VIDEO and ('half' in split):
        image_range = [0, num_images // 2] if 'train' in split else \
          [num_images // 2 + 1, num_images - 1]
      else:
        image_range = [0, num_images - 1]
      for i in range(num_images):
        if (i < image_range[0] or i > image_range[1]):
          continue
        # prev/next image ids let the dataloader fetch adjacent frames.
        image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1),
                      'id': image_cnt + i + 1,
                      'frame_id': i + 1 - image_range[0],
                      'prev_image_id': image_cnt + i if i > 0 else -1,
                      'next_image_id': \
                        image_cnt + i + 2 if i < num_images - 1 else -1,
                      'video_id': video_cnt}
        out['images'].append(image_info)
      print('{}: {} images'.format(seq, num_images))
      if split != 'test':
        det_path = seq_path + 'det/det.txt'
        anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')
        dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')
        if CREATE_SPLITTED_ANN and ('half' in split):
          # Keep only this half's rows and re-base their frame numbers.
          anns_out = np.array([anns[i] for i in range(anns.shape[0]) if \
            int(anns[i][0]) - 1 >= image_range[0] and \
            int(anns[i][0]) - 1 <= image_range[1]], np.float32)
          anns_out[:, 0] -= image_range[0]
          gt_out = seq_path + '/gt/gt_{}.txt'.format(split)
          fout = open(gt_out, 'w')
          for o in anns_out:
            fout.write(
              '{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\n'.format(
                int(o[0]),int(o[1]),int(o[2]),int(o[3]),int(o[4]),int(o[5]),
                int(o[6]),int(o[7]),o[8]))
          fout.close()
        if CREATE_SPLITTED_DET and ('half' in split):
          # Same re-basing for the public detection file.
          dets_out = np.array([dets[i] for i in range(dets.shape[0]) if \
            int(dets[i][0]) - 1 >= image_range[0] and \
            int(dets[i][0]) - 1 <= image_range[1]], np.float32)
          dets_out[:, 0] -= image_range[0]
          det_out = seq_path + '/det/det_{}.txt'.format(split)
          dout = open(det_out, 'w')
          for o in dets_out:
            dout.write(
              '{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\n'.format(
                int(o[0]),int(o[1]),float(o[2]),float(o[3]),float(o[4]),float(o[5]),
                float(o[6])))
          dout.close()
        print(' {} ann images'.format(int(anns[:, 0].max())))
        for i in range(anns.shape[0]):
          frame_id = int(anns[i][0])
          if (frame_id - 1 < image_range[0] or frame_id - 1> image_range[1]):
            continue
          track_id = int(anns[i][1])
          cat_id = int(anns[i][7])
          ann_cnt += 1
          # MOT15 lacks class/visibility columns; later sets filter by
          # visibility (>= 0.25), consider-flag, and class id.
          if not ('15' in DATA_PATH):
            if not (float(anns[i][8]) >= 0.25):
              continue
            if not (int(anns[i][6]) == 1):
              continue
            if (int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]): # Non-person
              continue
            if (int(anns[i][7]) in [2, 7, 8, 12]): # Ignored person
              category_id = -1
            else:
              category_id = 1
          else:
            category_id = 1
          ann = {'id': ann_cnt,
                 'category_id': category_id,
                 'image_id': image_cnt + frame_id,
                 'track_id': track_id,
                 'bbox': anns[i][2:6].tolist(),
                 'conf': float(anns[i][6])}
          out['annotations'].append(ann)
      image_cnt += num_images
    print('loaded {} for {} images and {} samples'.format(
      split, len(out['images']), len(out['annotations'])))
    json.dump(out, open(out_path, 'w'))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/convert_nuScenes.py | Python | # Copyright (c) Xingyi Zhou. All Rights Reserved
'''
nuScenes pre-processing script.
This file convert the nuScenes annotation into COCO format.
'''
# stdlib
import copy
import json
import os
# third-party
import cv2
import matplotlib.pyplot as plt
import numpy as np
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import BoxVisibility, transform_matrix
from nuScenes_lib.utils_kitti import KittiDB
from nuscenes.eval.detection.utils import category_to_detection_name
from pyquaternion import Quaternion
# local (_init_paths must run before the utils imports: it sets up sys.path)
import _init_paths
from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
# Input dataset root and output annotation directory.
DATA_PATH = '../../data/nuscenes/'
OUT_PATH = DATA_PATH + 'annotations/'
# Output split name -> nuScenes dataset version directory to load.
SPLITS = {'val': 'v1.0-trainval', 'train': 'v1.0-trainval', 'test': 'v1.0-test'}
DEBUG = False
# The 10 nuScenes detection classes (category ids are 1-based; see CAT_IDS).
CATS = ['car', 'truck', 'bus', 'trailer', 'construction_vehicle',
        'pedestrian', 'motorcycle', 'bicycle',
        'traffic_cone', 'barrier']
# Fixed integer id per sensor channel; stored as 'sensor_id' of each image.
SENSOR_ID = {'RADAR_FRONT': 7, 'RADAR_FRONT_LEFT': 9,
             'RADAR_FRONT_RIGHT': 10, 'RADAR_BACK_LEFT': 11,
             'RADAR_BACK_RIGHT': 12, 'LIDAR_TOP': 8,
             'CAM_FRONT': 1, 'CAM_FRONT_RIGHT': 2,
             'CAM_BACK_RIGHT': 3, 'CAM_BACK': 4, 'CAM_BACK_LEFT': 5,
             'CAM_FRONT_LEFT': 6}
# Only the 6 camera channels are converted to images.
USED_SENSOR = ['CAM_FRONT', 'CAM_FRONT_RIGHT',
               'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT',
               'CAM_FRONT_LEFT']
# category name -> 1-based category id
CAT_IDS = {v: i + 1 for i, v in enumerate(CATS)}
def _rot_y2alpha(rot_y, x, cx, fx):
"""
Get rotation_y by alpha + theta - 180
alpha : Observation angle of object, ranging [-pi..pi]
x : Object center x to the camera center (x-W/2), in pixels
rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
"""
alpha = rot_y - np.arctan2(x - cx, fx)
if alpha > np.pi:
alpha -= 2 * np.pi
if alpha < -np.pi:
alpha += 2 * np.pi
return alpha
def _bbox_inside(box1, box2):
return box1[0] > box2[0] and box1[0] + box1[2] < box2[0] + box2[2] and \
box1[1] > box2[1] and box1[1] + box1[3] < box2[1] + box2[3]
# nuScenes attribute name -> integer id stored in the 'attributes' field of
# each output annotation ('' = object has no attribute).
ATTRIBUTE_TO_ID = {
  '': 0, 'cycle.with_rider' : 1, 'cycle.without_rider' : 2,
  'pedestrian.moving': 3, 'pedestrian.standing': 4,
  'pedestrian.sitting_lying_down': 5,
  'vehicle.moving': 6, 'vehicle.parked': 7,
  'vehicle.stopped': 8}
def main():
  """Convert nuScenes annotations into COCO-style tracking json.

  For every split in SPLITS, loads the corresponding nuScenes version,
  walks all samples, and writes OUT_PATH/<split>.json with 'images',
  'annotations', 'videos', 'categories' and 'attributes' entries.
  """
  SCENE_SPLITS['mini-val'] = SCENE_SPLITS['val']
  if not os.path.exists(OUT_PATH):
    os.mkdir(OUT_PATH)
  for split in SPLITS:
    data_path = DATA_PATH + '{}/'.format(SPLITS[split])
    nusc = NuScenes(
      version=SPLITS[split], dataroot=data_path, verbose=True)
    out_path = OUT_PATH + '{}.json'.format(split)
    categories_info = [{'name': CATS[i], 'id': i + 1} for i in range(len(CATS))]
    ret = {'images': [], 'annotations': [], 'categories': categories_info,
           'videos': [], 'attributes': ATTRIBUTE_TO_ID}
    num_images = 0
    num_anns = 0
    num_videos = 0
    # A "sample" in nuScenes refers to a timestamp with 6 cameras and 1 LIDAR.
    for sample in nusc.sample:
      scene_name = nusc.get('scene', sample['scene_token'])['name']
      # Keep only scenes belonging to the requested split ('mini'/'test'
      # versions contain exactly the wanted scenes already).
      if not (split in ['mini', 'test']) and \
         not (scene_name in SCENE_SPLITS[split]):
        continue
      # sample['prev'] == '' marks the first sample of a scene: start a new
      # video and reset the per-sensor frame counters and track-id table.
      if sample['prev'] == '':
        print('scene_name', scene_name)
        num_videos += 1
        ret['videos'].append({'id': num_videos, 'file_name': scene_name})
        frame_ids = {k: 0 for k in sample['data']}
        track_ids = {}
      # We decompose a sample into 6 images in our case.
      for sensor_name in sample['data']:
        if sensor_name in USED_SENSOR:
          image_token = sample['data'][sensor_name]
          image_data = nusc.get('sample_data', image_token)
          num_images += 1
          # Complex coordinate transform. This will take time to understand.
          # trans_matrix maps sensor coordinates -> global coordinates.
          sd_record = nusc.get('sample_data', image_token)
          cs_record = nusc.get(
            'calibrated_sensor', sd_record['calibrated_sensor_token'])
          pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
          global_from_car = transform_matrix(pose_record['translation'],
            Quaternion(pose_record['rotation']), inverse=False)
          car_from_sensor = transform_matrix(
            cs_record['translation'], Quaternion(cs_record['rotation']),
            inverse=False)
          trans_matrix = np.dot(global_from_car, car_from_sensor)
          _, boxes, camera_intrinsic = nusc.get_sample_data(
            image_token, box_vis_level=BoxVisibility.ANY)
          # 3x4 camera calibration matrix (KITTI-style).
          calib = np.eye(4, dtype=np.float32)
          calib[:3, :3] = camera_intrinsic
          calib = calib[:3]
          frame_ids[sensor_name] += 1
          # image information in COCO format
          image_info = {'id': num_images,
                        'file_name': image_data['filename'],
                        'calib': calib.tolist(),
                        'video_id': num_videos,
                        'frame_id': frame_ids[sensor_name],
                        'sensor_id': SENSOR_ID[sensor_name],
                        'sample_token': sample['token'],
                        'trans_matrix': trans_matrix.tolist(),
                        'width': sd_record['width'],
                        'height': sd_record['height'],
                        'pose_record_trans': pose_record['translation'],
                        'pose_record_rot': pose_record['rotation'],
                        'cs_record_trans': cs_record['translation'],
                        'cs_record_rot': cs_record['rotation']}
          ret['images'].append(image_info)
          anns = []
          for box in boxes:
            det_name = category_to_detection_name(box.name)
            if det_name is None:
              continue
            num_anns += 1
            # Yaw around the camera Y axis, from the box's heading direction.
            v = np.dot(box.rotation_matrix, np.array([1, 0, 0]))
            yaw = -np.arctan2(v[2], v[0])
            # Move the box center from the volumetric center to the bottom
            # center (KITTI convention).
            box.translate(np.array([0, box.wlh[2] / 2, 0]))
            category_id = CAT_IDS[det_name]
            # Projected amodal (full-extent) 3D center in the image.
            amodel_center = project_to_image(
              np.array([box.center[0], box.center[1] - box.wlh[2] / 2, box.center[2]],
                np.float32).reshape(1, 3), calib)[0].tolist()
            sample_ann = nusc.get(
              'sample_annotation', box.token)
            instance_token = sample_ann['instance_token']
            # Assign a per-scene dense track id per instance.
            if not (instance_token in track_ids):
              track_ids[instance_token] = len(track_ids) + 1
            attribute_tokens = sample_ann['attribute_tokens']
            attributes = [nusc.get('attribute', att_token)['name'] \
              for att_token in attribute_tokens]
            att = '' if len(attributes) == 0 else attributes[0]
            if len(attributes) > 1:
              print(attributes)
              import pdb; pdb.set_trace()
            track_id = track_ids[instance_token]
            vel = nusc.box_velocity(box.token) # global frame
            # Rotate velocity from the global frame into the sensor frame
            # (homogeneous vector with w=0, so translation is ignored).
            vel = np.dot(np.linalg.inv(trans_matrix),
              np.array([vel[0], vel[1], vel[2], 0], np.float32)).tolist()
            # instance information in COCO format
            ann = {
              'id': num_anns,
              'image_id': num_images,
              'category_id': category_id,
              'dim': [box.wlh[2], box.wlh[0], box.wlh[1]],
              'location': [box.center[0], box.center[1], box.center[2]],
              'depth': box.center[2],
              'occluded': 0,
              'truncated': 0,
              'rotation_y': yaw,
              'amodel_center': amodel_center,
              'iscrowd': 0,
              'track_id': track_id,
              'attributes': ATTRIBUTE_TO_ID[att],
              'velocity': vel
            }
            bbox = KittiDB.project_kitti_box_to_image(
              copy.deepcopy(box), camera_intrinsic, imsize=(1600, 900))
            alpha = _rot_y2alpha(yaw, (bbox[0] + bbox[2]) / 2,
                                 camera_intrinsic[0, 2], camera_intrinsic[0, 0])
            ann['bbox'] = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
            ann['area'] = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
            ann['alpha'] = alpha
            anns.append(ann)
          # Filter out bounding boxes outside the image
          # NOTE(review): the check below actually drops boxes fully occluded
          # by a nearer box (depth test + containment), despite the comment.
          visable_anns = []
          for i in range(len(anns)):
            vis = True
            for j in range(len(anns)):
              if anns[i]['depth'] - min(anns[i]['dim']) / 2 > \
                 anns[j]['depth'] + max(anns[j]['dim']) / 2 and \
                 _bbox_inside(anns[i]['bbox'], anns[j]['bbox']):
                vis = False
                break
            if vis:
              visable_anns.append(anns[i])
            else:
              pass
          for ann in visable_anns:
            ret['annotations'].append(ann)
          if DEBUG:
            img_path = data_path + image_info['file_name']
            img = cv2.imread(img_path)
            img_3d = img.copy()
            for ann in visable_anns:
              bbox = ann['bbox']
              cv2.rectangle(img, (int(bbox[0]), int(bbox[1])),
                            (int(bbox[2] + bbox[0]), int(bbox[3] + bbox[1])),
                            (0, 0, 255), 3, lineType=cv2.LINE_AA)
              box_3d = compute_box_3d(ann['dim'], ann['location'], ann['rotation_y'])
              box_2d = project_to_image(box_3d, calib)
              img_3d = draw_box_3d(img_3d, box_2d)
              # Sanity check: unproject the amodal center back to 3D and
              # compare with the annotated location.
              pt_3d = unproject_2d_to_3d(ann['amodel_center'], ann['depth'], calib)
              pt_3d[1] += ann['dim'][0] / 2
              print('location', ann['location'])
              print('loc model', pt_3d)
              pt_2d = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
                               dtype=np.float32)
              pt_3d = unproject_2d_to_3d(pt_2d, ann['depth'], calib)
              pt_3d[1] += ann['dim'][0] / 2
              print('loc ', pt_3d)
            cv2.imshow('img', img)
            cv2.imshow('img_3d', img_3d)
            cv2.waitKey()
            nusc.render_sample_data(image_token)
            plt.show()
    # Reorder images so that each (video, sensor) sequence is contiguous.
    print('reordering images')
    images = ret['images']
    video_sensor_to_images = {}
    for image_info in images:
      # 20 > max sensor id, so this key is unique per (video, sensor).
      tmp_seq_id = image_info['video_id'] * 20 + image_info['sensor_id']
      if tmp_seq_id in video_sensor_to_images:
        video_sensor_to_images[tmp_seq_id].append(image_info)
      else:
        video_sensor_to_images[tmp_seq_id] = [image_info]
    ret['images'] = []
    for tmp_seq_id in sorted(video_sensor_to_images):
      ret['images'] = ret['images'] + video_sensor_to_images[tmp_seq_id]
    print('{} {} images {} boxes'.format(
      split, len(ret['images']), len(ret['annotations'])))
    print('out_path', out_path)
    json.dump(ret, open(out_path, 'w'))
# Official train/ val split from
# https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/utils/splits.py
SCENE_SPLITS = {
'train':
['scene-0001', 'scene-0002', 'scene-0004', 'scene-0005', 'scene-0006', 'scene-0007', 'scene-0008', 'scene-0009',
'scene-0010', 'scene-0011', 'scene-0019', 'scene-0020', 'scene-0021', 'scene-0022', 'scene-0023', 'scene-0024',
'scene-0025', 'scene-0026', 'scene-0027', 'scene-0028', 'scene-0029', 'scene-0030', 'scene-0031', 'scene-0032',
'scene-0033', 'scene-0034', 'scene-0041', 'scene-0042', 'scene-0043', 'scene-0044', 'scene-0045', 'scene-0046',
'scene-0047', 'scene-0048', 'scene-0049', 'scene-0050', 'scene-0051', 'scene-0052', 'scene-0053', 'scene-0054',
'scene-0055', 'scene-0056', 'scene-0057', 'scene-0058', 'scene-0059', 'scene-0060', 'scene-0061', 'scene-0062',
'scene-0063', 'scene-0064', 'scene-0065', 'scene-0066', 'scene-0067', 'scene-0068', 'scene-0069', 'scene-0070',
'scene-0071', 'scene-0072', 'scene-0073', 'scene-0074', 'scene-0075', 'scene-0076', 'scene-0120', 'scene-0121',
'scene-0122', 'scene-0123', 'scene-0124', 'scene-0125', 'scene-0126', 'scene-0127', 'scene-0128', 'scene-0129',
'scene-0130', 'scene-0131', 'scene-0132', 'scene-0133', 'scene-0134', 'scene-0135', 'scene-0138', 'scene-0139',
'scene-0149', 'scene-0150', 'scene-0151', 'scene-0152', 'scene-0154', 'scene-0155', 'scene-0157', 'scene-0158',
'scene-0159', 'scene-0160', 'scene-0161', 'scene-0162', 'scene-0163', 'scene-0164', 'scene-0165', 'scene-0166',
'scene-0167', 'scene-0168', 'scene-0170', 'scene-0171', 'scene-0172', 'scene-0173', 'scene-0174', 'scene-0175',
'scene-0176', 'scene-0177', 'scene-0178', 'scene-0179', 'scene-0180', 'scene-0181', 'scene-0182', 'scene-0183',
'scene-0184', 'scene-0185', 'scene-0187', 'scene-0188', 'scene-0190', 'scene-0191', 'scene-0192', 'scene-0193',
'scene-0194', 'scene-0195', 'scene-0196', 'scene-0199', 'scene-0200', 'scene-0202', 'scene-0203', 'scene-0204',
'scene-0206', 'scene-0207', 'scene-0208', 'scene-0209', 'scene-0210', 'scene-0211', 'scene-0212', 'scene-0213',
'scene-0214', 'scene-0218', 'scene-0219', 'scene-0220', 'scene-0222', 'scene-0224', 'scene-0225', 'scene-0226',
'scene-0227', 'scene-0228', 'scene-0229', 'scene-0230', 'scene-0231', 'scene-0232', 'scene-0233', 'scene-0234',
'scene-0235', 'scene-0236', 'scene-0237', 'scene-0238', 'scene-0239', 'scene-0240', 'scene-0241', 'scene-0242',
'scene-0243', 'scene-0244', 'scene-0245', 'scene-0246', 'scene-0247', 'scene-0248', 'scene-0249', 'scene-0250',
'scene-0251', 'scene-0252', 'scene-0253', 'scene-0254', 'scene-0255', 'scene-0256', 'scene-0257', 'scene-0258',
'scene-0259', 'scene-0260', 'scene-0261', 'scene-0262', 'scene-0263', 'scene-0264', 'scene-0283', 'scene-0284',
'scene-0285', 'scene-0286', 'scene-0287', 'scene-0288', 'scene-0289', 'scene-0290', 'scene-0291', 'scene-0292',
'scene-0293', 'scene-0294', 'scene-0295', 'scene-0296', 'scene-0297', 'scene-0298', 'scene-0299', 'scene-0300',
'scene-0301', 'scene-0302', 'scene-0303', 'scene-0304', 'scene-0305', 'scene-0306', 'scene-0315', 'scene-0316',
'scene-0317', 'scene-0318', 'scene-0321', 'scene-0323', 'scene-0324', 'scene-0328', 'scene-0347', 'scene-0348',
'scene-0349', 'scene-0350', 'scene-0351', 'scene-0352', 'scene-0353', 'scene-0354', 'scene-0355', 'scene-0356',
'scene-0357', 'scene-0358', 'scene-0359', 'scene-0360', 'scene-0361', 'scene-0362', 'scene-0363', 'scene-0364',
'scene-0365', 'scene-0366', 'scene-0367', 'scene-0368', 'scene-0369', 'scene-0370', 'scene-0371', 'scene-0372',
'scene-0373', 'scene-0374', 'scene-0375', 'scene-0376', 'scene-0377', 'scene-0378', 'scene-0379', 'scene-0380',
'scene-0381', 'scene-0382', 'scene-0383', 'scene-0384', 'scene-0385', 'scene-0386', 'scene-0388', 'scene-0389',
'scene-0390', 'scene-0391', 'scene-0392', 'scene-0393', 'scene-0394', 'scene-0395', 'scene-0396', 'scene-0397',
'scene-0398', 'scene-0399', 'scene-0400', 'scene-0401', 'scene-0402', 'scene-0403', 'scene-0405', 'scene-0406',
'scene-0407', 'scene-0408', 'scene-0410', 'scene-0411', 'scene-0412', 'scene-0413', 'scene-0414', 'scene-0415',
'scene-0416', 'scene-0417', 'scene-0418', 'scene-0419', 'scene-0420', 'scene-0421', 'scene-0422', 'scene-0423',
'scene-0424', 'scene-0425', 'scene-0426', 'scene-0427', 'scene-0428', 'scene-0429', 'scene-0430', 'scene-0431',
'scene-0432', 'scene-0433', 'scene-0434', 'scene-0435', 'scene-0436', 'scene-0437', 'scene-0438', 'scene-0439',
'scene-0440', 'scene-0441', 'scene-0442', 'scene-0443', 'scene-0444', 'scene-0445', 'scene-0446', 'scene-0447',
'scene-0448', 'scene-0449', 'scene-0450', 'scene-0451', 'scene-0452', 'scene-0453', 'scene-0454', 'scene-0455',
'scene-0456', 'scene-0457', 'scene-0458', 'scene-0459', 'scene-0461', 'scene-0462', 'scene-0463', 'scene-0464',
'scene-0465', 'scene-0467', 'scene-0468', 'scene-0469', 'scene-0471', 'scene-0472', 'scene-0474', 'scene-0475',
'scene-0476', 'scene-0477', 'scene-0478', 'scene-0479', 'scene-0480', 'scene-0499', 'scene-0500', 'scene-0501',
'scene-0502', 'scene-0504', 'scene-0505', 'scene-0506', 'scene-0507', 'scene-0508', 'scene-0509', 'scene-0510',
'scene-0511', 'scene-0512', 'scene-0513', 'scene-0514', 'scene-0515', 'scene-0517', 'scene-0518', 'scene-0525',
'scene-0526', 'scene-0527', 'scene-0528', 'scene-0529', 'scene-0530', 'scene-0531', 'scene-0532', 'scene-0533',
'scene-0534', 'scene-0535', 'scene-0536', 'scene-0537', 'scene-0538', 'scene-0539', 'scene-0541', 'scene-0542',
'scene-0543', 'scene-0544', 'scene-0545', 'scene-0546', 'scene-0566', 'scene-0568', 'scene-0570', 'scene-0571',
'scene-0572', 'scene-0573', 'scene-0574', 'scene-0575', 'scene-0576', 'scene-0577', 'scene-0578', 'scene-0580',
'scene-0582', 'scene-0583', 'scene-0584', 'scene-0585', 'scene-0586', 'scene-0587', 'scene-0588', 'scene-0589',
'scene-0590', 'scene-0591', 'scene-0592', 'scene-0593', 'scene-0594', 'scene-0595', 'scene-0596', 'scene-0597',
'scene-0598', 'scene-0599', 'scene-0600', 'scene-0639', 'scene-0640', 'scene-0641', 'scene-0642', 'scene-0643',
'scene-0644', 'scene-0645', 'scene-0646', 'scene-0647', 'scene-0648', 'scene-0649', 'scene-0650', 'scene-0651',
'scene-0652', 'scene-0653', 'scene-0654', 'scene-0655', 'scene-0656', 'scene-0657', 'scene-0658', 'scene-0659',
'scene-0660', 'scene-0661', 'scene-0662', 'scene-0663', 'scene-0664', 'scene-0665', 'scene-0666', 'scene-0667',
'scene-0668', 'scene-0669', 'scene-0670', 'scene-0671', 'scene-0672', 'scene-0673', 'scene-0674', 'scene-0675',
'scene-0676', 'scene-0677', 'scene-0678', 'scene-0679', 'scene-0681', 'scene-0683', 'scene-0684', 'scene-0685',
'scene-0686', 'scene-0687', 'scene-0688', 'scene-0689', 'scene-0695', 'scene-0696', 'scene-0697', 'scene-0698',
'scene-0700', 'scene-0701', 'scene-0703', 'scene-0704', 'scene-0705', 'scene-0706', 'scene-0707', 'scene-0708',
'scene-0709', 'scene-0710', 'scene-0711', 'scene-0712', 'scene-0713', 'scene-0714', 'scene-0715', 'scene-0716',
'scene-0717', 'scene-0718', 'scene-0719', 'scene-0726', 'scene-0727', 'scene-0728', 'scene-0730', 'scene-0731',
'scene-0733', 'scene-0734', 'scene-0735', 'scene-0736', 'scene-0737', 'scene-0738', 'scene-0739', 'scene-0740',
'scene-0741', 'scene-0744', 'scene-0746', 'scene-0747', 'scene-0749', 'scene-0750', 'scene-0751', 'scene-0752',
'scene-0757', 'scene-0758', 'scene-0759', 'scene-0760', 'scene-0761', 'scene-0762', 'scene-0763', 'scene-0764',
'scene-0765', 'scene-0767', 'scene-0768', 'scene-0769', 'scene-0786', 'scene-0787', 'scene-0789', 'scene-0790',
'scene-0791', 'scene-0792', 'scene-0803', 'scene-0804', 'scene-0805', 'scene-0806', 'scene-0808', 'scene-0809',
'scene-0810', 'scene-0811', 'scene-0812', 'scene-0813', 'scene-0815', 'scene-0816', 'scene-0817', 'scene-0819',
'scene-0820', 'scene-0821', 'scene-0822', 'scene-0847', 'scene-0848', 'scene-0849', 'scene-0850', 'scene-0851',
'scene-0852', 'scene-0853', 'scene-0854', 'scene-0855', 'scene-0856', 'scene-0858', 'scene-0860', 'scene-0861',
'scene-0862', 'scene-0863', 'scene-0864', 'scene-0865', 'scene-0866', 'scene-0868', 'scene-0869', 'scene-0870',
'scene-0871', 'scene-0872', 'scene-0873', 'scene-0875', 'scene-0876', 'scene-0877', 'scene-0878', 'scene-0880',
'scene-0882', 'scene-0883', 'scene-0884', 'scene-0885', 'scene-0886', 'scene-0887', 'scene-0888', 'scene-0889',
'scene-0890', 'scene-0891', 'scene-0892', 'scene-0893', 'scene-0894', 'scene-0895', 'scene-0896', 'scene-0897',
'scene-0898', 'scene-0899', 'scene-0900', 'scene-0901', 'scene-0902', 'scene-0903', 'scene-0945', 'scene-0947',
'scene-0949', 'scene-0952', 'scene-0953', 'scene-0955', 'scene-0956', 'scene-0957', 'scene-0958', 'scene-0959',
'scene-0960', 'scene-0961', 'scene-0975', 'scene-0976', 'scene-0977', 'scene-0978', 'scene-0979', 'scene-0980',
'scene-0981', 'scene-0982', 'scene-0983', 'scene-0984', 'scene-0988', 'scene-0989', 'scene-0990', 'scene-0991',
'scene-0992', 'scene-0994', 'scene-0995', 'scene-0996', 'scene-0997', 'scene-0998', 'scene-0999', 'scene-1000',
'scene-1001', 'scene-1002', 'scene-1003', 'scene-1004', 'scene-1005', 'scene-1006', 'scene-1007', 'scene-1008',
'scene-1009', 'scene-1010', 'scene-1011', 'scene-1012', 'scene-1013', 'scene-1014', 'scene-1015', 'scene-1016',
'scene-1017', 'scene-1018', 'scene-1019', 'scene-1020', 'scene-1021', 'scene-1022', 'scene-1023', 'scene-1024',
'scene-1025', 'scene-1044', 'scene-1045', 'scene-1046', 'scene-1047', 'scene-1048', 'scene-1049', 'scene-1050',
'scene-1051', 'scene-1052', 'scene-1053', 'scene-1054', 'scene-1055', 'scene-1056', 'scene-1057', 'scene-1058',
'scene-1074', 'scene-1075', 'scene-1076', 'scene-1077', 'scene-1078', 'scene-1079', 'scene-1080', 'scene-1081',
'scene-1082', 'scene-1083', 'scene-1084', 'scene-1085', 'scene-1086', 'scene-1087', 'scene-1088', 'scene-1089',
'scene-1090', 'scene-1091', 'scene-1092', 'scene-1093', 'scene-1094', 'scene-1095', 'scene-1096', 'scene-1097',
'scene-1098', 'scene-1099', 'scene-1100', 'scene-1101', 'scene-1102', 'scene-1104', 'scene-1105', 'scene-1106',
'scene-1107', 'scene-1108', 'scene-1109', 'scene-1110'],
'val':
['scene-0003', 'scene-0012', 'scene-0013', 'scene-0014', 'scene-0015', 'scene-0016', 'scene-0017', 'scene-0018',
'scene-0035', 'scene-0036', 'scene-0038', 'scene-0039', 'scene-0092', 'scene-0093', 'scene-0094', 'scene-0095',
'scene-0096', 'scene-0097', 'scene-0098', 'scene-0099', 'scene-0100', 'scene-0101', 'scene-0102', 'scene-0103',
'scene-0104', 'scene-0105', 'scene-0106', 'scene-0107', 'scene-0108', 'scene-0109', 'scene-0110', 'scene-0221',
'scene-0268', 'scene-0269', 'scene-0270', 'scene-0271', 'scene-0272', 'scene-0273', 'scene-0274', 'scene-0275',
'scene-0276', 'scene-0277', 'scene-0278', 'scene-0329', 'scene-0330', 'scene-0331', 'scene-0332', 'scene-0344',
'scene-0345', 'scene-0346', 'scene-0519', 'scene-0520', 'scene-0521', 'scene-0522', 'scene-0523', 'scene-0524',
'scene-0552', 'scene-0553', 'scene-0554', 'scene-0555', 'scene-0556', 'scene-0557', 'scene-0558', 'scene-0559',
'scene-0560', 'scene-0561', 'scene-0562', 'scene-0563', 'scene-0564', 'scene-0565', 'scene-0625', 'scene-0626',
'scene-0627', 'scene-0629', 'scene-0630', 'scene-0632', 'scene-0633', 'scene-0634', 'scene-0635', 'scene-0636',
'scene-0637', 'scene-0638', 'scene-0770', 'scene-0771', 'scene-0775', 'scene-0777', 'scene-0778', 'scene-0780',
'scene-0781', 'scene-0782', 'scene-0783', 'scene-0784', 'scene-0794', 'scene-0795', 'scene-0796', 'scene-0797',
'scene-0798', 'scene-0799', 'scene-0800', 'scene-0802', 'scene-0904', 'scene-0905', 'scene-0906', 'scene-0907',
'scene-0908', 'scene-0909', 'scene-0910', 'scene-0911', 'scene-0912', 'scene-0913', 'scene-0914', 'scene-0915',
'scene-0916', 'scene-0917', 'scene-0919', 'scene-0920', 'scene-0921', 'scene-0922', 'scene-0923', 'scene-0924',
'scene-0925', 'scene-0926', 'scene-0927', 'scene-0928', 'scene-0929', 'scene-0930', 'scene-0931', 'scene-0962',
'scene-0963', 'scene-0966', 'scene-0967', 'scene-0968', 'scene-0969', 'scene-0971', 'scene-0972', 'scene-1059',
'scene-1060', 'scene-1061', 'scene-1062', 'scene-1063', 'scene-1064', 'scene-1065', 'scene-1066', 'scene-1067',
'scene-1068', 'scene-1069', 'scene-1070', 'scene-1071', 'scene-1072', 'scene-1073']
}
# Run the conversion for every configured split when invoked as a script.
if __name__ == '__main__':
  main()
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/eval_kitti_track/evaluate_tracking.py | Python | #!/usr/bin/env python
# encoding: utf-8
"""
function that does the evaluation
input:
- result_sha (sha key where the results are located)
- mail (messenger object for output messages sent via email and to cout)
output:
- True if at least one of the sub-benchmarks could be processed successfully
- False otherwise
data:
- at this point the submitted files are located in results/<result_sha>/data
- the results shall be saved as follows
-> summary statistics of the method: results/<result_sha>/stats_task.txt
here task refers to the sub-benchmark (e.g., um_lane, uu_road etc.)
file contents: numbers for main table, format: %.6f (single space separated)
note: only files with successful sub-benchmark evaluation must be created
-> detailed results/graphics/plots: results/<result_sha>/subdir
with appropriate subdir and file names (all subdir's need to be created)
"""
import sys,os,copy,math
from munkres import Munkres
from collections import defaultdict
try:
from ordereddict import OrderedDict # can be installed using pip
except:
from collections import OrderedDict # only included from python 2.7 on
import mailpy
class tData:
    """
    Utility class to load data.

    Plain value container for a single object (detection or ground truth)
    parsed from one line of a KITTI tracking label/result file.
    """
    def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\
                 obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\
                 X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1):
        """
        Constructor, initializes the object given the parameters.
        """
        # init object data
        self.frame = frame              # frame index
        self.track_id = track_id        # tracklet id (-1 = unset/invalid)
        self.obj_type = obj_type        # object type [car, pedestrian, ...]
        self.truncation = truncation    # truncation level [-1, 0, 1, 2]
        self.occlusion = occlusion      # occlusion level [-1, 0, 1, 2]
        self.obs_angle = obs_angle      # observation angle [rad]
        self.x1 = x1                    # bbox left [px]
        self.y1 = y1                    # bbox top [px]
        self.x2 = x2                    # bbox right [px]
        self.y2 = y2                    # bbox bottom [px]
        self.w = w                      # width [m]
        self.h = h                      # height [m]
        self.l = l                      # length [m]
        self.X = X                      # X [m]
        self.Y = Y                      # Y [m]
        self.Z = Z                      # Z [m]
        self.yaw = yaw                  # yaw angle [rad]
        self.score = score              # detection score (-1000 = unset)
        # evaluation bookkeeping, not read from file
        self.ignored = False
        self.valid = False
        self.tracker = -1
    def __str__(self):
        """
        Print read data.
        """
        # vars() preserves the assignment order above, so the printout
        # follows the KITTI field order.
        attrs = vars(self)
        return '\n'.join("%s: %s" % item for item in attrs.items())
class trackingEvaluation(object):
""" tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
MOTA - Multi-object tracking accuracy in [0,100]
MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches)
id-switches - number of id switches
fragments - number of fragmentations
MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories
recall - recall = percentage of detected targets
precision - precision = percentage of correctly detected targets
FAR - number of false alarms per frame
falsepositives - number of false positives (FP)
missed - number of missed targets (FN)
"""
def __init__(self, t_sha, gt_path="./tools/eval_kitti_track/data/tracking",\
split_version='', min_overlap=0.5, max_truncation = 0, min_height = 25,
max_occlusion = 2, mail=None, cls="car"):
# get number of sequences and
# get number of frames per sequence from test mapping
# (created while extracting the benchmark)
filename_test_mapping = "./tools/eval_kitti_track/data/tracking/" + \
"evaluate_tracking{}.seqmap".format(split_version)
self.n_frames = []
self.sequence_name = []
with open(filename_test_mapping, "r") as fh:
for i,l in enumerate(fh):
fields = l.split(" ")
self.sequence_name.append("%04d" % int(fields[0]))
self.n_frames.append(int(fields[3]) - int(fields[2])+1)
fh.close()
self.n_sequences = i+1
# mail object
self.mail = mail
# class to evaluate, i.e. pedestrian or car
self.cls = cls
# data and parameter
if 'half' in split_version:
self.gt_path = os.path.join(
gt_path, 'label_02_{}'.format(split_version))
else:
self.gt_path = os.path.join(gt_path, "label_02")
self.t_sha = t_sha
self.t_path = os.path.join(t_sha)
# statistics and numbers for evaluation
self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives
self.n_igt = 0 # number of ignored ground truth detections
self.n_gts = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
self.n_igts = [] # number of ground ignored truth detections PER SEQUENCE
self.n_gt_trajectories = 0
self.n_gt_seq = []
self.n_tr = 0 # number of tracker detections minus ignored tracker detections
self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE
self.n_itr = 0 # number of ignored tracker detections
self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE
self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
self.n_tr_trajectories = 0
self.n_tr_seq = []
self.MOTA = 0
self.MOTP = 0
self.MOTAL = 0
self.MODA = 0
self.MODP = 0
self.MODP_t = []
self.recall = 0
self.precision = 0
self.F1 = 0
self.FAR = 0
self.total_cost = 0
self.itp = 0 # number of ignored true positives
self.itps = [] # number of ignored true positives PER SEQUENCE
self.tp = 0 # number of true positives including ignored true positives!
self.tps = [] # number of true positives including ignored true positives PER SEQUENCE
self.fn = 0 # number of false negatives WITHOUT ignored false negatives
self.fns = [] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
self.ifn = 0 # number of ignored false negatives
self.ifns = [] # number of ignored false negatives PER SEQUENCE
self.fp = 0 # number of false positives
# a bit tricky, the number of ignored false negatives and ignored true positives
# is subtracted, but if both tracker detection and ground truth detection
# are ignored this number is added again to avoid double counting
self.fps = [] # above PER SEQUENCE
self.mme = 0
self.fragments = 0
self.id_switches = 0
self.MT = 0
self.PT = 0
self.ML = 0
self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics
self.max_truncation = max_truncation # maximum truncation of an object for evaluation
self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation
self.min_height = min_height # minimum height of an object for evaluation
self.n_sample_points = 500
# this should be enough to hold all groundtruth trajectories
# is expanded if necessary and reduced in any case
self.gt_trajectories = [[] for x in range(self.n_sequences)]
self.ign_trajectories = [[] for x in range(self.n_sequences)]
def createEvalDir(self):
"""
Creates directory to store evaluation results and data for visualization.
"""
# self.eval_dir = os.path.join("./results/", self.t_sha, "eval", self.cls)
self.eval_dir = os.path.join(self.t_sha, "eval", self.cls)
if not os.path.exists(self.eval_dir):
print("create directory:", self.eval_dir,)
os.makedirs(self.eval_dir)
print("done")
def loadGroundtruth(self):
"""
Helper function to load ground truth.
"""
try:
self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
except IOError:
return False
return True
def loadTracker(self):
"""
Helper function to load tracker data.
"""
try:
if not self._loadData(self.t_path, cls=self.cls, loading_groundtruth=False):
return False
except IOError:
return False
return True
def _loadData(self, root_dir, cls, min_score=-1000, loading_groundtruth=False):
"""
Generic loader for ground truth and tracking data.
Use loadGroundtruth() or loadTracker() to load this data.
Loads detections in KITTI format from textfiles.
"""
# construct objectDetections object to hold detection data
t_data = tData()
data = []
eval_2d = True
eval_3d = True
seq_data = []
n_trajectories = 0
n_trajectories_seq = []
for seq, s_name in enumerate(self.sequence_name):
i = 0
filename = os.path.join(root_dir, "%s.txt" % s_name)
f = open(filename, "r")
f_data = [[] for x in range(self.n_frames[seq])] # current set has only 1059 entries, sufficient length is checked anyway
ids = []
n_in_seq = 0
id_frame_cache = []
for line in f:
# KITTI tracking benchmark data format:
# (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
line = line.strip()
fields = line.split(" ")
# classes that should be loaded (ignored neighboring classes)
if "car" in cls.lower():
classes = ["car","van"]
elif "pedestrian" in cls.lower():
classes = ["pedestrian","person_sitting"]
else:
classes = [cls.lower()]
classes += ["dontcare"]
if not any([s for s in classes if s in fields[2].lower()]):
continue
# get fields from table
t_data.frame = int(float(fields[0])) # frame
t_data.track_id = int(float(fields[1])) # id
t_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...]
t_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2]
t_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2]
t_data.obs_angle = float(fields[5]) # observation angle [rad]
t_data.x1 = float(fields[6]) # left [px]
t_data.y1 = float(fields[7]) # top [px]
t_data.x2 = float(fields[8]) # right [px]
t_data.y2 = float(fields[9]) # bottom [px]
t_data.h = float(fields[10]) # height [m]
t_data.w = float(fields[11]) # width [m]
t_data.l = float(fields[12]) # length [m]
t_data.X = float(fields[13]) # X [m]
t_data.Y = float(fields[14]) # Y [m]
t_data.Z = float(fields[15]) # Z [m]
t_data.yaw = float(fields[16]) # yaw angle [rad]
if not loading_groundtruth:
if len(fields) == 17:
t_data.score = -1
elif len(fields) == 18:
t_data.score = float(fields[17]) # detection score
else:
self.mail.msg("file is not in KITTI format")
return
# do not consider objects marked as invalid
if t_data.track_id is -1 and t_data.obj_type != "dontcare":
continue
idx = t_data.frame
# check if length for frame data is sufficient
if idx >= len(f_data):
print("extend f_data", idx, len(f_data))
f_data += [[] for x in range(max(500, idx-len(f_data)))]
try:
id_frame = (t_data.frame,t_data.track_id)
if id_frame in id_frame_cache and not loading_groundtruth:
self.mail.msg("track ids are not unique for sequence %d: frame %d" % (seq,t_data.frame))
self.mail.msg("track id %d occured at least twice for this frame" % t_data.track_id)
self.mail.msg("Exiting...")
#continue # this allows to evaluate non-unique result files
return False
id_frame_cache.append(id_frame)
f_data[t_data.frame].append(copy.copy(t_data))
except:
print(len(f_data), idx)
raise
if t_data.track_id not in ids and t_data.obj_type!="dontcare":
ids.append(t_data.track_id)
n_trajectories +=1
n_in_seq +=1
# check if uploaded data provides information for 2D and 3D evaluation
if not loading_groundtruth and eval_2d is True and(t_data.x1==-1 or t_data.x2==-1 or t_data.y1==-1 or t_data.y2==-1):
eval_2d = False
if not loading_groundtruth and eval_3d is True and(t_data.X==-1000 or t_data.Y==-1000 or t_data.Z==-1000):
eval_3d = False
# only add existing frames
n_trajectories_seq.append(n_in_seq)
seq_data.append(f_data)
f.close()
if not loading_groundtruth:
self.tracker=seq_data
self.n_tr_trajectories=n_trajectories
self.eval_2d = eval_2d
self.eval_3d = eval_3d
self.n_tr_seq = n_trajectories_seq
if self.n_tr_trajectories==0:
return False
else:
# split ground truth and DontCare areas
self.dcareas = []
self.groundtruth = []
for seq_idx in range(len(seq_data)):
seq_gt = seq_data[seq_idx]
s_g, s_dc = [],[]
for f in range(len(seq_gt)):
all_gt = seq_gt[f]
g,dc = [],[]
for gg in all_gt:
if gg.obj_type=="dontcare":
dc.append(gg)
else:
g.append(gg)
s_g.append(g)
s_dc.append(dc)
self.dcareas.append(s_dc)
self.groundtruth.append(s_g)
self.n_gt_seq=n_trajectories_seq
self.n_gt_trajectories=n_trajectories
return True
def boxoverlap(self,a,b,criterion="union"):
"""
boxoverlap computes intersection over union for bbox a and b in KITTI format.
If the criterion is 'union', overlap = (a inter b) / a union b).
If the criterion is 'a', overlap = (a inter b) / a, where b should be a dontcare area.
"""
x1 = max(a.x1, b.x1)
y1 = max(a.y1, b.y1)
x2 = min(a.x2, b.x2)
y2 = min(a.y2, b.y2)
w = x2-x1
h = y2-y1
if w<=0. or h<=0.:
return 0.
inter = w*h
aarea = (a.x2-a.x1) * (a.y2-a.y1)
barea = (b.x2-b.x1) * (b.y2-b.y1)
# intersection over union overlap
if criterion.lower()=="union":
o = inter / float(aarea+barea-inter)
elif criterion.lower()=="a":
o = float(inter) / float(aarea)
else:
raise TypeError("Unkown type for criterion")
return o
def compute3rdPartyMetrics(self):
"""
Computes the metrics defined in
- Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics
MOTA, MOTAL, MOTP
- Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows
MT/PT/ML
"""
# construct Munkres object for Hungarian Method association
hm = Munkres()
max_cost = 1e9
# go through all frames and associate ground truth and tracker results
# groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections
fr, ids = 0,0
for seq_idx in range(len(self.groundtruth)):
seq_gt = self.groundtruth[seq_idx]
seq_dc = self.dcareas[seq_idx] # don't care areas
seq_tracker = self.tracker[seq_idx]
seq_trajectories = defaultdict(list)
seq_ignored = defaultdict(list)
# statistics over the current sequence, check the corresponding
# variable comments in __init__ to get their meaning
seqtp = 0
seqitp = 0
seqfn = 0
seqifn = 0
seqfp = 0
seqigt = 0
seqitr = 0
last_ids = [[],[]]
n_gts = 0
n_trs = 0
for f in range(len(seq_gt)):
g = seq_gt[f]
dc = seq_dc[f]
t = seq_tracker[f]
# counting total number of ground truth and tracker objects
self.n_gt += len(g)
self.n_tr += len(t)
n_gts += len(g)
n_trs += len(t)
# use hungarian method to associate, using boxoverlap 0..1 as cost
# build cost matrix
cost_matrix = []
this_ids = [[],[]]
for gg in g:
# save current ids
this_ids[0].append(gg.track_id)
this_ids[1].append(-1)
gg.tracker = -1
gg.id_switch = 0
gg.fragmentation = 0
cost_row = []
for tt in t:
# overlap == 1 is cost ==0
c = 1-self.boxoverlap(gg,tt)
# gating for boxoverlap
if c<=self.min_overlap:
cost_row.append(c)
else:
cost_row.append(max_cost) # = 1e9
cost_matrix.append(cost_row)
# all ground truth trajectories are initially not associated
# extend groundtruth trajectories lists (merge lists)
seq_trajectories[gg.track_id].append(-1)
seq_ignored[gg.track_id].append(False)
if len(g) is 0:
cost_matrix=[[]]
# associate
association_matrix = hm.compute(cost_matrix)
# tmp variables for sanity checks and MODP computation
tmptp = 0
tmpfp = 0
tmpfn = 0
tmpc = 0 # this will sum up the overlaps for all true positives
tmpcs = [0]*len(g) # this will save the overlaps for all true positives
# the reason is that some true positives might be ignored
# later such that the corrsponding overlaps can
# be subtracted from tmpc for MODP computation
# mapping for tracker ids and ground truth ids
for row,col in association_matrix:
# apply gating on boxoverlap
c = cost_matrix[row][col]
if c < max_cost:
g[row].tracker = t[col].track_id
this_ids[1][row] = t[col].track_id
t[col].valid = True
g[row].distance = c
self.total_cost += 1-c
tmpc += 1-c
tmpcs[row] = 1-c
seq_trajectories[g[row].track_id][-1] = t[col].track_id
# true positives are only valid associations
self.tp += 1
tmptp += 1
else:
g[row].tracker = -1
self.fn += 1
tmpfn += 1
# associate tracker and DontCare areas
# ignore tracker in neighboring classes
nignoredtracker = 0 # number of ignored tracker detections
ignoredtrackers = dict() # will associate the track_id with -1
# if it is not ignored and 1 if it is
# ignored;
# this is used to avoid double counting ignored
# cases, see the next loop
for tt in t:
ignoredtrackers[tt.track_id] = -1
# ignore detection if it belongs to a neighboring class or is
# smaller or equal to the minimum height
tt_height = abs(tt.y1 - tt.y2)
if ((self.cls=="car" and tt.obj_type=="van") or (self.cls=="pedestrian" and tt.obj_type=="person_sitting") or tt_height<=self.min_height) and not tt.valid:
nignoredtracker+= 1
tt.ignored = True
ignoredtrackers[tt.track_id] = 1
continue
for d in dc:
overlap = self.boxoverlap(tt,d,"a")
if overlap>0.5 and not tt.valid:
tt.ignored = True
nignoredtracker+= 1
ignoredtrackers[tt.track_id] = 1
break
# check for ignored FN/TP (truncation or neighboring object class)
ignoredfn = 0 # the number of ignored false negatives
nignoredtp = 0 # the number of ignored true positives
nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive
# which is ignored but where the associated tracker
# detection has already been ignored
gi = 0
for gg in g:
if gg.tracker < 0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
ignoredfn += 1
elif gg.tracker>=0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
nignoredtp += 1
# if the associated tracker detection is already ignored,
# we want to avoid double counting ignored detections
if ignoredtrackers[gg.tracker] > 0:
nignoredpairs += 1
# for computing MODP, the overlaps from ignored detections
# are subtracted
tmpc -= tmpcs[gi]
gi += 1
# the below might be confusion, check the comments in __init__
# to see what the individual statistics represent
# correct TP by number of ignored TP due to truncation
# ignored TP are shown as tracked in visualization
tmptp -= nignoredtp
# count the number of ignored true positives
self.itp += nignoredtp
# adjust the number of ground truth objects considered
self.n_gt -= (ignoredfn + nignoredtp)
# count the number of ignored ground truth objects
self.n_igt += ignoredfn + nignoredtp
# count the number of ignored tracker objects
self.n_itr += nignoredtracker
# count the number of ignored pairs, i.e. associated tracker and
# ground truth objects that are both ignored
self.n_igttr += nignoredpairs
# false negatives = associated gt bboxes exceding association threshold + non-associated gt bboxes
#
tmpfn += len(g)-len(association_matrix)-ignoredfn
self.fn += len(g)-len(association_matrix)-ignoredfn
self.ifn += ignoredfn
# false positives = tracker bboxes - associated tracker bboxes
# mismatches (mme_t)
tmpfp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
self.fp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
#tmpfp = len(t) - tmptp - nignoredtp # == len(t) - (tp - ignoredtp) - ignoredtp
#self.fp += len(t) - tmptp - nignoredtp
# update sequence data
seqtp += tmptp
seqitp += nignoredtp
seqfp += tmpfp
seqfn += tmpfn
seqifn += ignoredfn
seqigt += ignoredfn + nignoredtp
seqitr += nignoredtracker
# sanity checks
# - the number of true positives minues ignored true positives
# should be greater or equal to 0
# - the number of false negatives should be greater or equal to 0
# - the number of false positives needs to be greater or equal to 0
# otherwise ignored detections might be counted double
# - the number of counted true positives (plus ignored ones)
# and the number of counted false negatives (plus ignored ones)
# should match the total number of ground truth objects
# - the number of counted true positives (plus ignored ones)
# and the number of counted false positives
# plus the number of ignored tracker detections should
# match the total number of tracker detections; note that
# nignoredpairs is subtracted here to avoid double counting
# of ignored detection sin nignoredtp and nignoredtracker
if tmptp<0:
print(tmptp, nignoredtp)
raise NameError("Something went wrong! TP is negative")
if tmpfn<0:
print(tmpfn, len(g), len(association_matrix), ignoredfn, nignoredpairs)
raise NameError("Something went wrong! FN is negative")
if tmpfp<0:
print(tmpfp, len(t), tmptp, nignoredtracker, nignoredtp, nignoredpairs)
raise NameError("Something went wrong! FP is negative")
if tmptp + tmpfn is not len(g)-ignoredfn-nignoredtp:
print("seqidx", seq_idx)
print("frame ", f)
print("TP ", tmptp)
print("FN ", tmpfn)
print("FP ", tmpfp)
print("nGT ", len(g))
print("nAss ", len(association_matrix))
print("ign GT", ignoredfn)
print("ign TP", nignoredtp)
raise NameError("Something went wrong! nGroundtruth is not TP+FN")
if tmptp+tmpfp+nignoredtp+nignoredtracker-nignoredpairs is not len(t):
print(seq_idx, f, len(t), tmptp, tmpfp)
print(len(association_matrix), association_matrix)
raise NameError("Something went wrong! nTracker is not TP+FP")
# check for id switches or fragmentations
for i,tt in enumerate(this_ids[0]):
if tt in last_ids[0]:
idx = last_ids[0].index(tt)
tid = this_ids[1][i]
lid = last_ids[1][idx]
if tid != lid and lid != -1 and tid != -1:
if g[i].truncation<self.max_truncation:
g[i].id_switch = 1
ids +=1
if tid != lid and lid != -1:
if g[i].truncation<self.max_truncation:
g[i].fragmentation = 1
fr +=1
# save current index
last_ids = this_ids
# compute MOTP_t
MODP_t = 1
if tmptp!=0:
MODP_t = tmpc/float(tmptp)
self.MODP_t.append(MODP_t)
# remove empty lists for current gt trajectories
self.gt_trajectories[seq_idx] = seq_trajectories
self.ign_trajectories[seq_idx] = seq_ignored
# gather statistics for "per sequence" statistics.
self.n_gts.append(n_gts)
self.n_trs.append(n_trs)
self.tps.append(seqtp)
self.itps.append(seqitp)
self.fps.append(seqfp)
self.fns.append(seqfn)
self.ifns.append(seqifn)
self.n_igts.append(seqigt)
self.n_itrs.append(seqitr)
# compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories
n_ignored_tr_total = 0
for seq_idx, (seq_trajectories,seq_ignored) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)):
if len(seq_trajectories)==0:
continue
tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0]*5
n_ignored_tr = 0
for g, ign_g in zip(seq_trajectories.values(), seq_ignored.values()):
# all frames of this gt trajectory are ignored
if all(ign_g):
n_ignored_tr+=1
n_ignored_tr_total+=1
continue
# all frames of this gt trajectory are not assigned to any detections
if all([this==-1 for this in g]):
tmpML+=1
self.ML+=1
continue
# compute tracked frames in trajectory
last_id = g[0]
# first detection (necessary to be in gt_trajectories) is always tracked
tracked = 1 if g[0]>=0 else 0
lgt = 0 if ign_g[0] else 1
for f in range(1,len(g)):
if ign_g[f]:
last_id = -1
continue
lgt+=1
if last_id != g[f] and last_id != -1 and g[f] != -1 and g[f-1] != -1:
tmpId_switches += 1
self.id_switches += 1
if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1:
tmpFragments += 1
self.fragments += 1
if g[f] != -1:
tracked += 1
last_id = g[f]
# handle last frame; tracked state is handled in for loop (g[f]!=-1)
if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]:
tmpFragments += 1
self.fragments += 1
# compute MT/PT/ML
tracking_ratio = tracked / float(len(g) - sum(ign_g))
if tracking_ratio > 0.8:
tmpMT += 1
self.MT += 1
elif tracking_ratio < 0.2:
tmpML += 1
self.ML += 1
else: # 0.2 <= tracking_ratio <= 0.8
tmpPT += 1
self.PT += 1
if (self.n_gt_trajectories-n_ignored_tr_total)==0:
self.MT = 0.
self.PT = 0.
self.ML = 0.
else:
self.MT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total)
# precision/recall etc.
if (self.fp+self.tp)==0 or (self.tp+self.fn)==0:
self.recall = 0.
self.precision = 0.
else:
self.recall = self.tp/float(self.tp+self.fn)
self.precision = self.tp/float(self.fp+self.tp)
if (self.recall+self.precision)==0:
self.F1 = 0.
else:
self.F1 = 2.*(self.precision*self.recall)/(self.precision+self.recall)
if sum(self.n_frames)==0:
self.FAR = "n/a"
else:
self.FAR = self.fp/float(sum(self.n_frames))
# compute CLEARMOT
if self.n_gt==0:
self.MOTA = -float("inf")
self.MODA = -float("inf")
else:
self.MOTA = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt)
if self.tp==0:
self.MOTP = float("inf")
else:
self.MOTP = self.total_cost / float(self.tp)
if self.n_gt!=0:
if self.id_switches==0:
self.MOTAL = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
else:
self.MOTAL = 1 - (self.fn + self.fp + math.log10(self.id_switches))/float(self.n_gt)
else:
self.MOTAL = -float("inf")
if sum(self.n_frames)==0:
self.MODP = "n/a"
else:
self.MODP = sum(self.MODP_t)/float(sum(self.n_frames))
return True
def createSummary(self):
"""
Generate and mail a summary of the results.
If mailpy.py is present, the summary is instead printed.
"""
summary = ""
summary += "tracking evaluation summary".center(80,"=") + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n"
summary += self.printEntry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n"
summary += self.printEntry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n"
summary += self.printEntry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n"
summary += "\n"
summary += self.printEntry("Recall", self.recall) + "\n"
summary += self.printEntry("Precision", self.precision) + "\n"
summary += self.printEntry("F1", self.F1) + "\n"
summary += self.printEntry("False Alarm Rate", self.FAR) + "\n"
summary += "\n"
summary += self.printEntry("Mostly Tracked", self.MT) + "\n"
summary += self.printEntry("Partly Tracked", self.PT) + "\n"
summary += self.printEntry("Mostly Lost", self.ML) + "\n"
summary += "\n"
summary += self.printEntry("True Positives", self.tp) + "\n"
#summary += self.printEntry("True Positives per Sequence", self.tps) + "\n"
summary += self.printEntry("Ignored True Positives", self.itp) + "\n"
#summary += self.printEntry("Ignored True Positives per Sequence", self.itps) + "\n"
summary += self.printEntry("False Positives", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
self.fp = self.fp / self.n_gt
self.fn = self.fn / self.n_gt
self.id_switches = self.id_switches / self.n_gt
summary += self.printEntry("False Positives Ratio", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives Ratio", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("Ignored False Negatives Ratio", self.ifn) + "\n"
#summary += self.printEntry("Ignored False Negatives per Sequence", self.ifns) + "\n"
summary += self.printEntry("Missed Targets", self.fn) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
summary += self.printEntry("Fragmentations", self.fragments) + "\n"
summary += "\n"
summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n"
#summary += self.printEntry("Ground Truth Objects (Total) per Sequence", self.n_gts) + "\n"
summary += self.printEntry("Ignored Ground Truth Objects", self.n_igt) + "\n"
#summary += self.printEntry("Ignored Ground Truth Objects per Sequence", self.n_igts) + "\n"
summary += self.printEntry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n"
summary += "\n"
summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n"
#summary += self.printEntry("Tracker Objects (Total) per Sequence", self.n_trs) + "\n"
summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n"
#summary += self.printEntry("Ignored Tracker Objects per Sequence", self.n_itrs) + "\n"
summary += self.printEntry("Tracker Trajectories", self.n_tr_trajectories) + "\n"
#summary += "\n"
#summary += self.printEntry("Ignored Tracker Objects with Associated Ignored Ground Truth Objects", self.n_igttr) + "\n"
summary += "="*80
return summary
def printEntry(self, key, val,width=(70,10)):
"""
Pretty print an entry in a table fashion.
"""
s_out = key.ljust(width[0])
if type(val)==int:
s = "%%%dd" % width[1]
s_out += s % val
elif type(val)==float:
s = "%%%df" % (width[1])
s_out += s % val
else:
s_out += ("%s"%val).rjust(width[1])
return s_out
def saveToStats(self):
"""
Save the statistics in a whitespace separate file.
"""
# create pretty summary
summary = self.createSummary()
# mail or print the summary.
mail.msg(summary)
# write summary to file summary_cls.txt
# filename = os.path.join("./results", self.t_sha, "summary_%s.txt" % self.cls)
filename = os.path.join(self.t_sha, "summary_%s.txt" % self.cls)
dump = open(filename, "w+")
# print>>dump, summary
dump.write(summary)
dump.close()
# dump all the statistics to the corresponding stats_cls.txt file
# filename = os.path.join("./results", self.t_sha, "stats_%s.txt" % self.cls)
filename = os.path.join(self.t_sha, "stats_%s.txt" % self.cls)
dump = open(filename, "w+")
# print>>dump, "%.6f " * 21 \
dump.write("%.6f " * 21 \
% (self.MOTA, self.MOTP, self.MOTAL, self.MODA, self.MODP, \
self.recall, self.precision, self.F1, self.FAR, \
self.MT, self.PT, self.ML, self.tp, self.fp, self.fn, self.id_switches, self.fragments, \
self.n_gt, self.n_gt_trajectories, self.n_tr, self.n_tr_trajectories))
dump.close()
# write description of statistics to description.txt
# filename = os.path.join("./results", self.t_sha, "description.txt")
filename = os.path.join(self.t_sha, "description.txt")
dump = open(filename, "w+")
# print>>dump, "MOTA", "MOTP", "MOTAL", "MODA", "MODP", "recall", "precision", "F1", "FAR",
# print>>dump, "MT", "PT", "ML", "tp", "fp", "fn", "id_switches", "fragments",
# print>>dump, "n_gt", "n_gt_trajectories", "n_tr", "n_tr_trajectories"
dump.write("MOTA" + "MOTP" + "MOTAL" + "MODA" + "MODP" + "recall" + "precision" + "F1" + "FAR")
dump.write("MT" + "PT" + "ML" + "tp" + "fp" + "fn" + "id_switches" + "fragments")
dump.write("n_gt" + "n_gt_trajectories" + "n_tr" + "n_tr_trajectories")
def evaluate(result_sha, mail, split_version=''):
    """
    Entry point for evaluation, will load the data and start evaluation for
    CAR and PEDESTRIAN if available.

    result_sha    -- unique key of the submitted results
    mail          -- messenger object used for all status output
    split_version -- optional dataset split suffix

    Returns True if at least one class was evaluated successfully, False
    otherwise; raises ValueError if the ground truth cannot be found.
    """
    # start evaluation and instantiate eval object
    mail.msg("Processing Result for KITTI Tracking Benchmark")
    classes = []
    for c in ("car", "pedestrian"):
        e = trackingEvaluation(
            t_sha=result_sha, mail=mail, cls=c, split_version=split_version)
        # load tracker data and check provided classes
        try:
            if not e.loadTracker():
                continue
            mail.msg("Loading Results - Success")
            mail.msg("Evaluate Object Class: %s" % c.upper())
            classes.append(c)
        except Exception:  # FIX: narrowed from a bare except: (kept broad on
                           # purpose — any malformed result file aborts)
            mail.msg("Feel free to contact us (lenz@kit.edu), if you receive this error message:")
            mail.msg("   Caught exception while loading result data.")
            break
        # load groundtruth data for this class
        if not e.loadGroundtruth():
            raise ValueError("Ground truth not found.")
        mail.msg("Loading Groundtruth - Success")
        # sanity checks
        # FIX: was "is not" — identity comparison of ints only works by
        # accident for CPython's cached small ints; use != instead
        if len(e.groundtruth) != len(e.tracker):
            mail.msg("The uploaded data does not provide results for every sequence.")
            return False
        mail.msg("Loaded %d Sequences." % len(e.groundtruth))
        mail.msg("Start Evaluation...")
        # create needed directories, evaluate and save stats
        try:
            e.createEvalDir()
        except Exception:  # FIX: narrowed from a bare except:
            mail.msg("Feel free to contact us (lenz@kit.edu), if you receive this error message:")
            mail.msg("   Caught exception while creating results.")
        if e.compute3rdPartyMetrics():
            e.saveToStats()
        else:
            mail.msg("There seem to be no true positives or false positives at all in the submitted data.")
    # finish
    if len(classes) == 0:
        mail.msg("The uploaded results could not be evaluated. Check for format errors.")
        return False
    mail.msg("Thank you for participating in our benchmark!")
    return True
#########################################################################
# entry point of evaluation script
# input:
#   - result_sha (unique key of results)
#   - user_sha  (key of user who submitted the results, optional)
#   - email     (address of user who submitted the results, optional)
if __name__ == "__main__":
    # unique sha key of the submitted results; optional split version suffix
    result_sha = sys.argv[1]
    split_version = sys.argv[2] if len(sys.argv) >= 3 else ''

    # messenger object: prints instead of mailing when no address is given
    mail = mailpy.Mail("")

    # evaluate results and notify the submitting user
    success = evaluate(result_sha, mail, split_version=split_version)
    mail.finalize(success, "tracking", result_sha,
                  split_version if len(sys.argv) == 4 else "")
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/eval_kitti_track/mailpy.py | Python | class Mail:
""" Dummy class to print messages without sending e-mails"""
    def __init__(self,mailaddress):
        # mailaddress is accepted for API compatibility with the real mailer
        # but unused: this dummy messenger only prints to stdout
        pass
    def msg(self,msg):
        """Print *msg* to stdout instead of appending it to an e-mail body."""
        print(msg)
def finalize(self,success,benchmark,sha_key,mailaddress=None):
if success:
print("Results for %s (benchmark: %s) sucessfully created" % (benchmark,sha_key))
else:
print("Creating results for %s (benchmark: %s) failed" % (benchmark,sha_key))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/eval_kitti_track/munkres.py | Python | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
n = len(matrix)
minval = sys.maxint
for row in range(n):
cost = 0
for col in range(n):
cost += matrix[row][col]
minval = min(cost, minval)
print minval
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
total cost: 12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxint - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxint - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxint - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.5.4"
__author__ = "Brian Clapper, bmc@clapper.org"
__url__ = "http://bmc.github.com/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
"""
Calculate the Munkres solution to the classical assignment problem.
See the module documentation for usage.
"""
    def __init__(self):
        """Create a new instance"""
        # All of this state is (re)initialized by compute(); the values here
        # only make the attributes exist before the first call.
        self.C = None            # padded (square) copy of the cost matrix
        self.row_covered = []    # per-row "covered" flags
        self.col_covered = []    # per-column "covered" flags
        self.n = 0               # dimension of the padded square matrix
        self.Z0_r = 0            # helper row index used by the step functions
        self.Z0_c = 0            # helper column index used by the step functions
        self.marked = None       # zero markings (starred/primed) — see compute()
        self.path = None         # alternating-path scratch matrix — see compute()
    def make_cost_matrix(profit_matrix, inversion_function):
        """
        **DEPRECATED**

        Please use the module function ``make_cost_matrix()``.
        """
        # backwards-compatibility shim: simply forwards to the module-level
        # make_cost_matrix() of the same name
        import munkres
        return munkres.make_cost_matrix(profit_matrix, inversion_function)

    # exposed as a static method for historical callers
    make_cost_matrix = staticmethod(make_cost_matrix)
def pad_matrix(self, matrix, pad_value=0):
"""
Pad a possibly non-square matrix to make it square.
:Parameters:
matrix : list of lists
matrix to pad
pad_value : int
value to use to pad the matrix
:rtype: list of lists
:return: a new, possibly padded, matrix
"""
max_columns = 0
total_rows = len(matrix)
for row in matrix:
max_columns = max(max_columns, len(row))
total_rows = max(max_columns, total_rows)
new_matrix = []
for row in matrix:
row_len = len(row)
new_row = row[:]
if total_rows > row_len:
# Row too short. Pad it.
new_row += [0] * (total_rows - row_len)
new_matrix += [new_row]
while len(new_matrix) < total_rows:
new_matrix += [[0] * total_rows]
return new_matrix
def compute(self, cost_matrix):
"""
Compute the indexes for the lowest-cost pairings between rows and
columns in the database. Returns a list of (row, column) tuples
that can be used to traverse the matrix.
:Parameters:
cost_matrix : list of lists
The cost matrix. If this cost matrix is not square, it
will be padded with zeros, via a call to ``pad_matrix()``.
(This method does *not* modify the caller's matrix. It
operates on a copy of the matrix.)
**WARNING**: This code handles square and rectangular
matrices. It does *not* handle irregular matrices.
:rtype: list
:return: A list of ``(row, column)`` tuples that describe the lowest
cost path through the matrix
"""
self.C = self.pad_matrix(cost_matrix)
self.n = len(self.C)
self.original_length = len(cost_matrix)
self.original_width = len(cost_matrix[0])
self.row_covered = [False for i in range(self.n)]
self.col_covered = [False for i in range(self.n)]
self.Z0_r = 0
self.Z0_c = 0
self.path = self.__make_matrix(self.n * 2, 0)
self.marked = self.__make_matrix(self.n, 0)
done = False
step = 1
steps = { 1 : self.__step1,
2 : self.__step2,
3 : self.__step3,
4 : self.__step4,
5 : self.__step5,
6 : self.__step6 }
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = []
for i in range(self.original_length):
for j in range(self.original_width):
if self.marked[i][j] == 1:
results += [(i, j)]
return results
def __copy_matrix(self, matrix):
"""Return an exact copy of the supplied matrix"""
return copy.deepcopy(matrix)
def __make_matrix(self, n, val):
"""Create an *n*x*n* matrix, populating it with the specific value."""
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
def __step1(self):
"""
For each row of the matrix, find the smallest element and
subtract it from every element in its row. Go to Step 2.
"""
C = self.C
n = self.n
for i in range(n):
minval = min(self.C[i])
# Find the minimum value for this row and subtract that minimum
# from every element in the row.
for j in range(n):
self.C[i][j] -= minval
return 2
def __step2(self):
"""
Find a zero (Z) in the resulting matrix. If there is no starred
zero in its row or column, star Z. Repeat for each element in the
matrix. Go to Step 3.
"""
n = self.n
for i in range(n):
for j in range(n):
if (self.C[i][j] == 0) and \
(not self.col_covered[j]) and \
(not self.row_covered[i]):
self.marked[i][j] = 1
self.col_covered[j] = True
self.row_covered[i] = True
self.__clear_covers()
return 3
def __step3(self):
"""
Cover each column containing a starred zero. If K columns are
covered, the starred zeros describe a complete set of unique
assignments. In this case, Go to DONE, otherwise, Go to Step 4.
"""
n = self.n
count = 0
for i in range(n):
for j in range(n):
if self.marked[i][j] == 1:
self.col_covered[j] = True
count += 1
if count >= n:
step = 7 # done
else:
step = 4
return step
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
def __step5(self):
"""
Construct a series of alternating primed and starred zeros as
follows. Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred zero
of the series, star each primed zero of the series, erase all
primes and uncover every line in the matrix. Return to Step 3
"""
count = 0
path = self.path
path[count][0] = self.Z0_r
path[count][1] = self.Z0_c
done = False
while not done:
row = self.__find_star_in_col(path[count][1])
if row >= 0:
count += 1
path[count][0] = row
path[count][1] = path[count-1][1]
else:
done = True
if not done:
col = self.__find_prime_in_row(path[count][0])
count += 1
path[count][0] = path[count-1][0]
path[count][1] = col
self.__convert_path(path, count)
self.__clear_covers()
self.__erase_primes()
return 3
def __step6(self):
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
minval = self.__find_smallest()
for i in range(self.n):
for j in range(self.n):
if self.row_covered[i]:
self.C[i][j] += minval
if not self.col_covered[j]:
self.C[i][j] -= minval
return 4
def __find_smallest(self):
"""Find the smallest uncovered value in the matrix."""
minval = 2e9 # sys.maxint
for i in range(self.n):
for j in range(self.n):
if (not self.row_covered[i]) and (not self.col_covered[j]):
if minval > self.C[i][j]:
minval = self.C[i][j]
return minval
def __find_a_zero(self):
"""Find the first uncovered element with value 0"""
row = -1
col = -1
i = 0
n = self.n
done = False
while not done:
j = 0
while True:
if (self.C[i][j] == 0) and \
(not self.row_covered[i]) and \
(not self.col_covered[j]):
row = i
col = j
done = True
j += 1
if j >= n:
break
i += 1
if i >= n:
done = True
return (row, col)
def __find_star_in_row(self, row):
"""
Find the first starred element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
return col
def __find_star_in_col(self, col):
"""
Find the first starred element in the specified row. Returns
the row index, or -1 if no starred element was found.
"""
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col
def __convert_path(self, path, count):
for i in range(count+1):
if self.marked[path[i][0]][path[i][1]] == 1:
self.marked[path[i][0]][path[i][1]] = 0
else:
self.marked[path[i][0]][path[i][1]] = 1
def __clear_covers(self):
"""Clear all covered matrix cells"""
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
def __erase_primes(self):
"""Erase all prime markings"""
for i in range(self.n):
for j in range(self.n):
if self.marked[i][j] == 2:
self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
    """
    Create a cost matrix from a profit matrix by calling
    'inversion_function' to invert each value. The inversion
    function must take one numeric argument (of any type) and return
    another numeric argument which is presumed to be the cost inverse
    of the original profit.
    This is a static method. Call it like this:
    .. python::
        cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)
    For example:
    .. python::
        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxint - x)
    :Parameters:
        profit_matrix : list of lists
            The matrix to convert from a profit to a cost matrix
        inversion_function : function
            The function to use to invert each entry in the profit matrix
    :rtype: list of lists
    :return: The converted matrix
    """
    # Apply the inversion element-wise, producing a brand-new matrix and
    # leaving the caller's profit matrix untouched.
    return [[inversion_function(cell) for cell in row] for row in profit_matrix]
def print_matrix(matrix, msg=None):
    """
    Convenience function: Displays the contents of a matrix of integers.
    :Parameters:
        matrix : list of lists
            Matrix to print
        msg : str
            Optional message to print before displaying the matrix
    """
    if msg is not None:
        print(msg)
    # Calculate the appropriate format width.
    # Bug fix: the previous width calculation used int(math.log10(val)) + 1,
    # which raises a ValueError for val == 0 and a math domain error for
    # negative values (and under-counts the '-' sign). len(str(val)) handles
    # all integers correctly.
    width = 0
    for row in matrix:
        for val in row:
            width = max(width, len(str(val)))
    # Make the format string
    format = '%%%dd' % width
    # Print the matrix
    for row in matrix:
        sep = '['
        for val in row:
            sys.stdout.write(sep + format % val)
            sep = ', '
        sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Self-test: solve a few matrices with known optimal assignment costs
    # and verify that the solver reproduces the expected totals.
    test_cases = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),
    ]
    solver = Munkres()
    for cost_matrix, expected_total in test_cases:
        print_matrix(cost_matrix, msg='cost matrix')
        total_cost = 0
        for r, c in solver.compute(cost_matrix):
            x = cost_matrix[r][c]
            total_cost += x
            print('(%d, %d) -> %d' % (r, c, x))
        print('lowest cost=%d' % total_cost)
        assert expected_total == total_cost
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/eval_motchallenge.py | Python | """py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
Modified by Xingyi Zhou
"""
import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path
def parse_args():
    """Build the command-line interface of this script and parse sys.argv."""
    description = """
Compute metrics for trackers using MOTChallenge ground-truth data.
Files
-----
All file content, ground truth and test files, have to comply with the
format described in
Milan, Anton, et al.
"Mot16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/
Structure
---------
Layout for ground truth data
<GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
<GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
...
Layout for test data
<TEST_ROOT>/<SEQUENCE_1>.txt
<TEST_ROOT>/<SEQUENCE_2>.txt
...
Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string."""
    # RawTextHelpFormatter keeps the layout of the description verbatim.
    ap = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawTextHelpFormatter)
    ap.add_argument('groundtruths', type=str,
                    help='Directory containing ground truth files.')
    ap.add_argument('tests', type=str,
                    help='Directory containing tracker result files')
    ap.add_argument('--gt_type', type=str, default='')
    ap.add_argument('--eval_official', action='store_true')
    ap.add_argument('--loglevel', type=str, help='Log level', default='info')
    ap.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
    ap.add_argument('--solver', type=str, help='LAP solver to use')
    return ap.parse_args()
def compare_dataframes(gts, ts):
    """Pair each tracker output with its ground truth sequence and
    accumulate MOT events via IoU matching (distance threshold 0.5).
    Sequences without ground truth are skipped with a warning.
    Returns (accumulators, sequence_names) in matching order."""
    accumulators = []
    sequence_names = []
    for name, tracker_df in ts.items():
        if name not in gts:
            logging.warning('No ground truth for {}, skipping.'.format(name))
            continue
        logging.info('Comparing {}...'.format(name))
        accumulators.append(
            mm.utils.compare_to_groundtruth(gts[name], tracker_df, 'iou', distth=0.5))
        sequence_names.append(name)
    return accumulators, sequence_names
if __name__ == '__main__':
    # Entry point: evaluate MOT tracker results against ground truth using
    # the py-motmetrics library.
    args = parse_args()
    # Translate the textual log level (e.g. 'info') to its logging constant.
    loglevel = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: {} '.format(args.loglevel))
    logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')
    if args.solver:
        mm.lap.default_solver = args.solver
    # gt_type is a filename suffix selecting a ground-truth variant
    # (gt<gt_type>.txt), e.g. a half-split used during validation.
    gt_type = args.gt_type
    print('gt_type', gt_type)
    gtfiles = glob.glob(
        os.path.join(args.groundtruths, '*/gt/gt{}.txt'.format(gt_type)))
    print('gt_files', gtfiles)
    # Result files starting with 'eval' are evaluation artifacts, not tracker output.
    tsfiles = [f for f in glob.glob(os.path.join(args.tests, '*.txt')) if not os.path.basename(f).startswith('eval')]
    logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
    logging.info('Loading files.')
    # Key ground truth by sequence directory name, results by file stem, so
    # compare_dataframes can match them by sequence.
    gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1)) for f in gtfiles])
    ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles])
    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)
    logging.info('Running metrics')
    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', \
        'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', \
        'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
    summary = mh.compute_many(
        accs, names=names,
        metrics=metrics, generate_overall=True)
    # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
    # print(mm.io.render_summary(
    #   summary, formatters=mh.formatters,
    #   namemap=mm.io.motchallenge_metric_names))
    # Normalize the raw count metrics into ratios: error counts are divided
    # by the total number of objects, track-coverage counts by the number of
    # unique objects.
    div_dict = {
        'num_objects': ['num_false_positives', 'num_misses',
                        'num_switches', 'num_fragmentations'],
        'num_unique_objects': ['mostly_tracked', 'partially_tracked',
                               'mostly_lost']}
    for divisor in div_dict:
        for divided in div_dict[divisor]:
            summary[divided] = (summary[divided] / summary[divisor])
    # Render the normalized columns with the same formatter as 'mota'
    # (they are now ratios, no longer integer counts).
    fmt = mh.formatters
    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches',
                       'num_fragmentations', 'mostly_tracked', 'partially_tracked',
                       'mostly_lost']
    for k in change_fmt_list:
        fmt[k] = fmt['mota']
    print(mm.io.render_summary(
        summary, formatters=fmt,
        namemap=mm.io.motchallenge_metric_names))
    # Optionally also print the unmodified official MOTChallenge metric set.
    if args.eval_official:
        metrics = mm.metrics.motchallenge_metrics + ['num_objects']
        summary = mh.compute_many(
            accs, names=names,
            metrics=metrics, generate_overall=True)
        print(mm.io.render_summary(
            summary, formatters=mh.formatters,
            namemap=mm.io.motchallenge_metric_names))
    logging.info('Completed')
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
# Download and unpack the MOT17 dataset, then convert its annotations and
# public detections for use with this repo.
mkdir ../../data/mot17
cd ../../data/mot17
wget https://motchallenge.net/data/MOT17.zip
unzip MOT17.zip
rm MOT17.zip
mkdir annotations
cd ../../src/tools/
python convert_mot_to_coco.py
# Fix: the converter must be invoked with its .py extension, matching the
# sibling script invocation above.
python convert_mot_det_to_results.py
src/tools/nuScenes_lib/export_kitti.py | Python | # nuScenes dev-kit.
# Code written by Holger Caesar, 2019.
# Licensed under the Creative Commons [see licence.txt]
"""
This script converts nuScenes data to KITTI format and KITTI results to nuScenes.
It is used for compatibility with software that uses KITTI-style annotations.
We do not encourage this, as:
- KITTI has only front-facing cameras, whereas nuScenes has a 360 degree horizontal fov.
- KITTI has no radar data.
- The nuScenes database format is more modular.
- KITTI fields like occluded and truncated cannot be exactly reproduced from nuScenes data.
- KITTI has different categories.
Limitations:
- We don't specify the KITTI imu_to_velo_kitti projection in this code base.
- We map nuScenes categories to nuScenes detection categories, rather than KITTI categories.
- Attributes are not part of KITTI and therefore set to '' in the nuScenes result format.
- Velocities are not part of KITTI and therefore set to 0 in the nuScenes result format.
- This script uses the `train` and `val` splits of nuScenes, whereas standard KITTI has `training` and `testing` splits.
This script includes three main functions:
- nuscenes_gt_to_kitti(): Converts nuScenes GT annotations to KITTI format.
- render_kitti(): Render the annotations of the (generated or real) KITTI dataset.
- kitti_res_to_nuscenes(): Converts a KITTI detection result to the nuScenes detection results format.
To launch these scripts run:
- python export_kitti.py nuscenes_gt_to_kitti --nusc_kitti_dir ~/nusc_kitti
- python export_kitti.py render_kitti --nusc_kitti_dir ~/nusc_kitti --render_2d False
- python export_kitti.py kitti_res_to_nuscenes --nusc_kitti_dir ~/nusc_kitti
Note: The parameter --render_2d specifies whether to draw 2d or 3d boxes.
To work with the original KITTI dataset, use these parameters:
--nusc_kitti_dir /data/sets/kitti --split training
See https://www.nuscenes.org/object-detection for more information on the nuScenes result format.
"""
import os
import json
from typing import List, Dict, Any
from pyquaternion import Quaternion
import numpy as np
# import fire
from PIL import Image
import matplotlib.pyplot as plt
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import transform_matrix
from nuscenes.utils.geometry_utils import BoxVisibility
from nuscenes.utils.data_classes import LidarPointCloud, Box
from nuscenes.utils.splits import create_splits_logs
# from nuscenes.utils.kitti import KittiDB
from .utils_kitti import KittiDB
from nuscenes.eval.detection.utils import category_to_detection_name
class KittiConverter:
    """Converts nuScenes data to KITTI format and KITTI-style results back to
    the nuScenes detection result format (see the module docstring for the
    limitations of this mapping)."""
    def __init__(self,
                 nusc_kitti_dir: str = '~/nusc_kitti',
                 cam_name: str = 'CAM_FRONT',
                 lidar_name: str = 'LIDAR_TOP',
                 image_count: int = 10,
                 nusc_version: str = 'v1.0-mini',
                 split: str = 'mini_train'):
        """
        :param nusc_kitti_dir: Where to write the KITTI-style annotations.
        :param cam_name: Name of the camera to export. Note that only one camera is allowed in KITTI.
        :param lidar_name: Name of the lidar sensor.
        :param image_count: Number of images to convert.
        :param nusc_version: nuScenes version to use.
        :param split: Dataset split to use.
        """
        self.nusc_kitti_dir = os.path.expanduser(nusc_kitti_dir)
        self.cam_name = cam_name
        self.lidar_name = lidar_name
        self.image_count = image_count
        self.nusc_version = nusc_version
        self.split = split
        # Create nusc_kitti_dir.
        if not os.path.isdir(self.nusc_kitti_dir):
            os.makedirs(self.nusc_kitti_dir)
        # Select subset of the data to look at.
        # NOTE(review): NuScenes() is constructed without an explicit dataroot,
        # so the devkit default data root is assumed — confirm for your setup.
        self.nusc = NuScenes(version=nusc_version)
    def nuscenes_gt_to_kitti(self) -> None:
        """
        Converts nuScenes GT annotations to KITTI format.
        """
        # 90-degree yaw rotation aligning the KITTI and nuScenes lidar frames.
        kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
        kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
        imsize = (1600, 900)
        token_idx = 0 # Start tokens from 0.
        # Get assignment of scenes to splits.
        split_logs = create_splits_logs(self.split, self.nusc)
        # Create output folders.
        label_folder = os.path.join(self.nusc_kitti_dir, self.split, 'label_2')
        calib_folder = os.path.join(self.nusc_kitti_dir, self.split, 'calib')
        image_folder = os.path.join(self.nusc_kitti_dir, self.split, 'image_2')
        lidar_folder = os.path.join(self.nusc_kitti_dir, self.split, 'velodyne')
        for folder in [label_folder, calib_folder, image_folder, lidar_folder]:
            if not os.path.isdir(folder):
                os.makedirs(folder)
        # Use only the samples from the current split.
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]
        tokens = []
        for sample_token in sample_tokens:
            # Get sample data.
            sample = self.nusc.get('sample', sample_token)
            sample_annotation_tokens = sample['anns']
            cam_front_token = sample['data'][self.cam_name]
            lidar_token = sample['data'][self.lidar_name]
            # Retrieve sensor records.
            sd_record_cam = self.nusc.get('sample_data', cam_front_token)
            sd_record_lid = self.nusc.get('sample_data', lidar_token)
            cs_record_cam = self.nusc.get('calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
            cs_record_lid = self.nusc.get('calibrated_sensor', sd_record_lid['calibrated_sensor_token'])
            # Combine transformations and convert to KITTI format.
            # Note: cam uses same conventions in KITTI and nuScenes.
            lid_to_ego = transform_matrix(cs_record_lid['translation'], Quaternion(cs_record_lid['rotation']),
                                          inverse=False)
            ego_to_cam = transform_matrix(cs_record_cam['translation'], Quaternion(cs_record_cam['rotation']),
                                          inverse=True)
            velo_to_cam = np.dot(ego_to_cam, lid_to_ego)
            # Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
            velo_to_cam_kitti = np.dot(velo_to_cam, kitti_to_nu_lidar.transformation_matrix)
            # Currently not used.
            imu_to_velo_kitti = np.zeros((3, 4))  # Dummy values.
            r0_rect = Quaternion(axis=[1, 0, 0], angle=0)  # Dummy values.
            # Projection matrix.
            p_left_kitti = np.zeros((3, 4))
            p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic'] # Cameras are always rectified.
            # Create KITTI style transforms.
            velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
            velo_to_cam_trans = velo_to_cam_kitti[:3, 3]
            # Check that the rotation has the same format as in KITTI.
            assert (velo_to_cam_rot.round(0) == np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]])).all()
            assert (velo_to_cam_trans[1:3] < 0).all()
            # Retrieve the token from the lidar.
            # Note that this may be confusing as the filename of the camera will include the timestamp of the lidar,
            # not the camera.
            filename_cam_full = sd_record_cam['filename']
            filename_lid_full = sd_record_lid['filename']
            # token = '%06d' % token_idx # Alternative to use KITTI names.
            token_idx += 1
            # Convert image (jpg to png).
            src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
            dst_im_path = os.path.join(image_folder, sample_token + '.png')
            if not os.path.exists(dst_im_path):
                im = Image.open(src_im_path)
                im.save(dst_im_path, "PNG")
            # Convert lidar.
            # Note that we are only using a single sweep, instead of the commonly used n sweeps.
            src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
            dst_lid_path = os.path.join(lidar_folder, sample_token + '.bin')
            assert not dst_lid_path.endswith('.pcd.bin')
            pcl = LidarPointCloud.from_file(src_lid_path)
            pcl.rotate(kitti_to_nu_lidar_inv.rotation_matrix)  # In KITTI lidar frame.
            # NOTE(review): tofile() writes raw binary, but the file is opened
            # in text mode ("w"); "wb" looks more appropriate — confirm.
            with open(dst_lid_path, "w") as lid_file:
                pcl.points.T.tofile(lid_file)
            # Add to tokens.
            tokens.append(sample_token)
            # Create calibration file.
            kitti_transforms = dict()
            kitti_transforms['P0'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P1'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['P2'] = p_left_kitti  # Left camera transform.
            kitti_transforms['P3'] = np.zeros((3, 4))  # Dummy values.
            kitti_transforms['R0_rect'] = r0_rect.rotation_matrix  # Cameras are already rectified.
            kitti_transforms['Tr_velo_to_cam'] = np.hstack((velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
            kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
            calib_path = os.path.join(calib_folder, sample_token + '.txt')
            with open(calib_path, "w") as calib_file:
                for (key, val) in kitti_transforms.items():
                    val = val.flatten()
                    val_str = '%.12e' % val[0]
                    for v in val[1:]:
                        val_str += ' %.12e' % v
                    calib_file.write('%s: %s\n' % (key, val_str))
            # Write label file.
            label_path = os.path.join(label_folder, sample_token + '.txt')
            if os.path.exists(label_path):
                print('Skipping existing file: %s' % label_path)
                continue
            else:
                print('Writing file: %s' % label_path)
            with open(label_path, "w") as label_file:
                for sample_annotation_token in sample_annotation_tokens:
                    sample_annotation = self.nusc.get('sample_annotation', sample_annotation_token)
                    # Get box in LIDAR frame.
                    _, box_lidar_nusc, _ = self.nusc.get_sample_data(lidar_token, box_vis_level=BoxVisibility.NONE,
                                                                     selected_anntokens=[sample_annotation_token])
                    box_lidar_nusc = box_lidar_nusc[0]
                    # Truncated: Set all objects to 0 which means untruncated.
                    truncated = 0.0
                    # Occluded: Set all objects to full visibility as this information is not available in nuScenes.
                    occluded = 0
                    # Convert nuScenes category to nuScenes detection challenge category.
                    detection_name = category_to_detection_name(sample_annotation['category_name'])
                    # Skip categories that are not part of the nuScenes detection challenge.
                    if detection_name is None:
                        continue
                    # Convert from nuScenes to KITTI box format.
                    box_cam_kitti = KittiDB.box_nuscenes_to_kitti(
                        box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot), velo_to_cam_trans, r0_rect)
                    # Project 3d box to 2d box in image, ignore box if it does not fall inside.
                    bbox_2d = KittiDB.project_kitti_box_to_image(box_cam_kitti, p_left_kitti, imsize=imsize)
                    if bbox_2d is None:
                        continue
                    # Set dummy score so we can use this file as result.
                    box_cam_kitti.score = 0
                    # Convert box to output string format.
                    output = KittiDB.box_to_string(name=detection_name, box=box_cam_kitti, bbox_2d=bbox_2d,
                                                   truncation=truncated, occlusion=occluded)
                    # Write to disk.
                    label_file.write(output + '\n')
    def render_kitti(self, render_2d: bool) -> None:
        """
        Renders the annotations in the KITTI dataset from a lidar and a camera view.
        :param render_2d: Whether to render 2d boxes (only works for camera data).
        """
        if render_2d:
            print('Rendering 2d boxes from KITTI format')
        else:
            print('Rendering 3d boxes projected from 3d KITTI format')
        # Load the KITTI dataset.
        kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split,))
        # Create output folder.
        render_dir = os.path.join(self.nusc_kitti_dir, 'render')
        if not os.path.isdir(render_dir):
            os.mkdir(render_dir)
        # Render each image.
        for token in kitti.tokens[:self.image_count]:
            for sensor in ['lidar', 'camera']:
                out_path = os.path.join(render_dir, '%s_%s.png' % (token, sensor))
                print('Rendering file to disk: %s' % out_path)
                kitti.render_sample_data(token, sensor_modality=sensor, out_path=out_path, render_2d=render_2d)
                plt.close()  # Close the windows to avoid a warning of too many open windows.
    def kitti_res_to_nuscenes(self, meta: Dict[str, bool] = None) -> None:
        """
        Converts a KITTI detection result to the nuScenes detection results format.
        :param meta: Meta data describing the method used to generate the result. See nuscenes.org/object-detection.
        """
        # Dummy meta data, please adjust accordingly.
        if meta is None:
            meta = {
                'use_camera': False,
                'use_lidar': True,
                'use_radar': False,
                'use_map': False,
                'use_external': False,
            }
        # Init.
        results = {}
        # Load the KITTI dataset.
        kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split, ))
        # Get assignment of scenes to splits.
        split_logs = create_splits_logs(self.split, self.nusc)
        # Use only the samples from the current split.
        sample_tokens = self._split_to_samples(split_logs)
        sample_tokens = sample_tokens[:self.image_count]
        for sample_token in sample_tokens:
            # Get the KITTI boxes we just generated in LIDAR frame.
            kitti_token = '%s_%s' % (self.split, sample_token)
            boxes = kitti.get_boxes(token=kitti_token)
            # Convert KITTI boxes to nuScenes detection challenge result format.
            sample_results = [self._box_to_sample_result(sample_token, box) for box in boxes]
            # Store all results for this image.
            results[sample_token] = sample_results
        # Store submission file to disk.
        submission = {
            'meta': meta,
            'results': results
        }
        submission_path = os.path.join(self.nusc_kitti_dir, 'submission.json')
        print('Writing submission to: %s' % submission_path)
        with open(submission_path, 'w') as f:
            json.dump(submission, f, indent=2)
    def _box_to_sample_result(self, sample_token: str, box: Box, attribute_name: str = '') -> Dict[str, Any]:
        """Serialize one Box into a nuScenes detection-result entry.
        Attributes are not part of KITTI, hence the empty default."""
        # Prepare data
        translation = box.center
        size = box.wlh
        rotation = box.orientation.q
        velocity = box.velocity
        detection_name = box.name
        detection_score = box.score
        # Create result dict
        sample_result = dict()
        sample_result['sample_token'] = sample_token
        sample_result['translation'] = translation.tolist()
        sample_result['size'] = size.tolist()
        sample_result['rotation'] = rotation.tolist()
        sample_result['velocity'] = velocity.tolist()[:2]  # Only need vx, vy.
        sample_result['detection_name'] = detection_name
        sample_result['detection_score'] = detection_score
        sample_result['attribute_name'] = attribute_name
        return sample_result
    def _split_to_samples(self, split_logs: List[str]) -> List[str]:
        """
        Convenience function to get the samples in a particular split.
        :param split_logs: A list of the log names in this split.
        :return: The list of samples.
        """
        samples = []
        for sample in self.nusc.sample:
            scene = self.nusc.get('scene', sample['scene_token'])
            log = self.nusc.get('log', scene['log_token'])
            logfile = log['logfile']
            if logfile in split_logs:
                samples.append(sample['token'])
        return samples
# if __name__ == '__main__':
# fire.Fire(KittiConverter) | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/nuScenes_lib/utils_kitti.py | Python | # nuScenes dev-kit.
# Code written by Alex Lang and Holger Caesar, 2019.
# Licensed under the Creative Commons [see licence.txt]
import os
from os import path as osp
from typing import List, Tuple, Any, Union
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from matplotlib.axes import Axes
from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import box_in_image, BoxVisibility, view_points
from nuscenes.utils.data_classes import Box, LidarPointCloud
from nuscenes.nuscenes import NuScenesExplorer
class KittiDB:
"""
KITTI database that abstracts away interactions with KITTI files and handles all required transformations.
This file exists as a utility class for `export_kitti.py`. It may not support more general use cases.
NOTES about KITTI:
- Setup is defined here: http://www.cvlibs.net/datasets/kitti/setup.php
- Box annotations live in CamRect frame
- KITTI lidar frame is 90 degrees rotated from nuScenes lidar frame
- To export to KITTI format from nuScenes lidar requires:
- Rotate to KITTI lidar
- Transform lidar to camera
- Transform camera to camera rectified
- To transform from box annotations to nuScenes lidar requires:
- Inverse of camera rectification
- Inverse transform of lidar to camera
- Rotate to nuScenes lidar
- KITTI 2D boxes cannot always be obtained from the 3D box. The size of a 3D box was fixed for a tracklet
so it can be large for walking pedestrians that stop moving. Those loose 2D boxes were then corrected
using Mechanical Turk.
NOTES about KittiDB:
- The samples tokens are expected to have the format of SPLIT_INT where split is a data folder
{train, val, test} while INT is the integer label of the sample within that data folder.
- The KITTI dataset should be downloaded from http://www.cvlibs.net/datasets/kitti/.
- We use the MV3D splits, not the official KITTI splits (which doesn't have any val).
"""
def __init__(self,
root: str = '/data/sets/kitti',
splits: Tuple[str, ...] = ('train',)):
"""
:param root: Base folder for all KITTI data.
:param splits: Which splits to load.
"""
self.root = root
self.tables = ('calib', 'image_2', 'label_2', 'velodyne')
self._kitti_fileext = {'calib': 'txt', 'image_2': 'png', 'label_2': 'txt', 'velodyne': 'bin'}
# Grab all the expected tokens.
self._kitti_tokens = {}
for split in splits:
split_dir = osp.join(self.root, split, 'image_2')
_tokens = os.listdir(split_dir)
_tokens = [t.replace('.png', '') for t in _tokens]
_tokens.sort()
self._kitti_tokens[split] = _tokens
# Creating the tokens.
self.tokens = []
for split, tokens in self._kitti_tokens.items():
self.tokens += ['{}_{}'.format(split, token) for token in tokens]
# KITTI LIDAR has the x-axis pointing forward, but our LIDAR points to the right. So we need to apply a
# 90 degree rotation around to yaw (z-axis) in order to align.
# The quaternions will be used a lot of time. We store them as instance variables so that we don't have
# to create a new one every single time.
self.kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
self.kitti_to_nu_lidar_inv = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse
@staticmethod
def standardize_sample_token(token: str) -> Tuple[str, str]:
"""
Convert sample token into standard KITTI folder and local filename format.
:param token: KittiDB unique id.
:return: folder (ex. train, val, test), filename (ex. 000001)
"""
splits = token.split('_')
folder = '_'.join(splits[:-1])
filename = splits[-1]
return folder, filename
@staticmethod
def parse_label_line(label_line) -> dict:
"""
Parses single line from label file into a dict. Boxes are in camera frame. See KITTI devkit for details and
http://www.cvlibs.net/datasets/kitti/setup.php for visualizations of the setup.
:param label_line: Single line from KittiDB label file.
:return: Dictionary with all the line details.
"""
parts = label_line.split(' ')
output = {
'name': parts[0].strip(),
'xyz_camera': (float(parts[11]), float(parts[12]), float(parts[13])),
'wlh': (float(parts[9]), float(parts[10]), float(parts[8])),
'yaw_camera': float(parts[14]),
'bbox_camera': (float(parts[4]), float(parts[5]), float(parts[6]), float(parts[7])),
'truncation': float(parts[1]),
'occlusion': float(parts[2]),
'alpha': float(parts[3])
}
# Add score if specified
if len(parts) > 15:
output['score'] = float(parts[15])
else:
output['score'] = np.nan
return output
    @staticmethod
    def box_nuscenes_to_kitti(box: Box, velo_to_cam_rot: Quaternion,
                              velo_to_cam_trans: np.ndarray,
                              r0_rect: Quaternion,
                              kitti_to_nu_lidar_inv: Quaternion = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse) \
            -> Box:
        """
        Transform from nuScenes lidar frame to KITTI reference frame.
        :param box: Instance in nuScenes lidar frame.
        :param velo_to_cam_rot: Quaternion to rotate from lidar to camera frame.
        :param velo_to_cam_trans: <np.float: 3>. Translate from lidar to camera frame.
        :param r0_rect: Quaternion to rectify camera frame.
        :param kitti_to_nu_lidar_inv: Quaternion to rotate nuScenes to KITTI LIDAR.
            NOTE(review): this default is constructed once at import time and shared
            across calls; assumed to be treated as read-only -- confirm.
        :return: Box instance in KITTI reference frame.
        """
        # Copy box to avoid side-effects (the transforms below mutate the box in place).
        box = box.copy()
        # Rotate to KITTI lidar.
        box.rotate(kitti_to_nu_lidar_inv)
        # Transform to KITTI camera: rotate first, then translate (order matters).
        box.rotate(velo_to_cam_rot)
        box.translate(velo_to_cam_trans)
        # Rotate to KITTI rectified camera.
        box.rotate(r0_rect)
        # KITTI defines the box center as the bottom center of the object.
        # We use the true center, so we need to adjust half height in y direction
        # (y points down in the camera frame, so +h/2 moves to the bottom face).
        box.translate(np.array([0, box.wlh[2] / 2, 0]))
        return box
@staticmethod
def project_kitti_box_to_image(box: Box, p_left: np.ndarray, imsize: Tuple[int, int]) \
-> Union[None, Tuple[int, int, int, int]]:
"""
Projects 3D box into KITTI image FOV.
:param box: 3D box in KITTI reference frame.
:param p_left: <np.float: 3, 4>. Projection matrix.
:param imsize: (width, height). Image size.
:return: (xmin, ymin, xmax, ymax). Bounding box in image plane or None if box is not in the image.
"""
# Create a new box.
# box = box.copy()
# KITTI defines the box center as the bottom center of the object.
# We use the true center, so we need to adjust half height in negative y direction.
box.translate(np.array([0, -box.wlh[2] / 2, 0]))
# Check that some corners are inside the image.
corners = np.array([corner for corner in box.corners().T if corner[2] > 0]).T
if len(corners) == 0:
return None
# Project corners that are in front of the camera to 2d to get bbox in pixel coords.
imcorners = view_points(corners, p_left, normalize=True)[:2]
bbox = (np.min(imcorners[0]), np.min(imcorners[1]), np.max(imcorners[0]), np.max(imcorners[1]))
# Crop bbox to prevent it extending outside image.
bbox_crop = tuple(max(0, b) for b in bbox)
bbox_crop = (min(imsize[0], bbox_crop[0]),
min(imsize[0], bbox_crop[1]),
min(imsize[0], bbox_crop[2]),
min(imsize[1], bbox_crop[3]))
# Detect if a cropped box is empty.
if bbox_crop[0] >= bbox_crop[2] or bbox_crop[1] >= bbox_crop[3]:
return None
return bbox_crop
@staticmethod
def get_filepath(token: str, table: str, root: str='/data/sets/kitti') -> str:
"""
For a token and table, get the filepath to the associated data.
:param token: KittiDB unique id.
:param table: Type of table, for example image or velodyne.
:param root: Base folder for all KITTI data.
:return: Full get_filepath to desired data.
"""
folder, filename = KittiDB.standardize_sample_token(token)
kitti_fileext = {'calib': 'txt', 'image_2': 'png', 'label_2': 'txt', 'velodyne': 'bin'}
ending = kitti_fileext[table]
if token.startswith('test_') and table == 'label_2':
filepath = None
print('No cheating! The test set has no labels.')
else:
filepath = osp.join(root, folder, table, '{}.{}'.format(filename, ending))
return filepath
@staticmethod
def get_transforms(token: str, root: str='/data/sets/kitti') -> dict:
"""
Returns transforms for the input token.
:param token: KittiDB unique id.
:param root: Base folder for all KITTI data.
:return: {
'velo_to_cam': {'R': <np.float: 3, 3>, 'T': <np.float: 3, 1>}. Lidar to camera transformation matrix.
'r0_rect': <np.float: 3, 3>. Rectification matrix.
'p_left': <np.float: 3, 4>. Projection matrix.
'p_combined': <np.float: 4, 4>. Combined rectification and projection matrix.
}. Returns the transformation matrices. For details refer to the KITTI devkit.
"""
calib_filename = KittiDB.get_filepath(token, 'calib', root=root)
lines = [line.rstrip() for line in open(calib_filename)]
velo_to_cam = np.array(lines[5].strip().split(' ')[1:], dtype=np.float32)
velo_to_cam.resize((3, 4))
r0_rect = np.array(lines[4].strip().split(' ')[1:], dtype=np.float32)
r0_rect.resize((3, 3))
p_left = np.array(lines[2].strip().split(' ')[1:], dtype=np.float32)
p_left.resize((3, 4))
# Merge rectification and projection into one matrix.
p_combined = np.eye(4)
p_combined[:3, :3] = r0_rect
p_combined = np.dot(p_left, p_combined)
return {
'velo_to_cam': {
'R': velo_to_cam[:, :3],
'T': velo_to_cam[:, 3]
},
'r0_rect': r0_rect,
'p_left': p_left,
'p_combined': p_combined,
}
@staticmethod
def get_pointcloud(token: str, root: str = '/data/sets/kitti') -> LidarPointCloud:
"""
Load up the pointcloud for a sample.
:param token: KittiDB unique id.
:param root: Base folder for all KITTI data.
:return: LidarPointCloud for the sample in the KITTI Lidar frame.
"""
pc_filename = KittiDB.get_filepath(token, 'velodyne', root=root)
# The lidar PC is stored in the KITTI LIDAR coord system.
pc = LidarPointCloud(np.fromfile(pc_filename, dtype=np.float32).reshape(-1, 4).T)
return pc
    def get_boxes(self,
                  token: str,
                  filter_classes: List[str] = None,
                  max_dist: float = None) -> List[Box]:
        """
        Load up all the boxes associated with a sample.
        Boxes are in nuScenes lidar frame.
        :param token: KittiDB unique id.
        :param filter_classes: List of Kitti classes to use or None to use all.
        :param max_dist: Maximum distance in m to still draw a box.
        :return: Boxes in nuScenes lidar reference frame.
        """
        # Get transforms for this sample
        transforms = self.get_transforms(token, root=self.root)
        boxes = []
        if token.startswith('test_'):
            # No boxes to return for the test set.
            return boxes
        with open(KittiDB.get_filepath(token, 'label_2', root=self.root), 'r') as f:
            for line in f:
                # Parse this line into box information.
                parsed_line = self.parse_label_line(line)
                # Skip placeholder annotations that carry no usable object.
                if parsed_line['name'] in {'DontCare', 'Misc'}:
                    continue
                center = parsed_line['xyz_camera']
                wlh = parsed_line['wlh']
                yaw_camera = parsed_line['yaw_camera']
                name = parsed_line['name']
                score = parsed_line['score']
                # Optional: Filter classes.
                if filter_classes is not None and name not in filter_classes:
                    continue
                # The Box class coord system is oriented the same way as KITTI LIDAR: x forward, y left, z up.
                # For orientation confer: http://www.cvlibs.net/datasets/kitti/setup.php.
                # 1: Create box in Box coordinate system with center at origin.
                # The second quaternion in yaw_box transforms the coordinate frame from the object frame
                # to KITTI camera frame. The equivalent cannot be naively done afterwards, as it's a rotation
                # around the local object coordinate frame, rather than the camera frame.
                quat_box = Quaternion(axis=(0, 1, 0), angle=yaw_camera) * Quaternion(axis=(1, 0, 0), angle=np.pi/2)
                box = Box([0.0, 0.0, 0.0], wlh, quat_box, name=name)
                # 2: Translate: KITTI defines the box center as the bottom center of the vehicle. We use true center,
                # so we need to add half height in negative y direction, (since y points downwards), to adjust. The
                # center is already given in camera coord system.
                box.translate(center + np.array([0, -wlh[2] / 2, 0]))
                # 3: Transform to KITTI LIDAR coord system. First transform from rectified camera to camera, then
                # camera to KITTI lidar. Note the inverse order (rotate/translate/rotate) undoes the
                # forward lidar->camera chain step by step.
                box.rotate(Quaternion(matrix=transforms['r0_rect']).inverse)
                box.translate(-transforms['velo_to_cam']['T'])
                box.rotate(Quaternion(matrix=transforms['velo_to_cam']['R']).inverse)
                # 4: Transform to nuScenes LIDAR coord system.
                box.rotate(self.kitti_to_nu_lidar)
                # Set score or NaN.
                box.score = score
                # Set dummy velocity.
                box.velocity = np.array((0.0, 0.0, 0.0))
                # Optional: Filter by max_dist (distance in the xy ground plane).
                if max_dist is not None:
                    dist = np.sqrt(np.sum(box.center[:2] ** 2))
                    if dist > max_dist:
                        continue
                boxes.append(box)
        return boxes
def get_boxes_2d(self,
token: str,
filter_classes: List[str] = None) -> Tuple[
List[Tuple[float, float, float, float]],
List[str]
]:
"""
Get the 2d boxes associated with a sample.
:return: A list of boxes in KITTI format (xmin, ymin, xmax, ymax) and a list of the class names.
"""
boxes = []
names = []
with open(KittiDB.get_filepath(token, 'label_2', root=self.root), 'r') as f:
for line in f:
# Parse this line into box information.
parsed_line = self.parse_label_line(line)
if parsed_line['name'] in {'DontCare', 'Misc'}:
continue
bbox_2d = parsed_line['bbox_camera']
name = parsed_line['name']
# Optional: Filter classes.
if filter_classes is not None and name not in filter_classes:
continue
boxes.append(bbox_2d)
names.append(name)
return boxes, names
@staticmethod
def box_to_string(name: str,
box: Box,
bbox_2d: Tuple[float, float, float, float] = (-1.0, -1.0, -1.0, -1.0),
truncation: float = -1.0,
occlusion: int = -1,
alpha: float = -10.0) -> str:
"""
Convert box in KITTI image frame to official label string fromat.
:param name: KITTI name of the box.
:param box: Box class in KITTI image frame.
:param bbox_2d: Optional, 2D bounding box obtained by projected Box into image (xmin, ymin, xmax, ymax).
Otherwise set to KITTI default.
:param truncation: Optional truncation, otherwise set to KITTI default.
:param occlusion: Optional occlusion, otherwise set to KITTI default.
:param alpha: Optional alpha, otherwise set to KITTI default.
:return: KITTI string representation of box.
"""
# Convert quaternion to yaw angle.
v = np.dot(box.rotation_matrix, np.array([1, 0, 0]))
yaw = -np.arctan2(v[2], v[0])
# Prepare output.
name += ' '
trunc = '{:.2f} '.format(truncation)
occ = '{:d} '.format(occlusion)
a = '{:.2f} '.format(alpha)
bb = '{:.2f} {:.2f} {:.2f} {:.2f} '.format(bbox_2d[0], bbox_2d[1], bbox_2d[2], bbox_2d[3])
hwl = '{:.2} {:.2f} {:.2f} '.format(box.wlh[2], box.wlh[0], box.wlh[1]) # height, width, length.
xyz = '{:.2f} {:.2f} {:.2f} '.format(box.center[0], box.center[1], box.center[2]) # x, y, z.
y = '{:.2f}'.format(yaw) # Yaw angle.
s = ' {:.4f}'.format(box.score) # Classification score.
output = name + trunc + occ + a + bb + hwl + xyz + y
if ~np.isnan(box.score):
output += s
return output
def project_pts_to_image(self, pointcloud: LidarPointCloud, token: str) -> np.ndarray:
"""
Project lidar points into image.
:param pointcloud: The LidarPointCloud in nuScenes lidar frame.
:param token: Unique KITTI token.
:return: <np.float: N, 3.> X, Y are points in image pixel coordinates. Z is depth in image.
"""
# Copy and convert pointcloud.
pc_image = LidarPointCloud(points=pointcloud.points.copy())
pc_image.rotate(self.kitti_to_nu_lidar_inv) # Rotate to KITTI lidar.
# Transform pointcloud to camera frame.
transforms = self.get_transforms(token, root=self.root)
pc_image.rotate(transforms['velo_to_cam']['R'])
pc_image.translate(transforms['velo_to_cam']['T'])
# Project to image.
depth = pc_image.points[2, :]
points_fov = view_points(pc_image.points[:3, :], transforms['p_combined'], normalize=True)
points_fov[2, :] = depth
return points_fov
    def render_sample_data(self,
                           token: str,
                           sensor_modality: str = 'lidar',
                           with_anns: bool = True,
                           axes_limit: float = 30,
                           ax: Axes = None,
                           view_3d: np.ndarray = np.eye(4),
                           color_func: Any = None,
                           augment_previous: bool = False,
                           box_linewidth: int = 2,
                           filter_classes: List[str] = None,
                           max_dist: float = None,
                           out_path: str = None,
                           render_2d: bool = False) -> None:
        """
        Render sample data onto axis. Visualizes lidar in nuScenes lidar frame and camera in camera frame.
        :param token: KITTI token.
        :param sensor_modality: The modality to visualize, e.g. lidar or camera.
        :param with_anns: Whether to draw annotations.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        :param view_3d: 4x4 view matrix for 3d views. NOTE(review): the default np.eye(4)
            is built once at definition time and shared across calls; safe only while no
            caller mutates it -- confirm.
        :param color_func: Optional function that defines the render color given the class name.
        :param augment_previous: Whether to augment an existing plot (does not redraw pointcloud/image).
        :param box_linewidth: Width of the box lines.
        :param filter_classes: Optionally filter the classes to render.
        :param max_dist: Maximum distance in m to still draw a box.
        :param out_path: Optional path to save the rendered figure to disk.
        :param render_2d: Whether to render 2d boxes (only works for camera data).
        """
        # Default settings.
        if color_func is None:
            color_func = NuScenesExplorer.get_color
        boxes = self.get_boxes(token, filter_classes=filter_classes, max_dist=max_dist)  # In nuScenes lidar frame.
        if sensor_modality == 'lidar':
            # Load pointcloud.
            pc = self.get_pointcloud(token, self.root)  # In KITTI lidar frame.
            pc.rotate(self.kitti_to_nu_lidar.rotation_matrix)  # In nuScenes lidar frame.
            # Alternative options:
            # depth = pc.points[1, :]
            # height = pc.points[2, :]
            intensity = pc.points[3, :]
            # Project points to view.
            points = view_points(pc.points[:3, :], view_3d, normalize=False)
            coloring = intensity
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))
            if not augment_previous:
                ax.scatter(points[0, :], points[1, :], c=coloring, s=1)
                ax.set_xlim(-axes_limit, axes_limit)
                ax.set_ylim(-axes_limit, axes_limit)
            if with_anns:
                for box in boxes:
                    color = np.array(color_func(box.name)) / 255
                    box.render(ax, view=view_3d, colors=(color, color, 'k'), linewidth=box_linewidth)
        elif sensor_modality == 'camera':
            im_path = KittiDB.get_filepath(token, 'image_2', root=self.root)
            im = Image.open(im_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))
            if not augment_previous:
                ax.imshow(im)
                ax.set_xlim(0, im.size[0])
                # Flip the y-limits so the image is drawn with the origin at the top-left.
                ax.set_ylim(im.size[1], 0)
            if with_anns:
                if render_2d:
                    # Use KITTI's 2d boxes.
                    boxes_2d, names = self.get_boxes_2d(token, filter_classes=filter_classes)
                    for box, name in zip(boxes_2d, names):
                        color = np.array(color_func(name)) / 255
                        # Draw the four edges of the axis-aligned 2d box.
                        ax.plot([box[0], box[0]], [box[1], box[3]], color=color, linewidth=box_linewidth)
                        ax.plot([box[2], box[2]], [box[1], box[3]], color=color, linewidth=box_linewidth)
                        ax.plot([box[0], box[2]], [box[1], box[1]], color=color, linewidth=box_linewidth)
                        ax.plot([box[0], box[2]], [box[3], box[3]], color=color, linewidth=box_linewidth)
                else:
                    # Project 3d boxes to 2d.
                    transforms = self.get_transforms(token, self.root)
                    for box in boxes:
                        # Undo the transformations in get_boxes() to get back to the camera frame.
                        # NOTE: this mutates the Box instances in the local `boxes` list in place.
                        box.rotate(self.kitti_to_nu_lidar_inv)  # In KITTI lidar frame.
                        box.rotate(Quaternion(matrix=transforms['velo_to_cam']['R']))
                        box.translate(transforms['velo_to_cam']['T'])  # In KITTI camera frame, un-rectified.
                        box.rotate(Quaternion(matrix=transforms['r0_rect']))  # In KITTI camera frame, rectified.
                        # Filter boxes outside the image (relevant when visualizing nuScenes data in KITTI format).
                        if not box_in_image(box, transforms['p_left'][:3, :3], im.size, vis_level=BoxVisibility.ANY):
                            continue
                        # Render.
                        color = np.array(color_func(box.name)) / 255
                        box.render(ax, view=transforms['p_left'][:3, :3], normalize=True, colors=(color, color, 'k'),
                                   linewidth=box_linewidth)
        else:
            raise ValueError("Unrecognized modality {}.".format(sensor_modality))
        ax.axis('off')
        ax.set_title(token)
        ax.set_aspect('equal')
        # Render to disk.
        plt.tight_layout()
        if out_path is not None:
plt.savefig(out_path) | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/remove_optimizers.py | Python | import os
import torch
# Read original CenterTrack checkpoints from IN_PATH, write stripped copies to OUT_PATH.
IN_PATH = '../../centertrack_models/'
OUT_PATH = '../../models/'
# State-dict entries whose key contains any of these substrings are dropped
# (e.g. 'base.fc' classification-head weights that are unused at inference).
REMOVE_KEYS = ['base.fc']
if __name__ == '__main__':
  # Process every checkpoint file in IN_PATH in sorted (deterministic) order.
  models = sorted(os.listdir(IN_PATH))
  for model in models:
    model_path = IN_PATH + model
    print(model)
    data = torch.load(model_path)
    state_dict = data['state_dict']
    keys = state_dict.keys()
    delete_keys = []
    # Collect matching keys first: a dict must not be mutated while iterating its keys view.
    for k in keys:
      should_delete = False
      for remove_key in REMOVE_KEYS:
        if remove_key in k:
          should_delete = True
      if should_delete:
        delete_keys.append(k)
    for k in delete_keys:
      print('delete ', k)
      del state_dict[k]
    # Re-save only epoch + weights; everything else in the checkpoint (e.g. optimizer
    # state) is dropped, which is what shrinks the file.
    out_data = {'epoch': data['epoch'], 'state_dict': state_dict}
    torch.save(out_data, OUT_PATH + model)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/vis_tracking_kitti.py | Python | import numpy as np
import cv2
import os
import glob
import sys
from collections import defaultdict
from pathlib import Path
# Root of the KITTI tracking dataset and its test-split images.
DATA_PATH = '../../data/kitti_tracking/'
IMG_PATH = DATA_PATH + 'data_tracking_image_2/testing/image_02/'
SAVE_VIDEO = False
IS_GT = False
# Category names, their integer ids, and one BGR drawing color per category.
cats = ['Pedestrian', 'Car', 'Cyclist']
cat_ids = {cat: i for i, cat in enumerate(cats)}
COLORS = [(255, 0, 255), (122, 122, 255), (255, 0, 0)]
def draw_bbox(img, bboxes, c=(255, 0, 255)):
  """Draw tracked detections on img in place.

  Each entry of bboxes is (x1, y1, x2, y2, track_id, class_id, ...); the class id
  selects the color from COLORS and the track id is printed at the box center.
  The c parameter is unused but kept for interface compatibility.
  """
  for det in bboxes:
    x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
    color = COLORS[int(det[5])]
    cv2.rectangle(img, (x1, y1), (x2, y2), color, 2, lineType=cv2.LINE_AA)
    # Label the box with its track id at the box center.
    center = (int((det[0] + det[2]) / 2), int((det[1] + det[3]) / 2))
    cv2.putText(img, '{}'.format(int(det[4])), center,
                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                color, thickness=1, lineType=cv2.LINE_AA)
if __name__ == '__main__':
  # Each positional argument (sys.argv[1:]) is a directory of per-sequence KITTI
  # tracking result files; all of them are visualized side by side.
  seqs = os.listdir(IMG_PATH)
  if SAVE_VIDEO:
    save_path = sys.argv[1][:sys.argv[1].rfind('/res')] + '/video'
    if not os.path.exists(save_path):
      os.mkdir(save_path)
    print('save_video_path', save_path)
  for seq in sorted(seqs):
    print('seq', seq)
    if '.DS_Store' in seq:
      continue
    # if SAVE_VIDEO:
    #   fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #   video = cv2.VideoWriter(
    #     '{}/{}.avi'.format(save_path, seq),fourcc, 10.0, (1024, 750))
    preds = {}
    for K in range(1, len(sys.argv)):
      pred_path = sys.argv[K] + '/{}.txt'.format(seq)
      pred_file = open(pred_path, 'r')
      preds[K] = defaultdict(list)
      # Result rows are space separated; columns used here: 0 frame, 1 track id,
      # 2 class name, 6-9 2d bbox, 17 confidence score.
      for line in pred_file:
        tmp = line[:-1].split(' ')
        frame_id = int(tmp[0])
        track_id = int(tmp[1])
        cat_id = cat_ids[tmp[2]]
        bbox = [float(tmp[6]), float(tmp[7]), float(tmp[8]), float(tmp[9])]
        score = float(tmp[17])
        preds[K][frame_id].append(bbox + [track_id, cat_id, score])
    images_path = '{}/{}/'.format(IMG_PATH, seq)
    images = os.listdir(images_path)
    num_images = len([image for image in images if 'png' in image])
    # Show every frame with each result set in its own window; blocks on a key press.
    for i in range(num_images):
      frame_id = i
      file_path = '{}/{:06d}.png'.format(images_path, i)
      img = cv2.imread(file_path)
      for K in range(1, len(sys.argv)):
        img_pred = img.copy()
        draw_bbox(img_pred, preds[K][frame_id])
        cv2.imshow('pred{}'.format(K), img_pred)
      cv2.waitKey()
      # if SAVE_VIDEO:
      #   video.write(img_pred)
    # if SAVE_VIDEO:
    #   video.release()
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/vis_tracking_mot.py | Python | import numpy as np
import cv2
import os
import glob
import sys
from collections import defaultdict
from pathlib import Path
# Root of the MOT17 test split; sequences live directly under this folder.
GT_PATH = '../../data/mot17/test/'
IMG_PATH = GT_PATH
SAVE_VIDEO = True
# Images and boxes are downscaled by this factor before display.
RESIZE = 2
IS_GT = False
def draw_bbox(img, bboxes, c=(255, 0, 255)):
  """Draw MOT-style boxes on img in place.

  Each entry of bboxes is (x, y, w, h, track_id); boxes are drawn in color c and
  the track id label is always drawn in a fixed color.
  """
  for det in bboxes:
    top_left = (int(det[0]), int(det[1]))
    bottom_right = (int(det[0] + det[2]), int(det[1] + det[3]))
    cv2.rectangle(img, top_left, bottom_right,
                  c, 2, lineType=cv2.LINE_AA)
    # Label the box with its track id at the box center.
    center = (int(det[0] + det[2] / 2), int(det[1] + det[3] / 2))
    cv2.putText(img, '{}'.format(det[4]), center,
                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 122, 255), thickness=1, lineType=cv2.LINE_AA)
if __name__ == '__main__':
  # Each positional argument (sys.argv[1:]) is a directory of per-sequence MOT
  # result files; each is shown in its own window next to the gt/det boxes.
  seqs = os.listdir(GT_PATH)
  if SAVE_VIDEO:
    save_path = sys.argv[1][:sys.argv[1].rfind('/res')] + '/video'
    if not os.path.exists(save_path):
      os.mkdir(save_path)
    print('save_video_path', save_path)
  for seq in sorted(seqs):
    print('seq', seq)
    # if len(sys.argv) > 2 and not sys.argv[2] in seq:
    #   continue
    if '.DS_Store' in seq:
      continue
    # if SAVE_VIDEO:
    #   fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #   video = cv2.VideoWriter(
    #     '{}/{}.avi'.format(save_path, seq),fourcc, 10.0, (1024, 750))
    seq_path = '{}/{}/'.format(GT_PATH, seq)
    # Reference boxes: ground truth when IS_GT, otherwise the public detections.
    if IS_GT:
      ann_path = seq_path + 'gt/gt.txt'
    else:
      ann_path = seq_path + 'det/det.txt'
    anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')
    print('anns shape', anns.shape)
    image_to_anns = defaultdict(list)
    # MOT csv columns: 0 frame, 1 track id, 2-5 bbox (x, y, w, h), 6 flag, 8 visibility.
    # For gt, keep only valid (flag == 1) and sufficiently visible (>= 0.25) boxes.
    for i in range(anns.shape[0]):
      if (not IS_GT) or (int(anns[i][6]) == 1 and float(anns[i][8]) >= 0.25):
        frame_id = int(anns[i][0])
        track_id = int(anns[i][1])
        bbox = (anns[i][2:6] / RESIZE).tolist()
        image_to_anns[frame_id].append(bbox + [track_id])
    image_to_preds = {}
    for K in range(1, len(sys.argv)):
      image_to_preds[K] = defaultdict(list)
      pred_path = sys.argv[K] + '/{}.txt'.format(seq)
      # Result files may be comma- or space-separated; try comma first.
      try:
        preds = np.loadtxt(pred_path, dtype=np.float32, delimiter=',')
      except:
        preds = np.loadtxt(pred_path, dtype=np.float32, delimiter=' ')
      for i in range(preds.shape[0]):
        frame_id = int(preds[i][0])
        track_id = int(preds[i][1])
        bbox = (preds[i][2:6] / RESIZE).tolist()
        image_to_preds[K][frame_id].append(bbox + [track_id])
    img_path = seq_path + 'img1/'
    images = os.listdir(img_path)
    num_images = len([image for image in images if 'jpg' in image])
    # MOT frames are 1-indexed; show each frame and block on a key press.
    for i in range(num_images):
      frame_id = i + 1
      file_name = '{}/img1/{:06d}.jpg'.format(seq, i + 1)
      file_path = IMG_PATH + file_name
      img = cv2.imread(file_path)
      if RESIZE != 1:
        img = cv2.resize(img, (img.shape[1] // RESIZE, img.shape[0] // RESIZE))
      for K in range(1, len(sys.argv)):
        img_pred = img.copy()
        draw_bbox(img_pred, image_to_preds[K][frame_id])
        cv2.imshow('pred{}'.format(K), img_pred)
      draw_bbox(img, image_to_anns[frame_id])
      cv2.imshow('gt', img)
      cv2.waitKey()
      # if SAVE_VIDEO:
      #   video.write(img_pred)
    # if SAVE_VIDEO:
    #   video.release()
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
config.py | Python | import os
import numpy as np
class Config:
    """Global configuration store.

    All settings live in the private ``_configs`` dict (initialized with
    defaults in ``__init__``); read access goes through the properties below and
    ``update_config`` merges in externally loaded values. Keys not present in
    the defaults are silently ignored by ``update_config``.
    """
    def __init__(self):
        self._configs = {}
        self._configs["dataset"] = None
        self._configs["sampling_function"] = "kp_detection"
        # Training Config
        self._configs["display"] = 5
        self._configs["snapshot"] = 5000
        self._configs["stepsize"] = 450000
        self._configs["learning_rate"] = 0.00025
        self._configs["decay_rate"] = 10
        self._configs["max_iter"] = 500000
        self._configs["val_iter"] = 100
        self._configs["batch_size"] = 1
        self._configs["snapshot_name"] = None
        self._configs["prefetch_size"] = 100
        self._configs["weight_decay"] = False
        self._configs["weight_decay_rate"] = 1e-5
        self._configs["weight_decay_type"] = "l2"
        self._configs["pretrain"] = None
        self._configs["opt_algo"] = "adam"
        self._configs["chunk_sizes"] = None
        # Directories
        self._configs["data_dir"] = "./data"
        self._configs["cache_dir"] = "./cache"
        self._configs["config_dir"] = "./config"
        self._configs["result_dir"] = "./results"
        # Split
        self._configs["train_split"] = "trainval"
        self._configs["val_split"] = "minival"
        self._configs["test_split"] = "testdev"
        # Rng: fixed seeds so data sampling and weight init are reproducible.
        self._configs["data_rng"] = np.random.RandomState(123)
        self._configs["nnet_rng"] = np.random.RandomState(317)
    @property
    def chunk_sizes(self):
        return self._configs["chunk_sizes"]
    @property
    def train_split(self):
        return self._configs["train_split"]
    @property
    def val_split(self):
        return self._configs["val_split"]
    @property
    def test_split(self):
        return self._configs["test_split"]
    @property
    def full(self):
        # Raw access to the whole config dict (mutable; handle with care).
        return self._configs
    @property
    def sampling_function(self):
        return self._configs["sampling_function"]
    @property
    def data_rng(self):
        return self._configs["data_rng"]
    @property
    def nnet_rng(self):
        return self._configs["nnet_rng"]
    @property
    def opt_algo(self):
        return self._configs["opt_algo"]
    @property
    def weight_decay_type(self):
        return self._configs["weight_decay_type"]
    @property
    def prefetch_size(self):
        return self._configs["prefetch_size"]
    @property
    def pretrain(self):
        return self._configs["pretrain"]
    @property
    def weight_decay_rate(self):
        return self._configs["weight_decay_rate"]
    @property
    def weight_decay(self):
        return self._configs["weight_decay"]
    @property
    def result_dir(self):
        # NOTE: accessing this property creates the directory on disk as a side effect.
        result_dir = os.path.join(self._configs["result_dir"], self.snapshot_name)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        return result_dir
    @property
    def dataset(self):
        return self._configs["dataset"]
    @property
    def snapshot_name(self):
        return self._configs["snapshot_name"]
    @property
    def snapshot_dir(self):
        # NOTE: accessing this property creates the directory on disk as a side effect.
        snapshot_dir = os.path.join(self.cache_dir, "nnet", self.snapshot_name)
        if not os.path.exists(snapshot_dir):
            os.makedirs(snapshot_dir)
        return snapshot_dir
    @property
    def snapshot_file(self):
        # Template path; callers fill the '{}' placeholder with the iteration number.
        snapshot_file = os.path.join(self.snapshot_dir, self.snapshot_name + "_{}.pkl")
        return snapshot_file
    @property
    def config_dir(self):
        return self._configs["config_dir"]
    @property
    def batch_size(self):
        return self._configs["batch_size"]
    @property
    def max_iter(self):
        return self._configs["max_iter"]
    @property
    def learning_rate(self):
        return self._configs["learning_rate"]
    @property
    def decay_rate(self):
        return self._configs["decay_rate"]
    @property
    def stepsize(self):
        return self._configs["stepsize"]
    @property
    def snapshot(self):
        return self._configs["snapshot"]
    @property
    def display(self):
        return self._configs["display"]
    @property
    def val_iter(self):
        return self._configs["val_iter"]
    @property
    def data_dir(self):
        return self._configs["data_dir"]
    @property
    def cache_dir(self):
        # NOTE: accessing this property creates the directory on disk as a side effect.
        if not os.path.exists(self._configs["cache_dir"]):
            os.makedirs(self._configs["cache_dir"])
        return self._configs["cache_dir"]
    def update_config(self, new):
        # Merge externally loaded settings; keys without a default are ignored.
        for key in new:
            if key in self._configs:
                self._configs[key] = new[key]
# Module-level singleton; the rest of the project imports and mutates this instance.
system_configs = Config()
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
db/base.py | Python | import os
import h5py
import numpy as np
from config import system_configs
class BASE(object):
    """Base class for dataset wrappers.

    Holds split bookkeeping (``_db_inds`` / ``_image_ids``), per-channel image
    normalization statistics and PCA color-augmentation parameters, plus the
    common accessor and shuffling plumbing shared by concrete datasets.
    """
    def __init__(self):
        self._split = None
        self._db_inds = []
        self._image_ids = []
        self._data = None
        self._image_hdf5 = None
        self._image_file = None
        self._image_hdf5_file = None
        # Per-channel mean/std used for input normalization; subclasses overwrite.
        self._mean = np.zeros((3, ), dtype=np.float32)
        self._std = np.ones((3, ), dtype=np.float32)
        # PCA eigenvalues/eigenvectors for color augmentation; subclasses overwrite.
        self._eig_val = np.ones((3, ), dtype=np.float32)
        self._eig_vec = np.zeros((3, 3), dtype=np.float32)
        self._configs = {}
        self._configs["data_aug"] = True
        # Lazily seeded in shuffle_inds so each worker process shuffles differently.
        self._data_rng = None
    @property
    def data(self):
        if self._data is None:
            raise ValueError("data is not set")
        return self._data
    @property
    def configs(self):
        return self._configs
    @property
    def mean(self):
        return self._mean
    @property
    def std(self):
        return self._std
    @property
    def eig_val(self):
        return self._eig_val
    @property
    def eig_vec(self):
        return self._eig_vec
    @property
    def db_inds(self):
        return self._db_inds
    @property
    def split(self):
        return self._split
    def update_config(self, new):
        # Merge externally provided settings; keys without a default are ignored.
        for key in new:
            if key in self._configs:
                self._configs[key] = new[key]
    def image_ids(self, ind):
        """Return the image id at database index ind."""
        return self._image_ids[ind]
    def image_file(self, ind):
        """Return the full image path for database index ind."""
        if self._image_file is None:
            raise ValueError("Image path is not initialized")
        image_id = self._image_ids[ind]
        return self._image_file.format(image_id)
    def write_result(self, ind, all_bboxes, all_scores):
        # Hook for subclasses; the base class stores nothing.
        pass
    def evaluate(self, name):
        # Hook for subclasses; the base class evaluates nothing.
        pass
    def shuffle_inds(self, quiet=False):
        """Randomly permute the database indices (in place)."""
        if self._data_rng is None:
            # Seed with the pid so forked data-loading workers get different orders.
            self._data_rng = np.random.RandomState(os.getpid())
        if not quiet:
            print("shuffling indices...")
        rand_perm = self._data_rng.permutation(len(self._db_inds))
        self._db_inds = self._db_inds[rand_perm]
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
db/coco.py | Python | import sys
sys.path.insert(0, "data/coco/PythonAPI/")
import os
import json
import numpy as np
import pickle
from tqdm import tqdm
from db.detection import DETECTION
from config import system_configs
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class MSCOCO(DETECTION):
    """MS COCO detection dataset wrapper.

    Loads annotations through pycocotools, caches the extracted detections as a
    pickle, and maps between contiguous class ids (1..80) and the sparse COCO
    category ids.
    """
    def __init__(self, db_config, split):
        super(MSCOCO, self).__init__(db_config)
        data_dir   = system_configs.data_dir
        result_dir = system_configs.result_dir
        cache_dir  = system_configs.cache_dir
        self._split = split
        self._dataset = {
            "trainval": "trainval2014",
            "minival": "minival2014",
            "testdev": "testdev2017"
        }[self._split]
        self._coco_dir = os.path.join(data_dir, "coco")
        self._label_dir  = os.path.join(self._coco_dir, "annotations")
        self._label_file = os.path.join(self._label_dir, "instances_{}.json")
        self._label_file = self._label_file.format(self._dataset)
        self._image_dir  = os.path.join(self._coco_dir, "images", self._dataset)
        self._image_file = os.path.join(self._image_dir, "{}")
        self._data = "coco"
        # Per-channel normalization statistics and PCA color-augmentation parameters.
        self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
        self._std  = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        # The 80 sparse COCO category ids (gaps are removed categories).
        self._cat_ids = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
            14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
            37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
            48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
            58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
            72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
            82, 84, 85, 86, 87, 88, 89, 90
        ]
        # Contiguous class id (1..80) <-> sparse COCO category id mappings.
        self._classes = {
            ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
        }
        self._coco_to_class_map = {
            value: key for key, value in self._classes.items()
        }
        self._cache_file = os.path.join(cache_dir, "coco_{}.pkl".format(self._dataset))
        self._load_data()
        self._db_inds = np.arange(len(self._image_ids))
        self._load_coco_data()
    def _load_data(self):
        """Load detections/image ids from the pickle cache, building it on a miss."""
        print("loading from cache file: {}".format(self._cache_file))
        if not os.path.exists(self._cache_file):
            print("No cache file found...")
            self._extract_data()
            with open(self._cache_file, "wb") as f:
                pickle.dump([self._detections, self._image_ids], f)
        else:
            with open(self._cache_file, "rb") as f:
                self._detections, self._image_ids = pickle.load(f)
    def _load_coco_data(self):
        """Load the COCO API object and the file-name -> coco image id mapping."""
        self._coco = COCO(self._label_file)
        with open(self._label_file, "r") as f:
            data = json.load(f)
        coco_ids = self._coco.getImgIds()
        eval_ids = {
            self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
            for coco_id in coco_ids
        }
        self._coco_categories = data["categories"]
        self._coco_eval_ids   = eval_ids
    def class_name(self, cid):
        """Return the COCO category name for a contiguous class id."""
        cat_id = self._classes[cid]
        cat    = self._coco.loadCats([cat_id])[0]
        return cat["name"]
    def _extract_data(self):
        """Build self._image_ids and self._detections from the COCO annotations."""
        self._coco    = COCO(self._label_file)
        self._cat_ids = self._coco.getCatIds()
        coco_image_ids = self._coco.getImgIds()
        self._image_ids = [
            self._coco.loadImgs(img_id)[0]["file_name"]
            for img_id in coco_image_ids
        ]
        self._detections = {}
        for ind, (coco_image_id, image_id) in enumerate(tqdm(zip(coco_image_ids, self._image_ids))):
            image = self._coco.loadImgs(coco_image_id)[0]
            bboxes = []
            categories = []
            for cat_id in self._cat_ids:
                annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
                annotations = self._coco.loadAnns(annotation_ids)
                category = self._coco_to_class_map[cat_id]
                for annotation in annotations:
                    # Convert COCO [x, y, w, h] to [x1, y1, x2, y2].
                    bbox = np.array(annotation["bbox"])
                    bbox[[2, 3]] += bbox[[0, 1]]
                    bboxes.append(bbox)
                    categories.append(category)
            bboxes = np.array(bboxes, dtype=float)
            categories = np.array(categories, dtype=float)
            if bboxes.size == 0 or categories.size == 0:
                self._detections[image_id] = np.zeros((0, 5), dtype=np.float32)
            else:
                # One row per box: [x1, y1, x2, y2, class].
                self._detections[image_id] = np.hstack((bboxes, categories[:, None]))
    def detections(self, ind):
        """Return a copy of the (N, 5) detection array for database index ind."""
        image_id = self._image_ids[ind]
        detections = self._detections[image_id]
        return detections.astype(float).copy()
    def _to_float(self, x):
        # Round to 2 decimals (COCO result files don't need full precision).
        return float("{:.2f}".format(x))
    def convert_to_coco(self, all_bboxes):
        """Convert {image_id: {class: [x1,y1,x2,y2,score]}} to the COCO results format.

        NOTE: mutates the input boxes in place (converts [x1,y1,x2,y2] back to
        COCO's [x, y, w, h]).
        """
        detections = []
        for image_id in all_bboxes:
            coco_id = self._coco_eval_ids[image_id]
            for cls_ind in all_bboxes[image_id]:
                category_id = self._classes[cls_ind]
                for bbox in all_bboxes[image_id][cls_ind]:
                    bbox[2] -= bbox[0]
                    bbox[3] -= bbox[1]
                    score = bbox[4]
                    bbox  = list(map(self._to_float, bbox[0:4]))
                    detection = {
                        "image_id": coco_id,
                        "category_id": category_id,
                        "bbox": bbox,
                        "score": float("{:.2f}".format(score))
                    }
                    detections.append(detection)
        return detections
    def evaluate(self, result_json, cls_ids, image_ids, gt_json=None):
        """Run COCO bbox evaluation; returns None on the label-less testdev split."""
        if self._split == "testdev":
            return None
        coco = self._coco if gt_json is None else COCO(gt_json)
        eval_ids = [self._coco_eval_ids[image_id] for image_id in image_ids]
        cat_ids  = [self._classes[cls_id] for cls_id in cls_ids]
        coco_dets = coco.loadRes(result_json)
        coco_eval = COCOeval(coco, coco_dets, "bbox")
        coco_eval.params.imgIds = eval_ids
        coco_eval.params.catIds = cat_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        # NOTE(review): stock pycocotools produces exactly 12 stats entries, so
        # stats[12:] would be empty -- this appears to rely on a modified COCOeval
        # (per-class stats appended); confirm against the bundled PythonAPI.
        return coco_eval.stats[0], coco_eval.stats[12:]
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
db/coco_extreme.py | Python | import sys
sys.path.insert(0, "data/coco/PythonAPI/")
import os
import json
import numpy as np
import pickle
from tqdm import tqdm
from db.detection import DETECTION
from config import system_configs
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class MSCOCOExtreme(DETECTION):
    """MS-COCO detection database augmented with per-instance extreme points
    (left/top/bottom/right), read from instances_extreme_*.json.  Used by
    ExtremeNet for training and evaluation."""
    def __init__(self, db_config, split):
        super(MSCOCOExtreme, self).__init__(db_config)
        data_dir   = system_configs.data_dir
        cache_dir  = system_configs.cache_dir
        self._split = split
        self._dataset = {
            "train": "train2017",
            "val": "val2017",
            "testdev": "test2017"
        }[self._split]
        self._coco_dir = os.path.join(data_dir, "coco")
        self._label_dir = os.path.join(self._coco_dir, "annotations")
        if self._split == 'testdev':
            # test-dev ships only image info, no instance annotations
            self._label_file = os.path.join(
                self._label_dir, "image_info_test-dev2017.json")
        else:
            self._label_file = os.path.join(self._label_dir,
                "instances_extreme_{}.json")
            self._label_file = self._label_file.format(self._dataset)
        self._image_dir = os.path.join(self._coco_dir, "images", self._dataset)
        self._image_file = os.path.join(self._image_dir, "{}")
        self._data = "coco_extreme"
        # Per-channel normalization statistics and PCA lighting parameters.
        self._mean = np.array([0.40789654, 0.44719302, 0.47026115],
                              dtype=np.float32)
        self._std = np.array([0.28863828, 0.27408164, 0.27809835],
                             dtype=np.float32)
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                                 dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        # The 80 raw COCO category ids (non-contiguous; some ids unused).
        self._cat_ids = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
            14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
            37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
            48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
            58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
            72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
            82, 84, 85, 86, 87, 88, 89, 90
        ]
        # Contiguous class index (1..80) <-> raw COCO category id.
        self._classes = {
            ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
        }
        self._coco_to_class_map = {
            value: key for key, value in self._classes.items()
        }
        self._cache_file = os.path.join(
            cache_dir, "coco_extreme_{}.pkl".format(self._dataset))
        self._load_data()
        self._db_inds = np.arange(len(self._image_ids))
        self._load_coco_data()
    def _load_data(self):
        """Load detections/extreme points from the pickle cache, building it
        from the annotation file on a cache miss."""
        print("loading from cache file: {}".format(self._cache_file))
        if not os.path.exists(self._cache_file):
            print("No cache file found...")
            self._extract_data()
            with open(self._cache_file, "wb") as f:
                pickle.dump([self._detections, self._image_ids,
                             self._extreme_pts], f)
        else:
            with open(self._cache_file, "rb") as f:
                self._detections, self._image_ids, \
                self._extreme_pts = pickle.load(f)
    def _load_coco_data(self):
        """Create the COCO API handle and the file-name -> image-id map used
        when converting results for evaluation."""
        self._coco = COCO(self._label_file)
        with open(self._label_file, "r") as f:
            data = json.load(f)
        coco_ids = self._coco.getImgIds()
        eval_ids = {
            self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
            for coco_id in coco_ids
        }
        self._coco_categories = data["categories"]
        self._coco_eval_ids = eval_ids
    def class_name(self, cid):
        """Return the COCO category name for internal class index *cid*."""
        cat_id = self._classes[cid]
        cat = self._coco.loadCats([cat_id])[0]
        return cat["name"]
    def _extract_data(self):
        """Read all annotations and build, per image file name, an (n, 5)
        detections array and an (n, 4, 2) extreme-points array."""
        self._coco = COCO(self._label_file)
        self._cat_ids = self._coco.getCatIds()
        coco_image_ids = self._coco.getImgIds()
        self._image_ids = [
            self._coco.loadImgs(img_id)[0]["file_name"]
            for img_id in coco_image_ids
        ]
        self._detections = {}
        self._extreme_pts = {}
        for ind, (coco_image_id, image_id) in enumerate(tqdm(zip(coco_image_ids, self._image_ids))):
            image = self._coco.loadImgs(coco_image_id)[0]
            bboxes = []
            categories = []
            extreme_pts = []
            for cat_id in self._cat_ids:
                annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
                annotations = self._coco.loadAnns(annotation_ids)
                category = self._coco_to_class_map[cat_id]
                for annotation in annotations:
                    bbox = np.array(annotation["bbox"])
                    # COCO stores [x, y, w, h]; convert to [x1, y1, x2, y2].
                    bbox[[2, 3]] += bbox[[0, 1]]
                    bboxes.append(bbox)
                    categories.append(category)
                    # Instances without extreme points get a 4x2 zero block.
                    if len(annotation['extreme_points']) == 0:
                        extreme_pts.append(np.zeros((4, 2), dtype=float))
                    else:
                        extreme_pt = np.array(annotation['extreme_points'])
                        extreme_pts.append(extreme_pt)
            bboxes = np.array(bboxes, dtype=float)
            categories = np.array(categories, dtype=float)
            extreme_pts = np.array(extreme_pts, dtype=float)
            if bboxes.size == 0 or categories.size == 0:
                self._detections[image_id] = np.zeros((0, 5), dtype=np.float32)
                self._extreme_pts[image_id] = np.zeros((0, 4, 2),
                                                       dtype=np.float32)
            else:
                self._detections[image_id] = np.hstack((bboxes,
                                                        categories[:, None]))
                self._extreme_pts[image_id] = extreme_pts
    def detections(self, ind):
        """Return float64 copies of (detections, extreme points) for the
        *ind*-th image."""
        image_id = self._image_ids[ind]
        detections = self._detections[image_id]
        extreme_pts = self._extreme_pts[image_id]
        return detections.astype(float).copy(), \
               extreme_pts.astype(float).copy()
    def _to_float(self, x):
        """Round *x* to two decimals for compact json output."""
        return float("{:.2f}".format(x))
    def convert_to_coco(self, all_bboxes):
        """Convert per-image, per-class detections into the COCO results
        format, attaching extreme points when a row carries them."""
        detections = []
        for image_id in all_bboxes:
            coco_id = self._coco_eval_ids[image_id]
            for cls_ind in all_bboxes[image_id]:
                category_id = self._classes[cls_ind]
                for bbox in all_bboxes[image_id][cls_ind]:
                    # NOTE(review): bbox is modified in place here
                    # ([x1,y1,x2,y2] -> [x,y,w,h]); callers must not reuse
                    # these rows afterwards.
                    bbox[2] -= bbox[0]
                    bbox[3] -= bbox[1]
                    score = bbox[4]
                    bbox_out  = list(map(self._to_float, bbox[0:4]))
                    detection = {
                        "image_id": coco_id,
                        "category_id": category_id,
                        "bbox": bbox_out,
                        "score": float("{:.2f}".format(score))
                    }
                    if len(bbox) > 5:
                        # columns 5:13 hold the 4 extreme points (x, y pairs)
                        extreme_points = list(map(self._to_float, bbox[5:13]))
                        detection["extreme_points"] = extreme_points
                    detections.append(detection)
        return detections
    def evaluate(self, result_json, cls_ids, image_ids, gt_json=None):
        """Run COCO bbox evaluation; returns (AP, per-class stats) or None
        on the test-dev split (no ground truth)."""
        if self._split == "testdev":
            return None
        coco = self._coco if gt_json is None else COCO(gt_json)
        eval_ids = [self._coco_eval_ids[image_id] for image_id in image_ids]
        cat_ids = [self._classes[cls_id] for cls_id in cls_ids]
        coco_dets = coco.loadRes(result_json)
        coco_eval = COCOeval(coco, coco_dets, "bbox")
        coco_eval.params.imgIds = eval_ids
        coco_eval.params.catIds = cat_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        return coco_eval.stats[0], coco_eval.stats[12:]
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
db/datasets.py | Python | from db.coco import MSCOCO
from db.coco_extreme import MSCOCOExtreme
# Registry mapping the dataset name used in config files to its DB class.
datasets = {
    "MSCOCO": MSCOCO,
    "MSCOCOExtreme": MSCOCOExtreme
}
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
db/detection.py | Python | import numpy as np
from db.base import BASE
class DETECTION(BASE):
    """Base class for detection databases: declares every tunable option with
    its default value, then overrides them from *db_config*."""
    def __init__(self, db_config):
        super(DETECTION, self).__init__()
        # Dataset / augmentation geometry.
        self._configs["categories"]      = 80
        self._configs["rand_scales"]     = [1]
        self._configs["rand_scale_min"]  = 0.8
        self._configs["rand_scale_max"]  = 1.4
        self._configs["rand_scale_step"] = 0.2
        self._configs["input_size"]      = [511]
        self._configs["output_sizes"]    = [[128, 128]]
        # Test-time decoding / NMS parameters.
        self._configs["nms_threshold"]   = 0.5
        self._configs["max_per_image"]   = 100
        self._configs["top_k"]           = 100
        self._configs["ae_threshold"]    = 0.5
        self._configs["aggr_weight"]     = 0.1
        self._configs["scores_thresh"]   = 0.1
        self._configs["center_thresh"]   = 0.1
        self._configs["suppres_ghost"]   = False
        self._configs["nms_kernel"]      = 3
        self._configs["nms_algorithm"]   = "exp_soft_nms"
        self._configs["weight_exp"]      = 8
        self._configs["merge_bbox"]      = False
        # Training-time augmentation switches.
        self._configs["data_aug"]        = True
        self._configs["lighting"]        = True
        self._configs["border"]          = 128
        self._configs["gaussian_bump"]   = True
        self._configs["gaussian_iou"]    = 0.7
        self._configs["gaussian_radius"] = -1
        self._configs["rand_crop"]       = False
        self._configs["rand_color"]      = False
        self._configs["rand_pushes"]     = False
        self._configs["rand_samples"]    = False
        self._configs["special_crop"]    = False
        self._configs["test_scales"]     = [1]
        self.update_config(db_config)
        # When no explicit scale list is given, build one from min/max/step.
        if self._configs["rand_scales"] is None:
            self._configs["rand_scales"] = np.arange(
                self._configs["rand_scale_min"],
                self._configs["rand_scale_max"],
                self._configs["rand_scale_step"]
            )
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
demo.py | Python | #!/usr/bin/env python
import os
import json
import torch
import pprint
import argparse
import importlib
import numpy as np
import cv2
import matplotlib
matplotlib.use("Agg")
from config import system_configs
from nnet.py_factory import NetworkFactory
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms_with_points as soft_nms
from utils.color_map import colormap
from utils.visualize import vis_mask, vis_octagon, vis_ex, vis_class, vis_bbox
from dextr import Dextr
# Disable cudnn autotuning (input sizes vary per image in this demo).
torch.backends.cudnn.benchmark = False

# The 80 COCO class names indexed by the contiguous class id used by the
# network; index 0 is a background placeholder.
class_name = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
    'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
    'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
    'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
    'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
    'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

# Image file extensions picked up when --demo points at a directory.
image_ext = ['jpg', 'jpeg', 'png', 'webp']
def parse_args():
    """Parse the demo's command-line arguments."""
    parser = argparse.ArgumentParser(description="Demo CornerNet")
    parser.add_argument("--cfg_file", type=str, default='ExtremeNet',
                        help="config file")
    parser.add_argument("--demo", type=str, default="",
                        help="demo image path or folders")
    parser.add_argument("--model_path",
                        default='cache/ExtremeNet_250000.pkl')
    parser.add_argument("--show_mask", action='store_true',
                        help="Run Deep extreme cut to obtain accurate mask")
    return parser.parse_args()
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def _rescale_ex_pts(detections, ratios, borders, sizes):
xs, ys = detections[..., 5:13:2], detections[..., 6:13:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def _box_inside(box2, box1):
inside = (box2[0] >= box1[0] and box2[1] >= box1[1] and \
box2[2] <= box1[2] and box2[3] <= box1[3])
return inside
def kp_decode(nnet, images, K, kernel=3, aggr_weight=0.1,
              scores_thresh=0.1, center_thresh=0.1, debug=False):
    """Run the network once and return the decoded detections as a numpy
    array.  `K` is kept for interface compatibility; nnet.test does not
    receive it here."""
    raw = nnet.test(
        [images], kernel=kernel, aggr_weight=aggr_weight,
        scores_thresh=scores_thresh, center_thresh=center_thresh, debug=debug)
    return raw.data.cpu().numpy()
if __name__ == "__main__":
    # Demo entry point: load config + weights, run multi-scale flip-augmented
    # inference on each image, post-process with soft-NMS, and visualize.
    args = parse_args()
    cfg_file = os.path.join(
        system_configs.config_dir, args.cfg_file + ".json")
    print("cfg_file: {}".format(cfg_file))
    with open(cfg_file, "r") as f:
        configs = json.load(f)
    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])
    print("system config...")
    pprint.pprint(system_configs.full)
    print("loading parameters: {}".format(args.model_path))
    print("building neural network...")
    nnet = NetworkFactory(None)
    print("loading parameters...")
    nnet.load_pretrained_params(args.model_path)
    nnet.cuda()
    nnet.eval_mode()
    # Test-time hyper-parameters, mostly taken from the db config.
    K = configs["db"]["top_k"]
    aggr_weight = configs["db"]["aggr_weight"]
    scores_thresh = configs["db"]["scores_thresh"]
    center_thresh = configs["db"]["center_thresh"]
    suppres_ghost = True
    nms_kernel = 3
    scales = configs["db"]["test_scales"]
    weight_exp = 8
    categories = configs["db"]["categories"]
    nms_threshold = configs["db"]["nms_threshold"]
    max_per_image = configs["db"]["max_per_image"]
    nms_algorithm = {
        "nms": 0,
        "linear_soft_nms": 1,
        "exp_soft_nms": 2
    }["exp_soft_nms"]
    if args.show_mask:
        dextr = Dextr()
    # Per-channel normalization statistics (match the training database).
    mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
    std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
    top_bboxes = {}
    # Collect the image list: a directory of images or a single file.
    if os.path.isdir(args.demo):
        image_names = []
        ls = os.listdir(args.demo)
        for file_name in sorted(ls):
            ext = file_name[file_name.rfind('.') + 1:].lower()
            if ext in image_ext:
                image_names.append(os.path.join(args.demo, file_name))
    else:
        image_names = [args.demo]
    for image_id, image_name in enumerate(image_names):
        print('Running ', image_name)
        image = cv2.imread(image_name)
        height, width = image.shape[0:2]
        detections = []
        for scale in scales:
            new_height = int(height * scale)
            new_width = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])
            # OR with 127 pads each dimension up to the form 128k - 1
            # (presumably for network stride alignment — TODO confirm).
            inp_height = new_height | 127
            inp_width = new_width | 127
            images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes = np.zeros((1, 2), dtype=np.float32)
            # Output resolution is 1/4 of the (padded) input resolution.
            out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            height_ratio = out_height / inp_height
            width_ratio = out_width / inp_width
            resized_image = cv2.resize(image, (new_width, new_height))
            resized_image, border, offset = crop_image(
                resized_image, new_center, [inp_height, inp_width])
            resized_image = resized_image / 255.
            normalize_(resized_image, mean, std)
            images[0] = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0] = [int(height * scale), int(width * scale)]
            ratios[0] = [height_ratio, width_ratio]
            # Flip-augmented batch: [original, horizontally flipped].
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)
            dets = kp_decode(
                nnet, images, K, aggr_weight=aggr_weight,
                scores_thresh=scores_thresh, center_thresh=center_thresh,
                kernel=nms_kernel, debug=True)
            # Undo the horizontal flip for the second half of the batch:
            # mirror x coordinates and swap the left/right extreme points.
            dets = dets.reshape(2, -1, 14)
            dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
            dets[1, :, [5, 7, 9, 11]] = out_width - dets[1, :, [5, 7, 9, 11]]
            dets[1, :, [7, 8, 11, 12]] = dets[1, :, [11, 12, 7, 8]].copy()
            dets = dets.reshape(1, -1, 14)
            # Map boxes and extreme points back to original image coords.
            _rescale_dets(dets, ratios, borders, sizes)
            _rescale_ex_pts(dets, ratios, borders, sizes)
            dets[:, :, 0:4] /= scale
            dets[:, :, 5:13] /= scale
            detections.append(dets)
        detections = np.concatenate(detections, axis=1)
        classes = detections[..., -1]
        classes = classes[0]
        detections = detections[0]
        # reject detections with negative scores
        keep_inds = (detections[:, 4] > 0)
        detections = detections[keep_inds]
        classes = classes[keep_inds]
        # Per-class soft-NMS (scores are decayed in place).
        top_bboxes[image_id] = {}
        for j in range(categories):
            keep_inds = (classes == j)
            top_bboxes[image_id][j + 1] = \
                detections[keep_inds].astype(np.float32)
            soft_nms(top_bboxes[image_id][j + 1],
                     Nt=nms_threshold, method=nms_algorithm)
        scores = np.hstack([
            top_bboxes[image_id][j][:, 4]
            for j in range(1, categories + 1)
        ])
        # Keep at most max_per_image detections per image (score threshold
        # chosen via a partial sort).
        if len(scores) > max_per_image:
            kth = len(scores) - max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, 4] >= thresh)
                top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]
        # Down-weight "ghost" boxes: a box whose interior boxes together
        # carry much more score than itself gets its score halved.
        if suppres_ghost:
            for j in range(1, categories + 1):
                n = len(top_bboxes[image_id][j])
                for k in range(n):
                    inside_score = 0
                    if top_bboxes[image_id][j][k, 4] > 0.2:
                        for t in range(n):
                            if _box_inside(top_bboxes[image_id][j][t],
                                           top_bboxes[image_id][j][k]):
                                inside_score += top_bboxes[image_id][j][t, 4]
                        if inside_score > top_bboxes[image_id][j][k, 4] * 3:
                            top_bboxes[image_id][j][k, 4] /= 2
        if 1: # visualize
            color_list = colormap(rgb=True)
            mask_color_id = 0
            image = cv2.imread(image_name)
            input_image = image.copy()
            mask_image = image.copy()
            bboxes = {}
            for j in range(1, categories + 1):
                # Only draw confident detections.
                keep_inds = (top_bboxes[image_id][j][:, 4] > 0.5)
                cat_name = class_name[j]
                for bbox in top_bboxes[image_id][j][keep_inds]:
                    sc = bbox[4]
                    ex = bbox[5:13].astype(np.int32).reshape(4, 2)
                    bbox = bbox[0:4].astype(np.int32)
                    txt = '{}{:.2f}'.format(cat_name, sc)
                    color_mask = color_list[mask_color_id % len(color_list), :3]
                    mask_color_id += 1
                    image = vis_bbox(image,
                                     (bbox[0], bbox[1],
                                      bbox[2] - bbox[0], bbox[3] - bbox[1]))
                    image = vis_class(image,
                                      (bbox[0], bbox[1] - 2), txt)
                    image = vis_octagon(
                        image, ex, color_mask)
                    image = vis_ex(image, ex, color_mask)
                    if args.show_mask:
                        # Refine the octagon into a full mask with DEXTR.
                        mask = dextr.segment(input_image[:, :, ::-1], ex) # BGR to RGB
                        mask = np.asfortranarray(mask.astype(np.uint8))
                        mask_image = vis_bbox(mask_image,
                                              (bbox[0], bbox[1],
                                               bbox[2] - bbox[0],
                                               bbox[3] - bbox[1]))
                        mask_image = vis_class(mask_image,
                                               (bbox[0], bbox[1] - 2), txt)
                        mask_image = vis_mask(mask_image, mask, color_mask)
            if args.show_mask:
                cv2.imshow('mask', mask_image)
            cv2.imshow('out', image)
            cv2.waitKey()
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
dextr.py | Python | import os
import torch
from collections import OrderedDict
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import sys
from torch.nn.functional import upsample
this_dir = os.path.dirname(__file__)
sys.path.insert(0, 'dextr')
import networks.deeplab_resnet as resnet
from dataloaders import helpers as helpers
class Dextr(object):
    """Deep Extreme Cut (DEXTR): predict an object mask from its 4 extreme
    points using a ResNet-101/PSP network with a 4th extreme-point-heatmap
    input channel."""
    def __init__(self, model_path='',
                 gpu_id=0, flip_test=True):
        """Build the network and load weights.

        model_path: checkpoint path ('' -> cache/dextr_pascal-sbd.pth).
        gpu_id:     CUDA device index (falls back to CPU when unavailable).
        flip_test:  also run the horizontally flipped input and average.
        """
        if model_path == '':
            model_path = os.path.join(
                'cache', 'dextr_pascal-sbd.pth')
        self.pad = 50     # context padding (pixels) around the extreme-point bbox
        self.thres = 0.8  # probability threshold for the final binary mask
        self.device = torch.device(
            "cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
        self.flip_test = flip_test
        # Create the network and load the weights
        self.net = resnet.resnet101(1, nInputChannels=4, classifier='psp')
        print("Initializing weights from: {}".format(model_path))
        state_dict_checkpoint = torch.load(
            model_path, map_location=lambda storage, loc: storage)
        # Remove the prefix .module from the model when it is trained using DataParallel
        if 'module.' in list(state_dict_checkpoint.keys())[0]:
            new_state_dict = OrderedDict()
            for k, v in state_dict_checkpoint.items():
                name = k[7:] # remove `module.` from multi-gpu training
                new_state_dict[name] = v
        else:
            new_state_dict = state_dict_checkpoint
        self.net.load_state_dict(new_state_dict)
        self.net.eval()
        self.net.to(self.device)

    def segment(self, image, extreme_points_ori):
        """Return a boolean mask (same HxW as *image*, RGB) for the object
        whose 4 extreme points (in image coordinates) are given."""
        # Crop image to the bounding box from the extreme points and resize
        bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=self.pad, zero_pad=True)
        crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)
        resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)
        # Generate extreme point heat map normalized to image values
        extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]),
                                               np.min(extreme_points_ori[:, 1])] + [self.pad, self.pad]
        # FIX: np.int was removed in NumPy 1.24; builtin int is its alias.
        extreme_points = (512 * extreme_points * [1 / crop_image.shape[1],
                                                  1 / crop_image.shape[0]]).astype(int)
        extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)
        extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)
        # Concatenate inputs and convert to tensor
        input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)
        inputs = input_dextr.transpose((2, 0, 1))[np.newaxis, ...]
        if self.flip_test:
            # Batch the horizontally flipped input alongside the original.
            inputs = np.concatenate([inputs, inputs[:, :, :, ::-1]], axis=0)
        inputs = torch.from_numpy(inputs)
        # Run a forward pass
        inputs = inputs.to(self.device)
        outputs = self.net.forward(inputs)
        # FIX: F.upsample is deprecated; interpolate is the supported equivalent.
        outputs = torch.nn.functional.interpolate(
            outputs, size=(512, 512), mode='bilinear', align_corners=True)
        outputs = outputs.to(torch.device('cpu'))
        outputs = outputs.data.numpy()
        if self.flip_test:
            # Average the original and un-flipped predictions.
            outputs = (outputs[:1] + outputs[1:, :, :, ::-1]) / 2
        pred = np.transpose(outputs[0, ...], (1, 2, 0))
        pred = 1 / (1 + np.exp(-pred))  # sigmoid over the raw logits
        pred = np.squeeze(pred)
        # Paste the 512x512 prediction back into full-image coordinates.
        result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2],
                                       zero_pad=True, relax=self.pad) > self.thres
        return result
if __name__ == '__main__':
    # Interactive demo: click 4 extreme points on the image, segment, repeat.
    dextr = Dextr()
    # Read image and click the points
    image = np.array(Image.open(sys.argv[1]))
    plt.ion()
    plt.axis('off')
    plt.imshow(image)
    plt.title('Click the four extreme points of the objects\nHit enter when done (do not close the window)')
    results = []
    with torch.no_grad():
        while 1:
            # FIX: np.int was removed in NumPy 1.24; builtin int is its alias.
            extreme_points_ori = np.array(plt.ginput(4, timeout=0)).astype(int)
            result = dextr.segment(image, extreme_points_ori)
            results.append(result)
            # Plot the results
            plt.imshow(helpers.overlay_masks(image / 255, results))
            plt.plot(extreme_points_ori[:, 0], extreme_points_ori[:, 1], 'gx')
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
eval_dextr_mask.py | Python | from dextr.dextr import Dextr
import pycocotools.coco as cocoapi
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as COCOmask
import numpy as np
import sys
import cv2
import json
from progress.bar import Bar
# Toggle per-box visualization of the predicted masks.
DEBUG = False
ANN_PATH = 'data/coco/annotations/instances_extreme_val2017.json'
IMG_DIR = 'data/coco/images/val2017/'
if __name__ == '__main__':
    # Take detection results (json with extreme points), run DEXTR on every
    # box above score_thresh to obtain a mask, and evaluate COCO segm AP.
    dextr = Dextr()
    coco = cocoapi.COCO(ANN_PATH)
    pred_path = sys.argv[1]
    out_path = pred_path[:-5] + '_segm.json'
    data = json.load(open(pred_path, 'r'))
    anns = data
    results = []
    score_thresh = 0.2
    num_boxes = 0
    # First pass only counts boxes so the progress bar has a total.
    for i, ann in enumerate(anns):
        if ann['score'] >= score_thresh:
            num_boxes += 1
    bar = Bar('Pred + Dextr', max=num_boxes)
    for i, ann in enumerate(anns):
        if ann['score'] < score_thresh:
            continue
        ex = np.array(ann['extreme_points'], dtype=np.int32).reshape(4, 2)
        img_id = ann['image_id']
        img_info = coco.loadImgs(ids=[img_id])[0]
        img_path = IMG_DIR + img_info['file_name']
        img = cv2.imread(img_path)
        # cv2 loads BGR; DEXTR expects RGB.
        mask = dextr.segment(img[:, :, ::-1], ex)
        mask = np.asfortranarray(mask.astype(np.uint8))
        if DEBUG:
            if ann['score'] < 0.1:
                continue
            print(ann['score'])
            img = (0.4 * img + 0.6 * mask.reshape(
                mask.shape[0], mask.shape[1], 1) * 255).astype(np.uint8)
            cv2.imshow('img', img)
            cv2.waitKey()
        # RLE-encode the mask for the COCO segmentation format.
        encode = COCOmask.encode(mask)
        if 'counts' in encode:
            encode['counts'] = encode['counts'].decode("utf8")
        pred = {'image_id': ann['image_id'],
                'category_id': ann['category_id'],
                'score': ann['score'],
                'segmentation': encode,
                'extreme_points': ann['extreme_points']}
        results.append(pred)
        Bar.suffix = '[{0}/{1}]| Total: {total:} | ETA: {eta:} |'.format(
            i, num_boxes, total=bar.elapsed_td, eta=bar.eta_td)
        bar.next()
    bar.finish()
    json.dump(results, open(out_path, 'w'))
    dets = coco.loadRes(out_path)
    coco_eval = COCOeval(coco, dets, "segm")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
external/nms.pyx | Cython | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# ----------------------------------------------------------
# Soft-NMS: Improving Object Detection With One Line of Code
# Copyright (c) University of Maryland, College Park
# Licensed under The MIT License [see LICENSE for details]
# Written by Navaneeth Bodla and Bharat Singh
# ----------------------------------------------------------
import numpy as np
cimport numpy as np
# C-level max of two float32 values (no Python-object overhead).
cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
    return a if a >= b else b
# C-level min of two float32 values (no Python-object overhead).
cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
    return a if a <= b else b
def nms(np.ndarray[np.float32_t, ndim=2] dets, double thresh):
    """Classic hard NMS.

    dets rows are [x1, y1, x2, y2, score]; returns the indices (into dets)
    of the kept boxes, in descending score order.

    FIX: the signature used the alias ``np.float`` and the suppressed array
    ``dtype=np.int`` — both aliases were removed in NumPy 1.24.  ``double``
    and ``np.int_`` are the exact replacements (np.int_ matches np.int_t).
    """
    cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
    cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
    cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
    cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
    cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]

    # +1 box areas (legacy integer-pixel convention).
    cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]

    cdef int ndets = dets.shape[0]
    cdef np.ndarray[np.int_t, ndim=1] suppressed = \
        np.zeros((ndets), dtype=np.int_)

    # nominal indices
    cdef int _i, _j
    # sorted indices
    cdef int i, j
    # temp variables for box i's (the box currently under consideration)
    cdef np.float32_t ix1, iy1, ix2, iy2, iarea
    # variables for computing overlap with box j (lower scoring box)
    cdef np.float32_t xx1, yy1, xx2, yy2
    cdef np.float32_t w, h
    cdef np.float32_t inter, ovr

    keep = []
    for _i in range(ndets):
        i = order[_i]
        if suppressed[i] == 1:
            continue
        keep.append(i)
        ix1 = x1[i]
        iy1 = y1[i]
        ix2 = x2[i]
        iy2 = y2[i]
        iarea = areas[i]
        for _j in range(_i + 1, ndets):
            j = order[_j]
            if suppressed[j] == 1:
                continue
            # Intersection-over-union between box i and box j.
            xx1 = max(ix1, x1[j])
            yy1 = max(iy1, y1[j])
            xx2 = min(ix2, x2[j])
            yy2 = min(iy2, y2[j])
            w = max(0.0, xx2 - xx1 + 1)
            h = max(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (iarea + areas[j] - inter)
            if ovr >= thresh:
                suppressed[j] = 1

    return keep
def soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
    """In-place Soft-NMS (Bodla et al.) over rows [x1, y1, x2, y2, score].

    method: 0 = classic hard NMS, 1 = linear score decay, 2 = gaussian decay.
    Boxes whose decayed score drops below `threshold` are removed by swapping
    with the current last row and shrinking N.  Returns the indices 0..N-1 of
    the surviving rows (the array itself is reordered in place).
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov

    for i in range(N):
        # Selection-sort step: find the highest-scoring remaining box.
        maxscore = boxes[i, 4]
        maxpos = i

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]

        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1

        # add max box as a detection
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]

        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]

        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]

            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box

                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1

                    boxes[pos, 4] = weight*boxes[pos, 4]

                    # if box score falls below threshold, discard the box by swapping with last box
                    # update N
                    if boxes[pos, 4] < threshold:
                        boxes[pos,0] = boxes[N-1, 0]
                        boxes[pos,1] = boxes[N-1, 1]
                        boxes[pos,2] = boxes[N-1, 2]
                        boxes[pos,3] = boxes[N-1, 3]
                        boxes[pos,4] = boxes[N-1, 4]
                        N = N - 1
                        pos = pos - 1

            pos = pos + 1

    keep = [i for i in range(N)]
    return keep
def soft_nms_with_points(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
    """In-place Soft-NMS over 13-column rows
    [x1, y1, x2, y2, score, tx, ty, lx, ly, bx, by, rx, ry], where columns
    5..12 are the top/left/bottom/right extreme points carried along with
    each box during the swaps.  Overlap is computed from the box corners
    only; method semantics and return value match soft_nms().
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
    # NOTE(review): the extreme-point temporaries (ttx, tty, ...) below are
    # intentionally untyped Python objects in the original code.

    for i in range(N):
        # Selection-sort step: find the highest-scoring remaining box.
        maxscore = boxes[i, 4]
        maxpos = i

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        ttx = boxes[i,5]
        tty = boxes[i,6]
        tlx = boxes[i,7]
        tly = boxes[i,8]
        tbx = boxes[i,9]
        tby = boxes[i,10]
        trx = boxes[i,11]
        try_ = boxes[i,12]

        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1

        # add max box as a detection (all 13 columns)
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]
        boxes[i,5] = boxes[maxpos,5]
        boxes[i,6] = boxes[maxpos,6]
        boxes[i,7] = boxes[maxpos,7]
        boxes[i,8] = boxes[maxpos,8]
        boxes[i,9] = boxes[maxpos,9]
        boxes[i,10] = boxes[maxpos,10]
        boxes[i,11] = boxes[maxpos,11]
        boxes[i,12] = boxes[maxpos,12]

        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts
        boxes[maxpos,5] = ttx
        boxes[maxpos,6] = tty
        boxes[maxpos,7] = tlx
        boxes[maxpos,8] = tly
        boxes[maxpos,9] = tbx
        boxes[maxpos,10] = tby
        boxes[maxpos,11] = trx
        boxes[maxpos,12] = try_

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        ttx = boxes[i,5]
        tty = boxes[i,6]
        tlx = boxes[i,7]
        tly = boxes[i,8]
        tbx = boxes[i,9]
        tby = boxes[i,10]
        trx = boxes[i,11]
        try_ = boxes[i,12]

        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]

            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box

                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1

                    boxes[pos, 4] = weight*boxes[pos, 4]

                    # if box score falls below threshold, discard the box by swapping with last box
                    # update N
                    if boxes[pos, 4] < threshold:
                        boxes[pos,0] = boxes[N-1, 0]
                        boxes[pos,1] = boxes[N-1, 1]
                        boxes[pos,2] = boxes[N-1, 2]
                        boxes[pos,3] = boxes[N-1, 3]
                        boxes[pos,4] = boxes[N-1, 4]
                        boxes[pos,5] = boxes[N-1, 5]
                        boxes[pos,6] = boxes[N-1, 6]
                        boxes[pos,7] = boxes[N-1, 7]
                        boxes[pos,8] = boxes[N-1, 8]
                        boxes[pos,9] = boxes[N-1, 9]
                        boxes[pos,10] = boxes[N-1, 10]
                        boxes[pos,11] = boxes[N-1, 11]
                        boxes[pos,12] = boxes[N-1, 12]
                        N = N - 1
                        pos = pos - 1

            pos = pos + 1

    keep = [i for i in range(N)]
    return keep
def soft_nms_merge(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0, float weight_exp=6):
    """Soft-NMS with box merging (weighted coordinate voting), in place.

    ``boxes`` is an (N, >=7) float array: columns 0-3 are x1, y1, x2, y2,
    column 4 is the detection score, and columns 5-6 are per-box weights
    used when averaging coordinates (presumably per-branch confidences —
    TODO confirm against the caller).  ``method`` selects the score decay:
    1 = linear, 2 = gaussian (controlled by ``sigma``), anything else =
    hard NMS at IoU threshold ``Nt``.  Boxes whose decayed score drops
    below ``threshold`` are discarded by swapping them past the end.

    Each surviving box's coordinates are replaced by the weighted average
    of its overlapping boxes, with per-box merge weight
    ``(1 - weight) ** weight_exp``.  Returns the list of surviving row
    indices ``[0, N')``.
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
    cdef float mx1,mx2,my1,my2,mts,mbs,mw
    for i in range(N):
        # selection-sort style pass: find the highest-scoring remaining box
        maxscore = boxes[i, 4]
        maxpos = i
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1
        # add max box as a detection
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]
        # seed the weighted-merge accumulators with the max box itself
        mx1 = boxes[i, 0] * boxes[i, 5]
        my1 = boxes[i, 1] * boxes[i, 5]
        mx2 = boxes[i, 2] * boxes[i, 6]
        my2 = boxes[i, 3] * boxes[i, 6]
        mts = boxes[i, 5]
        mbs = boxes[i, 6]
        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts
        # re-read row i (now holding the max box) as the reference box
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]
            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box
                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1
                    # fold this overlapping box into the merge accumulators:
                    # the more a box is suppressed (small weight), the larger
                    # its voting contribution mw
                    mw = (1 - weight) ** weight_exp
                    mx1 = mx1 + boxes[pos, 0] * boxes[pos, 5] * mw
                    my1 = my1 + boxes[pos, 1] * boxes[pos, 5] * mw
                    mx2 = mx2 + boxes[pos, 2] * boxes[pos, 6] * mw
                    my2 = my2 + boxes[pos, 3] * boxes[pos, 6] * mw
                    mts = mts + boxes[pos, 5] * mw
                    mbs = mbs + boxes[pos, 6] * mw
                    boxes[pos, 4] = weight*boxes[pos, 4]
                    # if box score falls below threshold, discard the box by swapping with last box
                    # update N
                    if boxes[pos, 4] < threshold:
                        boxes[pos,0] = boxes[N-1, 0]
                        boxes[pos,1] = boxes[N-1, 1]
                        boxes[pos,2] = boxes[N-1, 2]
                        boxes[pos,3] = boxes[N-1, 3]
                        boxes[pos,4] = boxes[N-1, 4]
                        N = N - 1
                        pos = pos - 1
            pos = pos + 1
        # overwrite the kept box with the weight-averaged coordinates
        boxes[i, 0] = mx1 / mts
        boxes[i, 1] = my1 / mts
        boxes[i, 2] = mx2 / mbs
        boxes[i, 3] = my2 / mbs
    keep = [i for i in range(N)]
    return keep
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
external/setup.py | Python | import numpy
from setuptools import setup
from setuptools.extension import Extension
from Cython.Build import cythonize

# Build the Cython NMS extension (nms.pyx).  setuptools replaces distutils,
# which is deprecated and removed from the stdlib in Python 3.12; the other
# build script in this repo (models/py_utils/_cpools/setup.py) already uses
# setuptools.
extensions = [
    Extension(
        "nms",
        ["nms.pyx"],
        # silence noisy warnings emitted by Cython-generated C code
        extra_compile_args=["-Wno-cpp", "-Wno-unused-function"]
    )
]

setup(
    # NOTE(review): package name "coco" looks inherited from cocoapi — confirm
    name="coco",
    ext_modules=cythonize(extensions),
    # NumPy headers are needed to compile the generated C sources
    include_dirs=[numpy.get_include()]
)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/CornerNet.py | Python | import torch
import torch.nn as nn
from .py_utils import kp, AELoss, _neg_loss, convolution, residual
from .py_utils import TopPool, BottomPool, LeftPool, RightPool
class pool(nn.Module):
    """Corner-pooling block: two directionally pooled branches fused with a
    1x1 skip path from the input (structure from CornerNet).

    Attribute names (and hence state_dict keys) are kept unchanged.
    """
    def __init__(self, dim, pool1, pool2):
        super(pool, self).__init__()
        self.p1_conv1 = convolution(3, dim, 128)
        self.p2_conv1 = convolution(3, dim, 128)
        self.p_conv1 = nn.Conv2d(128, dim, (3, 3), padding=(1, 1), bias=False)
        self.p_bn1 = nn.BatchNorm2d(dim)
        self.conv1 = nn.Conv2d(dim, dim, (1, 1), bias=False)
        self.bn1 = nn.BatchNorm2d(dim)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = convolution(3, dim, dim)
        self.pool1 = pool1()
        self.pool2 = pool2()

    def forward(self, x):
        # directional pooling applied to each reduced branch
        branch1 = self.pool1(self.p1_conv1(x))
        branch2 = self.pool2(self.p2_conv1(x))
        # fuse the two pooled branches back to `dim` channels
        fused = self.p_bn1(self.p_conv1(branch1 + branch2))
        # 1x1 skip connection straight from the input
        skip = self.bn1(self.conv1(x))
        # residual-style merge, then a final 3x3 convolution
        return self.conv2(self.relu1(fused + skip))
class tl_pool(pool):
    """Top-left corner pooling: combines Top and Left directional pooling."""
    def __init__(self, dim):
        super(tl_pool, self).__init__(dim, TopPool, LeftPool)
class br_pool(pool):
    """Bottom-right corner pooling: combines Bottom and Right directional pooling."""
    def __init__(self, dim):
        super(br_pool, self).__init__(dim, BottomPool, RightPool)
def make_tl_layer(dim):
    """Factory for the top-left corner-pooling layer used by the kp backbone."""
    return tl_pool(dim)
def make_br_layer(dim):
    """Factory for the bottom-right corner-pooling layer used by the kp backbone."""
    return br_pool(dim)
def make_pool_layer(dim):
    """Return an identity stage: downsampling is done by strided convs in
    make_hg_layer instead of pooling."""
    return nn.Sequential()
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
    """Build one downsampling hourglass stage: a single stride-2 layer
    (dim0 -> dim1) followed by ``mod - 1`` stride-1 layers (dim1 -> dim1)."""
    stage = [layer(kernel, dim0, dim1, stride=2)]
    for _ in range(mod - 1):
        stage.append(layer(kernel, dim1, dim1))
    return nn.Sequential(*stage)
class model(kp):
    """CornerNet: 104-layer stacked hourglass (2 stacks, depth 5) with
    top-left / bottom-right corner-pooling heads.

    out_dim = 80 output channels — presumably COCO's 80 classes (the
    debugger elsewhere also uses num_classes=80).
    """
    def __init__(self, db):
        # `db` (the dataset wrapper) is accepted for interface compatibility
        # but not used here.
        n = 5
        dims = [256, 256, 384, 384, 384, 512]
        modules = [2, 2, 2, 2, 2, 4]
        out_dim = 80
        super(model, self).__init__(
            n, 2, dims, modules, out_dim,
            make_tl_layer=make_tl_layer,
            make_br_layer=make_br_layer,
            make_pool_layer=make_pool_layer,
            make_hg_layer=make_hg_layer,
            kp_layer=residual, cnv_dim=256
        )
# Module-level loss instance — presumably picked up by the training harness
# alongside `model`; verify against the trainer.
loss = AELoss(pull_weight=1e-1, push_weight=1e-1, focal_loss=_neg_loss)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/ExtremeNet.py | Python | import torch
import torch.nn as nn
from .py_utils import exkp, CTLoss, _neg_loss, convolution, residual
def make_pool_layer(dim):
    """Return an identity stage: downsampling is done by strided convs in
    make_hg_layer instead of pooling."""
    return nn.Sequential()
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
    """Build one downsampling hourglass stage: a stride-2 layer (dim0 -> dim1)
    followed by ``mod - 1`` stride-1 layers (dim1 -> dim1)."""
    layers  = [layer(kernel, dim0, dim1, stride=2)]
    layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
    return nn.Sequential(*layers)
class model(exkp):
    """ExtremeNet: stacked hourglass (2 stacks, depth 5) predicting extreme
    points + center heatmaps; no corner-pooling heads (make_tl_layer and
    make_br_layer are None).

    out_dim = 80 output channels — presumably COCO's 80 classes.
    """
    def __init__(self, db):
        # `db` (the dataset wrapper) is accepted for interface compatibility
        # but not used here.
        n = 5
        dims = [256, 256, 384, 384, 384, 512]
        modules = [2, 2, 2, 2, 2, 4]
        out_dim = 80
        super(model, self).__init__(
            n, 2, dims, modules, out_dim,
            make_tl_layer=None,
            make_br_layer=None,
            make_pool_layer=make_pool_layer,
            make_hg_layer=make_hg_layer,
            kp_layer=residual, cnv_dim=256
        )
# Module-level loss instance — presumably picked up by the training harness
# alongside `model`; verify against the trainer.
loss = CTLoss(focal_loss=_neg_loss)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/__init__.py | Python | from .kp import kp, AELoss
from .exkp import exkp, CTLoss
from .kp_utils import _neg_loss
from .utils import convolution, fully_connected, residual
# Un-comment this line if you want to run CornerNet
# from ._cpools import TopPool, BottomPool, LeftPool, RightPool
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/__init__.py | Python | import torch
from torch import nn
from torch.autograd import Function
import top_pool, bottom_pool, left_pool, right_pool
class TopPoolFunction(Function):
    """Autograd wrapper around the compiled top_pool C++ op."""
    @staticmethod
    def forward(ctx, input):
        output = top_pool.forward(input)[0]
        ctx.save_for_backward(input)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed in modern PyTorch releases.
        input, = ctx.saved_tensors
        output = top_pool.backward(input, grad_output)[0]
        return output
class BottomPoolFunction(Function):
    """Autograd wrapper around the compiled bottom_pool C++ op."""
    @staticmethod
    def forward(ctx, input):
        output = bottom_pool.forward(input)[0]
        ctx.save_for_backward(input)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed in modern PyTorch releases.
        input, = ctx.saved_tensors
        output = bottom_pool.backward(input, grad_output)[0]
        return output
class LeftPoolFunction(Function):
    """Autograd wrapper around the compiled left_pool C++ op."""
    @staticmethod
    def forward(ctx, input):
        output = left_pool.forward(input)[0]
        ctx.save_for_backward(input)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed in modern PyTorch releases.
        input, = ctx.saved_tensors
        output = left_pool.backward(input, grad_output)[0]
        return output
class RightPoolFunction(Function):
    """Autograd wrapper around the compiled right_pool C++ op."""
    @staticmethod
    def forward(ctx, input):
        output = right_pool.forward(input)[0]
        ctx.save_for_backward(input)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed in modern PyTorch releases.
        input, = ctx.saved_tensors
        output = right_pool.backward(input, grad_output)[0]
        return output
class TopPool(nn.Module):
    """nn.Module wrapper so top pooling composes with other layers."""
    def forward(self, x):
        return TopPoolFunction.apply(x)
class BottomPool(nn.Module):
    """nn.Module wrapper so bottom pooling composes with other layers."""
    def forward(self, x):
        return BottomPoolFunction.apply(x)
class LeftPool(nn.Module):
    """nn.Module wrapper so left pooling composes with other layers."""
    def forward(self, x):
        return LeftPoolFunction.apply(x)
class RightPool(nn.Module):
    """nn.Module wrapper so right pooling composes with other layers."""
    def forward(self, x):
        return RightPoolFunction.apply(x)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/setup.py | Python | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

# One C++ extension per directional corner-pooling op; each is built from
# the identically named source file under src/.
_POOL_NAMES = ["top_pool", "bottom_pool", "left_pool", "right_pool"]

setup(
    name="cpools",
    ext_modules=[
        CppExtension(name, ["src/{}.cpp".format(name)]) for name in _POOL_NAMES
    ],
    cmdclass={"build_ext": BuildExtension}
)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/src/bottom_pool.cpp | C++ | #include <torch/torch.h>
#include <vector>
// Bottom pooling forward: output[.., y, ..] = max over input[.., 0..y, ..],
// computed as a top-to-bottom running maximum along dim 2 (height).
std::vector<at::Tensor> pool_forward(
    at::Tensor input
) {
    // Initialize output
    at::Tensor output = at::zeros_like(input);
    // Get height
    int64_t height = input.size(2);
    // Copy the first row — the running maximum starts there
    at::Tensor input_temp = input.select(2, 0);
    at::Tensor output_temp = output.select(2, 0);
    output_temp.copy_(input_temp);
    at::Tensor max_temp;
    for (int64_t ind = 0; ind < height - 1; ++ind) {
        // out[ind + 1] = max(in[ind + 1], out[ind])
        input_temp = input.select(2, ind + 1);
        output_temp = output.select(2, ind);
        max_temp = output.select(2, ind + 1);
        at::max_out(max_temp, input_temp, output_temp);
    }
    return {
        output
    };
}
// Bottom pooling backward: replays the top-to-bottom running-max scan,
// tracking the argmax row per (batch, channel, width) position, and routes
// each grad_output row to its argmax via scatter_add_.
// NOTE(review): old-style at::zeros(type, sizes) with a hard-coded CUDA
// backend — CPU inputs will not work here; confirm inputs are CUDA tensors.
std::vector<at::Tensor> pool_backward(
    at::Tensor input,
    at::Tensor grad_output
) {
    auto output = at::zeros_like(input);
    int32_t batch   = input.size(0);
    int32_t channel = input.size(1);
    int32_t height  = input.size(2);
    int32_t width   = input.size(3);
    // per-position running max value and its row index (argmax)
    auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width});
    auto max_ind = at::zeros(torch::CUDA(at::kLong),  {batch, channel, width});
    // scan starts at row 0, which is always its own maximum
    auto input_temp = input.select(2, 0);
    max_val.copy_(input_temp);
    max_ind.fill_(0);
    auto output_temp      = output.select(2, 0);
    auto grad_output_temp = grad_output.select(2, 0);
    output_temp.copy_(grad_output_temp);
    auto un_max_ind = max_ind.unsqueeze(2);
    auto gt_mask    = at::zeros(torch::CUDA(at::kByte),  {batch, channel, width});
    auto max_temp   = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width});
    for (int32_t ind = 0; ind < height - 1; ++ind) {
        input_temp = input.select(2, ind + 1);
        // where the new row beats the running max, update value and argmax
        at::gt_out(gt_mask, input_temp, max_val);
        at::masked_select_out(max_temp, input_temp, gt_mask);
        max_val.masked_scatter_(gt_mask, max_temp);
        max_ind.masked_fill_(gt_mask, ind + 1);
        // accumulate this row's gradient at its argmax row
        grad_output_temp = grad_output.select(2, ind + 1).unsqueeze(2);
        output.scatter_add_(2, un_max_ind, grad_output_temp);
    }
    return {
        output
    };
}
// Expose forward/backward to Python as the `bottom_pool` extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "forward", &pool_forward, "Bottom Pool Forward",
        py::call_guard<py::gil_scoped_release>()
    );
    m.def(
        "backward", &pool_backward, "Bottom Pool Backward",
        py::call_guard<py::gil_scoped_release>()
    );
}
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/src/left_pool.cpp | C++ | #include <torch/torch.h>
#include <vector>
// Left pooling forward: output[.., x] = max over input[.., x..W-1],
// computed as a right-to-left running maximum along dim 3 (width).
std::vector<at::Tensor> pool_forward(
    at::Tensor input
) {
    // Initialize output
    at::Tensor output = at::zeros_like(input);
    // Get width
    int64_t width = input.size(3);
    // Copy the last column — the running maximum starts there
    at::Tensor input_temp = input.select(3, width - 1);
    at::Tensor output_temp = output.select(3, width - 1);
    output_temp.copy_(input_temp);
    at::Tensor max_temp;
    for (int64_t ind = 1; ind < width; ++ind) {
        // out[w - ind - 1] = max(in[w - ind - 1], out[w - ind])
        input_temp = input.select(3, width - ind - 1);
        output_temp = output.select(3, width - ind);
        max_temp = output.select(3, width - ind - 1);
        at::max_out(max_temp, input_temp, output_temp);
    }
    return {
        output
    };
}
// Left pooling backward: replays the right-to-left running-max scan,
// tracking the argmax column per (batch, channel, height) position, and
// routes each grad_output column to its argmax via scatter_add_.
// NOTE(review): old-style at::zeros(type, sizes) with a hard-coded CUDA
// backend — CPU inputs will not work here; confirm inputs are CUDA tensors.
std::vector<at::Tensor> pool_backward(
    at::Tensor input,
    at::Tensor grad_output
) {
    auto output = at::zeros_like(input);
    int32_t batch   = input.size(0);
    int32_t channel = input.size(1);
    int32_t height  = input.size(2);
    int32_t width   = input.size(3);
    // per-position running max value and its column index (argmax)
    auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height});
    auto max_ind = at::zeros(torch::CUDA(at::kLong),  {batch, channel, height});
    // scan starts at the last column, which is always its own maximum
    auto input_temp = input.select(3, width - 1);
    max_val.copy_(input_temp);
    max_ind.fill_(width - 1);
    auto output_temp      = output.select(3, width - 1);
    auto grad_output_temp = grad_output.select(3, width - 1);
    output_temp.copy_(grad_output_temp);
    auto un_max_ind = max_ind.unsqueeze(3);
    auto gt_mask    = at::zeros(torch::CUDA(at::kByte),  {batch, channel, height});
    auto max_temp   = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height});
    for (int32_t ind = 1; ind < width; ++ind) {
        input_temp = input.select(3, width - ind - 1);
        // where the new column beats the running max, update value and argmax
        at::gt_out(gt_mask, input_temp, max_val);
        at::masked_select_out(max_temp, input_temp, gt_mask);
        max_val.masked_scatter_(gt_mask, max_temp);
        max_ind.masked_fill_(gt_mask, width - ind - 1);
        // accumulate this column's gradient at its argmax column
        grad_output_temp = grad_output.select(3, width - ind - 1).unsqueeze(3);
        output.scatter_add_(3, un_max_ind, grad_output_temp);
    }
    return {
        output
    };
}
// Expose forward/backward to Python as the `left_pool` extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "forward", &pool_forward, "Left Pool Forward",
        py::call_guard<py::gil_scoped_release>()
    );
    m.def(
        "backward", &pool_backward, "Left Pool Backward",
        py::call_guard<py::gil_scoped_release>()
    );
}
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/src/right_pool.cpp | C++ | #include <torch/torch.h>
#include <vector>
// Right pooling forward: output[.., x] = max over input[.., 0..x],
// computed as a left-to-right running maximum along dim 3 (width).
std::vector<at::Tensor> pool_forward(
    at::Tensor input
) {
    // Initialize output
    at::Tensor output = at::zeros_like(input);
    // Get width
    int64_t width = input.size(3);
    // Copy the first column — the running maximum starts there
    at::Tensor input_temp = input.select(3, 0);
    at::Tensor output_temp = output.select(3, 0);
    output_temp.copy_(input_temp);
    at::Tensor max_temp;
    for (int64_t ind = 0; ind < width - 1; ++ind) {
        // out[ind + 1] = max(in[ind + 1], out[ind])
        input_temp = input.select(3, ind + 1);
        output_temp = output.select(3, ind);
        max_temp = output.select(3, ind + 1);
        at::max_out(max_temp, input_temp, output_temp);
    }
    return {
        output
    };
}
// Right pooling backward: replays the left-to-right running-max scan,
// tracking the argmax column per (batch, channel, height) position, and
// routes each grad_output column to its argmax via scatter_add_.
// NOTE(review): old-style at::zeros(type, sizes) with a hard-coded CUDA
// backend — CPU inputs will not work here; confirm inputs are CUDA tensors.
std::vector<at::Tensor> pool_backward(
    at::Tensor input,
    at::Tensor grad_output
) {
    at::Tensor output = at::zeros_like(input);
    int32_t batch   = input.size(0);
    int32_t channel = input.size(1);
    int32_t height  = input.size(2);
    int32_t width   = input.size(3);
    // per-position running max value and its column index (argmax)
    auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height});
    auto max_ind = at::zeros(torch::CUDA(at::kLong),  {batch, channel, height});
    // scan starts at column 0, which is always its own maximum
    auto input_temp = input.select(3, 0);
    max_val.copy_(input_temp);
    max_ind.fill_(0);
    auto output_temp      = output.select(3, 0);
    auto grad_output_temp = grad_output.select(3, 0);
    output_temp.copy_(grad_output_temp);
    auto un_max_ind = max_ind.unsqueeze(3);
    auto gt_mask    = at::zeros(torch::CUDA(at::kByte),  {batch, channel, height});
    auto max_temp   = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height});
    for (int32_t ind = 0; ind < width - 1; ++ind) {
        input_temp = input.select(3, ind + 1);
        // where the new column beats the running max, update value and argmax
        at::gt_out(gt_mask, input_temp, max_val);
        at::masked_select_out(max_temp, input_temp, gt_mask);
        max_val.masked_scatter_(gt_mask, max_temp);
        max_ind.masked_fill_(gt_mask, ind + 1);
        // accumulate this column's gradient at its argmax column
        grad_output_temp = grad_output.select(3, ind + 1).unsqueeze(3);
        output.scatter_add_(3, un_max_ind, grad_output_temp);
    }
    return {
        output
    };
}
// Expose forward/backward to Python as the `right_pool` extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "forward", &pool_forward, "Right Pool Forward",
        py::call_guard<py::gil_scoped_release>()
    );
    m.def(
        "backward", &pool_backward, "Right Pool Backward",
        py::call_guard<py::gil_scoped_release>()
    );
}
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/_cpools/src/top_pool.cpp | C++ | #include <torch/torch.h>
#include <vector>
// Top pooling forward: output[.., y, ..] = max over input[.., y..H-1, ..],
// computed as a bottom-to-top running maximum along dim 2 (height).
std::vector<at::Tensor> top_pool_forward(
    at::Tensor input
) {
    // Initialize output
    at::Tensor output = at::zeros_like(input);
    // Get height
    int64_t height = input.size(2);
    // Copy the last row — the running maximum starts there
    at::Tensor input_temp = input.select(2, height - 1);
    at::Tensor output_temp = output.select(2, height - 1);
    output_temp.copy_(input_temp);
    at::Tensor max_temp;
    for (int64_t ind = 1; ind < height; ++ind) {
        // out[h - ind - 1] = max(in[h - ind - 1], out[h - ind])
        input_temp = input.select(2, height - ind - 1);
        output_temp = output.select(2, height - ind);
        max_temp = output.select(2, height - ind - 1);
        at::max_out(max_temp, input_temp, output_temp);
    }
    return {
        output
    };
}
// Top pooling backward: replays the bottom-to-top running-max scan,
// tracking the argmax row per (batch, channel, width) position, and routes
// each grad_output row to its argmax via scatter_add_.
// NOTE(review): old-style at::zeros(type, sizes) with a hard-coded CUDA
// backend — CPU inputs will not work here; confirm inputs are CUDA tensors.
std::vector<at::Tensor> top_pool_backward(
    at::Tensor input,
    at::Tensor grad_output
) {
    auto output = at::zeros_like(input);
    int32_t batch   = input.size(0);
    int32_t channel = input.size(1);
    int32_t height  = input.size(2);
    int32_t width   = input.size(3);
    // per-position running max value and its row index (argmax)
    auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width});
    auto max_ind = at::zeros(torch::CUDA(at::kLong),  {batch, channel, width});
    // scan starts at the last row, which is always its own maximum
    auto input_temp = input.select(2, height - 1);
    max_val.copy_(input_temp);
    max_ind.fill_(height - 1);
    auto output_temp      = output.select(2, height - 1);
    auto grad_output_temp = grad_output.select(2, height - 1);
    output_temp.copy_(grad_output_temp);
    auto un_max_ind = max_ind.unsqueeze(2);
    auto gt_mask    = at::zeros(torch::CUDA(at::kByte),  {batch, channel, width});
    auto max_temp   = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width});
    for (int32_t ind = 1; ind < height; ++ind) {
        input_temp = input.select(2, height - ind - 1);
        // where the new row beats the running max, update value and argmax
        at::gt_out(gt_mask, input_temp, max_val);
        at::masked_select_out(max_temp, input_temp, gt_mask);
        max_val.masked_scatter_(gt_mask, max_temp);
        max_ind.masked_fill_(gt_mask, height - ind - 1);
        // accumulate this row's gradient at its argmax row
        grad_output_temp = grad_output.select(2, height - ind - 1).unsqueeze(2);
        output.scatter_add_(2, un_max_ind, grad_output_temp);
    }
    return {
        output
    };
}
// Expose forward/backward to Python as the `top_pool` extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "forward", &top_pool_forward, "Top Pool Forward",
        py::call_guard<py::gil_scoped_release>()
    );
    m.def(
        "backward", &top_pool_backward, "Top Pool Backward",
        py::call_guard<py::gil_scoped_release>()
    );
}
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/data_parallel.py | Python | import torch
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from .scatter_gather import scatter_kwargs
class DataParallel(Module):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.

    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is the
    same size (so that each GPU processes the same number of samples).

    See also: :ref:`cuda-nn-dataparallel-instead`

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All variables will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.

    This variant extends torch's stock DataParallel with ``chunk_sizes``:
    optional per-device batch sizes passed through to ``scatter_kwargs`` so
    devices can receive unequal chunks.

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])
        chunk_sizes: optional list of per-device batch sizes (default: split
            evenly)

    Example::

        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
    def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
        super(DataParallel, self).__init__()
        # CPU-only fallback: behave as a transparent wrapper
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        self.chunk_sizes = chunk_sizes
        self.output_device = output_device
        if len(self.device_ids) == 1:
            self.module.cuda(device_ids[0])

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        # single-device fast path: no replication needed
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def replicate(self, module, device_ids):
        return replicate(module, device_ids)

    def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
        # NOTE(review): the `chunk_sizes` parameter is ignored in favour of
        # self.chunk_sizes — harmless for forward() (which passes the same
        # value) but confusing for external callers; confirm intent.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes)

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output  Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # single-device fast path: run the module directly, no replication
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    # scatter may produce fewer shards than devices; only replicate onto
    # the devices that actually received input
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/exkp.py | Python | import numpy as np
import torch
import torch.nn as nn
from .utils import convolution, residual
from .utils import make_layer, make_layer_revr
from .kp_utils import _tranpose_and_gather_feat, _exct_decode
from .kp_utils import _sigmoid, _regr_loss, _neg_loss
from .kp_utils import make_kp_layer
from .kp_utils import make_pool_layer, make_unpool_layer
from .kp_utils import make_merge_layer, make_inter_layer, make_cnv_layer
from .kp_utils import _h_aggregate, _v_aggregate
from utils.debugger import Debugger
class kp_module(nn.Module):
    """One recursive hourglass level.

    Each level has an identity-resolution branch (up1) and a downsampled
    branch (low1 -> low2 -> low3 -> up2); low2 is a nested kp_module until
    the recursion depth ``n`` reaches 1, at which point it is a plain layer
    stack.  ``dims``/``modules`` list the channel width and block count at
    this level and every level below it.
    """
    def __init__(
        self, n, dims, modules, layer=residual,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, **kwargs
    ):
        super(kp_module, self).__init__()

        self.n = n

        curr_mod = modules[0]
        next_mod = modules[1]

        curr_dim = dims[0]
        next_dim = dims[1]

        # identity-resolution branch
        self.up1 = make_up_layer(
            3, curr_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.max1 = make_pool_layer(curr_dim)
        # downsampling branch (curr_dim -> next_dim)
        self.low1 = make_hg_layer(
            3, curr_dim, next_dim, curr_mod,
            layer=layer, **kwargs
        )
        # recurse until n == 1, then use a flat layer stack
        self.low2 = kp_module(
            n - 1, dims[1:], modules[1:], layer=layer,
            make_up_layer=make_up_layer,
            make_low_layer=make_low_layer,
            make_hg_layer=make_hg_layer,
            make_hg_layer_revr=make_hg_layer_revr,
            make_pool_layer=make_pool_layer,
            make_unpool_layer=make_unpool_layer,
            make_merge_layer=make_merge_layer,
            **kwargs
        ) if self.n > 1 else \
        make_low_layer(
            3, next_dim, next_dim, next_mod,
            layer=layer, **kwargs
        )
        # upsampling path back to curr_dim and this level's resolution
        self.low3 = make_hg_layer_revr(
            3, next_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.up2 = make_unpool_layer(curr_dim)

        self.merge = make_merge_layer(curr_dim)

    def forward(self, x):
        up1 = self.up1(x)
        max1 = self.max1(x)
        low1 = self.low1(max1)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        up2 = self.up2(low3)
        # merge the skip branch with the upsampled deep branch
        return self.merge(up1, up2)
class exkp(nn.Module):
    """Stacked-hourglass extreme-point network.

    Each of the ``nstack`` hourglasses predicts five heatmaps (top, left,
    bottom, right extreme points and the center) plus four 2-channel offset
    regression maps.  Intermediate supervision is applied between stacks.
    """
    def __init__(
        self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
        make_tl_layer=None, make_br_layer=None,
        make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
        make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
        kp_layer=residual
    ):
        super(exkp, self).__init__()

        self.nstack = nstack
        self._decode = _exct_decode

        curr_dim = dims[0]

        # stem: 4x total downsampling before the hourglass stacks
        self.pre = nn.Sequential(
            convolution(7, 3, 128, stride=2),
            residual(3, 128, 256, stride=2)
        ) if pre is None else pre

        self.kps = nn.ModuleList([
            kp_module(
                n, dims, modules, layer=kp_layer,
                make_up_layer=make_up_layer,
                make_low_layer=make_low_layer,
                make_hg_layer=make_hg_layer,
                make_hg_layer_revr=make_hg_layer_revr,
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(nstack)
        ])
        self.cnvs = nn.ModuleList([
            make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
        ])

        ## keypoint heatmaps (one head per stack and per extreme direction)
        self.t_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])
        self.l_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])
        self.b_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])
        self.r_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])
        self.ct_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])

        # bias init so the initial sigmoid output is small — presumably the
        # focal-loss prior trick (sigmoid(-2.19) ~ 0.1); confirm if retuning
        for t_heat, l_heat, b_heat, r_heat, ct_heat in \
          zip(self.t_heats, self.l_heats, self.b_heats, \
              self.r_heats, self.ct_heats):
            t_heat[-1].bias.data.fill_(-2.19)
            l_heat[-1].bias.data.fill_(-2.19)
            b_heat[-1].bias.data.fill_(-2.19)
            r_heat[-1].bias.data.fill_(-2.19)
            ct_heat[-1].bias.data.fill_(-2.19)

        # inter-stack feature fusion layers
        self.inters = nn.ModuleList([
            make_inter_layer(curr_dim) for _ in range(nstack - 1)
        ])

        self.inters_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])
        self.cnvs_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])

        # per-direction 2-channel sub-pixel offset regressors
        self.t_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])
        self.l_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])
        self.b_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])
        self.r_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])

        self.relu = nn.ReLU(inplace=True)

    def _train(self, *xs):
        """Training path: returns per-stack heatmaps plus offsets gathered at
        the ground-truth extreme-point indices (t/l/b/r_inds)."""
        image = xs[0]
        t_inds = xs[1]
        l_inds = xs[2]
        b_inds = xs[3]
        r_inds = xs[4]

        inter = self.pre(image)
        outs = []

        layers = zip(
            self.kps, self.cnvs,
            self.t_heats, self.l_heats, self.b_heats, self.r_heats,
            self.ct_heats,
            self.t_regrs, self.l_regrs, self.b_regrs, self.r_regrs,
        )
        for ind, layer in enumerate(layers):
            kp_, cnv_ = layer[0:2]
            t_heat_, l_heat_, b_heat_, r_heat_ = layer[2:6]
            ct_heat_ = layer[6]
            t_regr_, l_regr_, b_regr_, r_regr_ = layer[7:11]

            kp = kp_(inter)
            cnv = cnv_(kp)

            t_heat, l_heat = t_heat_(cnv), l_heat_(cnv)
            b_heat, r_heat = b_heat_(cnv), r_heat_(cnv)
            ct_heat = ct_heat_(cnv)

            t_regr, l_regr = t_regr_(cnv), l_regr_(cnv)
            b_regr, r_regr = b_regr_(cnv), r_regr_(cnv)

            # gather offset predictions at the ground-truth locations only
            t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
            l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
            b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
            r_regr = _tranpose_and_gather_feat(r_regr, r_inds)

            outs += [t_heat, l_heat, b_heat, r_heat, ct_heat, \
                     t_regr, l_regr, b_regr, r_regr]

            # fuse this stack's features into the next stack's input
            if ind < self.nstack - 1:
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)
        return outs

    def _test(self, *xs, **kwargs):
        """Inference path: runs all stacks but only decodes detections from
        the last stack's outputs via ``_exct_decode``."""
        image = xs[0]

        inter = self.pre(image)

        outs = []

        layers = zip(
            self.kps, self.cnvs,
            self.t_heats, self.l_heats, self.b_heats, self.r_heats,
            self.ct_heats,
            self.t_regrs, self.l_regrs, self.b_regrs, self.r_regrs,
        )
        for ind, layer in enumerate(layers):
            kp_, cnv_ = layer[0:2]
            t_heat_, l_heat_, b_heat_, r_heat_ = layer[2:6]
            ct_heat_ = layer[6]
            t_regr_, l_regr_, b_regr_, r_regr_ = layer[7:11]

            kp = kp_(inter)
            cnv = cnv_(kp)

            # only the last stack's heads are evaluated at test time
            if ind == self.nstack - 1:
                t_heat, l_heat = t_heat_(cnv), l_heat_(cnv)
                b_heat, r_heat = b_heat_(cnv), r_heat_(cnv)
                ct_heat = ct_heat_(cnv)

                t_regr, l_regr = t_regr_(cnv), l_regr_(cnv)
                b_regr, r_regr = b_regr_(cnv), r_regr_(cnv)

                outs += [t_heat, l_heat, b_heat, r_heat, ct_heat,
                         t_regr, l_regr, b_regr, r_regr]

            if ind < self.nstack - 1:
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)

        if kwargs['debug']:
            _debug(image, t_heat, l_heat, b_heat, r_heat, ct_heat)
        del kwargs['debug']
        return self._decode(*outs[-9:], **kwargs)

    def forward(self, *xs, **kwargs):
        # more than one positional argument => ground-truth indices supplied
        # => training mode; otherwise run inference decoding
        if len(xs) > 1:
            return self._train(*xs, **kwargs)
        return self._test(*xs, **kwargs)
class CTLoss(nn.Module):
    """ExtremeNet training loss: focal loss over the five heatmaps plus a
    regression loss on the four extreme-point offsets, averaged over stacks.

    ``outs`` is the flat list produced by exkp._train (9 tensors per stack,
    hence the stride-9 slicing); ``targets`` packs the ground-truth heatmaps,
    a validity mask, and the offset targets.
    """
    def __init__(self, regr_weight=1, focal_loss=_neg_loss):
        super(CTLoss, self).__init__()

        self.regr_weight = regr_weight
        self.focal_loss  = focal_loss
        self.regr_loss   = _regr_loss

    def forward(self, outs, targets):
        # de-interleave the per-stack outputs (9 tensors per stack)
        stride = 9

        t_heats = outs[0::stride]
        l_heats = outs[1::stride]
        b_heats = outs[2::stride]
        r_heats = outs[3::stride]
        ct_heats = outs[4::stride]
        t_regrs = outs[5::stride]
        l_regrs = outs[6::stride]
        b_regrs = outs[7::stride]
        r_regrs = outs[8::stride]

        gt_t_heat = targets[0]
        gt_l_heat = targets[1]
        gt_b_heat = targets[2]
        gt_r_heat = targets[3]
        gt_ct_heat = targets[4]
        gt_mask = targets[5]
        gt_t_regr = targets[6]
        gt_l_regr = targets[7]
        gt_b_regr = targets[8]
        gt_r_regr = targets[9]

        # focal loss
        focal_loss = 0

        # heads output logits; squash to probabilities before the focal loss
        t_heats = [_sigmoid(t) for t in t_heats]
        l_heats = [_sigmoid(l) for l in l_heats]
        b_heats = [_sigmoid(b) for b in b_heats]
        r_heats = [_sigmoid(r) for r in r_heats]
        ct_heats = [_sigmoid(ct) for ct in ct_heats]

        focal_loss += self.focal_loss(t_heats, gt_t_heat)
        focal_loss += self.focal_loss(l_heats, gt_l_heat)
        focal_loss += self.focal_loss(b_heats, gt_b_heat)
        focal_loss += self.focal_loss(r_heats, gt_r_heat)
        focal_loss += self.focal_loss(ct_heats, gt_ct_heat)

        # regression loss (masked by gt_mask — valid keypoints only)
        regr_loss = 0
        for t_regr, l_regr, b_regr, r_regr in \
          zip(t_regrs, l_regrs, b_regrs, r_regrs):
            regr_loss += self.regr_loss(t_regr, gt_t_regr, gt_mask)
            regr_loss += self.regr_loss(l_regr, gt_l_regr, gt_mask)
            regr_loss += self.regr_loss(b_regr, gt_b_regr, gt_mask)
            regr_loss += self.regr_loss(r_regr, gt_r_regr, gt_mask)
        regr_loss = self.regr_weight * regr_loss

        # average over stacks; unsqueeze so DataParallel can gather losses
        loss = (focal_loss + regr_loss) / len(t_heats)
        return loss.unsqueeze(0)
def _debug(image, t_heat, l_heat, b_heat, r_heat, ct_heat):
    """Visualize the five predicted heatmaps blended over the input image.

    Only the first sample of the batch (k = 0) is shown.
    """
    debugger = Debugger(num_classes=80)
    k = 0  # batch element to visualize

    # Same post-processing as decoding: sigmoid + edge aggregation.
    t_heat = torch.sigmoid(t_heat)
    l_heat = torch.sigmoid(l_heat)
    b_heat = torch.sigmoid(b_heat)
    r_heat = torch.sigmoid(r_heat)

    aggr_weight = 0.1
    t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
    l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
    b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
    r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
    # Aggregation can push scores above 1; cap for display.
    t_heat[t_heat > 1] = 1
    l_heat[l_heat > 1] = 1
    b_heat[b_heat > 1] = 1
    r_heat[r_heat > 1] = 1

    ct_heat = torch.sigmoid(ct_heat)

    t_hm = debugger.gen_colormap(t_heat[k].cpu().data.numpy())
    l_hm = debugger.gen_colormap(l_heat[k].cpu().data.numpy())
    b_hm = debugger.gen_colormap(b_heat[k].cpu().data.numpy())
    r_hm = debugger.gen_colormap(r_heat[k].cpu().data.numpy())
    ct_hm = debugger.gen_colormap(ct_heat[k].cpu().data.numpy())

    # Combine the four extreme-point maps into a single overlay.
    hms = np.maximum(np.maximum(t_hm, l_hm),
                     np.maximum(b_hm, r_hm))
    # debugger.add_img(hms, 'hms')
    if image is not None:
        # Undo the dataset normalization (per-channel mean/std) for display.
        mean = np.array([0.40789654, 0.44719302, 0.47026115],
                        dtype=np.float32).reshape(3, 1, 1)
        std = np.array([0.28863828, 0.27408164, 0.27809835],
                       dtype=np.float32).reshape(3, 1, 1)
        img = (image[k].cpu().data.numpy() * std + mean) * 255
        img = img.astype(np.uint8).transpose(1, 2, 0)
        debugger.add_img(img, 'img')
        # debugger.add_blend_img(img, t_hm, 't_hm')
        # debugger.add_blend_img(img, l_hm, 'l_hm')
        # debugger.add_blend_img(img, b_hm, 'b_hm')
        # debugger.add_blend_img(img, r_hm, 'r_hm')
        debugger.add_blend_img(img, hms, 'extreme')
        debugger.add_blend_img(img, ct_hm, 'center')
    debugger.show_all_imgs(pause=False)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/kp.py | Python | import numpy as np
import torch
import torch.nn as nn
from .utils import convolution, residual
from .utils import make_layer, make_layer_revr
from .kp_utils import _tranpose_and_gather_feat, _decode
from .kp_utils import _sigmoid, _ae_loss, _regr_loss, _neg_loss
from .kp_utils import make_tl_layer, make_br_layer, make_kp_layer
from .kp_utils import make_pool_layer, make_unpool_layer
from .kp_utils import make_merge_layer, make_inter_layer, make_cnv_layer
class kp_module(nn.Module):
    """One hourglass level, defined recursively.

    `up1` processes the input at full resolution (skip branch); the low
    branch pools 2x, runs `low1`, recurses through `low2` (a nested
    kp_module until n == 1), runs `low3`, upsamples back, and `merge`
    combines the two paths.  `dims`/`modules` give the channel count and
    block count per level, consumed one entry per recursion step.
    """
    def __init__(
        self, n, dims, modules, layer=residual,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, **kwargs
    ):
        super(kp_module, self).__init__()

        self.n = n  # remaining recursion depth

        curr_mod = modules[0]
        next_mod = modules[1]

        curr_dim = dims[0]
        next_dim = dims[1]

        # Full-resolution (skip) branch.
        self.up1 = make_up_layer(
            3, curr_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        # 2x downsampling, then the descending half of the hourglass.
        self.max1 = make_pool_layer(curr_dim)
        self.low1 = make_hg_layer(
            3, curr_dim, next_dim, curr_mod,
            layer=layer, **kwargs
        )
        # Recurse one level deeper, or bottom out with a plain layer stack.
        self.low2 = kp_module(
            n - 1, dims[1:], modules[1:], layer=layer,
            make_up_layer=make_up_layer,
            make_low_layer=make_low_layer,
            make_hg_layer=make_hg_layer,
            make_hg_layer_revr=make_hg_layer_revr,
            make_pool_layer=make_pool_layer,
            make_unpool_layer=make_unpool_layer,
            make_merge_layer=make_merge_layer,
            **kwargs
        ) if self.n > 1 else \
        make_low_layer(
            3, next_dim, next_dim, next_mod,
            layer=layer, **kwargs
        )
        # Ascending half: back to curr_dim, then upsample 2x and merge.
        self.low3 = make_hg_layer_revr(
            3, next_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.up2 = make_unpool_layer(curr_dim)

        self.merge = make_merge_layer(curr_dim)

    def forward(self, x):
        up1 = self.up1(x)       # skip branch, full resolution
        max1 = self.max1(x)     # 2x pooled copy
        low1 = self.low1(max1)
        low2 = self.low2(low1)  # recursive hourglass core
        low3 = self.low3(low2)
        up2 = self.up2(low3)    # back to full resolution
        return self.merge(up1, up2)
class kp(nn.Module):
    """Stacked-hourglass corner/keypoint network (CornerNet-style).

    Each of the `nstack` hourglasses feeds a conv layer whose output drives
    three heads per corner branch: class heatmaps (`out_dim` channels),
    1-channel associative-embedding tags, and 2-channel sub-pixel offsets.
    All stacks are supervised during training; only the last one is decoded
    at test time.
    """
    def __init__(
        self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
        make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
        make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
        make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
        kp_layer=residual
    ):
        super(kp, self).__init__()

        self.nstack = nstack
        self._decode = _decode

        curr_dim = dims[0]

        # Stem: 7x7/s2 conv + s2 residual -> input downsampled 4x.
        self.pre = nn.Sequential(
            convolution(7, 3, 128, stride=2),
            residual(3, 128, 256, stride=2)
        ) if pre is None else pre

        # One hourglass per stack.
        self.kps = nn.ModuleList([
            kp_module(
                n, dims, modules, layer=kp_layer,
                make_up_layer=make_up_layer,
                make_low_layer=make_low_layer,
                make_hg_layer=make_hg_layer,
                make_hg_layer_revr=make_hg_layer_revr,
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(nstack)
        ])
        # Post-hourglass conv shared by all heads of a stack.
        self.cnvs = nn.ModuleList([
            make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
        ])

        # Per-corner feature modules (e.g. corner pooling).
        # NOTE(review): the default factories return None — presumably every
        # concrete model overrides make_tl_layer/make_br_layer; confirm.
        self.tl_cnvs = nn.ModuleList([
            make_tl_layer(cnv_dim) for _ in range(nstack)
        ])
        self.br_cnvs = nn.ModuleList([
            make_br_layer(cnv_dim) for _ in range(nstack)
        ])

        ## keypoint heatmaps
        self.tl_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])
        self.br_heats = nn.ModuleList([
            make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
        ])

        ## tags
        self.tl_tags = nn.ModuleList([
            make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
        ])
        self.br_tags = nn.ModuleList([
            make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
        ])

        # Bias the heatmap logits towards background at init
        # (sigmoid(-2.19) ~= 0.1) so the focal loss starts out stable.
        for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
            tl_heat[-1].bias.data.fill_(-2.19)
            br_heat[-1].bias.data.fill_(-2.19)

        # Inter-stack trunk: the previous stack's features are mixed back
        # into the backbone before the next hourglass (nstack - 1 links).
        self.inters = nn.ModuleList([
            make_inter_layer(curr_dim) for _ in range(nstack - 1)
        ])
        self.inters_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])
        self.cnvs_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])

        ## sub-pixel offset regressions
        self.tl_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])
        self.br_regrs = nn.ModuleList([
            make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
        ])

        self.relu = nn.ReLU(inplace=True)

    def _train(self, *xs):
        # xs = (image, GT top-left corner indices, GT bottom-right indices);
        # tags/offsets are gathered at the ground-truth corner locations.
        image = xs[0]
        tl_inds = xs[1]
        br_inds = xs[2]

        inter = self.pre(image)
        outs = []

        layers = zip(
            self.kps, self.cnvs,
            self.tl_cnvs, self.br_cnvs,
            self.tl_heats, self.br_heats,
            self.tl_tags, self.br_tags,
            self.tl_regrs, self.br_regrs
        )
        for ind, layer in enumerate(layers):
            kp_, cnv_ = layer[0:2]
            tl_cnv_, br_cnv_ = layer[2:4]
            tl_heat_, br_heat_ = layer[4:6]
            tl_tag_, br_tag_ = layer[6:8]
            tl_regr_, br_regr_ = layer[8:10]

            kp = kp_(inter)
            cnv = cnv_(kp)

            tl_cnv = tl_cnv_(cnv)
            br_cnv = br_cnv_(cnv)

            tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
            tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
            tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)

            # Keep only the predictions at the ground-truth corner pixels.
            tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
            br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
            tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
            br_regr = _tranpose_and_gather_feat(br_regr, br_inds)

            outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]

            # Mix this stack's output back into the trunk for the next stack.
            if ind < self.nstack - 1:
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)
        return outs

    def _test(self, *xs, **kwargs):
        image = xs[0]

        inter = self.pre(image)
        outs = []

        layers = zip(
            self.kps, self.cnvs,
            self.tl_cnvs, self.br_cnvs,
            self.tl_heats, self.br_heats,
            self.tl_tags, self.br_tags,
            self.tl_regrs, self.br_regrs
        )
        for ind, layer in enumerate(layers):
            kp_, cnv_ = layer[0:2]
            tl_cnv_, br_cnv_ = layer[2:4]
            tl_heat_, br_heat_ = layer[4:6]
            tl_tag_, br_tag_ = layer[6:8]
            tl_regr_, br_regr_ = layer[8:10]

            kp = kp_(inter)
            cnv = cnv_(kp)

            # Only the last stack's heads are evaluated at test time.
            if ind == self.nstack - 1:
                tl_cnv = tl_cnv_(cnv)
                br_cnv = br_cnv_(cnv)

                tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
                tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
                tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)

                outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]

            if ind < self.nstack - 1:
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)

        # Decode the final stack's 6 tensors into detections.
        return self._decode(*outs[-6:], **kwargs)

    def forward(self, *xs, **kwargs):
        # More than one positional input => training (GT indices supplied).
        if len(xs) > 1:
            return self._train(*xs, **kwargs)
        return self._test(*xs, **kwargs)
class AELoss(nn.Module):
    """CornerNet loss.

    Focal loss on the corner heatmaps, associative-embedding pull/push
    losses on the corner tags, and smooth-L1 loss on the corner offsets,
    averaged over the hourglass stacks.
    """
    def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss):
        super(AELoss, self).__init__()

        self.pull_weight = pull_weight  # weight of the tag grouping term
        self.push_weight = push_weight  # weight of the tag separation term
        self.regr_weight = regr_weight  # weight of the offset term
        self.focal_loss = focal_loss
        self.ae_loss = _ae_loss
        self.regr_loss = _regr_loss

    def forward(self, outs, targets):
        # Each stack contributes 6 interleaved tensors to `outs`.
        stride = 6
        tl_heats = outs[0::stride]
        br_heats = outs[1::stride]
        tl_tags = outs[2::stride]
        br_tags = outs[3::stride]
        tl_regrs = outs[4::stride]
        br_regrs = outs[5::stride]

        gt_tl_heat = targets[0]
        gt_br_heat = targets[1]
        gt_mask = targets[2]    # valid-object mask for tag/offset losses
        gt_tl_regr = targets[3]
        gt_br_regr = targets[4]

        # focal loss
        focal_loss = 0

        # Clamped sigmoid keeps log() inside the focal loss finite.
        tl_heats = [_sigmoid(t) for t in tl_heats]
        br_heats = [_sigmoid(b) for b in br_heats]

        focal_loss += self.focal_loss(tl_heats, gt_tl_heat)
        focal_loss += self.focal_loss(br_heats, gt_br_heat)

        # tag loss: pull matching corners together, push objects apart
        pull_loss = 0
        push_loss = 0
        for tl_tag, br_tag in zip(tl_tags, br_tags):
            pull, push = self.ae_loss(tl_tag, br_tag, gt_mask)
            pull_loss += pull
            push_loss += push
        pull_loss = self.pull_weight * pull_loss
        push_loss = self.push_weight * push_loss

        # offset regression loss
        regr_loss = 0
        for tl_regr, br_regr in zip(tl_regrs, br_regrs):
            regr_loss += self.regr_loss(tl_regr, gt_tl_regr, gt_mask)
            regr_loss += self.regr_loss(br_regr, gt_br_regr, gt_mask)
        regr_loss = self.regr_weight * regr_loss

        # Average over stacks; shape (1,) for DataParallel concatenation.
        loss = (focal_loss + pull_loss + push_loss + regr_loss) / len(tl_heats)
        return loss.unsqueeze(0)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/kp_utils.py | Python | import torch
import torch.nn as nn
from .utils import convolution, residual
class MergeUp(nn.Module):
    """Merge the skip branch and the upsampled branch by element-wise sum."""

    def forward(self, up1, up2):
        merged = up1 + up2
        return merged
def make_merge_layer(dim):
    # `dim` is unused: element-wise addition needs no channel count, but the
    # argument keeps this factory's signature uniform with the other make_* hooks.
    return MergeUp()
def make_tl_layer(dim):
    # Placeholder for an extra top-left corner module (e.g. corner pooling).
    # NOTE(review): returning None would break the default forward path —
    # presumably concrete models always override this factory; confirm.
    return None
def make_br_layer(dim):
    # Placeholder for an extra bottom-right corner module (e.g. corner pooling).
    # NOTE(review): presumably overridden by concrete models — confirm.
    return None
def make_pool_layer(dim):
    """Return the 2x spatial downsampling op used inside the hourglass
    (`dim` is accepted for interface uniformity but unused)."""
    pool = nn.MaxPool2d(kernel_size=2, stride=2)
    return pool
def make_unpool_layer(dim):
    """Return the 2x spatial upsampling op (`dim` unused, kept for
    interface uniformity)."""
    up = nn.Upsample(scale_factor=2)
    return up
def make_kp_layer(cnv_dim, curr_dim, out_dim):
    # Prediction head: 3x3 conv (no BN) followed by a 1x1 conv mapping to
    # the desired number of output channels (heatmap classes, tag dims, ...).
    return nn.Sequential(
        convolution(3, cnv_dim, curr_dim, with_bn=False),
        nn.Conv2d(curr_dim, out_dim, (1, 1))
    )
def make_inter_layer(dim):
    # Residual block carrying trunk features between hourglass stacks.
    return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
    # 3x3 conv-BN-ReLU applied to each hourglass output before the heads.
    return convolution(3, inp_dim, out_dim)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _nms(heat, kernel=1):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _left_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
ret = heat.clone()
for i in range(1, heat.shape[1]):
inds = (heat[:, i] >= heat[:, i -1])
ret[:, i] += ret[:, i - 1] * inds.float()
return (ret - heat).reshape(shape)
def _right_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
ret = heat.clone()
for i in range(heat.shape[1] - 2, -1, -1):
inds = (heat[:, i] >= heat[:, i +1])
ret[:, i] += ret[:, i + 1] * inds.float()
return (ret - heat).reshape(shape)
def _top_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
ret = heat.clone()
for i in range(1, heat.shape[1]):
inds = (heat[:, i] >= heat[:, i - 1])
ret[:, i] += ret[:, i - 1] * inds.float()
return (ret - heat).reshape(shape).transpose(3, 2)
def _bottom_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
ret = heat.clone()
for i in range(heat.shape[1] - 2, -1, -1):
inds = (heat[:, i] >= heat[:, i + 1])
ret[:, i] += ret[:, i + 1] * inds.float()
return (ret - heat).reshape(shape).transpose(3, 2)
def _h_aggregate(heat, aggr_weight=0.1):
    """Horizontally smear peak evidence: add a weighted share of the left-
    and right-accumulated mass back onto the heatmap."""
    left = _left_aggregate(heat)
    right = _right_aggregate(heat)
    return aggr_weight * left + aggr_weight * right + heat
def _v_aggregate(heat, aggr_weight=0.1):
    """Vertically smear peak evidence: add a weighted share of the top- and
    bottom-accumulated mass back onto the heatmap."""
    top = _top_aggregate(heat)
    bottom = _bottom_aggregate(heat)
    return aggr_weight * top + aggr_weight * bottom + heat
def _tranpose_and_gather_feat(feat, ind):
    """Reorder feat from B x C x H x W to B x (H*W) x C and gather the
    C-vectors at the flat spatial indices `ind` (B x K)."""
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    flat = channels_last.view(channels_last.size(0), -1, channels_last.size(3))
    return _gather_feat(flat, ind)
def _filter(heat, direction, val=0.1):
    """Smooth a heatmap with a fixed depthwise-style [val, 1, val] kernel
    along one axis: 'v' uses a 3x1 vertical kernel, 'h' a 1x3 horizontal
    one. Channels do not mix (identity off-diagonal).

    NOTE(review): builds the kernel on every call and hard-codes
    `kernel.cuda()`, so this only runs on GPU.
    """
    num_channels = heat.shape[1]
    if direction == 'v':
        kernel = torch.zeros((num_channels, num_channels, 3, 1))
        for i in range(num_channels):
            kernel[i, i, 0, 0] = val
            kernel[i, i, 1, 0] = 1
            kernel[i, i, 2, 0] = val
        padding = (1, 0)
    elif direction == 'h':
        kernel = torch.zeros((num_channels, num_channels, 1, 3))
        for i in range(num_channels):
            kernel[i, i, 0, 0] = val
            kernel[i, i, 0, 1] = 1
            kernel[i, i, 0, 2] = val
        padding = (0, 1)
    else:
        assert 0, direction
    heat = nn.functional.conv2d(heat, kernel.cuda(), padding=padding)
    # heat[heat > 1] = 1
    return heat
def _topk(scores, K=20):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)
topk_clses = (topk_inds / (height * width)).int()
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def _decode(
    tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr,
    K=100, kernel=1, ae_threshold=1, num_dets=1000
):
    """CornerNet decoding: pair the top-K top-left and bottom-right corner
    peaks into boxes via associative-embedding tag distance.

    Returns batch x num_dets x 8:
    [x1, y1, x2, y2, score, tl_score, br_score, class].
    """
    batch, cat, height, width = tl_heat.size()

    tl_heat = torch.sigmoid(tl_heat)
    br_heat = torch.sigmoid(br_heat)

    # perform nms on heatmaps
    tl_heat = _nms(tl_heat, kernel=kernel)
    br_heat = _nms(br_heat, kernel=kernel)

    tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)
    br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)

    # Broadcast to a K x K grid: every TL candidate paired with every BR one.
    tl_ys = tl_ys.view(batch, K, 1).expand(batch, K, K)
    tl_xs = tl_xs.view(batch, K, 1).expand(batch, K, K)
    br_ys = br_ys.view(batch, 1, K).expand(batch, K, K)
    br_xs = br_xs.view(batch, 1, K).expand(batch, K, K)

    if tl_regr is not None and br_regr is not None:
        # Refine corner positions with the predicted sub-pixel offsets.
        tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
        tl_regr = tl_regr.view(batch, K, 1, 2)
        br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
        br_regr = br_regr.view(batch, 1, K, 2)

        tl_xs = tl_xs + tl_regr[..., 0]
        tl_ys = tl_ys + tl_regr[..., 1]
        br_xs = br_xs + br_regr[..., 0]
        br_ys = br_ys + br_regr[..., 1]

    # all possible boxes based on top k corners (ignoring class)
    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)

    # Tag distance between every corner pair decides whether the two
    # corners belong to the same object.
    tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
    tl_tag = tl_tag.view(batch, K, 1)
    br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
    br_tag = br_tag.view(batch, 1, K)
    dists = torch.abs(tl_tag - br_tag)

    tl_scores = tl_scores.view(batch, K, 1).expand(batch, K, K)
    br_scores = br_scores.view(batch, 1, K).expand(batch, K, K)
    scores = (tl_scores + br_scores) / 2

    # reject boxes based on classes
    tl_clses = tl_clses.view(batch, K, 1).expand(batch, K, K)
    br_clses = br_clses.view(batch, 1, K).expand(batch, K, K)
    cls_inds = (tl_clses != br_clses)

    # reject boxes based on distances
    dist_inds = (dists > ae_threshold)

    # reject boxes based on widths and heights
    width_inds = (br_xs < tl_xs)
    height_inds = (br_ys < tl_ys)

    # Invalid pairs get score -1 so topk never selects them over valid ones.
    scores[cls_inds] = -1
    scores[dist_inds] = -1
    scores[width_inds] = -1
    scores[height_inds] = -1

    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)

    bboxes = bboxes.view(batch, -1, 4)
    bboxes = _gather_feat(bboxes, inds)

    clses = tl_clses.contiguous().view(batch, -1, 1)
    clses = _gather_feat(clses, inds).float()

    tl_scores = tl_scores.contiguous().view(batch, -1, 1)
    tl_scores = _gather_feat(tl_scores, inds).float()
    br_scores = br_scores.contiguous().view(batch, -1, 1)
    br_scores = _gather_feat(br_scores, inds).float()

    detections = torch.cat([bboxes, scores, tl_scores, br_scores, clses], dim=2)
    return detections
def _exct_decode(
    t_heat, l_heat, b_heat, r_heat, ct_heat,
    t_regr, l_regr, b_regr, r_regr,
    K=40, kernel=3,
    aggr_weight=0.1, scores_thresh=0.1, center_thresh=0.1, num_dets=1000
):
    """ExtremeNet decoding: enumerate K^4 combinations of top/left/bottom/
    right extreme-point peaks, reject geometrically or class-wise
    inconsistent ones, and score the survivors with the center heatmap.

    Returns batch x num_dets x 14:
    [x1, y1, x2, y2, score, t_x, t_y, l_x, l_y, b_x, b_y, r_x, r_y, class].
    """
    batch, cat, height, width = t_heat.size()
    '''
    filter_kernel = 0.1
    t_heat = _filter(t_heat, direction='h', val=filter_kernel)
    l_heat = _filter(l_heat, direction='v', val=filter_kernel)
    b_heat = _filter(b_heat, direction='h', val=filter_kernel)
    r_heat = _filter(r_heat, direction='v', val=filter_kernel)
    '''
    t_heat = torch.sigmoid(t_heat)
    l_heat = torch.sigmoid(l_heat)
    b_heat = torch.sigmoid(b_heat)
    r_heat = torch.sigmoid(r_heat)
    ct_heat = torch.sigmoid(ct_heat)

    # Spread evidence along edges so elongated objects still form peaks.
    if aggr_weight > 0:
        t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
        l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
        b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
        r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)

    # perform nms on heatmaps
    t_heat = _nms(t_heat, kernel=kernel)
    l_heat = _nms(l_heat, kernel=kernel)
    b_heat = _nms(b_heat, kernel=kernel)
    r_heat = _nms(r_heat, kernel=kernel)

    # Aggregation may push scores above 1; clamp back to probabilities.
    t_heat[t_heat > 1] = 1
    l_heat[l_heat > 1] = 1
    b_heat[b_heat > 1] = 1
    r_heat[r_heat > 1] = 1

    t_scores, t_inds, t_clses, t_ys, t_xs = _topk(t_heat, K=K)
    l_scores, l_inds, l_clses, l_ys, l_xs = _topk(l_heat, K=K)
    b_scores, b_inds, b_clses, b_ys, b_xs = _topk(b_heat, K=K)
    r_scores, r_inds, r_clses, r_ys, r_xs = _topk(r_heat, K=K)

    # Broadcast each branch along its own axis of a K^4 grid so every
    # (t, l, b, r) combination is represented.
    t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)

    t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)

    # Geometric center of each candidate box; look up the center heatmap
    # at that pixel under the top branch's class.
    box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
    box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
    ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs
    ct_inds = ct_inds.view(batch, -1)
    ct_heat = ct_heat.view(batch, -1, 1)
    ct_scores = _gather_feat(ct_heat, ct_inds)

    t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    ct_scores = ct_scores.view(batch, K, K, K, K)
    # Weighted average; the center score counts double.
    scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6

    # reject boxes based on classes
    cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + \
        (t_clses != r_clses)
    cls_inds = (cls_inds > 0)

    # Reject combinations whose extreme points are geometrically impossible
    # (e.g. the "top" point lying below any of the other three).
    top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
    top_inds = (top_inds > 0)
    left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
    left_inds = (left_inds > 0)
    bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
    bottom_inds = (bottom_inds > 0)
    right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
    right_inds = (right_inds > 0)

    # Reject low-confidence extreme points / centers.
    sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \
        (b_scores < scores_thresh) + (r_scores < scores_thresh) + \
        (ct_scores < center_thresh)
    sc_inds = (sc_inds > 0)

    '''
    scores[sc_inds] = -1
    scores[cls_inds] = -1
    scores[top_inds] = -1
    scores[left_inds] = -1
    scores[bottom_inds] = -1
    scores[right_inds] = -1
    '''
    # Subtracting the 0/1 masks instead of boolean assignment (disabled
    # above) pushes invalid combinations below any valid score without a
    # masked scatter.
    scores = scores - sc_inds.float()
    scores = scores - cls_inds.float()
    scores = scores - top_inds.float()
    scores = scores - left_inds.float()
    scores = scores - bottom_inds.float()
    scores = scores - right_inds.float()

    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)

    if t_regr is not None and l_regr is not None \
        and b_regr is not None and r_regr is not None:
        # Refine each extreme point with its predicted sub-pixel offset.
        t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
        t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
        l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
        l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
        b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
        b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
        r_regr = _tranpose_and_gather_feat(r_regr, r_inds)
        r_regr = r_regr.view(batch, 1, 1, 1, K, 2)

        t_xs = t_xs + t_regr[..., 0]
        t_ys = t_ys + t_regr[..., 1]
        l_xs = l_xs + l_regr[..., 0]
        l_ys = l_ys + l_regr[..., 1]
        b_xs = b_xs + b_regr[..., 0]
        b_ys = b_ys + b_regr[..., 1]
        r_xs = r_xs + r_regr[..., 0]
        r_ys = r_ys + r_regr[..., 1]
    else:
        # No offset maps: shift coordinates to pixel centers.
        t_xs = t_xs + 0.5
        t_ys = t_ys + 0.5
        l_xs = l_xs + 0.5
        l_ys = l_ys + 0.5
        b_xs = b_xs + 0.5
        b_ys = b_ys + 0.5
        r_xs = r_xs + 0.5
        r_ys = r_ys + 0.5

    # Bounding box spanned by the four extreme coordinates.
    bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
    bboxes = bboxes.view(batch, -1, 4)
    bboxes = _gather_feat(bboxes, inds)

    clses = t_clses.contiguous().view(batch, -1, 1)
    clses = _gather_feat(clses, inds).float()

    t_xs = t_xs.contiguous().view(batch, -1, 1)
    t_xs = _gather_feat(t_xs, inds).float()
    t_ys = t_ys.contiguous().view(batch, -1, 1)
    t_ys = _gather_feat(t_ys, inds).float()
    l_xs = l_xs.contiguous().view(batch, -1, 1)
    l_xs = _gather_feat(l_xs, inds).float()
    l_ys = l_ys.contiguous().view(batch, -1, 1)
    l_ys = _gather_feat(l_ys, inds).float()
    b_xs = b_xs.contiguous().view(batch, -1, 1)
    b_xs = _gather_feat(b_xs, inds).float()
    b_ys = b_ys.contiguous().view(batch, -1, 1)
    b_ys = _gather_feat(b_ys, inds).float()
    r_xs = r_xs.contiguous().view(batch, -1, 1)
    r_xs = _gather_feat(r_xs, inds).float()
    r_ys = r_ys.contiguous().view(batch, -1, 1)
    r_ys = _gather_feat(r_ys, inds).float()

    detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys,
                            b_xs, b_ys, r_xs, r_ys, clses], dim=2)
    return detections
'''
# Faster but costs more memory
def _neg_loss(preds, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
for pred in preds:
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * \
neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
'''
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return x
def _ae_loss(tag0, tag1, mask):
    """Associative-embedding loss over corner tag pairs.

    tag0/tag1: B x K x 1 tags of matching top-left / bottom-right corners.
    mask:      B x K mask of which of the K slots hold real objects.
    Returns (pull, push): pull draws each pair's two tags to their mean;
    push separates the means of different objects.
    """
    num = mask.sum(dim=1, keepdim=True).float()
    # NOTE(review): .squeeze() drops ALL singleton dims, including the batch
    # dim when batch == 1 — presumably the batch size is always > 1 here;
    # confirm before reusing with batch-of-one inputs.
    tag0 = tag0.squeeze()
    tag1 = tag1.squeeze()

    tag_mean = (tag0 + tag1) / 2

    # Pull: squared distance of each tag to the pair mean, normalised by
    # the per-image object count and summed over valid slots.
    tag0 = torch.pow(tag0 - tag_mean, 2) / (num + 1e-4)
    tag0 = tag0[mask].sum()
    tag1 = torch.pow(tag1 - tag_mean, 2) / (num + 1e-4)
    tag1 = tag1[mask].sum()
    pull = tag0 + tag1

    # Push: hinge (margin 1) on pairwise distances between object means.
    # The pairwise mask keeps only (valid, valid) combinations.
    mask = mask.unsqueeze(1) + mask.unsqueeze(2)
    mask = mask.eq(2)
    num = num.unsqueeze(2)
    num2 = (num - 1) * num
    dist = tag_mean.unsqueeze(1) - tag_mean.unsqueeze(2)
    dist = 1 - torch.abs(dist)
    dist = nn.functional.relu(dist, inplace=True)
    # Subtract the diagonal's contribution (each mean vs itself) and
    # normalise by the number of ordered pairs.
    dist = dist - 1 / (num + 1e-4)
    dist = dist / (num2 + 1e-4)
    dist = dist[mask]
    push = dist.sum()
    return pull, push
'''
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
'''
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/scatter_gather.py | Python | import torch
from torch.autograd import Variable
from torch.nn.parallel._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        # Variables are split along `dim` (optionally into explicit per-GPU
        # chunk sizes); containers are scattered recursively, turning a
        # container of scatterables into one container per GPU.
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-scatterable leaf: replicate the same reference once per GPU
        # (the loop variable is intentionally unused).
        return [obj for targets in target_gpus]
    return scatter_map(inputs)
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
    r"""Scatter with support for kwargs dictionary"""
    scattered_in = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
    # Pad the shorter list so both have one entry per device.
    if len(scattered_in) < len(scattered_kw):
        scattered_in += [() for _ in range(len(scattered_kw) - len(scattered_in))]
    elif len(scattered_kw) < len(scattered_in):
        scattered_kw += [{} for _ in range(len(scattered_in) - len(scattered_kw))]
    return tuple(scattered_in), tuple(scattered_kw)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
models/py_utils/utils.py | Python | import torch
import torch.nn as nn
class convolution(nn.Module):
    """Conv2d -> (optional) BatchNorm -> ReLU with 'same' padding for odd k."""

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(convolution, self).__init__()

        pad = (k - 1) // 2
        # The conv bias is redundant when BatchNorm follows it.
        self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad),
                              stride=(stride, stride), bias=not with_bn)
        self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class fully_connected(nn.Module):
    """Linear -> (optional) BatchNorm1d -> ReLU."""

    def __init__(self, inp_dim, out_dim, with_bn=True):
        super(fully_connected, self).__init__()
        self.with_bn = with_bn

        self.linear = nn.Linear(inp_dim, out_dim)
        if self.with_bn:
            self.bn = nn.BatchNorm1d(out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.linear(x)
        if self.with_bn:
            out = self.bn(out)
        return self.relu(out)
class residual(nn.Module):
    """Two 3x3 conv-BN stages with an identity (or 1x1-projection) skip.

    Note: `k` is accepted for signature compatibility with `convolution`
    but the main-branch kernels are fixed at 3x3.
    """

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(residual, self).__init__()

        self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1),
                               stride=(stride, stride), bias=False)
        self.bn1 = nn.BatchNorm2d(out_dim)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
        self.bn2 = nn.BatchNorm2d(out_dim)

        # Project the input whenever its shape would not match the main branch.
        needs_projection = stride != 1 or inp_dim != out_dim
        self.skip = nn.Sequential(
            nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
            nn.BatchNorm2d(out_dim)
        ) if needs_projection else nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        main = self.relu1(self.bn1(self.conv1(x)))
        main = self.bn2(self.conv2(main))
        return self.relu(main + self.skip(x))
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
    """Stack `modules` copies of `layer`: the first maps inp_dim -> out_dim,
    the remainder keep out_dim."""
    stack = [layer(k, inp_dim, out_dim, **kwargs)]
    stack.extend(layer(k, out_dim, out_dim, **kwargs) for _ in range(modules - 1))
    return nn.Sequential(*stack)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
    """Stack `modules` copies of `layer`: all but the last keep inp_dim,
    the final one maps inp_dim -> out_dim (mirror of make_layer)."""
    stack = [layer(k, inp_dim, inp_dim, **kwargs) for _ in range(modules - 1)]
    stack.append(layer(k, inp_dim, out_dim, **kwargs))
    return nn.Sequential(*stack)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
nnet/py_factory.py | Python | import os
import torch
import importlib
import torch.nn as nn
from config import system_configs
from models.py_utils.data_parallel import DataParallel
torch.manual_seed(317)
class Network(nn.Module):
    """Bundle a model and its loss into one module so DataParallel can
    scatter the forward pass and the loss computation together."""

    def __init__(self, model, loss):
        super(Network, self).__init__()
        self.model = model
        self.loss = loss

    def forward(self, xs, ys, **kwargs):
        predictions = self.model(*xs, **kwargs)
        return self.loss(predictions, ys, **kwargs)
# for model backward compatibility
# previously model was wrapped by DataParallel module
class DummyModule(nn.Module):
    """Expose a model under a `.module` attribute so checkpoints saved from
    DataParallel-wrapped models (state-dict keys prefixed with 'module.')
    keep loading."""

    def __init__(self, model):
        super(DummyModule, self).__init__()
        self.module = model

    def forward(self, *xs, **kwargs):
        return self.module(*xs, **kwargs)
class NetworkFactory(object):
    """Builds the configured model + loss and owns the optimizer, the
    train/validate/test steps and checkpoint (de)serialization."""
    def __init__(self, db):
        super(NetworkFactory, self).__init__()

        # Import models/<snapshot_name>.py, which must expose `model` and `loss`.
        module_file = "models.{}".format(system_configs.snapshot_name)
        print("module_file: {}".format(module_file))
        nnet_module = importlib.import_module(module_file)

        # DummyModule keeps the "module." state-dict prefix for backward
        # compatibility with checkpoints saved from DataParallel models.
        self.model = DummyModule(nnet_module.model(db))
        self.loss = nnet_module.loss
        self.network = Network(self.model, self.loss)
        self.network = DataParallel(self.network, chunk_sizes=system_configs.chunk_sizes)

        # Report the total trainable-parameter count.
        total_params = 0
        for params in self.model.parameters():
            num_params = 1
            for x in params.size():
                num_params *= x
            total_params += num_params
        print("total parameters: {}".format(total_params))

        if system_configs.opt_algo == "adam":
            # Adam with library-default hyperparameters.
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_configs.learning_rate,
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer")

    def cuda(self):
        # Move the model parameters to GPU.
        self.model.cuda()

    def train_mode(self):
        self.network.train()

    def eval_mode(self):
        self.network.eval()

    def train(self, xs, ys, **kwargs):
        # One optimization step; returns the scalar loss.
        xs = [x.cuda(non_blocking=True) for x in xs]
        ys = [y.cuda(non_blocking=True) for y in ys]

        self.optimizer.zero_grad()
        loss = self.network(xs, ys)
        loss = loss.mean()  # average the per-GPU losses
        loss.backward()
        self.optimizer.step()
        return loss

    def validate(self, xs, ys, **kwargs):
        # Same as train() but without gradients or a parameter update.
        with torch.no_grad():
            xs = [x.cuda(non_blocking=True) for x in xs]
            ys = [y.cuda(non_blocking=True) for y in ys]

            loss = self.network(xs, ys)
            loss = loss.mean()
            return loss

    def test(self, xs, **kwargs):
        # Raw model inference (bypasses the loss wrapper).
        with torch.no_grad():
            xs = [x.cuda(non_blocking=True) for x in xs]
            return self.model(*xs, **kwargs)

    def set_lr(self, lr):
        print("setting learning rate to: {}".format(lr))
        for param_group in self.optimizer.param_groups:
            param_group["lr"] = lr

    def load_pretrained_params(self, pretrained_model):
        # strict=False tolerates missing/extra keys (e.g. different heads).
        print("loading from {}".format(pretrained_model))
        with open(pretrained_model, "rb") as f:
            params = torch.load(f)
            self.model.load_state_dict(params, strict=False)

    def load_params(self, iteration):
        cache_file = system_configs.snapshot_file.format(iteration)
        print("loading model from {}".format(cache_file))
        with open(cache_file, "rb") as f:
            params = torch.load(f)
            self.model.load_state_dict(params)

    def save_params(self, iteration):
        cache_file = system_configs.snapshot_file.format(iteration)
        print("saving model to {}".format(cache_file))
        with open(cache_file, "wb") as f:
            params = self.model.state_dict()
            torch.save(params, f)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
sample/coco.py | Python | import cv2
import math
import numpy as np
import torch
import random
import string
from config import system_configs
from utils import crop_image, normalize_, color_jittering_, lighting_
from .utils import random_crop, draw_gaussian, gaussian_radius
def _full_image_crop(image, detections):
    """Pad *image* to a square of side max(h, w), shifting boxes to match.

    Returns the padded image and a copy of *detections* translated by the
    padding border reported by ``crop_image``.
    """
    detections = detections.copy()
    img_h, img_w = image.shape[0:2]

    side = max(img_h, img_w)
    image, border, offset = crop_image(
        image, [img_h // 2, img_w // 2], [side, side])

    detections[:, 0:4:2] += border[2]
    detections[:, 1:4:2] += border[0]
    return image, detections
def _resize_image(image, detections, size):
    """Resize *image* to *size* (height, width) and scale boxes by the same
    ratios; returns the resized image and a scaled copy of *detections*."""
    detections = detections.copy()
    old_h, old_w = image.shape[0:2]
    new_h, new_w = size

    resized = cv2.resize(image, (new_w, new_h))
    detections[:, 0:4:2] *= new_w / old_w
    detections[:, 1:4:2] *= new_h / old_h
    return resized, detections
def _clip_detections(image, detections):
detections = detections.copy()
height, width = image.shape[0:2]
detections[:, 0:4:2] = np.clip(detections[:, 0:4:2], 0, width - 1)
detections[:, 1:4:2] = np.clip(detections[:, 1:4:2], 0, height - 1)
keep_inds = ((detections[:, 2] - detections[:, 0]) > 0) & \
((detections[:, 3] - detections[:, 1]) > 0)
detections = detections[keep_inds]
return detections
def kp_detection(db, k_ind, data_aug, debug):
    """Sample one training batch of corner (top-left / bottom-right) targets.

    Returns ``({"xs": [images, tl_tags, br_tags],
                "ys": [tl_heatmaps, br_heatmaps, tag_masks, tl_regrs, br_regrs]},
               next k_ind)``; *k_ind* is the cursor into ``db.db_inds`` and
    wraps around the dataset.
    """
    data_rng = system_configs.data_rng
    batch_size = system_configs.batch_size

    categories = db.configs["categories"]
    input_size = db.configs["input_size"]
    output_size = db.configs["output_sizes"][0]

    border = db.configs["border"]
    lighting = db.configs["lighting"]
    rand_crop = db.configs["rand_crop"]
    rand_color = db.configs["rand_color"]
    rand_scales = db.configs["rand_scales"]
    gaussian_bump = db.configs["gaussian_bump"]
    gaussian_iou = db.configs["gaussian_iou"]
    gaussian_rad = db.configs["gaussian_radius"]

    # hard cap on the number of objects stored per image
    max_tag_len = 128

    # allocating memory
    images = np.zeros((batch_size, 3, input_size[0], input_size[1]), dtype=np.float32)
    tl_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    br_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    tl_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    br_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    tl_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    br_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    tag_masks = np.zeros((batch_size, max_tag_len), dtype=np.uint8)
    tag_lens = np.zeros((batch_size, ), dtype=np.int32)

    db_size = db.db_inds.size
    for b_ind in range(batch_size):
        # reshuffle once per epoch (when the cursor wraps back to 0)
        if not debug and k_ind == 0:
            db.shuffle_inds()

        db_ind = db.db_inds[k_ind]
        k_ind = (k_ind + 1) % db_size

        # reading image
        image_file = db.image_file(db_ind)
        image = cv2.imread(image_file)

        # reading detections
        detections = db.detections(db_ind)

        # cropping an image randomly
        if not debug and rand_crop:
            image, detections = random_crop(image, detections, rand_scales, input_size, border=border)
        else:
            image, detections = _full_image_crop(image, detections)

        image, detections = _resize_image(image, detections, input_size)
        detections = _clip_detections(image, detections)

        # input -> output (heatmap) coordinate scale
        width_ratio = output_size[1] / input_size[1]
        height_ratio = output_size[0] / input_size[0]

        # flipping an image randomly
        if not debug and np.random.uniform() > 0.5:
            image[:] = image[:, ::-1, :]
            width = image.shape[1]
            detections[:, [0, 2]] = width - detections[:, [2, 0]] - 1

        if not debug:
            image = image.astype(np.float32) / 255.
            if rand_color:
                color_jittering_(data_rng, image)
            if lighting:
                lighting_(data_rng, image, 0.1, db.eig_val, db.eig_vec)
            normalize_(image, db.mean, db.std)
        images[b_ind] = image.transpose((2, 0, 1))

        for ind, detection in enumerate(detections):
            category = int(detection[-1]) - 1  # class labels are 1-based

            xtl, ytl = detection[0], detection[1]
            xbr, ybr = detection[2], detection[3]

            # corner positions in (fractional) heatmap coordinates
            fxtl = (xtl * width_ratio)
            fytl = (ytl * height_ratio)
            fxbr = (xbr * width_ratio)
            fybr = (ybr * height_ratio)

            xtl = int(fxtl)
            ytl = int(fytl)
            xbr = int(fxbr)
            ybr = int(fybr)

            if gaussian_bump:
                width = detection[2] - detection[0]
                height = detection[3] - detection[1]

                width = math.ceil(width * width_ratio)
                height = math.ceil(height * height_ratio)

                if gaussian_rad == -1:
                    radius = gaussian_radius((height, width), gaussian_iou)
                    radius = max(0, int(radius))
                else:
                    radius = gaussian_rad

                draw_gaussian(tl_heatmaps[b_ind, category], [xtl, ytl], radius)
                draw_gaussian(br_heatmaps[b_ind, category], [xbr, ybr], radius)
            else:
                tl_heatmaps[b_ind, category, ytl, xtl] = 1
                br_heatmaps[b_ind, category, ybr, xbr] = 1

            # sub-pixel offset targets and flattened corner indices
            tag_ind = tag_lens[b_ind]
            tl_regrs[b_ind, tag_ind, :] = [fxtl - xtl, fytl - ytl]
            br_regrs[b_ind, tag_ind, :] = [fxbr - xbr, fybr - ybr]
            tl_tags[b_ind, tag_ind] = ytl * output_size[1] + xtl
            br_tags[b_ind, tag_ind] = ybr * output_size[1] + xbr
            tag_lens[b_ind] += 1
            # NOTE(review): more than max_tag_len (128) objects in one image
            # would index out of bounds here — presumably never happens; confirm.

    # mask out the unused tag slots
    for b_ind in range(batch_size):
        tag_len = tag_lens[b_ind]
        tag_masks[b_ind, :tag_len] = 1

    images = torch.from_numpy(images)
    tl_heatmaps = torch.from_numpy(tl_heatmaps)
    br_heatmaps = torch.from_numpy(br_heatmaps)
    tl_regrs = torch.from_numpy(tl_regrs)
    br_regrs = torch.from_numpy(br_regrs)
    tl_tags = torch.from_numpy(tl_tags)
    br_tags = torch.from_numpy(br_tags)
    tag_masks = torch.from_numpy(tag_masks)

    return {
        "xs": [images, tl_tags, br_tags],
        "ys": [tl_heatmaps, br_heatmaps, tag_masks, tl_regrs, br_regrs]
    }, k_ind
def sample_data(db, k_ind, data_aug=True, debug=False):
    """Dispatch to the sampling function named in the config (e.g. kp_detection)."""
    return globals()[system_configs.sampling_function](db, k_ind, data_aug, debug)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
sample/coco_extreme.py | Python | import cv2
import math
import numpy as np
import torch
import random
import string
from config import system_configs
from utils import crop_image, normalize_, color_jittering_, lighting_
from .utils import random_crop_pts, draw_gaussian, gaussian_radius
from utils.debugger import Debugger
def _resize_image_pts(image, detections, extreme_pts, size):
    """Resize *image* to *size* (h, w); scale boxes and extreme points to match.

    Fix: the original copied ``detections`` but scaled ``extreme_pts`` in
    place, mutating the caller's array; both are copied now so the function
    has no side effects on its arguments.
    """
    detections = detections.copy()
    extreme_pts = extreme_pts.copy()
    height, width = image.shape[0:2]
    new_height, new_width = size

    image = cv2.resize(image, (new_width, new_height))

    height_ratio = new_height / height
    width_ratio = new_width / width
    detections[:, 0:4:2] *= width_ratio
    detections[:, 1:4:2] *= height_ratio
    # extreme_pts is (num_objects, 4, 2) with (x, y) in the last axis
    extreme_pts[:, :, 0] *= width_ratio
    extreme_pts[:, :, 1] *= height_ratio
    return image, detections, extreme_pts
def _clip_detections_pts(image, detections, extreme_pts):
detections = detections.copy()
height, width = image.shape[0:2]
detections[:, 0:4:2] = np.clip(detections[:, 0:4:2], 0, width - 1)
detections[:, 1:4:2] = np.clip(detections[:, 1:4:2], 0, height - 1)
extreme_pts[:, :, 0] = np.clip(extreme_pts[:, :, 0], 0, width - 1)
extreme_pts[:, :, 1] = np.clip(extreme_pts[:, :, 1], 0, height - 1)
keep_inds = ((detections[:, 2] - detections[:, 0]) > 0) & \
((detections[:, 3] - detections[:, 1]) > 0)
detections = detections[keep_inds]
extreme_pts = extreme_pts[keep_inds]
return detections, extreme_pts
def kp_detection(db, k_ind, data_aug, debug):
    """Sample one training batch of extreme-point targets (t / l / b / r + center).

    Returns ``({"xs": [...], "ys": [...]}, next k_ind)``; see the return
    statement for the exact tensor ordering.  *k_ind* is the cursor into
    ``db.db_inds`` and wraps around the dataset.
    """
    data_rng = system_configs.data_rng
    batch_size = system_configs.batch_size

    categories = db.configs["categories"]
    input_size = db.configs["input_size"]
    output_size = db.configs["output_sizes"][0]

    border = db.configs["border"]
    lighting = db.configs["lighting"]
    rand_crop = db.configs["rand_crop"]
    rand_color = db.configs["rand_color"]
    rand_scales = db.configs["rand_scales"]
    gaussian_bump = db.configs["gaussian_bump"]
    gaussian_iou = db.configs["gaussian_iou"]
    gaussian_rad = db.configs["gaussian_radius"]

    # hard cap on the number of objects stored per image
    max_tag_len = 128

    # allocating memory
    images = np.zeros((batch_size, 3, input_size[0], input_size[1]), dtype=np.float32)
    t_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    l_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    b_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    r_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    ct_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
    t_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    l_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    b_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    r_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
    t_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    l_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    b_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    r_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    ct_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
    tag_masks = np.zeros((batch_size, max_tag_len), dtype=np.uint8)
    tag_lens = np.zeros((batch_size, ), dtype=np.int32)

    db_size = db.db_inds.size
    for b_ind in range(batch_size):
        # reshuffle once per epoch (when the cursor wraps back to 0)
        if not debug and k_ind == 0:
            db.shuffle_inds()

        db_ind = db.db_inds[k_ind]
        k_ind = (k_ind + 1) % db_size

        # reading image
        image_file = db.image_file(db_ind)
        image = cv2.imread(image_file)

        # reading detections
        detections, extreme_pts = db.detections(db_ind)

        # cropping an image randomly
        if rand_crop:
            image, detections, extreme_pts = random_crop_pts(
                image, detections, extreme_pts,
                rand_scales, input_size, border=border)
        else:
            # full-image crop is not supported for extreme-point samples
            assert 0
            # image, detections = _full_image_crop(image, detections)

        image, detections, extreme_pts = _resize_image_pts(
            image, detections, extreme_pts, input_size)
        detections, extreme_pts = _clip_detections_pts(
            image, detections, extreme_pts)

        # input -> output (heatmap) coordinate scale
        width_ratio = output_size[1] / input_size[1]
        height_ratio = output_size[0] / input_size[0]

        # flipping an image randomly
        if np.random.uniform() > 0.5:
            image[:] = image[:, ::-1, :]
            width = image.shape[1]
            detections[:, [0, 2]] = width - detections[:, [2, 0]] - 1
            extreme_pts[:, :, 0] = width - extreme_pts[:, :, 0] - 1
            # a horizontal flip swaps the left (index 1) and right (index 3)
            # extreme points
            extreme_pts[:, 1, :], extreme_pts[:, 3, :] = \
                extreme_pts[:, 3, :].copy(), extreme_pts[:, 1, :].copy()

        image = image.astype(np.float32) / 255.
        if not debug:
            if rand_color:
                color_jittering_(data_rng, image)
            if lighting:
                lighting_(data_rng, image, 0.1, db.eig_val, db.eig_vec)
            normalize_(image, db.mean, db.std)
        images[b_ind] = image.transpose((2, 0, 1))

        for ind, detection in enumerate(detections):
            category = int(detection[-1]) - 1  # class labels are 1-based
            extreme_pt = extreme_pts[ind]

            # extreme points are ordered top, left, bottom, right
            xt, yt = extreme_pt[0, 0], extreme_pt[0, 1]
            xl, yl = extreme_pt[1, 0], extreme_pt[1, 1]
            xb, yb = extreme_pt[2, 0], extreme_pt[2, 1]
            xr, yr = extreme_pt[3, 0], extreme_pt[3, 1]
            # center approximated from the left/right x and top/bottom y
            xct = (xl + xr) / 2
            yct = (yt + yb) / 2

            # fractional heatmap coordinates
            fxt = (xt * width_ratio)
            fyt = (yt * height_ratio)
            fxl = (xl * width_ratio)
            fyl = (yl * height_ratio)
            fxb = (xb * width_ratio)
            fyb = (yb * height_ratio)
            fxr = (xr * width_ratio)
            fyr = (yr * height_ratio)
            fxct = (xct * width_ratio)
            fyct = (yct * height_ratio)

            xt = int(fxt)
            yt = int(fyt)
            xl = int(fxl)
            yl = int(fyl)
            xb = int(fxb)
            yb = int(fyb)
            xr = int(fxr)
            yr = int(fyr)
            xct = int(fxct)
            yct = int(fyct)

            if gaussian_bump:
                width = detection[2] - detection[0]
                height = detection[3] - detection[1]

                width = math.ceil(width * width_ratio)
                height = math.ceil(height * height_ratio)

                if gaussian_rad == -1:
                    radius = gaussian_radius((height, width), gaussian_iou)
                    radius = max(0, int(radius))
                else:
                    radius = gaussian_rad

                draw_gaussian(t_heatmaps[b_ind, category], [xt, yt], radius)
                draw_gaussian(l_heatmaps[b_ind, category], [xl, yl], radius)
                draw_gaussian(b_heatmaps[b_ind, category], [xb, yb], radius)
                draw_gaussian(r_heatmaps[b_ind, category], [xr, yr], radius)
                draw_gaussian(ct_heatmaps[b_ind, category], [xct, yct], radius)
            else:
                t_heatmaps[b_ind, category, yt, xt] = 1
                l_heatmaps[b_ind, category, yl, xl] = 1
                b_heatmaps[b_ind, category, yb, xb] = 1
                r_heatmaps[b_ind, category, yr, xr] = 1
                # NOTE(review): ct_heatmaps is not written in this branch,
                # unlike the gaussian branch — confirm intended.

            # sub-pixel offset targets and flattened keypoint indices
            tag_ind = tag_lens[b_ind]
            t_regrs[b_ind, tag_ind, :] = [fxt - xt, fyt - yt]
            l_regrs[b_ind, tag_ind, :] = [fxl - xl, fyl - yl]
            b_regrs[b_ind, tag_ind, :] = [fxb - xb, fyb - yb]
            r_regrs[b_ind, tag_ind, :] = [fxr - xr, fyr - yr]
            t_tags[b_ind, tag_ind] = yt * output_size[1] + xt
            l_tags[b_ind, tag_ind] = yl * output_size[1] + xl
            b_tags[b_ind, tag_ind] = yb * output_size[1] + xb
            r_tags[b_ind, tag_ind] = yr * output_size[1] + xr
            ct_tags[b_ind, tag_ind] = yct * output_size[1] + xct
            tag_lens[b_ind] += 1

    # mask out the unused tag slots
    for b_ind in range(batch_size):
        tag_len = tag_lens[b_ind]
        tag_masks[b_ind, :tag_len] = 1

    if debug:
        # visualize the first image's target heatmaps
        debugger = Debugger(num_classes=80)
        t_hm = debugger.gen_colormap(t_heatmaps[0])
        l_hm = debugger.gen_colormap(l_heatmaps[0])
        b_hm = debugger.gen_colormap(b_heatmaps[0])
        r_hm = debugger.gen_colormap(r_heatmaps[0])
        ct_hm = debugger.gen_colormap(ct_heatmaps[0])
        # un-normalize for display
        img = images[0] * db.std.reshape(3, 1, 1) + db.mean.reshape(3, 1, 1)
        img = (img * 255).astype(np.uint8).transpose(1, 2, 0)
        debugger.add_blend_img(img, t_hm, 't_hm')
        debugger.add_blend_img(img, l_hm, 'l_hm')
        debugger.add_blend_img(img, b_hm, 'b_hm')
        debugger.add_blend_img(img, r_hm, 'r_hm')
        debugger.add_blend_img(
            img, np.maximum(np.maximum(t_hm, l_hm),
                            np.maximum(b_hm, r_hm)), 'extreme')
        debugger.add_blend_img(img, ct_hm, 'center')
        debugger.show_all_imgs(pause=True)

    images = torch.from_numpy(images)
    t_heatmaps = torch.from_numpy(t_heatmaps)
    l_heatmaps = torch.from_numpy(l_heatmaps)
    b_heatmaps = torch.from_numpy(b_heatmaps)
    r_heatmaps = torch.from_numpy(r_heatmaps)
    ct_heatmaps = torch.from_numpy(ct_heatmaps)
    t_regrs = torch.from_numpy(t_regrs)
    l_regrs = torch.from_numpy(l_regrs)
    b_regrs = torch.from_numpy(b_regrs)
    r_regrs = torch.from_numpy(r_regrs)
    t_tags = torch.from_numpy(t_tags)
    l_tags = torch.from_numpy(l_tags)
    b_tags = torch.from_numpy(b_tags)
    r_tags = torch.from_numpy(r_tags)
    ct_tags = torch.from_numpy(ct_tags)
    tag_masks = torch.from_numpy(tag_masks)

    return {
        "xs": [images, t_tags, l_tags, b_tags, r_tags, ct_tags],
        "ys": [t_heatmaps, l_heatmaps, b_heatmaps, r_heatmaps, ct_heatmaps,
               tag_masks, t_regrs, l_regrs, b_regrs, r_regrs]
    }, k_ind
def sample_data(db, k_ind, data_aug=True, debug=False):
    """Dispatch to the sampling function named in the config (e.g. kp_detection)."""
    return globals()[system_configs.sampling_function](db, k_ind, data_aug, debug)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
sample/utils.py | Python | import cv2
import numpy as np
def gaussian2D(shape, sigma=1):
    """Return a (h, w) array holding an unnormalized 2-D Gaussian (peak 1)."""
    half_h, half_w = [(edge - 1.) / 2. for edge in shape]
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]

    g = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # zero out the numerically negligible tail
    g[g < np.finfo(g.dtype).eps * g.max()] = 0
    return g

def draw_gaussian(heatmap, center, radius, k=1):
    """Splat a Gaussian of the given *radius* onto *heatmap* at *center* (x, y).

    Overlapping values are merged with an element-wise max, so existing
    peaks are never lowered.  Modifies *heatmap* in place.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)

    x, y = center
    height, width = heatmap.shape[0:2]

    # clamp the stamp to the heatmap bounds
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom,
                               radius - left:radius + right]
    np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
def gaussian_radius(det_size, min_overlap):
    """Largest corner-shift radius that keeps box IoU >= *min_overlap*.

    Solves three quadratic cases (as in CornerNet) and returns the tightest
    of the three roots.
    """
    height, width = det_size
    perim = height + width
    area = height * width

    # case 1
    a1 = 1
    b1 = perim
    c1 = area * (1 - min_overlap) / (1 + min_overlap)
    r1 = (b1 + np.sqrt(b1 ** 2 - 4 * a1 * c1)) / 2

    # case 2
    a2 = 4
    b2 = 2 * perim
    c2 = (1 - min_overlap) * area
    r2 = (b2 + np.sqrt(b2 ** 2 - 4 * a2 * c2)) / 2

    # case 3
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * perim
    c3 = (min_overlap - 1) * area
    r3 = (b3 + np.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2

    return min(r1, r2, r3)
def _get_border(border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def random_crop(image, detections, random_scales, view_size, border=64):
view_height, view_width = view_size
image_height, image_width = image.shape[0:2]
scale = np.random.choice(random_scales)
height = int(view_height * scale)
width = int(view_width * scale)
cropped_image = np.zeros((height, width, 3), dtype=image.dtype)
w_border = _get_border(border, image_width)
h_border = _get_border(border, image_height)
ctx = np.random.randint(low=w_border, high=image_width - w_border)
cty = np.random.randint(low=h_border, high=image_height - h_border)
x0, x1 = max(ctx - width // 2, 0), min(ctx + width // 2, image_width)
y0, y1 = max(cty - height // 2, 0), min(cty + height // 2, image_height)
left_w, right_w = ctx - x0, x1 - ctx
top_h, bottom_h = cty - y0, y1 - cty
# crop image
cropped_ctx, cropped_cty = width // 2, height // 2
x_slice = slice(cropped_ctx - left_w, cropped_ctx + right_w)
y_slice = slice(cropped_cty - top_h, cropped_cty + bottom_h)
cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
# crop detections
cropped_detections = detections.copy()
cropped_detections[:, 0:4:2] -= x0
cropped_detections[:, 1:4:2] -= y0
cropped_detections[:, 0:4:2] += cropped_ctx - left_w
cropped_detections[:, 1:4:2] += cropped_cty - top_h
return cropped_image, cropped_detections
def random_crop_pts(image, detections, extreme_pts,
                    random_scales, view_size, border=64):
    """Randomly crop *image* (like ``random_crop``) and translate both the
    boxes and their extreme points into crop coordinates.

    Returns (crop canvas, translated boxes copy, translated extreme-points copy).
    """
    view_height, view_width = view_size
    image_height, image_width = image.shape[0:2]

    scale = np.random.choice(random_scales)
    crop_height = int(view_height * scale)
    crop_width = int(view_width * scale)
    canvas = np.zeros((crop_height, crop_width, 3), dtype=image.dtype)

    # pick a crop center away from the image edges
    w_border = _get_border(border, image_width)
    h_border = _get_border(border, image_height)
    ctx = np.random.randint(low=w_border, high=image_width - w_border)
    cty = np.random.randint(low=h_border, high=image_height - h_border)

    x0, x1 = max(ctx - crop_width // 2, 0), min(ctx + crop_width // 2, image_width)
    y0, y1 = max(cty - crop_height // 2, 0), min(cty + crop_height // 2, image_height)
    left_w, right_w = ctx - x0, x1 - ctx
    top_h, bottom_h = cty - y0, y1 - cty

    # paste the visible region into the center of the crop canvas
    cropped_ctx, cropped_cty = crop_width // 2, crop_height // 2
    x_slice = slice(cropped_ctx - left_w, cropped_ctx + right_w)
    y_slice = slice(cropped_cty - top_h, cropped_cty + bottom_h)
    canvas[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]

    # translate boxes into crop coordinates
    cropped_detections = detections.copy()
    cropped_detections[:, 0:4:2] -= x0
    cropped_detections[:, 1:4:2] -= y0
    cropped_detections[:, 0:4:2] += cropped_ctx - left_w
    cropped_detections[:, 1:4:2] += cropped_cty - top_h

    # translate extreme points the same way
    cropped_extreme_pts = extreme_pts.copy()
    cropped_extreme_pts[:, :, 0] -= x0
    cropped_extreme_pts[:, :, 1] -= y0
    cropped_extreme_pts[:, :, 0] += cropped_ctx - left_w
    cropped_extreme_pts[:, :, 1] += cropped_cty - top_h

    return canvas, cropped_detections, cropped_extreme_pts
test.py | Python | #!/usr/bin/env python
import os
import json
import torch
import pprint
import argparse
import importlib
import numpy as np
import matplotlib
matplotlib.use("Agg")
from config import system_configs
from nnet.py_factory import NetworkFactory
from db.datasets import datasets
torch.backends.cudnn.benchmark = False
def parse_args():
    """Parse command-line arguments for the test script."""
    parser = argparse.ArgumentParser(description="Test CornerNet")
    parser.add_argument("cfg_file", help="config file", type=str)
    parser.add_argument("--testiter", dest="testiter", default=None, type=int,
                        help="test at iteration i")
    parser.add_argument("--split", dest="split", default="validation", type=str,
                        help="which split to use")
    parser.add_argument("--suffix", dest="suffix", default=None, type=str)
    parser.add_argument("--debug", action="store_true")
    return parser.parse_args()
def make_dirs(directories):
    """Create every directory in *directories* (including parents) if missing.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create (``os.path.exists`` + ``makedirs``), which raced when
    two processes created the same result directory concurrently.
    """
    for directory in directories:
        os.makedirs(directory, exist_ok=True)
def test(db, split, testiter, debug=False, suffix=None):
    """Build the network, load the snapshot at *testiter*, and run evaluation.

    Results go under ``<result_dir>/<testiter>/<split>[/<suffix>]``.
    """
    result_dir = system_configs.result_dir
    result_dir = os.path.join(result_dir, str(testiter), split)

    if suffix is not None:
        result_dir = os.path.join(result_dir, suffix)

    make_dirs([result_dir])

    # fall back to the final training iteration when none is given
    test_iter = system_configs.max_iter if testiter is None else testiter
    print("loading parameters at iteration: {}".format(test_iter))

    print("building neural network...")
    nnet = NetworkFactory(db)
    print("loading parameters...")
    nnet.load_params(test_iter)

    # the per-dataset test routine lives in module "test.<dataset>"
    test_file = "test.{}".format(db.data)
    testing = importlib.import_module(test_file).testing

    nnet.cuda()
    nnet.eval_mode()
    testing(db, nnet, result_dir, debug=debug)
if __name__ == "__main__":
    args = parse_args()

    # the config file name encodes the experiment; an optional suffix
    # selects a variant config
    if args.suffix is None:
        cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    else:
        cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
    print("cfg_file: {}".format(cfg_file))

    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])

    train_split = system_configs.train_split
    val_split = system_configs.val_split
    test_split = system_configs.test_split

    # map the CLI split name onto the dataset-specific split identifier
    split = {
        "training": train_split,
        "validation": val_split,
        "testing": test_split
    }[args.split]

    print("loading all datasets...")
    dataset = system_configs.dataset
    print("split: {}".format(split))
    testing_db = datasets[dataset](configs["db"], split)

    print("system config...")
    pprint.pprint(system_configs.full)

    print("db config...")
    pprint.pprint(testing_db.configs)

    test(testing_db, args.split, args.testiter, args.debug, args.suffix)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
test/coco.py | Python | import os
import cv2
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms, soft_nms_merge
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def save_image(data, fn):
    """Save *data* to *fn* as a full-bleed image (no axes, native pixel size)."""
    height, width = [float(v) for v in np.shape(data)[:2]]

    fig = plt.figure()
    # one inch tall, width scaled to the image aspect ratio
    fig.set_size_inches(width / height, 1, forward=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)

    ax.imshow(data)
    # dpi = pixel height keeps the output at native resolution
    plt.savefig(fn, dpi=height)
    plt.close()
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3):
    """Run the network on *images* and return decoded detections as a numpy array."""
    detections = nnet.test([images], ae_threshold=ae_threshold, K=K, kernel=kernel)
    detections = detections.data.cpu().numpy()
    return detections
def kp_detection(db, nnet, result_dir, debug=False, decode_func=kp_decode):
    """Run multi-scale, flip-augmented detection over the split, write
    COCO-format ``results.json``, and invoke the dataset's evaluator."""
    debug_dir = os.path.join(result_dir, "debug")
    if not os.path.exists(debug_dir):
        os.makedirs(debug_dir)

    # limit images in debug mode, and cap trainval evaluation at 5000 images
    if db.split != "trainval":
        db_inds = db.db_inds[:100] if debug else db.db_inds
    else:
        db_inds = db.db_inds[:100] if debug else db.db_inds[:5000]
    num_images = db_inds.size

    K = db.configs["top_k"]
    ae_threshold = db.configs["ae_threshold"]
    nms_kernel = db.configs["nms_kernel"]

    scales = db.configs["test_scales"]
    weight_exp = db.configs["weight_exp"]
    merge_bbox = db.configs["merge_bbox"]
    categories = db.configs["categories"]
    nms_threshold = db.configs["nms_threshold"]
    max_per_image = db.configs["max_per_image"]
    nms_algorithm = {
        "nms": 0,
        "linear_soft_nms": 1,
        "exp_soft_nms": 2
    }[db.configs["nms_algorithm"]]

    top_bboxes = {}
    for ind in tqdm(range(0, num_images), ncols=80, desc="locating kps"):
        db_ind = db_inds[ind]

        image_id = db.image_ids(db_ind)
        image_file = db.image_file(db_ind)
        image = cv2.imread(image_file)

        height, width = image.shape[0:2]

        detections = []

        for scale in scales:
            new_height = int(height * scale)
            new_width = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])

            # pad so that (inp + 1) is a multiple of 128
            inp_height = new_height | 127
            inp_width = new_width | 127

            images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes = np.zeros((1, 2), dtype=np.float32)

            # network output is 4x smaller than the input
            out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            height_ratio = out_height / inp_height
            width_ratio = out_width / inp_width

            resized_image = cv2.resize(image, (new_width, new_height))
            resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])

            resized_image = resized_image / 255.
            normalize_(resized_image, db.mean, db.std)

            images[0] = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0] = [int(height * scale), int(width * scale)]
            ratios[0] = [height_ratio, width_ratio]

            # flip augmentation: batch is [image, horizontally flipped image]
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)

            dets = decode_func(nnet, images, K, ae_threshold=ae_threshold, kernel=nms_kernel)
            dets = dets.reshape(2, -1, 8)
            # un-flip the flipped half's x coordinates
            dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
            dets = dets.reshape(1, -1, 8)

            _rescale_dets(dets, ratios, borders, sizes)
            dets[:, :, 0:4] /= scale
            detections.append(dets)

        detections = np.concatenate(detections, axis=1)

        classes = detections[..., -1]
        classes = classes[0]
        detections = detections[0]

        # reject detections with negative scores
        keep_inds = (detections[:, 4] > -1)
        detections = detections[keep_inds]
        classes = classes[keep_inds]

        # per-class soft-NMS; category keys are 1-based
        top_bboxes[image_id] = {}
        for j in range(categories):
            keep_inds = (classes == j)
            top_bboxes[image_id][j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
            if merge_bbox:
                soft_nms_merge(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=nms_algorithm, weight_exp=weight_exp)
            else:
                soft_nms(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=nms_algorithm)
            top_bboxes[image_id][j + 1] = top_bboxes[image_id][j + 1][:, 0:5]

        # keep only the max_per_image highest-scoring boxes across classes
        scores = np.hstack([
            top_bboxes[image_id][j][:, -1]
            for j in range(1, categories + 1)
        ])
        if len(scores) > max_per_image:
            kth = len(scores) - max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, -1] >= thresh)
                top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]

        if debug:
            # draw boxes with score > 0.5 and their class labels
            image_file = db.image_file(db_ind)
            image = cv2.imread(image_file)

            bboxes = {}
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, -1] > 0.5)
                cat_name = db.class_name(j)
                cat_size = cv2.getTextSize(cat_name, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
                color = np.random.random((3, )) * 0.6 + 0.4
                color = color * 255
                color = color.astype(np.int32).tolist()
                for bbox in top_bboxes[image_id][j][keep_inds]:
                    bbox = bbox[0:4].astype(np.int32)
                    # place the label inside the box when it would clip the
                    # top image edge, otherwise above the box
                    if bbox[1] - cat_size[1] - 2 < 0:
                        cv2.rectangle(image,
                            (bbox[0], bbox[1] + 2),
                            (bbox[0] + cat_size[0], bbox[1] + cat_size[1] + 2),
                            color, -1
                        )
                        cv2.putText(image, cat_name,
                            (bbox[0], bbox[1] + cat_size[1] + 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
                        )
                    else:
                        cv2.rectangle(image,
                            (bbox[0], bbox[1] - cat_size[1] - 2),
                            (bbox[0] + cat_size[0], bbox[1] - 2),
                            color, -1
                        )
                        cv2.putText(image, cat_name,
                            (bbox[0], bbox[1] - 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
                        )
                    cv2.rectangle(image,
                        (bbox[0], bbox[1]),
                        (bbox[2], bbox[3]),
                        color, 2
                    )
            debug_file = os.path.join(debug_dir, "{}.jpg".format(db_ind))
            # NOTE(review): debug_file is built but nothing writes the
            # annotated image (no cv2.imwrite/save_image call) — confirm
            # whether a save was intended here.

    result_json = os.path.join(result_dir, "results.json")
    detections = db.convert_to_coco(top_bboxes)
    with open(result_json, "w") as f:
        json.dump(detections, f)

    cls_ids = list(range(1, categories + 1))
    image_ids = [db.image_ids(ind) for ind in db_inds]
    db.evaluate(result_json, cls_ids, image_ids)
    return 0
def testing(db, nnet, result_dir, debug=False):
    """Dispatch to the test routine named by the config's sampling_function."""
    return globals()[system_configs.sampling_function](db, nnet, result_dir, debug=debug)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
test/coco_extreme.py | Python | import os
import cv2
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms_with_points as soft_nms
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def _rescale_ex_pts(detections, ratios, borders, sizes):
xs, ys = detections[..., 5:13:2], detections[..., 6:13:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def save_image(data, fn):
    """Write *data* to *fn* as a borderless image at native resolution."""
    shape = np.shape(data)
    height = float(shape[0])
    width = float(shape[1])

    fig = plt.figure()
    fig.set_size_inches(width / height, 1, forward=False)
    axes = plt.Axes(fig, [0., 0., 1., 1.])
    axes.set_axis_off()
    fig.add_axes(axes)

    axes.imshow(data)
    plt.savefig(fn, dpi=height)
    plt.close()
def _box_inside(box2, box1):
inside = (box2[0] >= box1[0] and box2[1] >= box1[1] and \
box2[2] <= box1[2] and box2[3] <= box1[3])
return inside
def kp_decode(nnet, images, K, kernel=3, aggr_weight=0.1,
              scores_thresh=0.1, center_thresh=0.1, debug=False):
    """Run the network on *images* and return decoded extreme-point
    detections as a numpy array."""
    detections = nnet.test(
        [images], kernel=kernel, aggr_weight=aggr_weight,
        scores_thresh=scores_thresh, center_thresh=center_thresh, debug=debug)
    detections = detections.data.cpu().numpy()
    return detections
def kp_detection(db, nnet, result_dir, debug=False, decode_func=kp_decode):
    """Run multi-scale, flip-augmented extreme-point detection over `db`,
    apply soft-NMS and optional ghost suppression, then dump COCO-format
    results to result_dir/results.json and evaluate them.
    """
    debug_dir = os.path.join(result_dir, "debug")
    if not os.path.exists(debug_dir):
        os.makedirs(debug_dir)

    # cap the number of images in debug mode; trainval is also capped at 5000
    if db.split != "trainval":
        db_inds = db.db_inds[:100] if debug else db.db_inds
    else:
        db_inds = db.db_inds[:100] if debug else db.db_inds[:5000]
    num_images = db_inds.size

    # test-time hyper-parameters from the dataset config
    K = db.configs["top_k"]
    aggr_weight = db.configs["aggr_weight"]
    scores_thresh = db.configs["scores_thresh"]
    center_thresh = db.configs["center_thresh"]
    suppres_ghost = db.configs["suppres_ghost"]
    nms_kernel = db.configs["nms_kernel"]

    scales = db.configs["test_scales"]
    categories = db.configs["categories"]
    nms_threshold = db.configs["nms_threshold"]
    max_per_image = db.configs["max_per_image"]
    nms_algorithm = {
        "nms": 0,
        "linear_soft_nms": 1,
        "exp_soft_nms": 2
    }[db.configs["nms_algorithm"]]

    top_bboxes = {}
    for ind in tqdm(range(0, num_images), ncols=80, desc="locating kps"):
        db_ind = db_inds[ind]

        image_id = db.image_ids(db_ind)
        image_file = db.image_file(db_ind)
        image = cv2.imread(image_file)

        height, width = image.shape[0:2]

        detections = []

        for scale in scales:
            new_height = int(height * scale)
            new_width = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])

            # x | 127 makes (x + 1) a multiple of 128, matching the
            # network's downsampling so out sizes below are exact
            inp_height = new_height | 127
            inp_width = new_width | 127

            images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes = np.zeros((1, 2), dtype=np.float32)

            # network output is 1/4 of the (padded) input resolution
            out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            height_ratio = out_height / inp_height
            width_ratio = out_width / inp_width

            resized_image = cv2.resize(image, (new_width, new_height))
            resized_image, border, offset = crop_image(
                resized_image, new_center, [inp_height, inp_width])

            resized_image = resized_image / 255.
            normalize_(resized_image, db.mean, db.std)

            images[0] = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0] = [int(height * scale), int(width * scale)]
            ratios[0] = [height_ratio, width_ratio]

            # batch item 1 is the horizontally flipped copy (flip TTA)
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)
            dets = decode_func(
                nnet, images, K, aggr_weight=aggr_weight,
                scores_thresh=scores_thresh, center_thresh=center_thresh,
                kernel=nms_kernel, debug=debug)
            dets = dets.reshape(2, -1, 14)
            # un-flip the flipped detections: mirror x of the box corners...
            dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
            # ...mirror x of the four extreme points...
            dets[1, :, [5, 7, 9, 11]] = out_width - dets[1, :, [5, 7, 9, 11]]
            # ...and swap the left/right extreme points with each other
            dets[1, :, [7, 8, 11, 12]] = dets[1, :, [11, 12, 7, 8]].copy()
            dets = dets.reshape(1, -1, 14)

            # back to original-image coordinates, then undo the scale factor
            _rescale_dets(dets, ratios, borders, sizes)
            _rescale_ex_pts(dets, ratios, borders, sizes)
            dets[:, :, 0:4] /= scale
            dets[:, :, 5:13] /= scale
            detections.append(dets)

        detections = np.concatenate(detections, axis=1)

        classes = detections[..., -1]
        classes = classes[0]
        detections = detections[0]

        # reject detections with negative scores
        keep_inds = (detections[:, 4] > 0)
        detections = detections[keep_inds]
        classes = classes[keep_inds]

        # per-class soft-NMS; keys are 1-based COCO-style category ids
        top_bboxes[image_id] = {}
        for j in range(categories):
            keep_inds = (classes == j)
            top_bboxes[image_id][j + 1] = \
                detections[keep_inds].astype(np.float32)
            soft_nms(top_bboxes[image_id][j + 1],
                     Nt=nms_threshold, method=nms_algorithm)
            # top_bboxes[image_id][j + 1] = top_bboxes[image_id][j + 1][:, 0:5]

        # keep only the max_per_image highest-scoring detections overall
        scores = np.hstack([
            top_bboxes[image_id][j][:, 4]
            for j in range(1, categories + 1)
        ])
        if len(scores) > max_per_image:
            kth = len(scores) - max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, 4] >= thresh)
                top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]

        # halve the score of "ghost" boxes: confident boxes that fully
        # contain several other boxes whose scores sum to > 3x their own
        if suppres_ghost:
            for j in range(1, categories + 1):
                n = len(top_bboxes[image_id][j])
                for k in range(n):
                    inside_score = 0
                    if top_bboxes[image_id][j][k, 4] > 0.2:
                        for t in range(n):
                            if _box_inside(top_bboxes[image_id][j][t],
                                           top_bboxes[image_id][j][k]):
                                inside_score += top_bboxes[image_id][j][t, 4]
                        if inside_score > top_bboxes[image_id][j][k, 4] * 3:
                            top_bboxes[image_id][j][k, 4] /= 2

        # visualization: draw boxes with score > 0.5 and show/save them
        if debug:
            image_file = db.image_file(db_ind)
            image = cv2.imread(image_file)
            bboxes = {}
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, 4] > 0.5)
                cat_name = db.class_name(j)
                cat_size = cv2.getTextSize(
                    cat_name + '0', cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
                color = np.random.random((3, )) * 0.6 + 0.4
                color = color * 255
                color = color.astype(np.int32).tolist()
                for bbox in top_bboxes[image_id][j][keep_inds]:
                    sc = bbox[4]
                    bbox = bbox[0:4].astype(np.int32)
                    txt = '{}{:.0f}'.format(cat_name, sc * 10)
                    # place the label below the top edge if it would go
                    # off the top of the image, otherwise above it
                    if bbox[1] - cat_size[1] - 2 < 0:
                        cv2.rectangle(image,
                                      (bbox[0], bbox[1] + 2),
                                      (bbox[0] + cat_size[0], bbox[1] + cat_size[1] + 2),
                                      color, -1
                                      )
                        cv2.putText(image, txt,
                                    (bbox[0], bbox[1] + cat_size[1] + 2),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0),
                                    thickness=1, lineType=cv2.LINE_AA
                                    )
                    else:
                        cv2.rectangle(image,
                                      (bbox[0], bbox[1] - cat_size[1] - 2),
                                      (bbox[0] + cat_size[0], bbox[1] - 2),
                                      color, -1
                                      )
                        cv2.putText(image, txt,
                                    (bbox[0], bbox[1] - 2),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0),
                                    thickness=1, lineType=cv2.LINE_AA
                                    )
                    cv2.rectangle(image,
                                  (bbox[0], bbox[1]),
                                  (bbox[2], bbox[3]),
                                  color, 2
                                  )
            debug_file = os.path.join(debug_dir, "{}.jpg".format(db_ind))
            cv2.imwrite(debug_file, image)
            cv2.imshow('out', image)
            cv2.waitKey()

    # convert, dump and score the accumulated detections
    result_json = os.path.join(result_dir, "results.json")
    detections = db.convert_to_coco(top_bboxes)
    with open(result_json, "w") as f:
        json.dump(detections, f)

    cls_ids = list(range(1, categories + 1))
    image_ids = [db.image_ids(ind) for ind in db_inds]
    db.evaluate(result_json, cls_ids, image_ids)
    return 0
def testing(db, nnet, result_dir, debug=False):
    """Look up the configured sampling function by name and run it."""
    fn = globals()[system_configs.sampling_function]
    return fn(db, nnet, result_dir, debug=debug)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
tools/gen_coco_extreme_points.py | Python | import pycocotools.coco as cocoapi
import sys
import cv2
import numpy as np
import pickle
import json
# Dataset splits to process and the COCO annotation/image path templates;
# `{}` is filled with the split name ('val' or 'train').
SPLITS = ['val', 'train']
ANN_PATH = '../data/coco/annotations/instances_{}2017.json'
OUT_PATH = '../data/coco/annotations/instances_extreme_{}2017.json'
IMG_DIR = '../data/coco/{}2017/'
DEBUG = False  # when True, visualize each annotation's extreme points
from scipy.spatial import ConvexHull
def _coco_box_to_bbox(box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.int32)
return bbox
def _get_extreme_points(pts):
l, t = min(pts[:, 0]), min(pts[:, 1])
r, b = max(pts[:, 0]), max(pts[:, 1])
# 3 degrees
thresh = 0.02
w = r - l + 1
h = b - t + 1
pts = np.concatenate([pts[-1:], pts, pts[:1]], axis=0)
t_idx = np.argmin(pts[:, 1])
t_idxs = [t_idx]
tmp = t_idx + 1
while tmp < pts.shape[0] and pts[tmp, 1] - pts[t_idx, 1] <= thresh * h:
t_idxs.append(tmp)
tmp += 1
tmp = t_idx - 1
while tmp >= 0 and pts[tmp, 1] - pts[t_idx, 1] <= thresh * h:
t_idxs.append(tmp)
tmp -= 1
tt = [(max(pts[t_idxs, 0]) + min(pts[t_idxs, 0])) // 2, t]
b_idx = np.argmax(pts[:, 1])
b_idxs = [b_idx]
tmp = b_idx + 1
while tmp < pts.shape[0] and pts[b_idx, 1] - pts[tmp, 1] <= thresh * h:
b_idxs.append(tmp)
tmp += 1
tmp = b_idx - 1
while tmp >= 0 and pts[b_idx, 1] - pts[tmp, 1] <= thresh * h:
b_idxs.append(tmp)
tmp -= 1
bb = [(max(pts[b_idxs, 0]) + min(pts[b_idxs, 0])) // 2, b]
l_idx = np.argmin(pts[:, 0])
l_idxs = [l_idx]
tmp = l_idx + 1
while tmp < pts.shape[0] and pts[tmp, 0] - pts[l_idx, 0] <= thresh * w:
l_idxs.append(tmp)
tmp += 1
tmp = l_idx - 1
while tmp >= 0 and pts[tmp, 0] - pts[l_idx, 0] <= thresh * w:
l_idxs.append(tmp)
tmp -= 1
ll = [l, (max(pts[l_idxs, 1]) + min(pts[l_idxs, 1])) // 2]
r_idx = np.argmax(pts[:, 0])
r_idxs = [r_idx]
tmp = r_idx + 1
while tmp < pts.shape[0] and pts[r_idx, 0] - pts[tmp, 0] <= thresh * w:
r_idxs.append(tmp)
tmp += 1
tmp = r_idx - 1
while tmp >= 0 and pts[r_idx, 0] - pts[tmp, 0] <= thresh * w:
r_idxs.append(tmp)
tmp -= 1
rr = [r, (max(pts[r_idxs, 1]) + min(pts[r_idxs, 1])) // 2]
return np.array([tt, ll, bb, rr])
if __name__ == '__main__':
    # For each split: compute the four extreme points of every COCO
    # annotation's segmentation and write an augmented annotation file.
    for split in SPLITS:
        data = json.load(open(ANN_PATH.format(split), 'r'))
        coco = cocoapi.COCO(ANN_PATH.format(split))
        img_ids = coco.getImgIds()
        num_images = len(img_ids)
        num_classes = 80
        tot_box = 0
        print('num_images', num_images)
        anns_all = data['annotations']
        for i, ann in enumerate(anns_all):
            tot_box += 1
            bbox = ann['bbox']
            seg = ann['segmentation']
            if type(seg) == list:
                # polygon segmentation: flatten (possibly several) polygons
                # into one (N, 2) vertex array
                if len(seg) == 1:
                    pts = np.array(seg[0]).reshape(-1, 2)
                else:
                    pts = []
                    for v in seg:
                        pts += v
                    pts = np.array(pts).reshape(-1, 2)
            else:
                # RLE segmentation: decode to a mask and take the
                # foreground pixel coordinates as (x, y) points
                mask = coco.annToMask(ann) * 255
                tmp = np.where(mask > 0)
                pts = np.asarray(tmp).transpose()[:, ::-1].astype(np.int32)
            extreme_points = _get_extreme_points(pts).astype(np.int32)
            # stored as [top, left, bottom, right] pixel coordinates
            anns_all[i]['extreme_points'] = extreme_points.copy().tolist()
            if DEBUG:
                # overlay the mask and the four extreme points on the image
                img_id = ann['image_id']
                img_info = coco.loadImgs(ids=[img_id])[0]
                img_path = IMG_DIR.format(split) + img_info['file_name']
                img = cv2.imread(img_path)
                if type(seg) == list:
                    mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
                    cv2.fillPoly(mask, [pts.astype(np.int32).reshape(-1, 1, 2)], (255,0,0))
                else:
                    mask = mask.reshape(img.shape[0], img.shape[1], 1)
                img = (0.4 * img + 0.6 * mask).astype(np.uint8)
                bbox = _coco_box_to_bbox(ann['bbox'])
                cl = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
                for j in range(extreme_points.shape[0]):
                    cv2.circle(img, (extreme_points[j, 0], extreme_points[j, 1]),
                               5, cl[j], -1)
                cv2.imshow('img', img)
                cv2.waitKey()
        print('tot_box', tot_box)
        data['annotations'] = anns_all
        json.dump(data, open(OUT_PATH.format(split), 'w'))
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
tools/suppress_ghost.py | Python | import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import sys
import cv2
import numpy as np
import pickle
import json
# Default ground-truth annotation path; can be overridden via argv[2].
ANN_PATH = '../data/coco/annotations/instances_val2017.json'
DEBUG = True
def _coco_box_to_bbox(box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.int32)
return bbox
def _overlap(box1, box2):
area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
inter = max(min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1, 0) * \
max(min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1, 0)
iou = 1.0 * inter / (area1 + 1e-5)
return iou
def _box_inside(box2, box1):
inside = (box2[0] >= box1[0] and box2[1] >= box1[1] and \
box2[2] <= box1[2] and box2[3] <= box1[3])
return inside
if __name__ == '__main__':
    # Usage: suppress_ghost.py <predictions.json> [<annotations.json>]
    # Halves the score of "ghost" detections (confident boxes that contain
    # several same-class boxes with much higher combined score), writes the
    # adjusted predictions, and re-runs COCO bbox evaluation.
    if len(sys.argv) > 2:
        ANN_PATH = sys.argv[2]
    coco = coco.COCO(ANN_PATH)
    pred_path = sys.argv[1]
    # assumes pred_path ends in ".json"
    out_path = pred_path[:-5] + '_no_ghost.json'
    dets = coco.loadRes(pred_path)
    img_ids = coco.getImgIds()
    num_images = len(img_ids)
    thresh = 4  # suppress when contained scores exceed thresh * own score
    out = []
    for i, img_id in enumerate(img_ids):
        if i % 500 == 0:
            print(i)
        pred_ids = dets.getAnnIds(imgIds=[img_id])
        preds = dets.loadAnns(pred_ids)
        num_preds = len(preds)
        for j in range(num_preds):
            overlap_score = 0
            if preds[j]['score'] > 0.2:
                for k in range(num_preds):
                    # NOTE(review): _box_inside returns a bool, so "> 0.8"
                    # is just truth-testing containment — confirm intended.
                    if preds[j]['category_id'] == preds[k]['category_id'] and \
                       _box_inside(_coco_box_to_bbox(preds[k]['bbox']),
                                   _coco_box_to_bbox(preds[j]['bbox'])) > 0.8:
                        overlap_score += preds[k]['score']
                if overlap_score > thresh * preds[j]['score']:
                    # print('overlap_score', overlap_score, preds[j]['score'])
                    preds[j]['score'] = preds[j]['score'] / 2
                    # preds[j]['score'] = preds[j]['score'] * np.exp(-(overlap_score / preds[j]['score'] - thresh)**2/2)
            out.append(preds[j])
    json.dump(out, open(out_path, 'w'))
    dets_refined = coco.loadRes(out_path)
    coco_eval = COCOeval(coco, dets_refined, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
train.py | Python | #!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
# Enable cuDNN; benchmark mode auto-tunes the fastest conv algorithms
# (pays off when input sizes are mostly fixed across iterations).
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
    """Parse the training command-line arguments.

    NOTE(review): the description says "CornerNet" although this repo is
    ExtremeNet — looks inherited from upstream; confirm before changing
    the user-visible string.
    """
    parser = argparse.ArgumentParser(description="Train CornerNet")
    parser.add_argument("cfg_file", help="config file", type=str)
    parser.add_argument("--iter", dest="start_iter",
                        help="train at iteration i",
                        default=0, type=int)
    parser.add_argument("--threads", dest="threads", default=4, type=int)
    parser.add_argument("--debug", action="store_true")
    return parser.parse_args()
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # last value recorded
        self.avg = 0    # running mean over all updates
        self.sum = 0    # weighted sum of recorded values
        self.count = 0  # total weight recorded

    def update(self, val, n=1):
        """Record `val` observed with weight `n` and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        if self.count:
            self.avg = self.sum / self.count
def prefetch_data(db, queue, sample_data, data_aug, debug=False):
    """Worker-process loop: endlessly sample training batches from `db`
    into `queue`. Runs until the parent terminates the process.
    """
    ind = 0
    print("start prefetching data...")
    # seed with the PID so each worker process draws a different random stream
    np.random.seed(os.getpid())
    while True:
        try:
            data, ind = sample_data(db, ind, data_aug=data_aug, debug=debug)
            queue.put(data)
        except Exception as e:
            traceback.print_exc()
            raise e
def pin_memory(data_queue, pinned_data_queue, sema):
    """Background-thread loop: move fetched batches into pinned
    (page-locked) memory for faster host-to-GPU transfer.

    Exits after the trainer releases `sema` (checked once per batch).
    """
    while True:
        data = data_queue.get()

        data["xs"] = [x.pin_memory() for x in data["xs"]]
        data["ys"] = [y.pin_memory() for y in data["ys"]]

        pinned_data_queue.put(data)

        # non-blocking acquire: succeeds only once the trainer has released
        # the semaphore, signalling shutdown
        if sema.acquire(blocking=False):
            return
def init_parallel_jobs(dbs, queue, fn, data_aug, debug=False):
    """Spawn one daemon prefetching process per database; return the handles."""
    tasks = []
    for db in dbs:
        worker = Process(target=prefetch_data,
                         args=(db, queue, fn, data_aug, debug))
        worker.daemon = True
        worker.start()
        tasks.append(worker)
    return tasks
def train(training_dbs, validation_db, start_iter=0, debug=False):
    """Main training loop.

    Prefetches batches in worker processes, pins them to page-locked memory
    in a background thread, and optimizes the network for
    system_configs.max_iter iterations with step learning-rate decay and
    periodic checkpoints. `validation_db` is currently unused (validation
    code is commented out).
    """
    learning_rate = system_configs.learning_rate
    max_iteration = system_configs.max_iter
    pretrained_model = system_configs.pretrain
    snapshot = system_configs.snapshot
    # val_iter = system_configs.val_iter
    display = system_configs.display
    decay_rate = system_configs.decay_rate
    stepsize = system_configs.stepsize

    # getting the size of each database
    training_size = len(training_dbs[0].db_inds)
    # validation_size = len(validation_db.db_inds)

    # queues storing data for training
    training_queue = Queue(system_configs.prefetch_size)
    # validation_queue = Queue(5)

    # queues storing pinned data for training
    pinned_training_queue = queue.Queue(system_configs.prefetch_size)
    # pinned_validation_queue = queue.Queue(5)

    # load data sampling function
    data_file = "sample.{}".format(training_dbs[0].data)
    sample_data = importlib.import_module(data_file).sample_data

    # allocating resources for parallel reading
    training_tasks = init_parallel_jobs(
        training_dbs, training_queue, sample_data, True, debug)
    # if val_iter:
    #     validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)

    # the semaphore is acquired now and released at the end of training to
    # signal the pinning thread to exit
    training_pin_semaphore = threading.Semaphore()
    # validation_pin_semaphore = threading.Semaphore()
    training_pin_semaphore.acquire()
    # validation_pin_semaphore.acquire()

    training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()
    # validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
    # validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
    # validation_pin_thread.daemon = True
    # validation_pin_thread.start()

    print("building model...")
    nnet = NetworkFactory(training_dbs[0])

    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("loading from pretrained model")
        nnet.load_pretrained_params(pretrained_model)

    if start_iter:
        # resuming: rescale the LR as if decay had been applied at every
        # elapsed stepsize boundary
        learning_rate /= (decay_rate ** (start_iter // stepsize))

        nnet.load_params(start_iter)
        nnet.set_lr(learning_rate)
        print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)

    print("training start...")
    nnet.cuda()
    nnet.train_mode()
    avg_loss = AverageMeter()
    with stdout_to_tqdm() as save_stdout:
        for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
            training = pinned_training_queue.get(block=True)
            training_loss = nnet.train(**training)
            avg_loss.update(training_loss.item())
            if display and iteration % display == 0:
                print("training loss at iteration {}: {:.6f} ({:.6f})".format(
                    iteration, training_loss.item(), avg_loss.avg))
            del training_loss

            # if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
            #     nnet.eval_mode()
            #     validation = pinned_validation_queue.get(block=True)
            #     validation_loss = nnet.validate(**validation)
            #     print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
            #     nnet.train_mode()

            if iteration % snapshot == 0:
                nnet.save_params(iteration)

            if iteration % 1000 == 0:
                # keep a rolling "latest" checkpoint under index -1 and
                # reset the loss average window
                nnet.save_params(-1)
                avg_loss = AverageMeter()

            if iteration % stepsize == 0:
                learning_rate /= decay_rate
                nnet.set_lr(learning_rate)

    # sending signal to kill the thread
    training_pin_semaphore.release()
    # validation_pin_semaphore.release()

    # terminating data fetching processes
    for training_task in training_tasks:
        training_task.terminate()
    # for validation_task in validation_tasks:
    #     validation_task.terminate()
if __name__ == "__main__":
    args = parse_args()

    # load the JSON config named by the positional cfg_file argument
    cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])

    train_split = system_configs.train_split
    val_split = system_configs.val_split

    print("loading all datasets...")
    dataset = system_configs.dataset
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads
    print("using {} threads".format(threads))
    # one database handle per prefetch worker
    training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
    # Remove validation to save GPU resources
    # validation_db = datasets[dataset](configs["db"], val_split)

    print("system config...")
    pprint.pprint(system_configs.full)

    print("db config...")
    pprint.pprint(training_dbs[0].configs)

    print("len of db: {}".format(len(training_dbs[0].db_inds)))
    # train(training_dbs, validation_db, args.start_iter)
    train(training_dbs, None, args.start_iter, args.debug)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/__init__.py | Python | from .tqdm import stdout_to_tqdm
from .image import crop_image
from .image import color_jittering_, lighting_, normalize_
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/color_map.py | Python | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""An awesome colormap for really neat visualizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
def colormap(rgb=False):
    """Return an (N, 3) float32 array of N distinct colors scaled to 0-255.

    Colors are returned channel-reversed (BGR, for OpenCV) by default;
    pass rgb=True to keep RGB order.
    """
    color_list = np.array(
        [
            0.000, 0.447, 0.741,
            0.850, 0.325, 0.098,
            0.929, 0.694, 0.125,
            0.494, 0.184, 0.556,
            0.466, 0.674, 0.188,
            0.301, 0.745, 0.933,
            0.635, 0.078, 0.184,
            0.300, 0.300, 0.300,
            0.600, 0.600, 0.600,
            1.000, 0.000, 0.000,
            1.000, 0.500, 0.000,
            0.749, 0.749, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 1.000,
            0.667, 0.000, 1.000,
            0.333, 0.333, 0.000,
            0.333, 0.667, 0.000,
            0.333, 1.000, 0.000,
            0.667, 0.333, 0.000,
            0.667, 0.667, 0.000,
            0.667, 1.000, 0.000,
            1.000, 0.333, 0.000,
            1.000, 0.667, 0.000,
            1.000, 1.000, 0.000,
            0.000, 0.333, 0.500,
            0.000, 0.667, 0.500,
            0.000, 1.000, 0.500,
            0.333, 0.000, 0.500,
            0.333, 0.333, 0.500,
            0.333, 0.667, 0.500,
            0.333, 1.000, 0.500,
            0.667, 0.000, 0.500,
            0.667, 0.333, 0.500,
            0.667, 0.667, 0.500,
            0.667, 1.000, 0.500,
            1.000, 0.000, 0.500,
            1.000, 0.333, 0.500,
            1.000, 0.667, 0.500,
            1.000, 1.000, 0.500,
            0.000, 0.333, 1.000,
            0.000, 0.667, 1.000,
            0.000, 1.000, 1.000,
            0.333, 0.000, 1.000,
            0.333, 0.333, 1.000,
            0.333, 0.667, 1.000,
            0.333, 1.000, 1.000,
            0.667, 0.000, 1.000,
            0.667, 0.333, 1.000,
            0.667, 0.667, 1.000,
            0.667, 1.000, 1.000,
            1.000, 0.000, 1.000,
            1.000, 0.333, 1.000,
            1.000, 0.667, 1.000,
            0.167, 0.000, 0.000,
            0.333, 0.000, 0.000,
            0.500, 0.000, 0.000,
            0.667, 0.000, 0.000,
            0.833, 0.000, 0.000,
            1.000, 0.000, 0.000,
            0.000, 0.167, 0.000,
            0.000, 0.333, 0.000,
            0.000, 0.500, 0.000,
            0.000, 0.667, 0.000,
            0.000, 0.833, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 0.167,
            0.000, 0.000, 0.333,
            0.000, 0.000, 0.500,
            0.000, 0.000, 0.667,
            0.000, 0.000, 0.833,
            0.000, 0.000, 1.000,
            0.000, 0.000, 0.000,
            0.143, 0.143, 0.143,
            0.286, 0.286, 0.286,
            0.429, 0.429, 0.429,
            0.571, 0.571, 0.571,
            0.714, 0.714, 0.714,
            0.857, 0.857, 0.857,
            1.000, 1.000, 1.000
        ]
    ).astype(np.float32)
    # flat list of fractions -> (N, 3) array of 0-255 values
    color_list = color_list.reshape((-1, 3)) * 255
    if not rgb:
        color_list = color_list[:, ::-1]
    return color_list
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/debugger.py | Python | import numpy as np
import cv2
import matplotlib.pyplot as plt
# Fixed per-class color palette (fractions, reshaped below to an (N, 3)
# array of 0-255 values). Used by Debugger to color class heatmaps/points.
color_list = np.array(
    [
        0.000, 0.447, 0.741,
        0.850, 0.325, 0.098,
        0.929, 0.694, 0.125,
        0.494, 0.184, 0.556,
        0.466, 0.674, 0.188,
        0.301, 0.745, 0.933,
        0.635, 0.078, 0.184,
        0.300, 0.300, 0.300,
        0.600, 0.600, 0.600,
        1.000, 0.000, 0.000,
        1.000, 0.500, 0.000,
        0.749, 0.749, 0.000,
        0.000, 1.000, 0.000,
        0.000, 0.000, 1.000,
        0.667, 0.000, 1.000,
        0.333, 0.333, 0.000,
        0.333, 0.667, 0.000,
        0.333, 1.000, 0.000,
        0.667, 0.333, 0.000,
        0.667, 0.667, 0.000,
        0.667, 1.000, 0.000,
        1.000, 0.333, 0.000,
        1.000, 0.667, 0.000,
        1.000, 1.000, 0.000,
        0.000, 0.333, 0.500,
        0.000, 0.667, 0.500,
        0.000, 1.000, 0.500,
        0.333, 0.000, 0.500,
        0.333, 0.333, 0.500,
        0.333, 0.667, 0.500,
        0.333, 1.000, 0.500,
        0.667, 0.000, 0.500,
        0.667, 0.333, 0.500,
        0.667, 0.667, 0.500,
        0.667, 1.000, 0.500,
        1.000, 0.000, 0.500,
        1.000, 0.333, 0.500,
        1.000, 0.667, 0.500,
        1.000, 1.000, 0.500,
        0.000, 0.333, 1.000,
        0.000, 0.667, 1.000,
        0.000, 1.000, 1.000,
        0.333, 0.000, 1.000,
        0.333, 0.333, 1.000,
        0.333, 0.667, 1.000,
        0.333, 1.000, 1.000,
        0.667, 0.000, 1.000,
        0.667, 0.333, 1.000,
        0.667, 0.667, 1.000,
        0.667, 1.000, 1.000,
        1.000, 0.000, 1.000,
        1.000, 0.333, 1.000,
        1.000, 0.667, 1.000,
        0.167, 0.000, 0.000,
        0.333, 0.000, 0.000,
        0.500, 0.000, 0.000,
        0.667, 0.000, 0.000,
        0.833, 0.000, 0.000,
        1.000, 0.000, 0.000,
        0.000, 0.167, 0.000,
        0.000, 0.333, 0.000,
        0.000, 0.500, 0.000,
        0.000, 0.667, 0.000,
        0.000, 0.833, 0.000,
        0.000, 1.000, 0.000,
        0.000, 0.000, 0.167,
        0.000, 0.000, 0.333,
        0.000, 0.000, 0.500,
        0.000, 0.000, 0.667,
        0.000, 0.000, 0.833,
        0.000, 0.000, 1.000,
        0.000, 0.000, 0.000,
        0.143, 0.143, 0.143,
        0.286, 0.286, 0.286,
        0.429, 0.429, 0.429,
        0.571, 0.571, 0.571,
        0.714, 0.714, 0.714,
        0.857, 0.857, 0.857,
        1.000, 1.000, 1.000,
        0.50, 0.5, 0
    ]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
def show_2d(img, points, c, edges):
    """Draw 2D keypoints and their skeleton `edges` onto `img` in color `c`."""
    num_joints = points.shape[0]
    pts = (points.reshape(num_joints, -1)).astype(np.int32)
    for j in range(num_joints):
        cv2.circle(img, (pts[j, 0], pts[j, 1]), 3, c, -1)
    for e in edges:
        # draw an edge only when all of its endpoint coordinates are positive
        if pts[e].min() > 0:
            cv2.line(img, (pts[e[0], 0], pts[e[0], 1]),
                     (pts[e[1], 0], pts[e[1], 1]), c, 2)
    return img
class Debugger(object):
    """Collects named debug images and draws overlays (keypoints, masks,
    heatmaps, boxes) on them; images can be shown interactively or saved."""

    def __init__(self, ipynb = False, num_classes=80):
        # ipynb: when True, show_all_imgs renders a matplotlib grid instead
        # of cv2 windows (for notebook environments)
        self.ipynb = ipynb
        if not self.ipynb:
            self.plt = plt
            self.fig = self.plt.figure()
        self.imgs = {}  # imgId -> image array
        # colors = [((np.random.random((3, )) * 0.6 + 0.4)*255).astype(np.uint8) \
        #           for _ in range(num_classes)]
        # one fixed color per class, shaped (num_classes, 1, 1, 3) so it
        # broadcasts against (H, W, 1) heatmaps in gen_colormap
        colors = [(color_list[_]).astype(np.uint8) \
                  for _ in range(num_classes)]
        self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)

    def add_img(self, img, imgId = 'default', revert_color=False):
        """Store a copy of `img` under `imgId`; optionally invert its colors."""
        if revert_color:
            img = 255 - img
        self.imgs[imgId] = img.copy()

    def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):
        """Store `mask` alpha-blended over background `bg`."""
        self.imgs[imgId] = (mask.reshape(mask.shape[0], mask.shape[1], 1) * 255 * trans + \
                            bg * (1 - trans)).astype(np.uint8)

    def add_point_2d(self, point, c, edges, imgId = 'default'):
        """Draw 2D keypoints and skeleton edges on the stored image."""
        self.imgs[imgId] = show_2d(self.imgs[imgId], point, c, edges)

    def show_img(self, pause = False, imgId = 'default'):
        """Show one stored image in a cv2 window; block for a key if pause."""
        cv2.imshow('{}'.format(imgId), self.imgs[imgId])
        if pause:
            cv2.waitKey()

    def add_blend_img(self, back, fore, imgId='blend', trans=0.5):
        """Store `fore` alpha-blended over `back` (resizing fore if needed)."""
        # fore = 255 - fore
        # NOTE(review): the second comparison tests fore.shape[0] against
        # back.shape[1] — looks like it should be fore.shape[1]; confirm.
        if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
            fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
        if len(fore.shape) == 2:
            fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
        self.imgs[imgId] = (back * (1. - trans) + fore * trans)
        self.imgs[imgId][self.imgs[imgId] > 255] = 255
        self.imgs[imgId] = self.imgs[imgId].astype(np.uint8)

    def gen_colormap(self, img, s=4):
        """Render a (num_classes, h, w) heatmap stack as one color image,
        upsampled by factor `s`, taking the per-pixel max across classes."""
        num_classes = len(self.colors)
        img[img < 0] = 0
        h, w = img.shape[1], img.shape[2]
        color_map = np.zeros((h*s, w*s, 3), dtype=np.uint8)
        for i in range(num_classes):
            resized = cv2.resize(img[i], (w*s, h*s)).reshape(h*s, w*s, 1)
            cl = self.colors[i]
            color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
        return color_map

    def add_rect(self, rect1, rect2, c, conf=1, imgId = 'default'):
        """Draw a rectangle with corners rect1/rect2; when conf < 1, mark
        the corners with circles whose radius scales with conf."""
        cv2.rectangle(self.imgs[imgId], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2)
        if conf < 1:
            cv2.circle(self.imgs[imgId], (rect1[0], rect1[1]), int(10 * conf), c, 1)
            cv2.circle(self.imgs[imgId], (rect2[0], rect2[1]), int(10 * conf), c, 1)
            cv2.circle(self.imgs[imgId], (rect1[0], rect2[1]), int(10 * conf), c, 1)
            cv2.circle(self.imgs[imgId], (rect2[0], rect1[1]), int(10 * conf), c, 1)

    def add_points(self, points, img_id = 'default'):
        """Draw per-class point lists as white-ringed colored dots; the *4
        maps network-output coordinates up to image resolution."""
        num_classes = len(points)
        assert num_classes == len(self.colors)
        for i in range(num_classes):
            for j in range(len(points[i])):
                c = self.colors[i, 0, 0]
                cv2.circle(self.imgs[img_id], (points[i][j][0] * 4, points[i][j][1] * 4),
                           5, (255, 255, 255), -1)
                cv2.circle(self.imgs[img_id], (points[i][j][0] * 4, points[i][j][1] * 4),
                           3, (int(c[0]), int(c[1]), int(c[2])), -1)

    def show_all_imgs(self, pause=False):
        """Show every stored image: cv2 windows normally, or a one-row
        matplotlib figure when running inside a notebook."""
        if not self.ipynb:
            for i, v in self.imgs.items():
                cv2.imshow('{}'.format(i), v)
            if pause:
                cv2.waitKey()
        else:
            self.ax = None
            nImgs = len(self.imgs)
            fig=plt.figure(figsize=(nImgs * 10,10))
            nCols = nImgs
            nRows = nImgs // nCols
            for i, (k, v) in enumerate(self.imgs.items()):
                fig.add_subplot(1, nImgs, i + 1)
                if len(v.shape) == 3:
                    # cv2 images are BGR; matplotlib expects RGB
                    plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
                else:
                    plt.imshow(v)
            plt.show()

    def save_img(self, imgId='default', path='./cache/debug/'):
        """Write one stored image to <path><imgId>.png."""
        cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])

    def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False):
        """Write every stored image; with genID, number runs via <path>/id.txt."""
        if genID:
            try:
                idx = int(np.loadtxt(path + '/id.txt'))
            except:
                idx = 0
            prefix=idx
            np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
        for i, v in self.imgs.items():
            cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v)
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/image.py | Python | import cv2
import numpy as np
import random
def grayscale(image):
    """Convert a BGR image to single-channel grayscale."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def normalize_(image, mean, std):
    """Standardize `image` in place: subtract `mean`, then divide by `std`."""
    np.subtract(image, mean, out=image)
    np.divide(image, std, out=image)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """Add PCA-based color noise (AlexNet-style lighting jitter) in place."""
    alpha = data_rng.normal(scale=alphastd, size=(3, ))
    noise = np.dot(eigvec, eigval * alpha)
    image += noise
def blend_(alpha, image1, image2):
    """In-place convex blend: image1 <- alpha*image1 + (1-alpha)*image2.

    Note that BOTH arrays are mutated; image2 is left scaled by (1-alpha).
    """
    np.multiply(image1, alpha, out=image1)
    np.multiply(image2, 1 - alpha, out=image2)
    np.add(image1, image2, out=image1)
def saturation_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter saturation in place by blending toward the grayscale."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
    """Randomly scale brightness in place by a factor in [1-var, 1+var]."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    image *= factor
def contrast_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter contrast in place by blending toward the mean gray."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
def color_jittering_(data_rng, image):
    """Apply brightness/contrast/saturation jitter in random order, in place."""
    ops = [brightness_, contrast_, saturation_]
    random.shuffle(ops)

    gs = grayscale(image)
    gs_mean = gs.mean()
    for op in ops:
        op(data_rng, image, gs, gs_mean, 0.4)
def crop_image(image, center, size):
    """Crop a (height, width) window centered at `center` from `image`,
    zero-padding where the window extends past the image bounds.

    Returns (cropped_image, border, offset): `border` is the float32
    [y0, y1, x0, x1] extent of the valid region inside the crop; `offset`
    is the window's top-left corner in source-image coordinates.
    """
    cty, ctx = center
    height, width = size
    im_height, im_width = image.shape[0:2]

    out = np.zeros((height, width, 3), dtype=image.dtype)

    # valid source region clipped to the image
    x0 = max(0, ctx - width // 2)
    x1 = min(ctx + width // 2, im_width)
    y0 = max(0, cty - height // 2)
    y1 = min(cty + height // 2, im_height)

    # extents of the valid region relative to the window center
    left, right = ctx - x0, x1 - ctx
    top, bottom = cty - y0, y1 - cty

    out_cty, out_ctx = height // 2, width // 2
    out[out_cty - top:out_cty + bottom,
        out_ctx - left:out_ctx + right, :] = image[y0:y1, x0:x1, :]

    border = np.array([
        out_cty - top,
        out_cty + bottom,
        out_ctx - left,
        out_ctx + right
    ], dtype=np.float32)

    offset = np.array([
        cty - height // 2,
        ctx - width // 2
    ])

    return out, border, offset
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/tqdm.py | Python | import sys
import numpy as np
import contextlib
from tqdm import tqdm
class TqdmFile(object):
    """File-like shim that routes writes through tqdm.write so interleaved
    print output does not corrupt active progress bars."""
    dummy_file = None  # the real stream that writes are forwarded to

    def __init__(self, dummy_file):
        self.dummy_file = dummy_file

    def write(self, x):
        # skip whitespace-only writes (e.g. the bare newline print() emits)
        if len(x.rstrip()) > 0:
            tqdm.write(x, file=self.dummy_file)
@contextlib.contextmanager
def stdout_to_tqdm():
    """Context manager that redirects sys.stdout through a TqdmFile.

    Yields the original stdout so callers (e.g. tqdm itself) can still write
    to the real stream. The original `except Exception as exc: raise exc`
    clause was a no-op that only rewrote tracebacks; try/finally alone
    guarantees sys.stdout is restored on both normal exit and any exception
    (including BaseExceptions like KeyboardInterrupt).
    """
    save_stdout = sys.stdout
    try:
        sys.stdout = TqdmFile(sys.stdout)
        yield save_stdout
    finally:
        sys.stdout = save_stdout
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
utils/visualize.py | Python | import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import pycocotools.mask as mask_util
# Drawing colors; used with cv2, so channel order is presumably BGR.
_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)
def vis_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=2):
    """Alpha-blend a binary mask onto img; optionally outline its contour.

    Returns a new uint8 image.
    """
    img = img.astype(np.float32)
    on = np.nonzero(mask)
    img[on[0], on[1], :] = img[on[0], on[1], :] * (1.0 - alpha) + alpha * col
    if show_border:
        # How to use `cv2.findContours` in different OpenCV versions?
        # https://stackoverflow.com/questions/48291581/how-to-use-cv2-findcontours-in-different-opencv-versions/48292371#48292371
        contours = cv2.findContours(
            mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[-2]
        cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)
    return img.astype(np.uint8)
def vis_octagon(img, extreme_points, col, border_thick=2):
    """Overlay the octagon implied by four extreme points as a blended mask."""
    img = img.astype(np.uint8)
    mask = extreme_point_to_octagon_mask(
        extreme_points, img.shape[0], img.shape[1])
    blended = vis_mask(img, mask, col)
    return blended.astype(np.uint8)
def vis_ex(img, extreme_points, col, border_thick=2):
    """Draw an arrow pointing at each of the four extreme points
    (order: top, left, bottom, right)."""
    img = img.astype(np.uint8)
    color = (col).astype(np.uint8).tolist()
    pts = np.array(extreme_points).reshape(4, 2).astype(np.int32)
    arrow_len = 10
    tip = 0.7
    # arrow tails: below the top point, right of the left point,
    # above the bottom point, left of the right point
    tails = [
        (pts[0][0], pts[0][1] + arrow_len),
        (pts[1][0] + arrow_len, pts[1][1]),
        (pts[2][0], pts[2][1] - arrow_len),
        (pts[3][0] - arrow_len, pts[3][1]),
    ]
    for tail, head in zip(tails, pts):
        cv2.arrowedLine(img, tail, (head[0], head[1]), color, border_thick,
                        tipLength=tip)
    return img.astype(np.uint8)
def vis_class(img, pos, class_str, font_scale=0.35):
    """Draw ``class_str`` on a filled green box anchored at ``pos`` (x, y)."""
    img = img.astype(np.uint8)
    x0, y0 = int(pos[0]), int(pos[1])
    font = cv2.FONT_HERSHEY_SIMPLEX
    (txt_w, txt_h), _ = cv2.getTextSize(class_str, font, font_scale, 1)

    # If the label box would start above the image edge, push it below
    # the anchor instead.
    if y0 - int(1.3 * txt_h) < 0:
        y0 += int(1.6 * txt_h)

    # filled background rectangle behind the text
    cv2.rectangle(img, (x0, y0 - int(1.3 * txt_h)), (x0 + txt_w, y0),
                  _GREEN, -1)
    # the text itself, slightly above the box's bottom edge
    cv2.putText(img, class_str, (x0, y0 - int(0.3 * txt_h)), font,
                font_scale, _GRAY, lineType=cv2.LINE_AA)
    return img
def vis_bbox(img, bbox, thick=2):
    """Draw a green (x, y, w, h) bounding box on ``img``."""
    img = img.astype(np.uint8)
    x0, y0, w, h = bbox
    top_left = (int(x0), int(y0))
    bottom_right = (int(x0 + w), int(y0 + h))
    cv2.rectangle(img, top_left, bottom_right, _GREEN, thickness=thick)
    return img
def get_octagon(ex):
    """Build an octagon from four extreme points.

    Args:
        ex: four (x, y) extreme points in the order top, left, bottom, right.

    Returns:
        list: a single flat polygon ``[[x0, y0, ..., x7, y7]]`` — each
        extreme point is widened into a short edge (1/8 of the box side to
        either side), clipped to the box spanned by the extreme points.
    """
    pts = np.array(ex).reshape(4, 2)
    top, left, bottom, right = pts
    w = right[0] - left[0]
    h = bottom[1] - top[1]
    t, l, b, r = top[1], left[0], bottom[1], right[0]
    dx = w / 8.
    dy = h / 8.
    vertices = [
        min(top[0] + dx, r), top[1],
        max(top[0] - dx, l), top[1],
        left[0], max(left[1] - dy, t),
        left[0], min(left[1] + dy, b),
        max(bottom[0] - dx, l), bottom[1],
        min(bottom[0] + dx, r), bottom[1],
        right[0], min(right[1] + dy, b),
        right[0], max(right[1] - dy, t),
    ]
    return [vertices]
def extreme_point_to_octagon_mask(extreme_points, h, w):
    """Rasterize the octagon induced by four extreme points into a binary
    mask of shape (h, w) using the COCO RLE utilities."""
    octagon = get_octagon(extreme_points)
    # frPyObjects encodes the polygon as (a list of) RLEs; merge collapses
    # them into a single RLE, which decode expands to a dense uint8 mask.
    rles = mask_util.frPyObjects(octagon, h, w)
    rle = mask_util.merge(rles)
    mask = mask_util.decode(rle)
    return mask
| xingyizhou/ExtremeNet | 1,034 | Bottom-up Object Detection by Grouping Extreme and Center Points | Python | xingyizhou | Xingyi Zhou | Meta |
archs/__init__.py | Python | import importlib
from os import path as osp
from basicsr.utils import scandir
# automatically scan and import arch modules for registry
# scan all the files that end with '_arch.py' under the archs folder
arch_folder = osp.dirname(osp.abspath(__file__))
# module names (without '.py') of every '*_arch.py' file in this package
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
# import all the arch modules; importing alone is sufficient because each
# module registers its classes with basicsr's ARCH_REGISTRY via decorators
_arch_modules = [importlib.import_module(f'archs.{file_name}') for file_name in arch_filenames]
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
archs/example_arch.py | Python | from torch import nn as nn
from torch.nn import functional as F
from basicsr.archs.arch_util import default_init_weights
from basicsr.utils.registry import ARCH_REGISTRY
@ARCH_REGISTRY.register()
class ExampleArch(nn.Module):
    """Example architecture.

    Three conv layers for feature extraction, two pixel-shuffle stages
    (2x each, so 4x total) for upsampling, plus a bilinear skip connection
    from the input.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 64.
        upscale (int): Upsampling factor. Default: 4.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, upscale=4):
        super(ExampleArch, self).__init__()
        self.upscale = upscale

        # feature-extraction trunk
        self.conv1 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

        # each upconv expands channels 4x; PixelShuffle(2) folds them back
        # into a 2x spatial upscale
        self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
        self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # small-std initialization for all conv layers
        default_init_weights(
            [self.conv1, self.conv2, self.conv3, self.upconv1, self.upconv2, self.conv_hr, self.conv_last], 0.1)

    def forward(self, x):
        # global skip: bilinearly upsampled input
        skip = F.interpolate(
            x, scale_factor=self.upscale, mode='bilinear', align_corners=False)

        y = self.lrelu(self.conv1(x))
        y = self.lrelu(self.conv2(y))
        y = self.lrelu(self.conv3(y))

        y = self.lrelu(self.pixel_shuffle(self.upconv1(y)))
        y = self.lrelu(self.pixel_shuffle(self.upconv2(y)))
        y = self.conv_last(self.lrelu(self.conv_hr(y)))

        return y + skip
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
data/__init__.py | Python | import importlib
from os import path as osp
from basicsr.utils import scandir
# automatically scan and import dataset modules for registry
# scan all the files that end with '_dataset.py' under the data folder
data_folder = osp.dirname(osp.abspath(__file__))
# module names (without '.py') of every '*_dataset.py' file in this package
dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
# import all the dataset modules; importing alone is sufficient because each
# module registers its classes with basicsr's DATASET_REGISTRY via decorators
_dataset_modules = [importlib.import_module(f'data.{file_name}') for file_name in dataset_filenames]
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
data/example_dataset.py | Python | import cv2
import os
import torch
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from basicsr.data.degradations import add_jpg_compression
from basicsr.data.transforms import augment, mod_crop, paired_random_crop
from basicsr.utils import FileClient, imfrombytes, img2tensor, scandir
from basicsr.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class ExampleDataset(data.Dataset):
    """Example dataset.

    1. Read GT image
    2. Generate LQ (Low Quality) image with cv2 bicubic downsampling and JPEG compression

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            io_backend (dict): IO backend type and other kwarg.
            gt_size (int): Cropped patched size for gt patches.
            use_flip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h
                and w for implementation).
            scale (bool): Scale, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, opt):
        super(ExampleDataset, self).__init__()
        self.opt = opt
        # file client (io backend) — created lazily in __getitem__ so it is
        # instantiated inside each dataloader worker rather than pickled
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        # optional per-channel normalization applied to both lq and gt
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder = opt['dataroot_gt']
        # it now only supports folder mode, for other modes such as lmdb and meta_info file, please see:
        # https://github.com/xinntao/BasicSR/blob/master/basicsr/data/
        self.paths = [os.path.join(self.gt_folder, v) for v in list(scandir(self.gt_folder))]

    def __getitem__(self, index):
        # lazy file-client init (see __init__)
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        scale = self.opt['scale']

        # Load gt images. Dimension order: HWC; channel order: BGR;
        # image range: [0, 1], float32.
        gt_path = self.paths[index]
        img_bytes = self.file_client.get(gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        # crop gt so its size is compatible with the downsampling scale
        img_gt = mod_crop(img_gt, scale)

        # generate lq image
        # downsample
        h, w = img_gt.shape[0:2]
        img_lq = cv2.resize(img_gt, (w // scale, h // scale), interpolation=cv2.INTER_CUBIC)
        # add JPEG compression
        img_lq = add_jpg_compression(img_lq, quality=70)

        # augmentation for training (order matters: crop first, then flip/rot)
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            # random crop (paired: the lq crop tracks the gt crop)
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
            # flip, rotation
            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_flip'], self.opt['use_rot'])

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
        # quantize lq to 8-bit levels (round-trip through [0, 255])
        img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255.

        # normalize
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {'lq': img_lq, 'gt': img_gt, 'lq_path': gt_path, 'gt_path': gt_path}

    def __len__(self):
        return len(self.paths)
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
losses/__init__.py | Python | import importlib
from os import path as osp
from basicsr.utils import scandir
# automatically scan and import loss modules for registry
# scan all the files that end with '_loss.py' under the loss folder
loss_folder = osp.dirname(osp.abspath(__file__))
loss_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(loss_folder) if v.endswith('_loss.py')]
# import all the loss modules; importing alone is sufficient because each
# module registers its classes with basicsr's LOSS_REGISTRY via decorators.
# Renamed from `_model_modules` — a copy-paste leftover from models/__init__.py.
_loss_modules = [importlib.import_module(f'losses.{file_name}') for file_name in loss_filenames]
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
losses/example_loss.py | Python | from torch import nn as nn
from torch.nn import functional as F
from basicsr.utils.registry import LOSS_REGISTRY
@LOSS_REGISTRY.register()
class ExampleLoss(nn.Module):
    """Example Loss: weighted mean-reduced L1 distance.

    Args:
        loss_weight (float): Loss weight for Example loss. Default: 1.0.
    """

    def __init__(self, loss_weight=1.0):
        super(ExampleLoss, self).__init__()
        self.loss_weight = loss_weight

    def forward(self, pred, target, **kwargs):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
        """
        l1 = F.l1_loss(pred, target, reduction='mean')
        return l1 * self.loss_weight
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
models/__init__.py | Python | import importlib
from os import path as osp
from basicsr.utils import scandir
# automatically scan and import model modules for registry
# scan all the files that end with '_model.py' under the model folder
model_folder = osp.dirname(osp.abspath(__file__))
# module names (without '.py') of every '*_model.py' file in this package
model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
# import all the model modules; importing alone is sufficient because each
# module registers its classes with basicsr's MODEL_REGISTRY via decorators
_model_modules = [importlib.import_module(f'models.{file_name}') for file_name in model_filenames]
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
models/example_model.py | Python | from collections import OrderedDict
from basicsr.archs import build_network
from basicsr.losses import build_loss
from basicsr.models.sr_model import SRModel
from basicsr.utils import get_root_logger
from basicsr.utils.registry import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class ExampleModel(SRModel):
"""Example model based on the SRModel class.
In this example model, we want to implement a new model that trains with both L1 and L2 loss.
New defined functions:
init_training_settings(self)
feed_data(self, data)
optimize_parameters(self, current_iter)
Inherited functions:
__init__(self, opt)
setup_optimizers(self)
test(self)
dist_validation(self, dataloader, current_iter, tb_logger, save_img)
nondist_validation(self, dataloader, current_iter, tb_logger, save_img)
_log_validation_metric_values(self, current_iter, dataset_name, tb_logger)
get_current_visuals(self)
save(self, epoch, current_iter)
"""
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
self.ema_decay = train_opt.get('ema_decay', 0)
if self.ema_decay > 0:
logger = get_root_logger()
logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
# define network net_g with Exponential Moving Average (EMA)
# net_g_ema is used only for testing on one GPU and saving
# There is no need to wrap with DistributedDataParallel
self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
# load pretrained model
load_path = self.opt['path'].get('pretrain_network_g', None)
if load_path is not None:
self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
else:
self.model_ema(0) # copy net_g weight
self.net_g_ema.eval()
# define losses
self.l1_pix = build_loss(train_opt['l1_opt']).to(self.device)
self.l2_pix = build_loss(train_opt['l2_opt']).to(self.device)
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def feed_data(self, data):
self.lq = data['lq'].to(self.device)
if 'gt' in data:
self.gt = data['gt'].to(self.device)
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
self.output = self.net_g(self.lq)
l_total = 0
loss_dict = OrderedDict()
# l1 loss
l_l1 = self.l1_pix(self.output, self.gt)
l_total += l_l1
loss_dict['l_l1'] = l_l1
# l2 loss
l_l2 = self.l2_pix(self.output, self.gt)
l_total += l_l2
loss_dict['l_l2'] = l_l2
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
if self.ema_decay > 0:
self.model_ema(decay=self.ema_decay)
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
scripts/prepare_example_data.py | Python | import os
import requests
def main(url, dataset):
    """Download ``url`` to datasets/example/<dataset>.zip and extract it
    into datasets/example/<dataset>/."""
    import zipfile  # stdlib; hoisted out of the unzip step

    # download
    print(f'Download {url} ...')
    response = requests.get(url)
    # Bug fix: a failed download (404/500 body) used to be written to disk
    # and then crash confusingly at unzip time; fail fast instead.
    response.raise_for_status()
    zip_path = f'datasets/example/{dataset}.zip'
    with open(zip_path, 'wb') as f:
        f.write(response.content)
    # unzip
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(f'datasets/example/{dataset}')
if __name__ == '__main__':
    """This script will download and prepare the example data:
    1. BSDS100 for training
    2. Set5 for testing
    """
    os.makedirs('datasets/example', exist_ok=True)
    urls = [
        'https://github.com/xinntao/BasicSR-examples/releases/download/0.0.0/BSDS100.zip',
        'https://github.com/xinntao/BasicSR-examples/releases/download/0.0.0/Set5.zip'
    ]
    datasets = ['BSDS100', 'Set5']
    # download and extract each archive under datasets/example/<name>/
    for url, dataset in zip(urls, datasets):
        main(url, dataset)
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
train.py | Python | # flake8: noqa
import os.path as osp
import archs
import data
import losses
import models
from basicsr.train import train_pipeline
if __name__ == '__main__':
    # resolve the directory containing this script and hand it to basicsr
    # as the experiment root
    root_path = osp.abspath(osp.join(__file__, osp.pardir))
    train_pipeline(root_path)
| xinntao/BasicSR-examples | 255 | BasicSR-Examples illustrates how to easily use BasicSR in your own project | Python | xinntao | Xintao | Tencent |
handycrawler/crawler_util.py | Python | import imghdr
import requests
def sizeof_fmt(size, suffix='B'):
    """Get a human readable file size.

    Args:
        size (int): File size in bytes.
        suffix (str): Unit suffix. Default: 'B'.

    Returns:
        str: Formatted file size, e.g. ``'3.4 MB'``.
    """
    # binary (1024-based) units; anything past zetta falls through to yotta
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(size) < 1024.0:
            return f'{size:3.1f} {unit}{suffix}'
        size /= 1024.0
    return f'{size:3.1f} Y{suffix}'
def baidu_decode_url(encrypted_url):
    """Decrypt baidu ecrypted url."""
    # multi-character tokens are substituted first ...
    token_map = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
    # ... then the remaining text goes through a single-character cipher
    char_map = {
        'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e',
        'u': 'f', '2': 'g', 'i': 'h', 't': 'i', '3': 'j',
        'h': 'k', 's': 'l', '4': 'm', 'g': 'n', '5': 'o',
        'r': 'p', 'q': 'q', '6': 'r', 'f': 's', 'p': 't',
        '7': 'u', 'e': 'v', 'o': 'w', '8': '1', 'd': '2',
        'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7',
        'b': '8', 'l': '9', 'a': '0'
    }  # yapf: disable
    url = encrypted_url
    for ciphertext, plaintext in token_map.items():
        url = url.replace(ciphertext, plaintext)
    # characters without a mapping pass through unchanged
    return ''.join(char_map.get(ch, ch) for ch in url)
def setup_session():
    """Create a requests session carrying a desktop-browser User-Agent."""
    session = requests.Session()
    session.headers.update({
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'
                       ' AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/48.0.2564.116 Safari/537.36')
    })
    return session
def get_content(session, url, referer_url, req_timeout=5, max_retry=3):
    """Fetch ``url`` and return its body decoded as UTF-8.

    Args:
        session: a ``requests``-style session exposing ``get``.
        url (str): page to fetch.
        referer_url (str): value sent in the ``Referer`` header.
        req_timeout (int): per-request timeout in seconds. Default: 5.
        max_retry (int): number of attempts. Default: 3.

    Returns:
        str | None: decoded page content (with ``\\'`` unescaped), or
        ``None`` when every attempt failed.
    """
    # Bug fix: ``content`` used to be assigned only on success, so when all
    # retries raised, the final ``return content`` hit UnboundLocalError.
    content = None
    retry = max_retry
    while retry > 0:
        try:
            response = session.get(
                url, timeout=req_timeout, headers={'Referer': referer_url})
        except Exception as e:
            print(f'Exception caught when fetching page {url}, '
                  f'error: {e}, remaining retry times: {retry - 1}')
        else:
            content = response.content.decode('utf-8',
                                              'ignore').replace("\\'", "'")
            break
        finally:
            retry -= 1
    return content
def get_img_content(session,
                    file_url,
                    extension=None,
                    max_retry=3,
                    req_timeout=5):
    """Download an image and sniff its real extension from the bytes.

    Args:
        session: a ``requests``-style session exposing ``get``.
        file_url (str): image URL.
        extension: passed through as ``imghdr.what``'s file argument
            (unused by imghdr when raw data is supplied).
        max_retry (int): number of attempts. Default: 3.
        req_timeout (int): per-request timeout in seconds. Default: 5.

    Returns:
        tuple: ``(data, actual_ext)``; ``data`` is ``None`` for bad status,
        gifs, unrecognized types, or when all retries failed.
    """
    retry = max_retry
    while retry > 0:
        try:
            response = session.get(file_url, timeout=req_timeout)
        except Exception as e:
            print(f'Exception caught when downloading file {file_url}, '
                  f'error: {e}, remaining retry times: {retry - 1}')
        else:
            if response.status_code != 200:
                print(f'Response status code {response.status_code}, '
                      f'file {file_url}')
                break
            # get the response byte
            data = response.content
            if isinstance(data, str):
                print('Converting str to byte, later remove it.')
                # Bug fix: was ``data.encode(data)``, which passed the data
                # itself as the *encoding* name and always raised.
                data = data.encode('utf-8')
            # NOTE(review): imghdr is deprecated and removed in Python 3.13;
            # replace with direct magic-byte sniffing when upgrading.
            actual_ext = imghdr.what(extension, data)
            actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
            # do not download original gif
            if actual_ext == 'gif' or actual_ext is None:
                return None, actual_ext
            return data, actual_ext
        finally:
            retry -= 1
    return None, None
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
setup.py | Python | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import time
version_file = 'handycrawler/version.py'
def readme():
    """Return the contents of README.md (used as the long description)."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
def get_git_hash():
    """Return the full sha of the current git HEAD, or 'unknown' when git
    cannot be executed."""

    def _minimal_ext_cmd(cmd):
        # run `cmd` in a minimal, locale-neutral environment so the output
        # format is stable
        env = {}
        for key in ['SYSTEMROOT', 'PATH', 'HOME']:
            value = os.environ.get(key)
            if value is not None:
                env[key] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        raw = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = raw.strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    return sha
def get_hash():
    """Return a short sha for versioning: from the git checkout if present,
    otherwise from a previously generated version file, else 'unknown'."""
    if os.path.exists('.git'):
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from handycrawler.version import __version__
        except ImportError:
            raise ImportError('Unable to get git version')
        # the generated __version__ is '<short>+<sha>'
        return __version__.split('+')[-1]
    return 'unknown'
def write_version_py():
    """Generate handycrawler/version.py embedding the version and git sha."""
    # template of the generated file; placeholders are filled with the
    # build time, full version, short version, and version_info tuple body
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
    sha = get_hash()
    # the short version (e.g. '1.0.2') is read from the VERSION file
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    # tuple body with non-numeric parts quoted, e.g. '1, 0, "2rc1"'
    VERSION_INFO = ', '.join(
        [x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
    VERSION = SHORT_VERSION + '+' + sha
    version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
                                      VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)
def get_version():
    # execute the generated version file in this frame and pull out the
    # __version__ it defines (avoids importing the package during setup)
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def get_requirements(filename='requirements.txt'):
    """Read a requirements file (one dependency per line) located next to
    this script, returning the lines with newlines stripped."""
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        return [line.replace('\n', '') for line in f]
if __name__ == '__main__':
    # regenerate handycrawler/version.py first so the packaged version
    # carries the current git sha
    write_version_py()
    # NOTE(review): the trove classifier says Apache while license= says
    # MIT — one of the two is wrong; confirm with the project owner
    setup(
        name='handycrawler',
        version=get_version(),
        description='Handy Crawler Toolbox',
        long_description=readme(),
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision, crawler',
        url='https://github.com/xinntao/HandyCrawler',
        packages=find_packages(exclude=('options')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='MIT License',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        ext_modules=[],
        zip_safe=False)
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baidu_keywords/baidu_crawler.py | Python | import json
import time
from datetime import datetime
from urllib.parse import urlsplit
from handycrawler.crawler_util import (baidu_decode_url, setup_session,
sizeof_fmt)
try:
import pymongo
except Exception:
raise ImportError('Please install pymongo')
def main():
    """Crawl one keyword's Baidu image-search results into mongodb.

    Pages through the acjson endpoint, parses each page with ``parse`` and
    inserts records deduplicated by ``img_url``.

    The keys in mongodb:

        # for parsing
        img_url
        obj_url
        hover_url
        from_url
        width
        height
        type
        bd_news_date
        page_title
        record_date

        # for download
        try_download
        download_from
        download_date
        md5
        rel_path
    """
    # configuration
    keyword = '老照片'  # "old photos"
    offset = 0
    max_num = 2000  # Baidu only returns the first 1000 results
    req_timeout = 5
    interval = 30  # results requested per page
    max_retry = 3

    # Set up session
    session = setup_session()

    # Set up mongodb
    mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
    crawl_img_db = mongo_client['crawl_img']
    old_photo_col = crawl_img_db['old_photo']

    base_url = ('http://image.baidu.com/search/acjson?tn=resultjson_com'
                '&ipn=rj&word={}&pn={}&rn={}')

    num_new_records = 0
    num_exist_records = 0
    for i in range(offset, offset + max_num, interval):
        url = base_url.format(keyword, i, interval)
        # do not support filter (type, color, size) now
        # if filter_str:
        #     url += '&' + filter_str

        # fetch and parse the page, retrying on network errors
        retry = max_retry
        while retry > 0:
            try:
                # send a same-origin Referer header with the request
                split_rlt = urlsplit(url)
                referer_url = f'{split_rlt.scheme}://{split_rlt.netloc}'
                response = session.get(
                    url, timeout=req_timeout, headers={'Referer': referer_url})
            except Exception as e:
                print(f'Exception caught when fetching page {url}, '
                      f'error: {e}, remaining retry times: {retry - 1}')
            else:
                print(f'parsing result page {url}')
                records = parse(response)
                # add to mongodb, skipping img_urls already present
                for record in records:
                    result = old_photo_col.find_one(
                        {'img_url': record['img_url']})
                    if result is None:
                        num_new_records += 1
                        insert_rlt = old_photo_col.insert_one(record)
                        print(f'\tInsert one record: {insert_rlt.inserted_id}')
                    else:
                        num_exist_records += 1
                break
            finally:
                retry -= 1
    print(f'New added records: {num_new_records}.\n'
          f'Existed records: {num_exist_records}.')

    # database statistics
    stat = crawl_img_db.command('dbstats')
    print('Stats:\n'
          f'\tNumber of entries: {stat["objects"]}'
          f'\tSize of database: {sizeof_fmt(stat["dataSize"])}')
    mongo_client.close()
def parse(response):
    """Parse a Baidu image-search JSON response into record dicts.

    Args:
        response: a ``requests``-style response whose ``content`` holds the
            acjson JSON body.

    Returns:
        list[dict]: one record per usable result. Bug fix: an empty list is
        now returned when the body cannot be decoded — the previous bare
        ``return`` yielded ``None``, which crashed callers that iterate the
        result.
    """
    try:
        content = response.content.decode('utf-8',
                                          'ignore').replace("\\'", "'")
        content = json.loads(content, strict=False)
    except Exception as e:
        print(f'Error in parse response: {e}')
        return []
    records = []
    for item in content['data']:
        record = dict()
        # obj url (required key)
        if 'objURL' in item:
            obj_url = baidu_decode_url(item['objURL'])
        else:
            obj_url = ''
        record['obj_url'] = obj_url
        # hover url (required key)
        if 'hoverURL' in item:
            hover_url = item['hoverURL']
        else:
            hover_url = ''
        record['hover_url'] = hover_url
        # obj url and hover url, must have one
        if record['obj_url'] == '' and record['hover_url'] == '':
            print('\tNo obj_url and hover_url found.')
            continue
        # img url serves as the identification key. First use obj url.
        if record['obj_url'] == '':
            record['img_url'] = record['hover_url']
        else:
            record['img_url'] = record['obj_url']
        # optional metadata, copied only when present
        if 'fromURL' in item:
            record['from_url'] = baidu_decode_url(item['fromURL'])
        if 'width' in item:
            record['width'] = item['width']
        if 'height' in item:
            record['height'] = item['height']
        if 'type' in item:
            record['type'] = item['type']
        if 'bdImgnewsDate' in item:
            # re-format Baidu's 'YYYY-MM-DD HH:MM' into the crawl's
            # canonical 'YYYY-MM-DD HH:MM:SS'
            bd_news_date = datetime.strptime(item['bdImgnewsDate'],
                                             '%Y-%m-%d %H:%M')
            record['bd_news_date'] = time.strftime('%Y-%m-%d %H:%M:%S',
                                                   bd_news_date.timetuple())
        if 'fromPageTitleEnc' in item:
            record['page_title'] = item['fromPageTitleEnc']
        elif 'fromPageTitle' in item:
            record['page_title'] = item['fromPageTitle']
        record['record_date'] = time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.localtime())
        records.append(record)
    return records
if __name__ == '__main__':
    # script entry point: crawl the configured keyword into mongodb
    main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baike_stars/crawl_image_list.py | Python | import json
import pymongo
import time
from handycrawler.crawler_util import get_content, setup_session, sizeof_fmt
def main():
    """Parse Baidu Baike star-album image lists into mongodb.

    Record keys:

        img_url: image url in Baidu cdn
        person_id:
        person_name:
        album_id:
        width: image width
        height: image height
        pic_id: picId in baidu html
        record_date: crawl record data

        # determined during download
        type: image type (typically, jpg or png)
        try_download
        download_date
        download_from: img_url or invalid
        md5
        rel_path: save path: person_id/md5.extension
    """
    verbose = False
    # Set up session
    session = setup_session()
    # Set up mongodb
    mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
    crawl_img_db = mongo_client['baike_stars']
    star_albums_col = crawl_img_db['star_albums']
    img_col = crawl_img_db['all_imgs']

    num_new_records = 0
    num_exist_records = 0
    for document in star_albums_col.find():
        mongo_id = document['_id']
        person_id = document['id']
        person_name = document['name']
        lemma_id = document['lemma_id']
        new_lemma_id = document['new_lemma_id']
        sub_lemma_id = document['sub_lemma_id']
        albums = document['albums']
        # skip persons whose albums were already parsed in a previous run
        has_parsed_imgs = document.get('has_parsed_imgs', False)
        if has_parsed_imgs:
            print(f'{person_name} has parsed, skip.')
        else:
            for idx, album in enumerate(albums):
                album_id = album['album_id']
                album_title = album['album_title']
                album_num_photo = album['album_num_photo']
                print(f'Parse {person_name} - [{idx}/{len(albums)}] '
                      f'{album_title}, {album_num_photo} photos...')
                # parse images
                img_list = parse_imgs(lemma_id, new_lemma_id, sub_lemma_id,
                                      album_id, album_num_photo, session)
                # add to mongodb
                if len(img_list) != album_num_photo:
                    print(f'WARNING: the number of image {len(img_list)} is '
                          f'different from album_num_photo {album_num_photo}.')
                for img_info in img_list:
                    src = img_info['src']
                    width = img_info['width']
                    height = img_info['height']
                    pic_id = img_info['pic_id']
                    img_url = f'https://bkimg.cdn.bcebos.com/pic/{src}'
                    # deduplicate on img_url
                    result = img_col.find_one({'img_url': img_url})
                    if result is None:
                        num_new_records += 1
                        # NOTE(review): key is 'record_data' (sic) — the
                        # docstring says record_date; confirm consumers
                        # before renaming
                        record = dict(
                            img_url=img_url,
                            person_id=person_id,
                            person_name=person_name,
                            album_id=album_id,
                            album_title=album_title,
                            width=width,
                            height=height,
                            pic_id=pic_id,
                            record_data=time.strftime('%Y-%m-%d %H:%M:%S',
                                                      time.localtime()))
                        insert_rlt = img_col.insert_one(record)
                        if verbose:
                            print('\tInsert one record: '
                                  f'{insert_rlt.inserted_id}')
                    else:
                        num_exist_records += 1
            # add has_parsed_imgs label
            # NOTE(review): indentation reconstructed — assumed the flag is
            # set once per person, after all albums are parsed; confirm
            # against upstream
            star_albums_col.update_one({'_id': mongo_id},
                                       {'$set': dict(has_parsed_imgs=True)})
    print(f'New added records: {num_new_records}.\n'
          f'Existed records: {num_exist_records}.')
    # database statistics
    stat = crawl_img_db.command('dbstats')
    print('Stats:\n'
          f'\tNumber of entries: {stat["objects"]}'
          f'\tSize of database: {sizeof_fmt(stat["dataSize"])}')
    mongo_client.close()
def parse_imgs(lemma_id,
               new_lemma_id,
               sub_lemma_id,
               album_id,
               album_num_photo,
               session,
               req_timeout=5,
               max_retry=3):
    """Page through one Baidu Baike album and collect image descriptors
    (src, width, height, pic_id) for each photo."""
    referer_url = 'https://baike.baidu.com'
    page_size = 50
    collected = []
    # the getPhotos endpoint is paginated; request page_size items at a time
    for start in range(0, album_num_photo, page_size):
        url = (f'https://baike.baidu.com/ajax/getPhotos?lemmaid={lemma_id}'
               f'&id={new_lemma_id}&sublemmaid={sub_lemma_id}&aid={album_id}'
               f'&pn={start}&rn={page_size}')
        body = get_content(session, url, referer_url, req_timeout,
                           max_retry)
        payload = json.loads(body, strict=False)
        for entry in payload['data']['list']:
            collected.append(
                dict(
                    src=entry['src'],
                    width=entry['width'],
                    height=entry['height'],
                    pic_id=entry['picId']))
    return collected
if __name__ == '__main__':
    # script entry point: expand each person's albums into per-image records
    main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baike_stars/crawl_imgs.py | Python | import hashlib
import os
import pymongo
import time
from handycrawler.crawler_util import get_img_content, setup_session
def main():
    """Download each recorded image and save it under its person's folder.

    Images with the same md5 are deliberately NOT deduplicated here, because
    they may contain different persons and only the person belonging to each
    category will be cropped later.

    Mongo record keys read/updated:

        img_url: image url in Baidu cdn
        person_id / person_name / album_id
        width / height / pic_id / record_date

        # determined during download (written back by this function)
        type: image type (typically, jpg or png)
        try_download
        download_date
        download_from: img_url or invalid
        md5
        rel_path: save path: person_id/md5.extension
    """
    # configuration
    save_root = 'baike_stars'
    # Set up session
    session = setup_session()
    # Set up mongodb
    mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
    crawl_img_db = mongo_client['baike_stars']
    img_col = crawl_img_db['all_imgs']

    for idx, document in enumerate(img_col.find()):
        mongo_id = document['_id']
        img_url = document['img_url']
        person_id = document['person_id']
        person_name = document['person_name']
        album_id = document['album_id']
        # download bookkeeping from previous runs
        try_download = document.get('try_download', False)
        download_from = document.get('download_from', 'invalid')
        print(f'{idx}: {person_name} - {img_url.split("/")[-1]}')
        # retry anything never attempted, or attempted but invalid
        if not try_download or (try_download and download_from == 'invalid'):
            # download
            data, actual_ext = get_img_content(session, img_url)
            if data is not None:
                # save image
                save_folder = os.path.join(save_root, person_id)
                os.makedirs(save_folder, exist_ok=True)
                # calculate md5 (used in the on-disk filename)
                md5hash = hashlib.md5(data).hexdigest()
                filename = f'{album_id}_{md5hash}.{actual_ext}'
                save_path = os.path.join(save_root, person_id, filename)
                with open(save_path, 'wb') as fout:
                    fout.write(data)
                document['type'] = actual_ext
                document['md5'] = md5hash
                document['rel_path'] = save_folder
                document['download_date'] = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime())
                download_from = 'img_url'
            else:
                download_from = 'invalid'
                # record the sniffed type even for rejected downloads
                # (e.g. gif), so they are not retried as unknowns
                if actual_ext is not None:
                    document['type'] = actual_ext
                    print(f'\tinvalid download with extension {actual_ext}')
                else:
                    print('\tinvalid download')
            document['try_download'] = True
            document['download_from'] = download_from
            # update mongodb
            img_col.update_one({'_id': mongo_id}, {'$set': document})
        else:
            print('\tHas downloaded, skip.')
if __name__ == '__main__':
    # script entry point: download every recorded image to disk
    main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baike_stars/crawl_star_album_list.py | Python | import pymongo
import re
import time
from bs4 import BeautifulSoup
from urllib.parse import unquote
from handycrawler.crawler_util import get_content, setup_session, sizeof_fmt
def main():
    """Parse the star list file and store per-person album info in mongodb.

    Reads ``star_list_path`` (one '<name> /item/<encoded_name>/<id>' line per
    person), parses the album page of every person not yet present in the
    'star_albums' collection, and inserts a record tagged with ``star_type``.
    Already-crawled persons are skipped, so the script can be re-run
    incrementally.
    """
    # configuration
    star_list_path = 'tools/baike_stars/baidu_stars_China_mainland_female_201118.txt'  # noqa: E501
    star_type = 'China Mainland Female'
    # Set up session
    session = setup_session()
    # Set up mongodb
    mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
    crawl_img_db = mongo_client['baike_stars']
    star_albums_col = crawl_img_db['star_albums']
    num_new_records = 0
    num_exist_records = 0
    with open(star_list_path, 'r') as fin:
        # keep only the '/item/<encoded_name>/<person_id>' part of each line
        person_list = [line.strip().split('/item')[1] for line in fin]
    for idx, person in enumerate(person_list):
        encoded_name, person_id = person.split('/')[1], person.split('/')[2]
        result = star_albums_col.find_one({'id': person_id})
        if result is None:
            # parse
            person_info = parse_albums(encoded_name, person_id, session)
            print(f'{idx} / {len(person_list)} - {person_info["name"]}, '
                  f'num_album: {person_info["num_album"]}, '
                  f'num_photo: {person_info["num_photo"]}')
            # add to mongodb
            num_new_records += 1
            person_info['type'] = star_type
            insert_rlt = star_albums_col.insert_one(person_info)
            print(f'\tInsert one record: {insert_rlt.inserted_id}')
        else:
            # typo fix: 'exits' -> 'exists'
            print(f'\t{person_id} already exists.')
            num_exist_records += 1
    print(f'New added records: {num_new_records}.\n'
          f'Existed records: {num_exist_records}.')
    # database statistics
    stat = crawl_img_db.command('dbstats')
    print('Stats:\n'
          f'\tNumber of entries: {stat["objects"]}'
          f'\tSize of database: {sizeof_fmt(stat["dataSize"])}')
    mongo_client.close()
def parse_albums(name, person_id, session, req_timeout=5, max_retry=3):
    """Parse album information for one person from their Baidu Baike pic page.

    Args:
        name (str): URL-encoded person name as it appears in the Baike URL.
        person_id (str): Baike person id, e.g. '681442'.
        session: requests-style session used for all HTTP requests.
        req_timeout (int): per-request timeout in seconds.
        max_retry (int): maximum retries per request.

    Returns:
        dict: person record, for example::

            id: '681442'
            name: '白岩松'
            num_album: 11
            num_photo: 82
            lemma_id: '36809'
            sub_lemma_id: '36809'
            new_lemma_id: '681442'
            albums: list of dicts, each with
                album_id: '126741'
                album_title: '精彩图册'
                album_num_photo: 4
            crawl_date: timestamp string of this crawl
    """
    name_str = unquote(name, encoding='utf-8')
    url = f'https://baike.baidu.com/pic/{name}/{person_id}'
    referer_url = 'https://baike.baidu.com'
    content = get_content(session, url, referer_url, req_timeout, max_retry)
    soup = BeautifulSoup(content, 'html.parser')
    # get num_album and num_photo
    num_album = int(soup.find('span', {'class': 'album-num num'}).getText())
    num_photo = int(soup.find('span', {'class': 'pic-num num'}).getText())
    # The three lemma ids live in inline <script> blocks of the album pages;
    # they are filled in lazily (first match wins) while iterating below.
    lemma_id, sub_lemma_id, new_lemma_id = None, None, None
    lemma_id_pattern = re.compile(r"lemmaId: '(\d+)',", re.S)
    sub_lemma_id_pattern = re.compile(r"subLemmaId: '(\d+)',", re.S)
    new_lemma_id_pattern = re.compile(r"newLemmaId: '(\d+)',", re.S)
    album_info_list = []
    # get info for each album
    for element in soup.findAll('div', {'class': 'album-item'}):
        album_info = {}
        # get album title and number of photos
        album_title = element.find('div', {'class': 'album-title'}).getText()
        album_info['album_title'] = album_title
        album_num_photo = int(
            element.find('div', {
                'class': 'album-pic-num'
            }).getText())
        album_info['album_num_photo'] = album_num_photo
        # get album cover href for album_id (requires fetching the album page)
        album_cover_href = element.find('a',
                                        {'class': 'pic album-cover'})['href']
        album_cover_url = 'https://baike.baidu.com' + album_cover_href
        album_content = get_content(session, album_cover_url, url, req_timeout,
                                    max_retry)
        soup_album = BeautifulSoup(album_content, 'html.parser')
        # parse album page: the ids are embedded in javascript, not markup
        script_rlt = soup_album.findAll('script', {'type': 'text/javascript'})
        for script in script_rlt:
            if script.string is not None:
                script_str = str(script.string)
                album_id_pattern = re.compile(
                    r"albumId = useHash \? albumId : '(\d+)';", re.S)
                album_id = album_id_pattern.findall(script_str)[0]
                album_info['album_id'] = album_id
                if lemma_id is None:
                    lemma_id = lemma_id_pattern.findall(script_str)[0]
                if sub_lemma_id is None:
                    sub_lemma_id = sub_lemma_id_pattern.findall(script_str)[0]
                if new_lemma_id is None:
                    new_lemma_id = new_lemma_id_pattern.findall(script_str)[0]
        album_info_list.append(album_info)
    person_info = dict(
        id=person_id,
        name=name_str,
        num_album=num_album,
        num_photo=num_photo,
        lemma_id=lemma_id,
        sub_lemma_id=sub_lemma_id,
        new_lemma_id=new_lemma_id,
        albums=album_info_list,
        crawl_date=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
    return person_info
if __name__ == '__main__':
main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baike_stars/crawl_star_list_from_baidu_starrank.py | Python | import time
from bs4 import BeautifulSoup
from selenium import webdriver
from urllib.parse import unquote
def get_name_relpath_from_html(html):
    """Extract celebrity names and relative links from a star-rank page.

    Args:
        html (str): raw page source of the star-rank table.

    Returns:
        list[dict]: one ``{'name': ..., 'relpath': ...}`` per celebrity row.
    """
    parsed = BeautifulSoup(html, 'html.parser')
    entries = []
    # Each <tr class=""> row corresponds to one celebrity entry.
    for row in parsed.findAll('tr', {'class': ''}):
        anchor = row.find('a')
        if anchor is None:
            continue
        href = anchor['href']
        # The URL-encoded name is the second path segment: /item/<name>/<id>
        decoded_name = unquote(href.split('/')[2], encoding='utf-8')
        entries.append(dict(name=decoded_name, relpath=href))
    return entries
def main():
    """Crawl the Baidu star-rank list with headless Chrome and write one
    '<name> <relpath>' line per celebrity to a txt file.

    Fix over the original: the output file is managed by a context manager
    (it previously leaked if the crawl raised) and the browser is always
    quit via try/finally.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    # chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument("user-agent='Mozilla/5.0 (X11; Linux x86_64) "
                                'AppleWebKit/537.36 (KHTML, like Gecko) '
                                "Chrome/87.0.4280.66 Safari/537.36'")
    client = webdriver.Chrome(options=chrome_options)  # executable_path
    client.get('https://baike.baidu.com/starrank')
    idx_total = 0
    # configs:
    """
    <li class="tab-item" data-rank="11">中国内地男明星榜</li>
    <li class="tab-item" data-rank="10">中国内地女明星榜</li>
    <li class="tab-item" data-rank="1">港台东南亚男明星榜</li>
    <li class="tab-item" data-rank="2">港台东南亚女明星榜</li>
    <li class="tab-item" data-rank="3">韩国男明星榜</li>
    <li class="tab-item" data-rank="4">韩国女明星榜</li>
    <li class="tab-item" data-rank="9">日本男明星榜</li>
    <li class="tab-item" data-rank="8">日本女明星榜</li>
    <li class="tab-item" data-rank="7">欧美男明星榜</li>
    <li class="tab-item" data-rank="6">欧美女明星榜</li>
    <li class="tab-item" data-rank="5">全球组合类明星榜</li>
    """
    try:
        with open('baidu_stars_China_mainland_female.txt', 'w') as f:
            # switch to the configured tab (data-rank="10", see table above)
            new_page = client.find_element_by_xpath('//li[@data-rank="10"]')
            new_page.click()
            time.sleep(1)
            idx_page = 1
            next_page = 'Not None'  # sentinel so the loop runs at least once
            while next_page is not None:
                print(f'#### Process Page {idx_page} ...')
                results = get_name_relpath_from_html(client.page_source)
                for result in results:
                    name, relpath = result['name'], result['relpath']
                    print(f'{idx_total + 1} \t{name}\t{relpath}')
                    f.write(f'{name} {relpath}\n')
                    idx_total += 1
                # click next page; a failed click marks the end of pagination
                next_page = client.find_element_by_class_name('next')
                try:
                    next_page.click()
                except Exception as error:
                    print(f'Cannot click next page with error: {error}')
                    next_page = None
                else:
                    time.sleep(1)  # wait to load the new page
                    idx_page += 1
    finally:
        # always release the browser, even if the crawl fails midway
        client.quit()
if __name__ == '__main__':
main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
tools/baike_stars/url_downloader.py | Python | import hashlib
import imghdr
import os
import time
from handycrawler.crawler_util import setup_session
try:
import pymongo
except Exception:
raise ImportError('Please install pymongo')
def main():
    """Download each image recorded in mongodb and save it under save_root.

    For every document: try obj_url first; on failure or md5 collision with
    an already stored image, fall back to hover_url. The document is updated
    in place with download status, md5, extension and save location.

    Fix over the original: ``find()`` always returns a cursor (never None),
    so the collision branch — which blocks on ``input()`` — used to trigger
    for every successful download. It now only triggers when some document
    actually holds the same md5.
    """
    # configuration
    save_root = 'old_photo'
    # Set up session
    session = setup_session()
    # Set up mongodb
    mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
    crawl_img_db = mongo_client['crawl_img']
    old_photo_col = crawl_img_db['old_photo']
    for idx, document in enumerate(old_photo_col.find()):
        mongo_id = document['_id']
        obj_url = document['obj_url']
        hover_url = document['hover_url']
        extension = document.get('type', 'None')
        page_title = document.get('page_title', 'None')
        try_download = document.get('try_download', False)
        download_from = document.get('download_from', 'invalid')
        print(f'{idx}: {page_title}')
        if not try_download or (try_download and download_from == 'invalid'):
            # download obj_url
            print(f'\tDownload obj_url: {obj_url}')
            md5hash, actual_ext = download(session, obj_url, save_root,
                                           extension)
            download_from = 'obj_url' if md5hash is not None else 'invalid'
            if md5hash is not None:
                result = old_photo_col.find({'md5': md5hash})
                # bug fix: find() always returns a cursor, never None; only
                # treat as collision when a record with this md5 exists
                if result.count() > 0:
                    print(document)
                    print('collision', result.count())
                    for x in result:
                        print(x)
                    input('Please handle')
                    md5hash = None  # use hover_url
            if md5hash is None:  # download hover_url
                print(f'\tDownload hover_url: {hover_url}')
                md5hash, actual_ext = download(session, hover_url, save_root,
                                               extension)
                download_from = 'hover_url' if (md5hash
                                                is not None) else 'invalid'
            # update mongodb record
            if actual_ext != extension:
                document['type'] = actual_ext
                print(f'\tChange ext from {extension} to {actual_ext}')
            document['try_download'] = True
            document['download_from'] = download_from
            if md5hash is not None:
                document['download_date'] = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime())
                document['md5'] = md5hash
                document['rel_path'] = save_root
            print(f'\tUpdate mongodb: {download_from}')
            old_photo_col.update_one({'_id': mongo_id}, {'$set': document})
        else:
            print('\tHas downloaded, skip.')
def download(session,
             file_url,
             save_root,
             extension,
             max_retry=3,
             timeout=5,
             default_ext='png'):
    """Download ``file_url`` and save it under ``save_root`` named by md5.

    Args:
        session: requests-style session.
        file_url (str): url to download.
        save_root (str): destination folder.
        extension (str): extension hint from the database; only passed to
            imghdr as the (ignored) file argument — the real type is sniffed
            from the downloaded bytes.
        max_retry (int): retries on request exceptions.
        timeout (int): per-request timeout in seconds.
        default_ext (str): unused; kept for interface compatibility.

    Returns:
        tuple: ``(md5_hexdigest, actual_ext)`` on success;
        ``(None, actual_ext)`` for gif/unrecognized content (caller falls
        back to hover_url); ``(None, None)`` on request failure.
    """
    retry = max_retry
    while retry > 0:
        try:
            response = session.get(file_url, timeout=timeout)
        except Exception as e:
            print(f'\tException caught when downloading file {file_url}, '
                  f'error: {e}, remaining retry times: {retry - 1}')
        else:
            if response.status_code != 200:
                print(f'Response status code {response.status_code}, '
                      f'file {file_url}')
                break
            # get the response byte
            data = response.content
            if isinstance(data, str):
                print('Converting str to byte, later remove it.')
                # bug fix: str.encode takes an encoding name, not the data
                # itself (the original `data.encode(data)` always raised)
                data = data.encode('utf-8')
            # imghdr ignores its first argument when raw bytes are supplied
            actual_ext = imghdr.what(extension, data)
            actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
            # do not download original gif, use hover url
            if actual_ext == 'gif' or actual_ext is None:
                return None, actual_ext
            # save image
            md5hash = write(save_root, data, actual_ext)
            return md5hash, actual_ext
        finally:
            retry -= 1
    return None, None
def write(save_root, data, extension):
    """Persist ``data`` under ``save_root`` as ``<md5>.<extension>``.

    The md5 of the content doubles as the (deduplicating) file name.

    Args:
        save_root (str): destination folder (created if missing).
        data (bytes): raw file content.
        extension (str): file extension without the leading dot.

    Returns:
        str: md5 hex digest of ``data``.
    """
    os.makedirs(save_root, exist_ok=True)
    digest = hashlib.md5(data).hexdigest()
    target = os.path.join(save_root, f'{digest}.{extension}')
    with open(target, 'wb') as fout:
        fout.write(data)
    return digest
if __name__ == '__main__':
main()
| xinntao/HandyCrawler | 9 | Python | xinntao | Xintao | Tencent | |
html/css/flow.css | CSS | body {
background-color: #eee;
font-size: 84%;
text-align: justify;
margin: 0px;
}
a {
color: #1772d0;
text-decoration: none;
}
a:focus,
a:hover {
color: #f09228;
text-decoration: none;
}
.navbar-fixed-top {
position: fixed;
right: 0;
left: 0;
z-index: 999;
}
.navbar {
border-bottom: 1px solid #ddd;
padding: 5px;
font-size: 17px;
line-height: 20px;
overflow: hidden;
letter-spacing: -1px;
background: #fff;
border-radius: 1px;
box-shadow: 2px 4px 5px rgba(3,3,3,0.2);
margin: 0 0 25px;
margin-bottom: 20px;
}
.navbar a {
/* float: left; */
margin: 0 30px 0 0;
padding: 2px 0 0 27px;
text-transform: uppercase;
color: #7e8c8d;
text-decoration: none;
}
.container {
padding-top: 6em;
}
.column {
display: inline-block;
vertical-align: top;
margin: 10px;
}
.unit {
display: block;
padding: 5px;
margin-bottom: 10px;
border: 1px solid #ccc;
background-color: #fff;
text-align: center;
text-decoration: none;
}
.unit img {
width: 100%;
display: block;
margin: 0 auto 5px;
border: 0;
vertical-align: bottom;
}
.unit strong {
/* color: #333; */
font-size: 18px;
}
.unit table {
color: #333;
font-size: 16px;
width: 100%;
border-top: 1px solid #ccc;
}
.unit td {
text-align: center;
text-decoration: none;
}
.block-text {
font-family: 'Lato',
Verdana,
Helvetica,
sans-serif;
font-size: 14px;
font-weight: 750;
background-color: #CCCCCC;
border-radius: 2px;
}
| xinntao/HandyFigure | 187 | HandyFigure provides the sources file (ususally PPT files) for paper figures | JavaScript | xinntao | Xintao | Tencent |
html/data/data.js | JavaScript | var data = [
{
"title": "Template",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/template.png",
"url_paper": "#",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/template.pptx",
"url_project": "#",
},
{
"title": "basic-neurons",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/basic_neurons.png",
"url_paper": "#",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/basic_neurons.pptx",
"url_project": "#",
},
{
"title": "GFPGAN-overvie",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/GFPGAN_overview.png",
"url_paper": "https://arxiv.org/abs/2101.04061",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/GFPGAN_overview.pptx",
"url_project": "https://github.com/TencentARC/GFPGAN",
},
{
"title": "GFPGAN-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/GFPGAN_teaser.jpg",
"url_paper": "https://arxiv.org/abs/2101.04061",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/GFPGAN_teaser.pptx",
"url_project": "https://github.com/TencentARC/GFPGAN",
},
{
"title": "RealESRGAN-highorder",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/RealESRGAN_highorder.png",
"url_paper": "https://arxiv.org/abs/2107.10833",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/RealESRGAN_highorder.pptx",
"url_project": "https://github.com/xinntao/Real-ESRGAN",
},
{
"title": "RealESRGAN-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/RealESRGAN_teaser.jpg",
"url_paper": "https://arxiv.org/abs/2107.10833",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/RealESRGAN_teaser.pptx",
"url_project": "https://github.com/xinntao/Real-ESRGAN",
},
{
"title": "RealESRGAN-network",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/RealESRGAN_network.jpg",
"url_paper": "https://arxiv.org/abs/2107.10833",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/RealESRGAN_network.pptx",
"url_project": "https://github.com/xinntao/Real-ESRGAN",
},
{
"title": "RealESRGAN-blur",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/RealESRGAN_blur.jpg",
"url_paper": "https://arxiv.org/abs/2107.10833",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/RealESRGAN_blur.pptx",
"url_project": "https://github.com/xinntao/Real-ESRGAN",
},
{
"title": "FAIG-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/FAIG_teaser.jpg",
"url_paper": "https://openreview.net/pdf?id=az0BBDjDvwD",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/FAIG_teaser.pptx",
"url_project": "https://github.com/TencentARC/FAIG",
},
{
"title": "FAIG-motivation",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/FAIG_motivation.png",
"url_paper": "https://openreview.net/pdf?id=az0BBDjDvwD",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/FAIG_motivation.pptx",
"url_project": "https://github.com/TencentARC/FAIG",
},
{
"title": "FAIG-compare",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/FAIG_compare.jpg",
"url_paper": "https://openreview.net/pdf?id=az0BBDjDvwD",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/FAIG_compare.pptx",
"url_project": "https://github.com/TencentARC/FAIG",
},
{
"title": "Colorization",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/Colorization.png",
"url_paper": "https://arxiv.org/abs/2108.08826",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/Colorization.pptx",
"url_project": "#",
},
{
"title": "DNI-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/DNI_teaser.jpg",
"url_paper": "https://arxiv.org/abs/1811.10515",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/DNI_teaser.pptx",
"url_project": "https://xinntao.github.io/projects/DNI",
},
{
"title": "EDVR-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/EDVR_teaser.jpg",
"url_paper": "https://arxiv.org/abs/1905.02716",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/EDVR_teaser.pptx",
"url_project": "https://xinntao.github.io/projects/EDVR",
},
{
"title": "EDVR-network",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/EDVR_network.jpg",
"url_paper": "https://arxiv.org/abs/1905.02716",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/EDVR_network.pptx",
"url_project": "https://xinntao.github.io/projects/EDVR",
},
{
"title": "EDVR-pcd-tsa",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/EDVR_pcd_tsa.png",
"url_paper": "https://arxiv.org/abs/1905.02716",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/EDVR_pcd_tsa.pptx",
"url_project": "https://xinntao.github.io/projects/EDVR",
},
{
"title": "EDVR-ablation",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/EDVR_ablation.jpg",
"url_paper": "https://arxiv.org/abs/1905.02716",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/EDVR_ablation.pptx",
"url_project": "https://xinntao.github.io/projects/EDVR",
},
{
"title": "ESRGAN-teaser",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/ESRGAN_teaser.jpg",
"url_paper": "https://arxiv.org/abs/1809.00219",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/ESRGAN_teaser.pptx",
"url_project": "https://github.com/xinntao/ESRGAN",
},
{
"title": "ESRGAN-network",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/ESRGAN_network.png",
"url_paper": "https://arxiv.org/abs/1809.00219",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/ESRGAN_network.pptx",
"url_project": "https://github.com/xinntao/ESRGAN",
},
{
"title": "SFTGAN-network",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/SFTGAN_network.png",
"url_paper": "https://arxiv.org/abs/1804.02815",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/SFTGAN_network.pptx",
"url_project": "http://mmlab.ie.cuhk.edu.hk/projects/SFTGAN/",
},
{
"title": "SFTGAN-modulation",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/SFTGAN_modulation.jpg",
"url_paper": "https://arxiv.org/abs/1804.02815",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/SFTGAN_modulation.pptx",
"url_project": "http://mmlab.ie.cuhk.edu.hk/projects/SFTGAN/",
},
{
"title": "UnderstandDCN-illustration",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/UnderstandDCN_illustration.png",
"url_paper": "https://arxiv.org/abs/2009.07265",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/UnderstandDCN_illustration.pptx",
"url_project": "#",
},
{
"title": "UnderstandDCN-network",
"url_img": "https://raw.githubusercontent.com/xinntao/HandyFigure/master/figures/UnderstandDCN_network.png",
"url_paper": "https://arxiv.org/abs/2009.07265",
"url_src": "https://github.com/xinntao/HandyFigure/releases/download/PPT-source/UnderstandDCN_network.pptx",
"url_project": "#",
},
];
// repeat data for testing (uncomment the following line when deploying)
//while (data.length < 100) data = data.concat(data);
| xinntao/HandyFigure | 187 | HandyFigure provides the sources file (ususally PPT files) for paper figures | JavaScript | xinntao | Xintao | Tencent |
html/js/waterfall.js | JavaScript | var waterFall = {
    // DOM node that receives the generated columns.
    container: document.getElementById("container"),
    columnWidth: 400, // the column number is based on this value
    columnInitNum: 5, // number of images inited in each column
    scrollTop: document.documentElement.scrollTop || document.body.scrollTop,
    detectLeft: 0,
    sensitivity: 50, // minimum px delta before scroll/resize handlers react
    // Append one more unit to every column whose bottom edge is inside the
    // current viewport.
    appendByColumn: function() {
        var i = 0;
        for (i = 0; i < this.columnNumber; i++) {
            var c = document.getElementById("column_" + i);
            if (c && !this.loadFinish) {
                if (c.offsetTop + c.clientHeight <
                    this.scrollTop + (window.innerHeight || document.documentElement.clientHeight)) {
                    this.append(c);
                }
            }
        }
        return this;
    },
    // Build one figure card for the next entry in index_list and append it
    // to `column`. Sets loadFinish once every entry has been rendered.
    // NOTE(review): crt_index/url_img/url_paper/... are assigned without
    // var/let and leak into the global scope — confirm this is intentional.
    append: function(column) {
        crt_index = this.index_list[this.index]
        url_img = this.data[crt_index]['url_img'];
        url_paper = this.data[crt_index]['url_paper'];
        url_src = this.data[crt_index]['url_src'];
        url_project = this.data[crt_index]['url_project'];
        title = this.data[crt_index]['title'];
        var e = document.createElement("div");
        e.className = "unit";
        e.innerHTML = '<img src="'+ url_img +'" />'
        // Entries with a paper link get a clickable title (to the project).
        if (url_paper != '#') {
            e.innerHTML += '<a href="'+ url_project + '" target="_blank" ><strong>'+ title +'</strong></a>';}
        else {
            e.innerHTML += '<strong>'+ title +'</strong>';
        }
        if (url_paper != '#') {
            e.innerHTML += '<table align="center" cellspacing="0" cellpadding="5"><tr>'
                + '<td><span class=" block-text"> <a href="'+ url_src +'" target="_blank">PPT Source</a></span></td>'
                + '<td><span class=" block-text"> <a href="'+ url_paper +'" target="_blank">Paper</a></span></td>'
                + '</tr></table>';
        }
        else {
            e.innerHTML += '<table align="center" cellspacing="0" cellpadding="5"><tr>'
                + '<td><span class=" block-text"> <a href="'+ url_src +'" target="_blank">PPT Source</a></span></td>'
                + '</tr></table>';
        }
        column.appendChild(e);
        this.index += 1;
        if (this.index >= this.data.length) {
            this.loadFinish = true;
        }
        return this;
    },
    // (Re)build the column skeleton and fill each column with the initial
    // number of units. The first keep_num figures stay in fixed order; the
    // rest are shuffled.
    // NOTE(review): keep_num, index_list and the loop variable j are implicit
    // globals here — confirm intentional.
    create: function() {
        this.loadFinish = false;
        this.index = 0;
        keep_num = 2; // keep first N figures fixed
        index_list = [...Array(this.data.length).keys()];
        for (j=0;j<keep_num;j++){
            index_list.shift()
        }
        this.index_list = this.shuffleArray(index_list);
        for (j=keep_num-1;j>=0;j--){
            this.index_list.unshift(j)
        }
        this.columnNumber = Math.floor(document.body.clientWidth / this.columnWidth);
        var i = 0, html = ''
        for (i = 0; i < this.columnNumber; i++) {
            html += '<span id="column_'+ i +'" class="column" style="width:'+ this.columnWidth +'px;"></span>';
        }
        // The extra "detect" column only senses layout changes on resize.
        html += '<span id="detect" class="column" style="width:'+ this.columnWidth +'px;"></span>';
        this.container.innerHTML = html;
        this.detectLeft = document.getElementById("detect").offsetLeft;
        for (i = 0; i < this.columnInitNum; i++) {
            this.appendByColumn();
        }
        return this;
    },
    /* Randomize array in-place using Durstenfeld shuffle algorithm */
    shuffleArray: function(array) {
        for (var i = array.length - 1; i > 0; i--) {
            var j = Math.floor(Math.random() * (i + 1));
            var temp = array[i];
            array[i] = array[j];
            array[j] = temp;
        }
        return array;
    },
    // Load more units once the user has scrolled past the sensitivity delta.
    scroll: function() {
        var self = this;
        window.onscroll = function() {
            var scrollTop = document.documentElement.scrollTop || document.body.scrollTop;
            // NOTE(review): inside this handler `this` is window, so
            // `this.loadFinish` is always undefined (falsy) — probably
            // `self.loadFinish` was intended; confirm before changing.
            if (!this.loadFinish && Math.abs(scrollTop - self.scrollTop) > self.sensitivity) {
                self.scrollTop = scrollTop;
                self.appendByColumn();
            }
        };
        return this;
    },
    // Rebuild the layout when a window resize moves the detect column.
    resize: function() {
        var self = this;
        window.onresize = function() {
            var eleDetect = document.getElementById("detect");
            var detectLeft = eleDetect && eleDetect.offsetLeft;
            if (detectLeft && Math.abs(detectLeft - self.detectLeft) > self.sensitivity) {
                // NOTE(review): `this` is window here, so this write targets
                // window.columnInitNum and is never seen by self.create() —
                // likely `self.` was intended; confirm.
                this.columnInitNum = Math.floor(this.index / this.columnNumber);
                self.create(); // refresh the layout
            }
        };
        return this;
    },
    // Entry point: store the data array, build the layout, wire up handlers.
    init: function(data) {
        if (this.container) {
            this.data = data;
            this.create().scroll().resize();
        }
    }
| xinntao/HandyFigure | 187 | HandyFigure provides the sources file (ususally PPT files) for paper figures | JavaScript | xinntao | Xintao | Tencent |
index.html | HTML | <!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>HandyFigure</title>
<link rel="stylesheet" href="html/css/flow.css">
</head>
<body>
<!-- navigation bar -->
<div class="navbar navbar-fixed-top">
<img src="icon_text.png" alt="icon_text" height="30">
<a href="https://xinntao.github.io/">Home Page</a>
<a href="https://github.com/xinntao/HandyFigure">GitHub</a>
</div>
<!-- container -->
<div id="container" class="container"></div>
<!-- scripts -->
<script src="html/js/waterfall.js"></script>
<script src="html/data/data.js"></script>
<script>
waterFall.init(data);
</script>
</body>
</html> | xinntao/HandyFigure | 187 | HandyFigure provides the sources file (ususally PPT files) for paper figures | JavaScript | xinntao | Xintao | Tencent |
process_data.py | Python | import yaml
# Load the figure database (title / image / paper / source / project urls).
with open('figures/database.yml', mode='r') as f:
    data = yaml.load(f, Loader=yaml.FullLoader)['figures']

# generate .js file for html
file_js = open('html/data/data.js', mode='w')
file_js.write('var data = [\n')
for entry in data:
    title = entry['title']
    url_img = entry['url_img']
    # '#' marks "no link" for the html front-end
    url_paper = '#' if entry['url_paper'] is None else entry['url_paper']
    url_src = entry['url_src']
    url_project = '#' if entry['url_project'] is None else entry['url_project']
    data_string = '{\n'\
        f'    "title": "{title}",\n'\
        f'    "url_img": "{url_img}",\n'\
        f'    "url_paper": "{url_paper}",\n'\
        f'    "url_src": "{url_src}",\n'\
        f'    "url_project": "{url_project}",\n'\
        '},\n'
    file_js.write(data_string)
file_js.write('];\n// repeat data for testing (uncomment the following line when depolying)\n'
              '//while (data.length < 100) data = data.concat(data);\n')
file_js.close()

# update .md file for README.md
# The README contains two '<!-- TBR -->' markers; everything between them is
# regenerated from the database, the parts before/after are kept verbatim.
with open('README.md', mode='rb') as f:
    ori_md = f.readlines()
ori_md_part1 = []
ori_md_part2 = []
flag_part1 = True
flag_part2 = False
for line in ori_md:
    if flag_part1 is True:
        ori_md_part1.append(line)
    # match both CRLF and LF line endings of the marker
    if line == b'<!-- TBR -->\r\n' or line == b'<!-- TBR -->\n':
        if flag_part1 is True:
            flag_part1 = False
        else:
            flag_part2 = True
    if flag_part2 is True:
        ori_md_part2.append(line)
file_md = open('README.md', mode='wb')
# part1
for line in ori_md_part1:
    file_md.write(line)
file_md.write(b'| Figure | Name | PPT Source| Paper |\n')
file_md.write(
    b'|:------------------:|:-------------------------:|:-------------------------:|:-------------------------:|\n')
for entry in data:
    title = entry['title']
    url_img = entry['url_img']
    url_paper = entry['url_paper']
    url_src = entry['url_src']
    url_project = '-' if entry['url_project'] is None else entry['url_project']
    if url_paper is None:
        data_string = f'|  | {title} | <{url_src}> | -|\n'
    else:
        data_string = f'|  | {title} | <{url_src}> | [Paper]({url_paper})|\n'
    file_md.write(str.encode(data_string))
# part2
for line in ori_md_part2:
    file_md.write(line)
file_md.close()
handyinfer/__init__.py | Python | # flake8: noqa
from .depth_estimation import *
from .face_alignment import *
from .saliency_detection import *
from .utils import *
from .visualization import *
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/depth_estimation/DPT_BEiT_L_384_arch.py | Python | import numpy as np
# from timm.models.layers import get_act_layer
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import types
from timm.models.beit import gen_relative_position_index
from torch.utils.checkpoint import checkpoint
from typing import Optional
class Interpolate(nn.Module):
    """Thin ``nn.Module`` wrapper around ``nn.functional.interpolate`` so a
    resampling step can live inside an ``nn.Sequential``.

    Args:
        scale_factor (float): spatial scaling factor.
        mode (str): interpolation mode, e.g. ``'bilinear'``.
        align_corners (bool): forwarded to ``interpolate``.
    """

    def __init__(self, scale_factor, mode, align_corners=False):
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Resample ``x`` by ``self.scale_factor`` and return the result."""
        return self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
class ResidualConvUnit_custom(nn.Module):
    """Pre-activation residual block: two 3x3 convs (optional BatchNorm)
    with a quantization-friendly skip addition.

    Args:
        features (int): channel count (kept constant through the block).
        activation (nn.Module): activation applied before each conv.
        bn (bool): insert BatchNorm2d after each conv when True.
    """

    def __init__(self, features, activation, bn):
        super().__init__()
        self.bn = bn
        self.groups = 1
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
        if self.bn:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)
        self.activation = activation
        # FloatFunctional keeps the residual add traceable for quantization.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Return ``conv2(act(conv1(act(x)))) + x`` (optional BN after each conv)."""
        out = self.conv1(self.activation(x))
        if self.bn:
            out = self.bn1(out)
        out = self.conv2(self.activation(out))
        if self.bn:
            out = self.bn2(out)
        # self.groups is hard-wired to 1 above, so this branch never runs
        # (conv_merge is not defined anywhere).
        if self.groups > 1:
            out = self.conv_merge(out)
        return self.skip_add.add(out, x)
class FeatureFusionBlock_custom(nn.Module):
    """Feature fusion block of the DPT decoder.

    Optionally merges a skip connection (second input) into the main path via
    a residual unit, refines the result with a second residual unit, resizes
    it (factor 2 by default, or to an explicit size), and projects with a
    1x1 conv.

    Args:
        features (int): number of input channels.
        activation (nn.Module): activation used inside the residual units.
        deconv (bool): stored but not used in forward; kept for interface
            compatibility.
        bn (bool): use BatchNorm inside the residual units.
        expand (bool): if True, the output conv halves the channel count.
        align_corners (bool): passed to the bilinear interpolation.
        size (tuple | None): fixed output size; if None, upscale by factor 2
            unless an explicit ``size`` is passed to forward.
    """

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
        """Init.
        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock_custom, self).__init__()
        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand
        out_features = features
        if self.expand:
            out_features = features // 2
        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
        # quantization-friendly elementwise add for the skip connection
        self.skip_add = nn.quantized.FloatFunctional()
        self.size = size

    def forward(self, *xs, size=None):
        """Forward pass.

        Args:
            *xs: one or two feature maps; ``xs[1]`` (if given) is the skip
                connection merged into ``xs[0]``.
            size (tuple | None): explicit output size; overrides ``self.size``.
        Returns:
            tensor: output
        """
        output = xs[0]
        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)
            # output += res
        output = self.resConfUnit2(output)
        # Target resolution: explicit `size` argument wins, then the size
        # fixed at construction, else upscale by a factor of 2.
        if (size is None) and (self.size is None):
            modifier = {'scale_factor': 2}
        elif size is None:
            modifier = {'size': self.size}
        else:
            modifier = {'size': size}
        output = nn.functional.interpolate(output, **modifier, mode='bilinear', align_corners=self.align_corners)
        output = self.out_conv(output)
        return output
def _make_fusion_block(features, use_bn, size=None):
    """Create a ``FeatureFusionBlock_custom`` with the default DPT decoder
    settings (ReLU activation, no deconv/expand, align_corners=True).

    Args:
        features (int): channel count of the fused features.
        use_bn (bool): enable BatchNorm inside the residual units.
        size (tuple | None): optional fixed output size for the block.
    """
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )
class ProjectReadout(nn.Module):
    """Fuse the readout (CLS) token into every patch token via projection.

    Each patch token is concatenated with the broadcast CLS token (doubling
    the feature dim) and projected back to ``in_features`` by Linear + GELU.

    Args:
        in_features (int): feature dimension of each token.
        start_index (int): index of the first patch token.
    """

    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index
        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        tokens = x[:, self.start_index:]
        # Broadcast the CLS token across all patch positions.
        cls_token = x[:, 0].unsqueeze(1).expand_as(tokens)
        return self.project(torch.cat((tokens, cls_token), -1))
class Transpose(nn.Module):
    """``nn.Module`` wrapper for ``Tensor.transpose`` so the dimension swap
    can be placed inside an ``nn.Sequential``."""

    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        # Swap the two configured dimensions and return the resulting view.
        return x.transpose(self.dim0, self.dim1)
# Global scratchpad filled by the forward hooks created below; keyed by the
# name passed to get_activation.
activations = {}


def get_activation(name):
    """Return a forward hook that records a module's output.

    The hook stores the module output under ``activations[name]`` every time
    the hooked module runs.
    """

    def _capture(module, inputs, output):
        activations[name] = output

    return _capture
class Slice(nn.Module):
    """Drop the leading readout token(s): returns ``x[:, start_index:]``."""

    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        # Discard the first `start_index` tokens along dim 1.
        return x[:, self.start_index:]
class AddReadout(nn.Module):
    """Fold the readout token(s) into the patch tokens by addition.

    With ``start_index == 2`` the two leading tokens are averaged first;
    otherwise token 0 alone is used.
    """

    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        readout = (x[:, 0] + x[:, 1]) / 2 if self.start_index == 2 else x[:, 0]
        return x[:, self.start_index:] + readout.unsqueeze(1)
def get_readout_oper(vit_features, features, use_readout, start_index=1):
    """Build one readout operation per feature stage.

    Args:
        vit_features (int): token feature dim (used only by 'project').
        features (sequence): per-stage channel list; only its length matters.
        use_readout (str): 'ignore', 'add' or 'project'.
        start_index (int): index of the first patch token.

    Returns:
        list[nn.Module]: one readout module per entry in ``features``.
    """
    if use_readout == 'ignore':
        # Slice is stateless, so sharing one instance across stages is fine.
        return [Slice(start_index)] * len(features)
    if use_readout == 'add':
        return [AddReadout(start_index)] * len(features)
    if use_readout == 'project':
        # ProjectReadout holds learnable weights -> distinct module per stage.
        return [ProjectReadout(vit_features, start_index) for _ in features]
    assert (False), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
def make_backbone_default(
        model,
        features=[96, 192, 384, 768],
        size=[384, 384],
        hooks=[2, 5, 8, 11],
        vit_features=768,
        use_readout='ignore',
        start_index=1,
        start_index_readout=1,
):
    """Wrap a ViT-style `model` as a DPT backbone.

    Registers forward hooks on four transformer blocks and builds one
    `act_postprocessN` head per hooked block that reassembles the token
    sequence into a CNN-style feature map at a stage-specific scale.

    Args:
        model: timm ViT-style model exposing `.blocks`.
        features (list[int]): output channels of the four reassembled stages.
        size (list[int]): assumed input resolution (H, W) for the Unflatten ops.
        hooks (list[int]): indices of the four blocks to hook.
        vit_features (int): token embedding width of `model`.
        use_readout (str): readout handling ('ignore', 'add' or 'project').
        start_index (int): index of the first patch token in the sequence.
        start_index_readout (int): start index used by the readout operations.

    Returns:
        nn.Module: wrapper carrying `model`, `activations` and the four heads.
    """
    pretrained = nn.Module()
    pretrained.model = model
    # Hook outputs land in the module-level `activations` dict under keys '1'..'4'.
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation('1'))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation('2'))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation('3'))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation('4'))
    pretrained.activations = activations
    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout)
    # 32, 48, 136, 384
    # NOTE: forward_adapted_unflatten slices these Sequentials at fixed
    # positions ([0:2] and [3:]), so the order of sub-modules here matters.
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        # 4x upsampling — stage 1 is the finest scale.
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )
    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        # 2x upsampling.
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )
    # Stage 3 keeps the native token resolution (no resampling).
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )
    # Stage 4 downsamples by 2 (stride-2 conv) — the coarsest scale.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )
    pretrained.model.start_index = start_index
    # A 16x16 patch size is assumed here and by the `// 16` above — TODO confirm
    # if this helper is ever reused for non-/16 backbones.
    pretrained.model.patch_size = [16, 16]
    return pretrained
def _get_rel_pos_bias(self, window_size):
    """
    Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
    """
    # Relative-coordinate table extents for the trained vs. requested windows.
    old_height = 2 * self.window_size[0] - 1
    old_width = 2 * self.window_size[1] - 1
    new_height = 2 * window_size[0] - 1
    new_width = 2 * window_size[1] - 1
    old_relative_position_bias_table = self.relative_position_bias_table
    old_num_relative_distance = self.num_relative_distance
    # The 3 extra entries past the positional part are carried over unchanged
    # below — presumably the cls-token biases of BEiT; confirm against timm.
    new_num_relative_distance = new_height * new_width + 3
    # Bilinearly resample the positional part of the table to the new window.
    old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]
    old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
    new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode='bilinear')
    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
    new_relative_position_bias_table = torch.cat(
        [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]])
    # Cache the index map per window size — rebuilding it per call would be wasteful.
    key = str(window_size[1]) + ',' + str(window_size[0])
    if key not in self.relative_position_indices.keys():
        self.relative_position_indices[key] = gen_relative_position_index(window_size)
    relative_position_bias = new_relative_position_bias_table[self.relative_position_indices[key].view(-1)].view(
        window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
    relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
    return relative_position_bias.unsqueeze(0)
def forward_adapted_unflatten(pretrained, x, function_name='forward_features'):
    """Run the wrapped ViT and post-process the four hooked activations.

    Args:
        pretrained (nn.Module): backbone wrapper carrying ``model``,
            ``activations`` and the four ``act_postprocessN`` heads.
        x (torch.Tensor): input image batch of shape (B, C, H, W).
        function_name (str, optional): name of the model method to invoke;
            it must populate ``pretrained.activations`` via forward hooks.

    Returns:
        tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
            the four reassembled feature maps.
    """
    _, _, h, w = x.shape
    # The call is made for its hook side effects; its return value is unused.
    # (Was `exec(f'glob = ...')` — getattr is safer and keeps the code debuggable.)
    getattr(pretrained.model, function_name)(x)
    layer_1 = pretrained.activations['1']
    layer_2 = pretrained.activations['2']
    layer_3 = pretrained.activations['3']
    layer_4 = pretrained.activations['4']
    # Stage 1 of each head: readout op + transpose (sub-modules [0:2]).
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)
    # Restore the spatial layout of token sequences: (B, C, N) -> (B, C, H/p, W/p).
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size([
                h // pretrained.model.patch_size[1],
                w // pretrained.model.patch_size[0],
            ]),
        ))
    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)
    # Stage 2 of each head: the remaining convolutions (sub-modules [3:];
    # index 2 is the head's own Unflatten, applied manually above when needed).
    layer_1 = pretrained.act_postprocess1[3:len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3:len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3:len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3:len(pretrained.act_postprocess4)](layer_4)
    return layer_1, layer_2, layer_3, layer_4
def forward_beit(pretrained, x):
    """BEiT entry point: delegates to the generic adapted-forward helper."""
    return forward_adapted_unflatten(pretrained, x, function_name='forward_features')
def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
    """
    Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes.
    """
    B, N, C = x.shape
    # Assemble the full qkv bias from the three per-projection biases.
    # NOTE(review): presumably k_bias is a fixed zero buffer (timm BEiT
    # convention) while q/v biases are trainable — confirm against timm.
    qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
    qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
    # (B, N, 3*H*D) -> (3, B, heads, N, head_dim)
    qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
    q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
    q = q * self.scale
    attn = (q @ k.transpose(-2, -1))
    if self.relative_position_bias_table is not None:
        # Window size in tokens; assumes a 16x16 patch size — TODO confirm.
        window_size = tuple(np.array(resolution) // 16)
        attn = attn + self._get_rel_pos_bias(window_size)
    if shared_rel_pos_bias is not None:
        attn = attn + shared_rel_pos_bias
    attn = attn.softmax(dim=-1)
    attn = self.attn_drop(attn)
    x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
    x = self.proj(x)
    x = self.proj_drop(x)
    return x
def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
    """
    Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes.
    """
    # Pre-norm transformer block with optional layer-scale (gamma_1/gamma_2).
    attn_out = self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)
    if self.gamma_1 is not None:
        attn_out = self.gamma_1 * attn_out
    x = x + self.drop_path(attn_out)
    mlp_out = self.mlp(self.norm2(x))
    if self.gamma_1 is not None:
        mlp_out = self.gamma_2 * mlp_out
    x = x + self.drop_path(mlp_out)
    return x
def beit_forward_features(self, x):
    """
    Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes.
    """
    # Remember the input resolution: the patched blocks need it to build
    # relative-position biases for arbitrary window sizes.
    resolution = x.shape[2:]
    x = self.patch_embed(x)
    x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
    if self.pos_embed is not None:
        x = x + self.pos_embed
    x = self.pos_drop(x)
    rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
    for blk in self.blocks:
        if self.grad_checkpointing and not torch.jit.is_scripting():
            # Bug fix: the patched block_forward requires `resolution`; it was
            # previously dropped on the gradient-checkpointing path, which
            # would raise a TypeError when checkpointing is enabled.
            x = checkpoint(blk, x, resolution, shared_rel_pos_bias=rel_pos_bias)
        else:
            x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias)
    x = self.norm(x)
    return x
def _make_beit_backbone(
        model,
        features=[96, 192, 384, 768],
        size=[384, 384],
        hooks=[0, 4, 8, 11],
        vit_features=768,
        use_readout='ignore',
        start_index=1,
        start_index_readout=1,
):
    """Wrap a timm BEiT model as a DPT backbone with window-size-agnostic blocks."""
    backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
                                     start_index_readout)
    # Patch the embedding and feature extractor so arbitrary input
    # resolutions are supported.
    backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed)
    backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model)
    for blk in backbone.model.blocks:
        attention = blk.attn
        attention.relative_position_indices = {}
        attention._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attention)
        attention.forward = types.MethodType(attention_forward, attention)
        blk.forward = types.MethodType(block_forward, blk)
    return backbone
def _make_pretrained_beitl16_384(pretrained, use_readout='ignore', hooks=None):
    """Create a BEiT-L/16 @384 DPT backbone; `pretrained` toggles ImageNet weights."""
    if hooks is None:
        hooks = [5, 11, 17, 23]
    model = timm.create_model('beit_large_patch16_384', pretrained=pretrained)
    return _make_beit_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
def patch_embed_forward(self, x):
    """
    Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes.
    """
    projected = self.proj(x)
    if self.flatten:
        # (B, C, H, W) -> (B, H*W, C) token sequence.
        projected = projected.flatten(2).transpose(1, 2)
    return self.norm(projected)
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
scratch = nn.Module()
out_shape1 = out_shape
out_shape2 = out_shape
out_shape3 = out_shape
if len(in_shape) >= 4:
out_shape4 = out_shape
if expand:
out_shape1 = out_shape
out_shape2 = out_shape * 2
out_shape3 = out_shape * 4
if len(in_shape) >= 4:
out_shape4 = out_shape * 8
scratch.layer1_rn = nn.Conv2d(
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
scratch.layer2_rn = nn.Conv2d(
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
scratch.layer3_rn = nn.Conv2d(
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
if len(in_shape) >= 4:
scratch.layer4_rn = nn.Conv2d(
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
return scratch
def _make_encoder(backbone,
                  features,
                  use_pretrained,
                  groups=1,
                  expand=False,
                  exportable=True,
                  hooks=None,
                  use_vit_only=False,
                  use_readout='ignore',
                  in_features=[96, 256, 512, 1024]):
    """Instantiate the BEiT_384-L backbone plus its reassembly convolutions.

    Only the BEiT-L/16 @384 configuration is wired up here; several
    parameters are accepted for API compatibility but unused.
    """
    pretrained = _make_pretrained_beitl16_384(use_pretrained, hooks=hooks, use_readout=use_readout)
    beit_channels = [256, 512, 1024, 1024]  # per-stage widths of BEiT_384-L (backbone)
    scratch = _make_scratch(beit_channels, features, groups=groups, expand=expand)
    return pretrained, scratch
class DPT(nn.Module):
    """Dense Prediction Transformer: ViT backbone + RefineNet-style fusion decoder.

    Args:
        head (nn.Module): task-specific output head appended after the decoder.
        features (int): channel width of the decoder (scratch) layers.
        backbone (str): backbone identifier; only 'beitl16_384' is wired up here.
        readout (str): readout-token handling ('ignore', 'add' or 'project').
        channels_last (bool): use channels_last memory format in forward.
        use_bn (bool): enable batch norm inside the fusion blocks.
    """

    def __init__(self,
                 head,
                 features=256,
                 backbone='beitl16_384',
                 readout='project',
                 channels_last=False,
                 use_bn=False,
                 **kwargs):
        super(DPT, self).__init__()
        self.channels_last = channels_last
        # For the Swin, Swin 2, LeViT and Next-ViT Transformers, the hierarchical architectures prevent setting the
        # hooks freely. Instead, the hooks have to be chosen according to the ranges specified in the comments.
        hooks = {
            'beitl16_384': [5, 11, 17, 23],
        }[backbone]
        in_features = None
        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # Set to true of you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks,
            use_readout=readout,
            in_features=in_features,
        )
        self.number_layers = len(hooks) if hooks is not None else 4
        size_refinenet3 = None
        self.scratch.stem_transpose = None
        self.forward_transformer = forward_beit
        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3)
        if self.number_layers >= 4:
            self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
        self.scratch.output_conv = head

    def forward(self, x):
        """Run the backbone, fuse multi-scale features coarse-to-fine, apply `head`."""
        if self.channels_last:
            # NOTE(review): the result is not assigned, so this line has no
            # effect as written — presumably `x = x.contiguous(...)` was intended.
            x.contiguous(memory_format=torch.channels_last)
        layers = self.forward_transformer(self.pretrained, x)
        if self.number_layers == 3:
            layer_1, layer_2, layer_3 = layers
        else:
            layer_1, layer_2, layer_3, layer_4 = layers
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        if self.number_layers >= 4:
            layer_4_rn = self.scratch.layer4_rn(layer_4)
        # Coarse-to-fine fusion; each stage is resized to the next finer stage.
        if self.number_layers == 3:
            path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:])
        else:
            path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
            path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        if self.scratch.stem_transpose is not None:
            path_1 = self.scratch.stem_transpose(path_1)
        out = self.scratch.output_conv(path_1)
        return out
class DPTDepthModel(DPT):
    """DPT with a monocular-depth regression head.

    Args:
        path (str, optional): accepted for upstream-API compatibility;
            unused here (weights are loaded externally).
        non_negative (bool, optional): clamp predictions to >= 0 with a ReLU.
        **kwargs: forwarded to :class:`DPT`; ``features`` (default 256),
            ``head_features_1`` (default ``features``) and ``head_features_2``
            (default 32) size the head.
    """

    def __init__(self, path=None, non_negative=True, **kwargs):
        features = kwargs.get('features', 256)
        # Pop the head-only options so they are not forwarded to DPT.
        head_features_1 = kwargs.pop('head_features_1', features)
        head_features_2 = kwargs.pop('head_features_2', 32)
        head = nn.Sequential(
            nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        super().__init__(head, **kwargs)

    def forward(self, x):
        """Return depth of shape (N, H, W): squeeze the singleton channel dim."""
        return super().forward(x).squeeze(dim=1)
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/depth_estimation/__init__.py | Python | import torch
from handyinfer.utils import load_file_from_url
from .DPT_BEiT_L_384_arch import DPTDepthModel
from .midas import MidasCore
from .zoedepth_arch import ZoeDepth
__all__ = ['ZoeDepth']
def init_depth_estimation_model(model_name, device='cuda', model_rootpath=None, img_size=[384, 512]):
    """Build a depth-estimation model, fetch its weights and move it to `device`.

    Args:
        model_name (str): currently only 'ZoeD_N' is supported.
        device (str): target device, e.g. 'cuda' or 'cpu'.
        model_rootpath (str, optional): override directory for cached weights.
        img_size (list[int]): MiDaS core input resolution (H, W).

    Returns:
        torch.nn.Module: the model in eval mode on `device`.

    Raises:
        NotImplementedError: for an unknown `model_name`.
    """
    if model_name == 'ZoeD_N':
        # "DPT_BEiT_L_384"
        midas = DPTDepthModel(
            path=None,
            backbone='beitl16_384',
            non_negative=True,
        )
        core = MidasCore(midas, freeze_bn=True, img_size=img_size)
        core.set_output_channels('DPT_BEiT_L_384')
        model = ZoeDepth(core)
        model_url = 'https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')
    model_path = load_file_from_url(
        url=model_url, model_dir='handyinfer/weights', progress=True, file_name=None, save_dir=model_rootpath)
    # map_location='cpu' so checkpoints saved on GPU also load on CUDA-less
    # machines; the model is moved to `device` afterwards.
    model.load_state_dict(torch.load(model_path, map_location='cpu')['model'], strict=True)
    model.eval()
    model = model.to(device)
    return model
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/depth_estimation/midas.py | Python | # MIT License
# Copyright (c) 2022 Intelligent Systems Lab Org
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# File author: Shariq Farooq Bhat
import numpy as np
import torch
import torch.nn as nn
from torchvision.transforms import Normalize
def denormalize(x):
    """Reverses the imagenet normalization applied to the input.

    Args:
        x (torch.Tensor - shape(N,3,H,W)): input tensor

    Returns:
        torch.Tensor - shape(N,3,H,W): Denormalized input
    """
    imagenet_mean = torch.tensor([0.485, 0.456, 0.406], device=x.device).view(1, 3, 1, 1)
    imagenet_std = torch.tensor([0.229, 0.224, 0.225], device=x.device).view(1, 3, 1, 1)
    return x * imagenet_std + imagenet_mean
def get_activation(name, bank):
    """Return a forward hook that records a module's output in ``bank[name]``."""

    def hook(model, input, output):
        bank[name] = output

    return hook
class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
            self,
            width,
            height,
            resize_target=True,
            keep_aspect_ratio=False,
            ensure_multiple_of=1,
            resize_method='lower_bound',
    ):
        """Init.
        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size.
                (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        print('Params passed to Resize transform:')
        print('\twidth: ', width)
        print('\theight: ', height)
        print('\tresize_target: ', resize_target)
        print('\tkeep_aspect_ratio: ', keep_aspect_ratio)
        print('\tensure_multiple_of: ', ensure_multiple_of)
        print('\tresize_method: ', resize_method)
        # NOTE(review): resize_target is accepted and printed but never stored
        # or used — presumably vestigial from upstream MiDaS; confirm before removing.
        self.__width = width
        self.__height = height
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        # Round to the nearest multiple; fall back to floor/ceil when the
        # rounded value violates the max/min constraints.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
        return y

    def get_size(self, width, height):
        """Return the (new_width, new_height) to resize a (width, height) input to."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width
        if self.__keep_aspect_ratio:
            # Use a single scale for both axes, chosen per resize_method.
            if self.__resize_method == 'lower_bound':
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == 'upper_bound':
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == 'minimal':
                # scale as least as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(f'resize_method {self.__resize_method} not implemented')
        if self.__resize_method == 'lower_bound':
            new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
        elif self.__resize_method == 'upper_bound':
            new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
        elif self.__resize_method == 'minimal':
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f'resize_method {self.__resize_method} not implemented')
        return (new_width, new_height)

    def __call__(self, x):
        # x is assumed NCHW; shape[-2:] is (H, W), reversed to (W, H) for get_size.
        width, height = self.get_size(*x.shape[-2:][::-1])
        return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)
class PrepForMidas(object):
    """Input pipeline for the MiDaS core: optional resize + [-1, 1] normalization."""

    def __init__(self, resize_mode='minimal', keep_aspect_ratio=True, img_size=384, do_resize=True):
        if isinstance(img_size, int):
            img_size = (img_size, img_size)
        net_h, net_w = img_size
        self.normalization = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        if do_resize:
            self.resizer = Resize(
                net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32,
                resize_method=resize_mode)
        else:
            self.resizer = nn.Identity()

    def __call__(self, x):
        return self.normalization(self.resizer(x))
class MidasCore(nn.Module):
    """Wrapper around a MiDaS model that exposes its decoder features via hooks."""

    def __init__(self,
                 midas,
                 trainable=False,
                 fetch_features=True,
                 layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'),
                 freeze_bn=False,
                 keep_aspect_ratio=True,
                 img_size=384,
                 **kwargs):
        """Midas Base model used for multi-scale feature extraction.

        Args:
            midas (torch.nn.Module): Midas model.
            trainable (bool, optional): Train midas model. Defaults to False.
            fetch_features (bool, optional): Extract multi-scale features. Defaults to True.
            layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features,
                last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').
            freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance.
                Defaults to False.
            keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.
            img_size (int, tuple, optional): Input resolution. Defaults to 384.
        """
        super().__init__()
        self.core = midas
        # Filled in by set_output_channels() from MIDAS_SETTINGS.
        self.output_channels = None
        # Hook outputs land here, keyed by layer name.
        self.core_out = {}
        self.trainable = trainable
        self.fetch_features = fetch_features
        # midas.scratch.output_conv = nn.Identity()
        self.handles = []
        # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']
        self.layer_names = layer_names
        self.set_trainable(trainable)
        self.set_fetch_features(fetch_features)
        self.prep = PrepForMidas(
            keep_aspect_ratio=keep_aspect_ratio, img_size=img_size, do_resize=kwargs.get('do_resize', True))
        if freeze_bn:
            self.freeze_bn()

    def set_trainable(self, trainable):
        """Toggle requires_grad on all parameters; returns self for chaining."""
        self.trainable = trainable
        if trainable:
            self.unfreeze()
        else:
            self.freeze()
        return self

    def set_fetch_features(self, fetch_features):
        """Attach or detach the feature-extraction hooks; returns self."""
        self.fetch_features = fetch_features
        if fetch_features:
            if len(self.handles) == 0:
                self.attach_hooks(self.core)
        else:
            self.remove_hooks()
        return self

    def freeze(self):
        """Disable gradients for every parameter; returns self."""
        for p in self.parameters():
            p.requires_grad = False
        self.trainable = False
        return self

    def unfreeze(self):
        """Enable gradients for every parameter; returns self."""
        for p in self.parameters():
            p.requires_grad = True
        self.trainable = True
        return self

    def freeze_bn(self):
        """Put all BatchNorm2d layers in eval mode (running stats frozen); returns self."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        return self

    def forward(self, x, denorm=False, return_rel_depth=False):
        """Run MiDaS and return the hooked features (and optionally the raw depth).

        Args:
            x (torch.Tensor): input batch, assumed (N, 3, H, W) — TODO confirm.
            denorm (bool): if True, undo ImageNet normalization before prep.
            return_rel_depth (bool): also return MiDaS's relative depth map.
        """
        # Preprocessing never needs gradients.
        with torch.no_grad():
            if denorm:
                x = denormalize(x)
            x = self.prep(x)
        # print("Shape after prep: ", x.shape)
        with torch.set_grad_enabled(self.trainable):
            # print("Input size to Midascore", x.shape)
            rel_depth = self.core(x)
            # print("Output from midas shape", rel_depth.shape)
            if not self.fetch_features:
                return rel_depth
            out = [self.core_out[k] for k in self.layer_names]
            if return_rel_depth:
                return rel_depth, out
            return out

    def get_rel_pos_params(self):
        """Yield the relative-position parameters of the backbone."""
        for name, p in self.core.pretrained.named_parameters():
            if 'relative_position' in name:
                yield p

    def get_enc_params_except_rel_pos(self):
        """Yield encoder parameters excluding relative-position ones."""
        for name, p in self.core.pretrained.named_parameters():
            if 'relative_position' not in name:
                yield p

    def freeze_encoder(self, freeze_rel_pos=False):
        """Freeze the encoder, optionally keeping relative-position params trainable."""
        if freeze_rel_pos:
            for p in self.core.pretrained.parameters():
                p.requires_grad = False
        else:
            for p in self.get_enc_params_except_rel_pos():
                p.requires_grad = False
        return self

    def attach_hooks(self, midas):
        """Register forward hooks that capture the layers named in self.layer_names."""
        if len(self.handles) > 0:
            self.remove_hooks()
        if 'out_conv' in self.layer_names:
            # Hooks the 4th child of the output head — presumably the ReLU
            # before the final convs in the depth head; confirm per head layout.
            self.handles.append(
                list(midas.scratch.output_conv.children())[3].register_forward_hook(
                    get_activation('out_conv', self.core_out)))
        if 'r4' in self.layer_names:
            self.handles.append(midas.scratch.refinenet4.register_forward_hook(get_activation('r4', self.core_out)))
        if 'r3' in self.layer_names:
            self.handles.append(midas.scratch.refinenet3.register_forward_hook(get_activation('r3', self.core_out)))
        if 'r2' in self.layer_names:
            self.handles.append(midas.scratch.refinenet2.register_forward_hook(get_activation('r2', self.core_out)))
        if 'r1' in self.layer_names:
            self.handles.append(midas.scratch.refinenet1.register_forward_hook(get_activation('r1', self.core_out)))
        if 'l4_rn' in self.layer_names:
            self.handles.append(midas.scratch.layer4_rn.register_forward_hook(get_activation('l4_rn', self.core_out)))
        return self

    def remove_hooks(self):
        """Detach all registered forward hooks; returns self."""
        for h in self.handles:
            h.remove()
        return self

    def __del__(self):
        # NOTE(review): may raise during interpreter shutdown if attributes are
        # already torn down; consider guarding with try/except AttributeError.
        self.remove_hooks()

    def set_output_channels(self, model_type):
        """Record the decoder channel layout for the given MiDaS model type."""
        self.output_channels = MIDAS_SETTINGS[model_type]

    @staticmethod
    def build(midas_model_type='DPT_BEiT_L_384',
              train_midas=False,
              use_pretrained_midas=True,
              fetch_features=False,
              freeze_bn=True,
              force_keep_ar=False,
              force_reload=False,
              **kwargs):
        """Factory: download the named MiDaS model via torch.hub and wrap it."""
        if midas_model_type not in MIDAS_SETTINGS:
            raise ValueError(f'Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}')
        if 'img_size' in kwargs:
            kwargs = MidasCore.parse_img_size(kwargs)
        img_size = kwargs.pop('img_size', [384, 384])
        print('img_size', img_size)
        midas = torch.hub.load(
            'intel-isl/MiDaS', midas_model_type, pretrained=use_pretrained_midas, force_reload=force_reload)
        kwargs.update({'keep_aspect_ratio': force_keep_ar})
        midas_core = MidasCore(
            midas,
            trainable=train_midas,
            fetch_features=fetch_features,
            freeze_bn=freeze_bn,
            img_size=img_size,
            **kwargs)
        midas_core.set_output_channels(midas_model_type)
        return midas_core

    @staticmethod
    def build_from_config(config):
        """Build from a config dict (forwarded to build())."""
        return MidasCore.build(**config)

    @staticmethod
    def parse_img_size(config):
        """Normalize config['img_size'] to a [H, W] list (from str/int/list)."""
        assert 'img_size' in config
        if isinstance(config['img_size'], str):
            assert ',' in config['img_size'], 'img_size should be a string with comma separated img_size=H,W'
            config['img_size'] = list(map(int, config['img_size'].split(',')))
            assert len(config['img_size']) == 2, 'img_size should be a string with comma separated img_size=H,W'
        elif isinstance(config['img_size'], int):
            config['img_size'] = [config['img_size'], config['img_size']]
        else:
            assert isinstance(config['img_size'], list) and len(
                config['img_size']) == 2, 'img_size should be a list of H,W'
        return config
# Decoder channel layouts, keyed by the tuple of per-stage output channels,
# mapped to the MiDaS model types that produce them.
nchannels2models = {
    tuple([256] * 5): [
        'DPT_BEiT_L_384', 'DPT_BEiT_L_512', 'DPT_BEiT_B_384', 'DPT_SwinV2_L_384', 'DPT_SwinV2_B_384',
        'DPT_SwinV2_T_256', 'DPT_Large', 'DPT_Hybrid'
    ],
    (512, 256, 128, 64, 64): ['MiDaS_small']
}
# Model name to number of output channels
MIDAS_SETTINGS = {name: channels for channels, names in nchannels2models.items() for name in names}
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/depth_estimation/zoedepth_arch.py | Python | # MIT License
# Copyright (c) 2022 Intelligent Systems Lab Org
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# File author: Shariq Farooq Bhat
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# from zoedepth.models.model_io import load_state_from_resource
def log_binom(n, k, eps=1e-7):
    """ log(nCk) using stirling approximation """
    n_stable = n + eps
    k_stable = k + eps
    rest = n_stable - k_stable
    return (n_stable * torch.log(n_stable)
            - k_stable * torch.log(k_stable)
            - rest * torch.log(rest + eps))
class LogBinomial(nn.Module):
    """Per-pixel binomial distribution over `n_classes` bins, parameterized by
    a success probability map and a temperature."""

    def __init__(self, n_classes=256, act=torch.softmax):
        """Compute log binomial distribution for n_classes

        Args:
            n_classes (int, optional): number of output classes. Defaults to 256.
        """
        super().__init__()
        self.K = n_classes
        self.act = act
        self.register_buffer('k_idx', torch.arange(0, n_classes).view(1, -1, 1, 1))
        self.register_buffer('K_minus_1', torch.Tensor([self.K - 1]).view(1, -1, 1, 1))

    def forward(self, x, t=1., eps=1e-4):
        """Compute log binomial distribution for x

        Args:
            x (torch.Tensor - NCHW): probabilities
            t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1..
            eps (float, optional): Small number for numerical stability. Defaults to 1e-4.

        Returns:
            torch.Tensor -NCHW: log binomial distribution logbinomial(p;t)
        """
        if x.ndim == 3:
            x = x.unsqueeze(1)  # make it nchw
        # Clamp both p and 1-p away from 0 before taking logs.
        failure = torch.clamp(1 - x, eps, 1)
        success = torch.clamp(x, eps, 1)
        log_pmf = (log_binom(self.K_minus_1, self.k_idx)
                   + self.k_idx * torch.log(success)
                   + (self.K - 1 - self.k_idx) * torch.log(failure))
        return self.act(log_pmf / t, dim=1)
class ConditionalLogBinomial(nn.Module):

    def __init__(self,
                 in_features,
                 condition_dim,
                 n_classes=256,
                 bottleneck_factor=2,
                 p_eps=1e-4,
                 max_temp=50,
                 min_temp=1e-7,
                 act=torch.softmax):
        """Conditional Log Binomial distribution

        Args:
            in_features (int): number of input channels in main feature
            condition_dim (int): number of input channels in condition feature
            n_classes (int, optional): Number of classes. Defaults to 256.
            bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.
            p_eps (float, optional): small eps value. Defaults to 1e-4.
            max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.
            min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.
        """
        super().__init__()
        self.p_eps = p_eps
        self.max_temp = max_temp
        self.min_temp = min_temp
        self.log_binomial_transform = LogBinomial(n_classes, act=act)
        bottleneck = (in_features + condition_dim) // bottleneck_factor
        self.mlp = nn.Sequential(
            nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0),
            nn.GELU(),
            # 2 for p linear norm, 2 for t linear norm
            nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0),
            nn.Softplus())

    def forward(self, x, cond):
        """Forward pass

        Args:
            x (torch.Tensor - NCHW): Main feature
            cond (torch.Tensor - NCHW): condition feature

        Returns:
            torch.Tensor: Output log binomial distribution
        """
        pt = self.mlp(torch.concat((x, cond), dim=1))
        p_pair = pt[:, :2, ...] + self.p_eps
        t_pair = pt[:, 2:, ...] + self.p_eps
        # Normalize each non-negative pair into a single value in (0, 1).
        p = p_pair[:, 0, ...] / (p_pair[:, 0, ...] + p_pair[:, 1, ...])
        t = t_pair[:, 0, ...] / (t_pair[:, 0, ...] + t_pair[:, 1, ...])
        t = t.unsqueeze(1)
        # Map the normalized temperature into [min_temp, max_temp].
        t = (self.max_temp - self.min_temp) * t + self.min_temp
        return self.log_binomial_transform(p, t)
class SeedBinRegressorUnnormed(nn.Module):

    def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
        """Bin center regressor network. Bin centers are unbounded

        Args:
            in_features (int): input channels
            n_bins (int, optional): Number of bin centers. Defaults to 16.
            mlp_dim (int, optional): Hidden dimension. Defaults to 256.
            min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
            max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
        """
        super().__init__()
        self.version = '1_1'
        layers = [
            nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
            nn.Softplus(),
        ]
        self._net = nn.Sequential(*layers)

    def forward(self, x):
        """
        Returns tensor of bin_width vectors (centers). One vector b for every pixel
        """
        centers = self._net(x)
        return centers, centers
class Projector(nn.Module):

    def __init__(self, in_features, out_features, mlp_dim=128):
        """Projector MLP

        Args:
            in_features (int): input channels
            out_features (int): output channels
            mlp_dim (int, optional): hidden dimension. Defaults to 128.
        """
        super().__init__()
        hidden = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
        output = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)
        self._net = nn.Sequential(hidden, nn.ReLU(inplace=True), output)

    def forward(self, x):
        """Project per-pixel features to `out_features` channels."""
        return self._net(x)
@torch.jit.script
def exp_attractor(dx, alpha: float = 300, gamma: int = 2):
    """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center,
    dc = shift in bin centermmary for exp_attractor

    Args:
        dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
        alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha =
            greater attraction. Defaults to 300.
        gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly
            number of bin centers affected. Lower gamma = farther reach. Defaults to 2.

    Returns:
        torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc
    """
    return dx * torch.exp(-alpha * torch.abs(dx) ** gamma)
@torch.jit.script
def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
    """Inverse attractor: dc = dx / (1 + alpha * dx^gamma).

    This is the default attractor in the accompanying paper. dx = a - c with a
    the attractor point and c the bin center; dc shifts the bin center.

    Args:
        dx (torch.Tensor): difference tensor Ai - Cj.
        alpha (float, optional): proportional attractor strength; lower alpha =
            greater attraction. Defaults to 300.
        gamma (int, optional): strength controlling the region of influence;
            lower gamma = farther reach. Defaults to 2.

    Returns:
        torch.Tensor: delta shifts dc.
    """
    return dx / (1 + alpha * dx**gamma)
class AttractorLayerUnnormed(nn.Module):

    def __init__(self,
                 in_features,
                 n_bins,
                 n_attractors=16,
                 mlp_dim=128,
                 min_depth=1e-3,
                 max_depth=10,
                 alpha=300,
                 gamma=2,
                 kind='sum',
                 attractor_type='exp',
                 memory_efficient=False):
        """
        Attractor layer for bin centers. Bin centers are unbounded

        Args:
            in_features (int): input channels of the feature block.
            n_bins (int): number of bin centers being adjusted.
            n_attractors (int, optional): number of attractor points regressed. Defaults to 16.
            mlp_dim (int, optional): hidden channels of the attractor MLP. Defaults to 128.
            min_depth (float, optional): stored but not used by this unnormed variant.
            max_depth (float, optional): stored but not used by this unnormed variant.
            alpha (float, optional): proportional attractor strength. Defaults to 300.
            gamma (int, optional): attractor reach exponent. Defaults to 2.
            kind (str, optional): attraction aggregation, 'sum' or 'mean'. Defaults to 'sum'.
            attractor_type (str, optional): 'exp' or 'inv' attractor. Defaults to 'exp'.
            memory_efficient (bool, optional): loop over attractors instead of one
                broadcasted tensor op. Defaults to False.
        """
        super().__init__()
        self.n_attractors = n_attractors
        self.n_bins = n_bins
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.alpha = alpha
        self.gamma = gamma
        self.kind = kind
        self.attractor_type = attractor_type
        self.memory_efficient = memory_efficient
        # 1x1-conv MLP with Softplus tail: attractor positions are non-negative
        self._net = nn.Sequential(
            nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),
            nn.Softplus())

    def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
        """
        Args:
            x (torch.Tensor) : feature block; shape - n, c, h, w
            b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
            prev_b_embedding (torch.Tensor, optional): embedding from the previous
                decoder stage, added to x (after optional resize).
            interpolate (bool, optional): resize prev_b_embedding to x's spatial size.
            is_for_query (bool, optional): unused; kept for API compatibility.
        Returns:
            tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to
            keep the API consistent with the normed version
        """
        if prev_b_embedding is not None:
            if interpolate:
                prev_b_embedding = nn.functional.interpolate(
                    prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
            x = x + prev_b_embedding
        A = self._net(x)  # attractor points, shape n, n_attractors, h, w
        n, c, h, w = A.shape
        # bring the previous centers to this stage's resolution
        b_prev = nn.functional.interpolate(b_prev, (h, w), mode='bilinear', align_corners=True)
        b_centers = b_prev
        if self.attractor_type == 'exp':
            dist = exp_attractor
        else:
            dist = inv_attractor
        if not self.memory_efficient:
            func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
            # .shape N, nbins, h, w
            # broadcast all attractor/center pairs at once, then reduce over attractors
            delta_c = func(dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)
        else:
            # accumulate one attractor at a time to cap peak memory
            delta_c = torch.zeros_like(b_centers, device=b_centers.device)
            for i in range(self.n_attractors):
                delta_c += dist(A[:, i, ...].unsqueeze(1) - b_centers)  # .shape N, nbins, h, w
            if self.kind == 'mean':
                delta_c = delta_c / self.n_attractors
        b_new_centers = b_centers + delta_c
        B_centers = b_new_centers
        return b_new_centers, B_centers
def percentile(input, percentiles):
    # source code is from https://github.com/aliutkus/torchpercentile
    """
    Find the percentiles of a tensor along the first dimension.

    Args:
        input (torch.Tensor): values; percentiles are taken over dim 0 after the
            remaining dims are flattened.
        percentiles (sequence | torch.Tensor): percentile ranks in [0, 100].

    Returns:
        torch.Tensor: linearly interpolated percentiles, cast back to the input
        dtype; shape (len(percentiles), *input.shape[1:]).
    """
    input_dtype = input.dtype
    input_shape = input.shape
    # fix: the original performed this conversion twice; the second check was dead code
    if not isinstance(percentiles, torch.Tensor):
        percentiles = torch.tensor(percentiles, dtype=torch.double)
    # compute in double precision for stable interpolation, cast back at the end
    input = input.double()
    percentiles = percentiles.to(input.device).double()
    input = input.view(input.shape[0], -1)
    in_sorted, _ = torch.sort(input, dim=0)
    # fractional index of each requested percentile along dim 0
    positions = percentiles * (input.shape[0] - 1) / 100
    floored = torch.floor(positions)
    ceiled = floored + 1
    ceiled[ceiled > input.shape[0] - 1] = input.shape[0] - 1  # clamp at the last row
    # linear interpolation between the two neighbouring order statistics
    weight_ceiled = positions - floored
    weight_floored = 1.0 - weight_ceiled
    d0 = in_sorted[floored.long(), :] * weight_floored[:, None]
    d1 = in_sorted[ceiled.long(), :] * weight_ceiled[:, None]
    result = (d0 + d1).view(-1, *input_shape[1:])
    return result.type(input_dtype)
class DepthModel(nn.Module):
    """Base class for depth models: padding / horizontal-flip test-time augmentation
    plus conversion of a metric depth map to a reversed-gray image."""

    def __init__(self):
        super().__init__()
        self.device = 'cpu'
        # 256-entry lookup of reversed gray levels (1 -> 0), shaped (256, 1, 1, 1)
        # so it broadcasts against flattened depth values in to_gray_r
        self.gray_r_map = torch.linspace(1, 0, 256)
        self.gray_r_map = self.gray_r_map.unsqueeze(dim=1).unsqueeze(dim=2).unsqueeze(dim=3)

    def to(self, device) -> nn.Module:
        # keep the (non-parameter) lookup table on the same device as the model
        self.device = device
        self.gray_r_map = self.gray_r_map.to(device)
        return super().to(device)

    def forward(self, x, *args, **kwargs):
        raise NotImplementedError

    def _infer(self, x: torch.Tensor):
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        return self(x)['metric_depth']

    def _infer_with_pad_aug(self,
                            x: torch.Tensor,
                            pad_input: bool = True,
                            fh: float = 3,
                            fw: float = 3,
                            upsampling_mode: str = 'bicubic',
                            padding_mode='reflect',
                            **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with padding augmentation
        Padding augmentation fixes the boundary artifacts in the output depth map.
        Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset
        which has a black or white border around the image.
        This augmentation pads the input image and crops the prediction back to the original size / view.
        Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to pad the input or not. Defaults to True.
            fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
            fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
            upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
            padding_mode (str, optional): padding mode. Defaults to "reflect".
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # assert x is nchw and c = 3
        assert x.dim() == 4, 'x must be 4 dimensional, got {}'.format(x.dim())
        assert x.shape[1] == 3, 'x must have 3 channels, got {}'.format(x.shape[1])
        if pad_input:
            assert fh > 0 or fw > 0, 'atlease one of fh and fw must be greater than 0'
            # pad size grows with the square root of the image size
            pad_h = int(np.sqrt(x.shape[2] / 2) * fh)
            pad_w = int(np.sqrt(x.shape[3] / 2) * fw)
            padding = [pad_w, pad_w]
            if pad_h > 0:
                padding += [pad_h, pad_h]
            x = F.pad(x, padding, mode=padding_mode, **kwargs)
        out = self._infer(x)
        if out.shape[-2:] != x.shape[-2:]:
            out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
        if pad_input:
            # crop to the original size, handling the case where pad_h and pad_w is 0
            if pad_h > 0:
                out = out[:, :, pad_h:-pad_h, :]
            if pad_w > 0:
                out = out[:, :, :, pad_w:-pad_w]
        return out

    def infer_with_flip_aug(self, x, pad_input: bool = True, **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with horizontal flip augmentation
        Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model
        with and without horizontal flip.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # infer with horizontal flip and average
        out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
        out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
        out = (out + torch.flip(out_flip, dims=[3])) / 2
        return out

    def infer(self,
              x,
              pad_input: bool = True,
              with_flip_aug: bool = True,
              normalize: bool = True,
              dtype='float32',
              **kwargs) -> torch.Tensor:
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
            normalize (bool, optional): map the metric depth to a reversed-gray image. Defaults to True.
            dtype (str, optional): 'float32' scales the gray output into [0, 1]. Defaults to 'float32'.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        if with_flip_aug:
            depth = self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
        else:
            depth = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
        if normalize:
            depth = self.to_gray_r(depth, dtype=dtype)
        return depth

    def to_gray_r(self,
                  value,
                  vmin=None,
                  vmax=None,
                  invalid_val=-99,
                  invalid_mask=None,
                  background_color=128,
                  dtype='float32'):
        """Converts a depth map to a reversed-gray image.
        Args:
            value (torch.Tensor): Input depth map. Shape: (b, 1, H, W).
            vmin (float, optional): vmin-valued entries are mapped to the start of the gray range.
                If None, the 2nd percentile of valid values is used. Defaults to None.
            vmax (float, optional): vmax-valued entries are mapped to the end of the gray range.
                If None, the 85th percentile of valid values is used. Defaults to None.
            invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'.
                Defaults to -99.
            invalid_mask (torch.Tensor, optional): Boolean mask for invalid regions. Defaults to None.
            background_color (int, optional): gray level assigned to invalid pixels. Defaults to 128.
            dtype (str, optional): 'float32' rescales levels into [0, 1]; otherwise raw indices are returned.
        Returns:
            torch.Tensor: reversed-gray depth map, shape (b, 1, H, W).
        """
        # Percentile can only process the first dimension
        # self.gray_r_map = self.gray_r_map.to(value.device)
        n, c, h, w = value.shape
        # move pixels to dim 0 so `percentile` reduces over them: (h*w, n, c)
        value = value.reshape(n, c, h * w).permute(2, 0, 1)
        if invalid_mask is None:
            invalid_mask = value == invalid_val
        mask = torch.logical_not(invalid_mask)
        # normalize with robust 2nd/85th percentiles of the valid pixels
        vmin_vmax = percentile(value[mask], [2, 85])
        vmin = vmin_vmax[0] if vmin is None else vmin
        vmax = vmin_vmax[1] if vmax is None else vmax
        # NOTE(review): vmin/vmax are 0-d tensors here, so `vmin == vmax` indexes with a
        # scalar boolean — degenerate-range guard; presumably intended per-image. Verify.
        value[:, vmin == vmax] = value[:, vmin == vmax] * 0.
        value[:, vmin != vmax] = (value[:, vmin != vmax] - vmin[vmin != vmax]) / (
            vmax[vmin != vmax] - vmin[vmin != vmax])
        value[invalid_mask] = torch.nan
        # nearest reversed-gray level via argmin over the 256-entry lookup table
        diff = torch.abs(self.gray_r_map - value)
        min_ids = torch.argmin(diff, dim=0)  # [h*w, n, c]
        min_ids[invalid_mask] = background_color
        min_ids = min_ids.reshape(h, w, n, c).permute(2, 3, 0, 1)
        if dtype == 'float32':
            min_ids = min_ids.type(value.dtype) / 255.0  # [0,1]
        return min_ids
class ZoeDepth(DepthModel):

    def __init__(self,
                 core,
                 n_bins=64,
                 bin_centers_type='softplus',
                 bin_embedding_dim=128,
                 min_depth=1e-3,
                 max_depth=10,
                 n_attractors=[16, 8, 4, 1],
                 attractor_alpha=1000,
                 attractor_gamma=2,
                 attractor_kind='mean',
                 attractor_type='inv',
                 min_temp=0.0212,
                 max_temp=50,
                 train_midas=False,
                 midas_lr_factor=10,
                 encoder_lr_factor=10,
                 pos_enc_lr_factor=10,
                 inverse_midas=False,
                 **kwargs):
        """ZoeDepth model. This is the version of ZoeDepth that has a single metric head
        Args:
            core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative"
                features
            n_bins (int, optional): Number of bin centers. Defaults to 64.
            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers.
                For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
                For "softplus", softplus activation is used and thus are unbounded.
                Defaults to "softplus".
            bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
            min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
            max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
            n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
            attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for
                more details. Defaults to 300.
            attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for
                more details. Defaults to 2.
            attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
            attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp"
                (Exponential attractor). Defaults to 'exp'.
            min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
            max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
            train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
            midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder
                and positional encodings. Defaults to 10.
            encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model.
                Defaults to 10.
            pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in
                the base midas model. Defaults to 10.
        """
        super().__init__()
        self.core = core
        self.max_depth = max_depth
        self.min_depth = min_depth
        self.min_temp = min_temp
        self.bin_centers_type = bin_centers_type
        self.midas_lr_factor = midas_lr_factor
        self.encoder_lr_factor = encoder_lr_factor
        self.pos_enc_lr_factor = pos_enc_lr_factor
        self.train_midas = train_midas
        self.inverse_midas = inverse_midas
        # a non-positive lr factor means "frozen"
        if self.encoder_lr_factor <= 0:
            self.core.freeze_encoder(freeze_rel_pos=self.pos_enc_lr_factor <= 0)
        N_MIDAS_OUT = 32
        btlnck_features = self.core.output_channels[0]
        num_out_features = self.core.output_channels[1:]
        self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0)  # btlnck conv
        # NOTE(review): only the unnormed ('softplus') variants are wired in here,
        # regardless of bin_centers_type — confirm intended for this distribution.
        SeedBinRegressorLayer = SeedBinRegressorUnnormed
        Attractor = AttractorLayerUnnormed
        self.seed_bin_regressor = SeedBinRegressorLayer(
            btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
        self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
        # one projector + attractor pair per decoder feature level
        self.projectors = nn.ModuleList([Projector(num_out, bin_embedding_dim) for num_out in num_out_features])
        self.attractors = nn.ModuleList([
            Attractor(
                bin_embedding_dim,
                n_bins,
                n_attractors=n_attractors[i],
                min_depth=min_depth,
                max_depth=max_depth,
                alpha=attractor_alpha,
                gamma=attractor_gamma,
                kind=attractor_kind,
                attractor_type=attractor_type) for i in range(len(num_out_features))
        ])
        last_in = N_MIDAS_OUT + 1  # +1 for relative depth
        # use log binomial instead of softmax
        self.conditional_log_binomial = ConditionalLogBinomial(
            last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp)

    def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
        """
        Args:
            x (torch.Tensor): Input image tensor of shape (B, C, H, W)
            return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
            denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as
                midas normalization is different. Defaults to False.
            return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.
        Returns:
            dict: Dictionary containing the following keys:
                - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
                - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
                - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins).
                  Present only if return_final_centers is True
                - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W).
                  Present only if return_probs is True
        """
        b, c, h, w = x.shape
        # print("input shape ", x.shape)
        self.orig_input_width = w
        self.orig_input_height = h
        rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
        # print("output shapes", rel_depth.shape, out.shape)
        outconv_activation = out[0]
        btlnck = out[1]
        x_blocks = out[2:]
        x_d0 = self.conv2(btlnck)
        x = x_d0
        _, seed_b_centers = self.seed_bin_regressor(x)
        if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
            b_prev = (seed_b_centers - self.min_depth) / \
                (self.max_depth - self.min_depth)
        else:
            b_prev = seed_b_centers
        prev_b_embedding = self.seed_projector(x)
        # unroll this loop for better performance
        # note: the loop variable `x` shadows the earlier feature tensor on purpose
        for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
            b_embedding = projector(x)
            b, b_centers = attractor(b_embedding, b_prev, prev_b_embedding, interpolate=True)
            b_prev = b.clone()
            prev_b_embedding = b_embedding.clone()
        last = outconv_activation
        if self.inverse_midas:
            # invert depth followed by normalization
            rel_depth = 1.0 / (rel_depth + 1e-6)
            rel_depth = (rel_depth - rel_depth.min()) / \
                (rel_depth.max() - rel_depth.min())
        # concat rel depth with last. First interpolate rel depth to last size
        rel_cond = rel_depth.unsqueeze(1)
        rel_cond = nn.functional.interpolate(rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True)
        last = torch.cat([last, rel_cond], dim=1)
        b_embedding = nn.functional.interpolate(b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
        x = self.conditional_log_binomial(last, b_embedding)
        # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor
        # print(x.shape, b_centers.shape)
        b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
        out = torch.sum(x * b_centers, dim=1, keepdim=True)
        # Structure output dict
        output = dict(metric_depth=out)
        if return_final_centers or return_probs:
            output['bin_centers'] = b_centers
        if return_probs:
            output['probs'] = x
        return output
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/face_alignment/__init__.py | Python | import torch
from handyinfer.utils import load_file_from_url
from .awing_arch import FAN
from .convert_98_to_68_landmarks import landmark_98_to_68
__all__ = ['FAN', 'landmark_98_to_68']
def init_face_alignment_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Create the face-alignment network and load its pretrained weights.

    Args:
        model_name (str): only 'awing_fan' is supported.
        half (bool): unused; kept for API compatibility.
        device (str): device the model is moved to. Defaults to 'cuda'.
        model_rootpath (str, optional): directory for the downloaded weights.

    Returns:
        FAN: the alignment model in eval mode on `device`.

    Raises:
        NotImplementedError: if `model_name` is not recognized.
    """
    if model_name != 'awing_fan':
        raise NotImplementedError(f'{model_name} is not implemented.')
    model = FAN(num_modules=4, num_landmarks=98)
    model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/alignment_WFLW_4HG.pth'
    model_path = load_file_from_url(
        url=model_url, model_dir='handyinfer/weights', progress=True, file_name=None, save_dir=model_rootpath)
    model.load_state_dict(torch.load(model_path)['state_dict'], strict=True)
    model.eval()
    return model.to(device)
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/face_alignment/awing_arch.py | Python | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def calculate_points(heatmaps):
    """Convert heatmaps to sub-pixel landmark coordinates.

    The argmax of each heatmap gives the integer peak; a quarter-pixel shift in
    the direction of the larger neighbor refines it, and 0.5 centers the point
    on the pixel.

    Args:
        heatmaps (numpy.ndarray): shape (B, N, H, W).

    Returns:
        numpy.ndarray: predicted (x, y) points, shape (B, N, 2), float64.
    """
    B, N, H, W = heatmaps.shape
    HW = H * W
    BN_range = np.arange(B * N)
    heatline = heatmaps.reshape(B, N, HW)
    indexes = np.argmax(heatline, axis=2)
    preds = np.stack((indexes % W, indexes // W), axis=2)
    # fix: np.float was removed in NumPy 1.24; use the concrete float64 dtype
    preds = preds.astype(np.float64, copy=False)
    inr = indexes.ravel()
    heatline = heatline.reshape(B * N, HW)
    # NOTE: inr + 1 / inr - 1 can wrap across row ends for peaks on the border,
    # matching the original behavior — TODO confirm acceptable upstream
    x_up = heatline[BN_range, inr + 1]
    x_down = heatline[BN_range, inr - 1]
    # fix: the vertical bounds checks were hard-coded for 64x64 (4096) heatmaps;
    # use HW so other resolutions do not index out of range
    if any((inr + W) >= HW):
        y_up = heatline[BN_range, HW - 1]
    else:
        y_up = heatline[BN_range, inr + W]
    if any((inr - W) <= 0):
        y_down = heatline[BN_range, 0]
    else:
        y_down = heatline[BN_range, inr - W]
    # quarter-pixel shift toward the larger neighbor
    think_diff = np.sign(np.stack((x_up - x_down, y_up - y_down), axis=1))
    think_diff *= .25
    preds += think_diff.reshape(B, N, 2)
    preds += .5
    return preds
class AddCoordsTh(nn.Module):
    """Append normalized coordinate (and optional radius / boundary) channels.

    Fix over the original: all tensors are created on the input's device instead
    of hard-coded ``.cuda()`` calls, so the module also works on CPU.
    """

    def __init__(self, x_dim=64, y_dim=64, with_r=False, with_boundary=False):
        super(AddCoordsTh, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.with_r = with_r
        self.with_boundary = with_boundary

    def forward(self, input_tensor, heatmap=None):
        """
        Args:
            input_tensor: (batch, c, x_dim, y_dim)
            heatmap: optional map whose last channel gates the boundary coords.
        Returns:
            torch.Tensor: input with 2 coordinate channels appended
            (+1 if with_r, +2 if with_boundary and heatmap is given).
        """
        device = input_tensor.device
        batch_size_tensor = input_tensor.shape[0]

        # build integer index grids via outer products, shape (1, y_dim, x_dim)
        xx_ones = torch.ones([1, self.y_dim], dtype=torch.int32, device=device)
        xx_ones = xx_ones.unsqueeze(-1)
        xx_range = torch.arange(self.x_dim, dtype=torch.int32, device=device).unsqueeze(0)
        xx_range = xx_range.unsqueeze(1)
        xx_channel = torch.matmul(xx_ones.float(), xx_range.float())
        xx_channel = xx_channel.unsqueeze(-1)

        yy_ones = torch.ones([1, self.x_dim], dtype=torch.int32, device=device)
        yy_ones = yy_ones.unsqueeze(1)
        yy_range = torch.arange(self.y_dim, dtype=torch.int32, device=device).unsqueeze(0)
        yy_range = yy_range.unsqueeze(-1)
        yy_channel = torch.matmul(yy_range.float(), yy_ones.float())
        yy_channel = yy_channel.unsqueeze(-1)

        xx_channel = xx_channel.permute(0, 3, 2, 1)
        yy_channel = yy_channel.permute(0, 3, 2, 1)

        # normalize indices to [-1, 1]
        xx_channel = xx_channel / (self.x_dim - 1)
        yy_channel = yy_channel / (self.y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1

        xx_channel = xx_channel.repeat(batch_size_tensor, 1, 1, 1)
        yy_channel = yy_channel.repeat(batch_size_tensor, 1, 1, 1)

        if self.with_boundary and heatmap is not None:
            # keep coordinates only where the boundary heatmap is confident
            boundary_channel = torch.clamp(heatmap[:, -1:, :, :], 0.0, 1.0)
            zero_tensor = torch.zeros_like(xx_channel)
            xx_boundary_channel = torch.where(boundary_channel > 0.05, xx_channel, zero_tensor)
            yy_boundary_channel = torch.where(boundary_channel > 0.05, yy_channel, zero_tensor)

        ret = torch.cat([input_tensor, xx_channel, yy_channel], dim=1)

        if self.with_r:
            # radial distance channel, scaled to [0, 1]
            rr = torch.sqrt(torch.pow(xx_channel, 2) + torch.pow(yy_channel, 2))
            rr = rr / torch.max(rr)
            ret = torch.cat([ret, rr], dim=1)

        if self.with_boundary and heatmap is not None:
            ret = torch.cat([ret, xx_boundary_channel, yy_boundary_channel], dim=1)
        return ret
class CoordConvTh(nn.Module):
    """CoordConv layer as in the paper: AddCoords followed by a convolution."""

    def __init__(self, x_dim, y_dim, with_r, with_boundary, in_channels, first_one=False, *args, **kwargs):
        super(CoordConvTh, self).__init__()
        self.addcoords = AddCoordsTh(x_dim=x_dim, y_dim=y_dim, with_r=with_r, with_boundary=with_boundary)
        # account for the extra channels AddCoordsTh appends
        extra = 2
        if with_r:
            extra += 1
        if with_boundary and not first_one:
            extra += 2
        self.conv = nn.Conv2d(in_channels=in_channels + extra, *args, **kwargs)

    def forward(self, input_tensor, heatmap=None):
        augmented = self.addcoords(input_tensor, heatmap)
        # the last two channels are the (possibly boundary-gated) coordinates
        coord_tail = augmented[:, -2:, :, :]
        out = self.conv(augmented)
        return out, coord_tail
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False, dilation=1):
    """Build a 3x3 convolution with padding (bias off by default)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=strd,
        padding=padding,
        bias=bias,
        dilation=dilation)
class BasicBlock(nn.Module):
    """Residual basic block (BatchNorm layers intentionally absent)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # identity shortcut, projected when a downsample module is provided
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.conv1(x))
        y = self.conv2(y)
        y += shortcut
        return self.relu(y)
class ConvBlock(nn.Module):
    """Hourglass conv block: three BN-ReLU-conv stages whose outputs are
    concatenated (out/2 + out/4 + out/4 channels) and fused with a residual."""

    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, int(out_planes / 2))
        self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
        self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4), padding=1, dilation=1)
        self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
        self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4), padding=1, dilation=1)
        if in_planes != out_planes:
            # project the residual when channel counts differ
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        branch1 = self.conv1(F.relu(self.bn1(x), True))
        branch2 = self.conv2(F.relu(self.bn2(branch1), True))
        branch3 = self.conv3(F.relu(self.bn3(branch2), True))
        fused = torch.cat((branch1, branch2, branch3), 1)
        skip = x if self.downsample is None else self.downsample(x)
        fused += skip
        return fused
class HourGlass(nn.Module):
    """Recursive hourglass: each level keeps a full-resolution skip branch and a
    pooled branch that recurses one level deeper, then fuses them by addition.
    Submodule names (b1_/b2_/b2_plus_/b3_<level>) are load-bearing for
    pretrained state dicts."""

    def __init__(self, num_modules, depth, num_features, first_one=False):
        super(HourGlass, self).__init__()
        self.num_modules = num_modules
        self.depth = depth
        self.features = num_features
        # CoordConv stem; boundary channels are consumed from the second stack onwards
        self.coordconv = CoordConvTh(
            x_dim=64,
            y_dim=64,
            with_r=True,
            with_boundary=True,
            in_channels=256,
            first_one=first_one,
            out_channels=256,
            kernel_size=1,
            stride=1,
            padding=0)
        self._generate_network(self.depth)

    def _generate_network(self, level):
        # register two ConvBlocks per level; the innermost level gets an extra one
        self.add_module('b1_' + str(level), ConvBlock(256, 256))
        self.add_module('b2_' + str(level), ConvBlock(256, 256))
        if level > 1:
            self._generate_network(level - 1)
        else:
            self.add_module('b2_plus_' + str(level), ConvBlock(256, 256))
        self.add_module('b3_' + str(level), ConvBlock(256, 256))

    def _forward(self, level, inp):
        # Upper branch: skip connection at full resolution
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)
        # Lower branch: half resolution, recurse until level 1
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)
        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)
        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)
        # upsample back and fuse with the skip branch
        up2 = F.interpolate(low3, scale_factor=2, mode='nearest')
        return up1 + up2

    def forward(self, x, heatmap):
        x, last_channel = self.coordconv(x, heatmap)
        return self._forward(self.depth, x), last_channel
class FAN(nn.Module):
    """Face Alignment Network: a stem followed by stacked hourglass modules with
    intermediate heatmap supervision. Module names created via add_module are
    load-bearing for pretrained checkpoints."""

    def __init__(self, num_modules=1, end_relu=False, gray_scale=False, num_landmarks=68):
        super(FAN, self).__init__()
        self.num_modules = num_modules
        self.gray_scale = gray_scale
        self.end_relu = end_relu
        self.num_landmarks = num_landmarks
        # Base part
        # NOTE(review): both branches are identical (in_channels=3); the gray_scale
        # branch presumably should use 1 input channel — confirm upstream intent.
        if self.gray_scale:
            self.conv1 = CoordConvTh(
                x_dim=256,
                y_dim=256,
                with_r=True,
                with_boundary=False,
                in_channels=3,
                out_channels=64,
                kernel_size=7,
                stride=2,
                padding=3)
        else:
            self.conv1 = CoordConvTh(
                x_dim=256,
                y_dim=256,
                with_r=True,
                with_boundary=False,
                in_channels=3,
                out_channels=64,
                kernel_size=7,
                stride=2,
                padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = ConvBlock(64, 128)
        self.conv3 = ConvBlock(128, 128)
        self.conv4 = ConvBlock(128, 256)
        # Stacking part
        for hg_module in range(self.num_modules):
            # only the first stack skips consuming boundary channels
            if hg_module == 0:
                first_one = True
            else:
                first_one = False
            self.add_module('m' + str(hg_module), HourGlass(1, 4, 256, first_one))
            self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256))
            self.add_module('conv_last' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
            # per-stack head: num_landmarks heatmaps + 1 boundary channel
            self.add_module('l' + str(hg_module), nn.Conv2d(256, num_landmarks + 1, kernel_size=1, stride=1, padding=0))
            if hg_module < self.num_modules - 1:
                # remap features and predictions back to 256 channels for the next stack
                self.add_module('bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module('al' + str(hg_module),
                                nn.Conv2d(num_landmarks + 1, 256, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """Run the stem and all hourglass stacks.

        Returns:
            tuple(list[torch.Tensor], list[torch.Tensor]): per-stack heatmap
            predictions and per-stack boundary coordinate channels.
        """
        x, _ = self.conv1(x)
        x = F.relu(self.bn1(x), True)
        # x = F.relu(self.bn1(self.conv1(x)), True)
        x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        x = self.conv3(x)
        x = self.conv4(x)
        previous = x
        outputs = []
        boundary_channels = []
        tmp_out = None
        for i in range(self.num_modules):
            hg, boundary_channel = self._modules['m' + str(i)](previous, tmp_out)
            ll = hg
            ll = self._modules['top_m_' + str(i)](ll)
            ll = F.relu(self._modules['bn_end' + str(i)](self._modules['conv_last' + str(i)](ll)), True)
            # Predict heatmaps
            tmp_out = self._modules['l' + str(i)](ll)
            if self.end_relu:
                tmp_out = F.relu(tmp_out)  # HACK: Added relu
            outputs.append(tmp_out)
            boundary_channels.append(boundary_channel)
            if i < self.num_modules - 1:
                # intermediate supervision: feed features + predictions forward
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_
        return outputs, boundary_channels

    def get_landmarks(self, img, device='cuda'):
        """Predict landmark coordinates in the original image frame.

        Args:
            img (numpy.ndarray): H x W x 3 image; channels reversed below,
                presumably BGR input as from cv2 — verify against callers.
            device (str): device the input tensor is moved to. Defaults to 'cuda'.

        Returns:
            numpy.ndarray: (num_landmarks, 2) predicted (x, y) points.
        """
        H, W, _ = img.shape
        # scale factors from the 64x64 heatmap grid back to the input size
        offset = W / 64, H / 64, 0, 0
        img = cv2.resize(img, (256, 256))
        inp = img[..., ::-1]
        inp = torch.from_numpy(np.ascontiguousarray(inp.transpose((2, 0, 1)))).float()
        inp = inp.to(device)
        inp.div_(255.0).unsqueeze_(0)
        outputs, _ = self.forward(inp)
        # last stack's landmark heatmaps, boundary channel dropped
        out = outputs[-1][:, :-1, :, :]
        heatmaps = out.detach().cpu().numpy()
        pred = calculate_points(heatmaps).reshape(-1, 2)
        pred *= offset[:2]
        pred += offset[-2:]  # trailing offsets are (0, 0) here
        return pred
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/face_alignment/convert_98_to_68_landmarks.py | Python | import numpy as np
def load_txt_file(file_path):
    """Load lines from a txt file, stripped of surrounding whitespace.

    Args:
        file_path (str): path to the text file.

    Returns:
        tuple[list[str], int]: the stripped lines and the line count.
    """
    # the 'with' block closes the file; the original's explicit close() inside
    # the block was redundant
    with open(file_path, 'r') as cfile:
        content = cfile.readlines()
    content = [x.strip() for x in content]
    num_lines = len(content)
    return content, num_lines
def anno_parser(anno_path, num_pts, line_offset=0):
    """Parse a landmark annotation file.

    Args:
        anno_path (str): path of anno file (suffix .txt).
        num_pts (int): number of landmarks.
        line_offset (int, optional): line index of the first point. Defaults to 0.

    Returns:
        numpy.ndarray: (num_pts, 2) float32 array of (x, y); rows that fail to
        parse stay zero (a message is printed).
    """
    data, _ = load_txt_file(anno_path)
    pts = np.zeros((num_pts, 2), dtype='float32')
    for idx in range(num_pts):
        try:
            fields = data[idx + line_offset].split(',')
            pts[idx, 0] = float(fields[0])
            pts[idx, 1] = float(fields[1])
        except ValueError:
            # best-effort parsing: leave the row at zero and keep going
            print(f'Error in loading points in {anno_path}')
    return pts
def landmark_98_to_68(landmark_98):
    """Convert 98-point (WFLW-style) landmarks to the 68-point convention.

    Args:
        landmark_98 (numpy.ndarray): landmark coordinates, shape (98, 2).

    Returns:
        numpy.ndarray: landmark coordinates, shape (68, 2), float32.
    """
    landmark_68 = np.zeros((68, 2), dtype='float32')
    # jaw line: every other point of the 33-point contour
    for j in range(17):
        landmark_68[j, :] = landmark_98[2 * j, :]
    # nose maps one-to-one
    landmark_68[27:36, :] = landmark_98[51:60, :]
    # mouth (outer + inner contour) maps one-to-one
    landmark_68[48:68, :] = landmark_98[76:96, :]
    # left eyebrow: endpoint copied, interior points = mean of upper/lower pairs
    landmark_68[17, :] = landmark_98[33, :]
    for k, (up, low) in enumerate([(34, 41), (35, 40), (36, 39), (37, 38)]):
        landmark_68[18 + k, :] = (landmark_98[up, :] + landmark_98[low, :]) / 2
    # right eyebrow: interior pairs averaged, endpoint copied
    for k, (up, low) in enumerate([(42, 50), (43, 49), (44, 48), (45, 47)]):
        landmark_68[22 + k, :] = (landmark_98[up, :] + landmark_98[low, :]) / 2
    landmark_68[26, :] = landmark_98[46, :]
    # eyes: drop the two extra eyelid points of the 98-point layout
    for dst, src in zip([36, 37, 38, 39, 40, 41], [60, 61, 63, 64, 65, 67]):
        landmark_68[dst, :] = landmark_98[src, :]
    for dst, src in zip([42, 43, 44, 45, 46, 47], [68, 69, 71, 72, 73, 75]):
        landmark_68[dst, :] = landmark_98[src, :]
    return landmark_68
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/saliency_detection/__init__.py | Python | import torch
from handyinfer.utils import load_file_from_url
from .inspyrenet_arch import InSPyReNet_SwinB
__all__ = ['InSPyReNet_SwinB']
def init_saliency_detection_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Create the saliency-detection network and load its pretrained weights.

    Args:
        model_name (str): only 'inspyrenet' is supported.
        half (bool): unused; kept for API compatibility.
        device (str): 'cuda' moves the model to GPU; anything else keeps it on CPU.
        model_rootpath (str, optional): directory for the downloaded weights.

    Returns:
        InSPyReNet_SwinB: the model in eval mode.

    Raises:
        NotImplementedError: if `model_name` is not recognized.
    """
    if model_name != 'inspyrenet':
        raise NotImplementedError(f'{model_name} is not implemented.')
    model = InSPyReNet_SwinB()
    model_url = 'https://huggingface.co/Xintao/HandyInfer/resolve/main/models/saliency_detection_InSpyReNet_SwinB.pth'  # noqa: E501
    model_path = load_file_from_url(
        url=model_url, model_dir='handyinfer/weights', progress=True, file_name=None, save_dir=model_rootpath)
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    if device == 'cuda':
        model = model.cuda()
    return model
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/saliency_detection/inspyrenet_arch.py | Python | import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from handyinfer.utils import img2tensor
from .inspyrenet_modules import SICA, ImagePyramid, PAA_d, PAA_e, Transition
from .swin_transformer import SwinB
class InSPyReNet(nn.Module):
    """InSPyReNet saliency detector with a Laplacian saliency-pyramid decoder.

    ``forward_inspyre`` produces a saliency pyramid (d3..d0) plus Laplacian
    detail maps (p2..p0). At inference, a low-resolution pyramid (from the
    384x384 resize) is blended with a high-resolution pyramid when the input
    is large enough; small inputs use the low-resolution pass only.
    """

    def __init__(self, backbone, in_channels, depth=64, base_size=[384, 384], threshold=512, **kwargs):
        super(InSPyReNet, self).__init__()
        self.backbone = backbone
        self.in_channels = in_channels
        self.depth = depth
        self.base_size = base_size
        self.threshold = threshold

        # per-stage context modules over the five backbone feature maps
        self.context1 = PAA_e(self.in_channels[0], self.depth, base_size=self.base_size, stage=0)
        self.context2 = PAA_e(self.in_channels[1], self.depth, base_size=self.base_size, stage=1)
        self.context3 = PAA_e(self.in_channels[2], self.depth, base_size=self.base_size, stage=2)
        self.context4 = PAA_e(self.in_channels[3], self.depth, base_size=self.base_size, stage=3)
        self.context5 = PAA_e(self.in_channels[4], self.depth, base_size=self.base_size, stage=4)

        self.decoder = PAA_d(self.depth * 3, depth=self.depth, base_size=base_size, stage=2)

        self.attention0 = SICA(self.depth, depth=self.depth, base_size=self.base_size, stage=0, lmap_in=True)
        self.attention1 = SICA(self.depth * 2, depth=self.depth, base_size=self.base_size, stage=1, lmap_in=True)
        self.attention2 = SICA(self.depth * 2, depth=self.depth, base_size=self.base_size, stage=2)

        # resize helpers: `ret` matches a target tensor; `res`/`des` take explicit sizes
        self.ret = lambda x, target: F.interpolate(x, size=target.shape[-2:], mode='bilinear', align_corners=False)
        self.res = lambda x, size: F.interpolate(x, size=size, mode='bilinear', align_corners=False)
        self.des = lambda x, size: F.interpolate(x, size=size, mode='nearest')

        self.image_pyramid = ImagePyramid(7, 1)
        self.transition0 = Transition(17)
        self.transition1 = Transition(9)
        self.transition2 = Transition(5)

    def cuda(self):
        # ImagePyramid/Transition hold plain tensors (not nn.Module buffers),
        # so nn.Module.cuda() would not move them; do it explicitly.
        self.image_pyramid.cuda()
        self.transition0.cuda()
        self.transition1.cuda()
        self.transition2.cuda()
        super(InSPyReNet, self).cuda()
        return self

    def forward_inspyre(self, x):
        """One backbone + decoder pass; returns saliency and Laplacian pyramids."""
        B, _, H, W = x.shape

        x1, x2, x3, x4, x5 = self.backbone(x)

        x1 = self.context1(x1)  # 4
        x2 = self.context2(x2)  # 4
        x3 = self.context3(x3)  # 8
        x4 = self.context4(x4)  # 16
        x5 = self.context5(x5)  # 32

        f3, d3 = self.decoder([x3, x4, x5])  # 16
        f3 = self.res(f3, (H // 4, W // 4))

        f2, p2 = self.attention2(torch.cat([x2, f3], dim=1), d3.detach())
        d2 = self.image_pyramid.reconstruct(d3.detach(), p2)  # 4

        x1 = self.res(x1, (H // 2, W // 2))
        f2 = self.res(f2, (H // 2, W // 2))
        f1, p1 = self.attention1(torch.cat([x1, f2], dim=1), d2.detach(), p2.detach())  # 2
        d1 = self.image_pyramid.reconstruct(d2.detach(), p1)  # 2

        f1 = self.res(f1, (H, W))
        _, p0 = self.attention0(f1, d1.detach(), p1.detach())  # 2
        d0 = self.image_pyramid.reconstruct(d1.detach(), p0)  # 2

        out = dict()
        out['saliency'] = [d3, d2, d1, d0]
        out['laplacian'] = [p2, p1, p0]
        return out

    def forward(self, input):
        input = inference_helper(input)
        img, img_resize = input
        # Bug fix: the original unpacked `B, _, H, W = img.shape` before the
        # None check, crashing for small inputs where inference_helper returns
        # img=None; the unpacked values were never used.
        if img is None:
            # input below the size threshold: single low-resolution pass
            out = self.forward_inspyre(img_resize)
            d3, d2, d1, d0 = out['saliency']
        else:
            # LR Saliency Pyramid
            lr_out = self.forward_inspyre(img_resize)
            lr_d3, lr_d2, lr_d1, lr_d0 = lr_out['saliency']
            # HR Saliency Pyramid
            hr_out = self.forward_inspyre(img)
            hr_d3, hr_d2, hr_d1, hr_d0 = hr_out['saliency']
            hr_p2, hr_p1, hr_p0 = hr_out['laplacian']

            # Pyramid Blending: seed with the LR prediction, then re-add the HR
            # Laplacian details gated by morphological transition masks.
            d3 = self.ret(lr_d0, hr_d3)

            t2 = self.ret(self.transition2(d3), hr_p2)
            p2 = t2 * hr_p2
            d2 = self.image_pyramid.reconstruct(d3, p2)

            t1 = self.ret(self.transition1(d2), hr_p1)
            p1 = t1 * hr_p1
            d1 = self.image_pyramid.reconstruct(d2, p1)

            t0 = self.ret(self.transition0(d1), hr_p0)
            p0 = t0 * hr_p0
            d0 = self.image_pyramid.reconstruct(d1, p0)

        pred = torch.sigmoid(d0)
        # min-max normalize to [0, 1]
        pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)
        return pred
def inference_helper(input):
    """Pre-process a BGR image array for InSPyReNet inference.

    Returns a tuple ``(img, img_resize)``:
      - ``img``: full-resolution CUDA tensor (sides capped/rounded to
        multiples of 32), or None when either side is below the 512
        threshold (the caller then uses only the low-resolution path).
      - ``img_resize``: fixed 384x384 CUDA tensor for the low-resolution pass.
    """
    threshold = 512
    L = 1280  # size cap before rounding
    # Bug fix: `input.shape[0:2]` is an immutable tuple; the in-place size
    # updates below raised TypeError for any image with a side above L.
    size = list(input.shape[0:2])
    if (size[0] >= size[1]) and size[1] > L:
        size[0] = size[0] / (size[1] / L)
        size[1] = L
    elif (size[1] > size[0]) and size[0] > L:
        size[1] = size[1] / (size[0] / L)
        size[0] = L
    # cv2 sizes are (width, height); round both sides to multiples of 32
    size_new = (int(round(size[1] / 32)) * 32, int(round(size[0] / 32)) * 32)

    if size[0] < threshold or size[1] < threshold:
        img = None
    else:
        # Bug fix: the interpolation flag was passed positionally, which
        # cv2.resize interprets as the `dst` argument; it must be a keyword.
        img = cv2.resize(input, size_new, interpolation=cv2.INTER_LINEAR)
        img = img2tensor(img, bgr2rgb=True, float32=True).unsqueeze(0).cuda() / 255.

    img_resize = cv2.resize(input, (384, 384), interpolation=cv2.INTER_LINEAR)
    img_resize = img2tensor(img_resize, bgr2rgb=True, float32=True).unsqueeze(0).cuda() / 255.
    return img, img_resize
def InSPyReNet_SwinB(depth=64, pretrained=False, base_size=[384, 384], threshold=512):
    """Build an InSPyReNet model on a Swin-B backbone."""
    backbone = SwinB(pretrained=pretrained)
    swin_b_channels = [128, 128, 256, 512, 1024]
    return InSPyReNet(backbone, swin_b_channels, depth, base_size, threshold)
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/saliency_detection/inspyrenet_modules.py | Python | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from typing import List, Optional
# dilation and erosion functions are copied from
# https://github.com/kornia/kornia/blob/master/kornia/morphology/morphology.py
def _neight2channels_like_kernel(kernel: torch.Tensor) -> torch.Tensor:
    """Build an identity ('one-hot') conv kernel: one output channel per
    position of ``kernel``, shaped (h*w, 1, h, w)."""
    rows, cols = kernel.shape
    n = rows * cols
    one_hot = torch.eye(n, dtype=kernel.dtype, device=kernel.device)
    return one_hot.reshape(n, 1, rows, cols)
def dilation(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Grayscale dilation applied to each channel independently.

    Copied from kornia (kornia/morphology/morphology.py). ``kernel``
    (:math:`(k_y, k_x)`) marks the neighborhood via its non-zero entries;
    ``structuring_element`` optionally supplies non-flat values. ``origin``
    defaults to the kernel center. With ``border_type='geodesic'`` the image
    is padded with ``-max_val`` so out-of-image pixels never win the max;
    otherwise ``border_type``/``border_value`` are passed to ``F.pad``.
    ``engine`` selects the 'unfold' (stable) or 'convolution' (fast)
    implementation. Returns a tensor shaped like ``tensor`` (B, C, H, W).
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')
    if len(tensor.shape) != 4:
        raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')
    if len(kernel.shape) != 2:
        raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')

    k_h, k_w = kernel.shape
    anchor = origin if origin is not None else [k_h // 2, k_w // 2]

    # pad so every output pixel has a full window: (left, right, top, bottom)
    pad_spec: List[int] = [anchor[1], k_w - anchor[1] - 1, anchor[0], k_h - anchor[0] - 1]
    if border_type == 'geodesic':
        border_value = -max_val
        border_type = 'constant'
    padded: torch.Tensor = F.pad(tensor, pad_spec, mode=border_type, value=border_value)

    # flat elements contribute 0 inside the neighborhood, -max_val outside
    weights = torch.zeros_like(kernel) if structuring_element is None else structuring_element.clone()
    weights[kernel == 0] = -max_val

    if engine == 'unfold':
        windows = padded.unfold(2, k_h, 1).unfold(3, k_w, 1)
        result, _ = torch.max(windows + weights.flip((0, 1)), 4)
        result, _ = torch.max(result, 4)
    elif engine == 'convolution':
        B, C, H, W = tensor.size()
        h_pad, w_pad = padded.shape[-2:]
        one_hot = _neight2channels_like_kernel(kernel)
        result, _ = F.conv2d(
            padded.view(B * C, 1, h_pad, w_pad), one_hot, padding=0,
            bias=weights.view(-1).flip(0)).max(dim=1)
        result = result.view(B, C, H, W)
    else:
        raise NotImplementedError(f"engine {engine} is unknown, use 'convolution' or 'unfold'")
    return result.view_as(tensor)
def erosion(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Grayscale erosion applied to each channel independently.

    Copied from kornia (kornia/morphology/morphology.py). ``kernel``
    (:math:`(k_y, k_x)`) marks the neighborhood via its non-zero entries;
    ``structuring_element`` optionally supplies non-flat values. ``origin``
    defaults to the kernel center. With ``border_type='geodesic'`` the image
    is padded with ``+max_val`` so out-of-image pixels never win the min;
    otherwise ``border_type``/``border_value`` are passed to ``F.pad``.
    ``engine`` selects the 'unfold' (stable) or 'convolution' (fast)
    implementation. Returns a tensor of shape (B, C, H, W).
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')
    if len(tensor.shape) != 4:
        raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')
    if len(kernel.shape) != 2:
        raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')

    k_h, k_w = kernel.shape
    anchor = origin if origin is not None else [k_h // 2, k_w // 2]

    # pad so every output pixel has a full window: (left, right, top, bottom)
    pad_spec: List[int] = [anchor[1], k_w - anchor[1] - 1, anchor[0], k_h - anchor[0] - 1]
    if border_type == 'geodesic':
        border_value = max_val
        border_type = 'constant'
    padded: torch.Tensor = F.pad(tensor, pad_spec, mode=border_type, value=border_value)

    # flat elements contribute 0 inside the neighborhood, -max_val outside
    # (subtracting -max_val pushes excluded positions out of the min)
    weights = torch.zeros_like(kernel) if structuring_element is None else structuring_element.clone()
    weights[kernel == 0] = -max_val

    if engine == 'unfold':
        windows = padded.unfold(2, k_h, 1).unfold(3, k_w, 1)
        result, _ = torch.min(windows - weights, 4)
        result, _ = torch.min(result, 4)
    elif engine == 'convolution':
        B, C, H, W = tensor.size()
        h_pad, w_pad = padded.shape[-2:]
        one_hot = _neight2channels_like_kernel(kernel)
        result, _ = F.conv2d(
            padded.view(B * C, 1, h_pad, w_pad), one_hot, padding=0, bias=-weights.view(-1)).min(dim=1)
        result = result.view(B, C, H, W)
    else:
        raise NotImplementedError(f"engine {engine} is unknown, use 'convolution' or 'unfold'")
    return result
class SelfAttention(nn.Module):
    """Axis-restricted self-attention over a feature map.

    ``mode`` selects which spatial axes attend to each other: 'h', 'w', or
    'hw' (full spatial attention). The output is a residual blend gated by a
    learned scalar ``gamma`` that starts at zero (identity at init).
    """

    def __init__(self, in_channels, mode='hw', stage_size=None):
        super(SelfAttention, self).__init__()
        self.mode = mode
        # 1x1 projections; query/key channels are reduced 8x
        self.query_conv = Conv2d(in_channels, in_channels // 8, kernel_size=(1, 1))
        self.key_conv = Conv2d(in_channels, in_channels // 8, kernel_size=(1, 1))
        self.value_conv = Conv2d(in_channels, in_channels, kernel_size=(1, 1))
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)
        self.stage_size = stage_size  # kept for API compatibility; unused here

    def forward(self, x):
        n, ch, h, w = x.size()

        # length of the attention axis: h, w, or h*w depending on mode
        attn_len = 1
        if 'h' in self.mode:
            attn_len *= h
        if 'w' in self.mode:
            attn_len *= w
        flat = (n, -1, attn_len)

        q = self.query_conv(x).view(*flat).permute(0, 2, 1)
        k = self.key_conv(x).view(*flat)
        attn = self.softmax(torch.bmm(q, k))

        v = self.value_conv(x).view(*flat)
        ctx = torch.bmm(v, attn.permute(0, 2, 1)).view(n, ch, h, w)

        return self.gamma * ctx + x
class Conv2d(nn.Module):
    """Convenience block: convolution + optional BatchNorm + optional ReLU.

    ``padding`` may be 'same' (default), 'valid', an int, or an (h, w) pair.
    Conv weights are Kaiming-initialized; conv bias is off by default since a
    BatchNorm usually follows.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 padding='same',
                 bias=False,
                 bn=True,
                 relu=False):
        super(Conv2d, self).__init__()
        # normalize scalar arguments to (h, w) pairs
        if '__iter__' not in dir(kernel_size):
            kernel_size = (kernel_size, kernel_size)
        if '__iter__' not in dir(stride):
            stride = (stride, stride)
        if '__iter__' not in dir(dilation):
            dilation = (dilation, dilation)

        if padding == 'same':
            # 'same' padding for a dilated kernel: (effective_extent - 1) // 2.
            # Identical to the previous `e // 2 + (e % 2 - 1)` formula for e >= 1.
            pad_h = (kernel_size[0] + (kernel_size[0] - 1) * (dilation[0] - 1) - 1) // 2
            pad_w = (kernel_size[1] + (kernel_size[1] - 1) * (dilation[1] - 1) - 1) // 2
        elif padding == 'valid':
            # Bug fix: the old shared reduction formula turned 0 into -1,
            # which nn.Conv2d rejects.
            pad_h = 0
            pad_w = 0
        elif '__iter__' in dir(padding):
            # Bug fix: explicit padding p used to come out as p - 1.
            pad_h, pad_w = padding[0], padding[1]
        else:
            pad_h = pad_w = padding
        pad_size = (pad_h, pad_w)

        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_size, dilation, groups, bias=bias)
        self.reset_parameters()

        self.bn = nn.BatchNorm2d(out_channels) if bn is True else None
        self.relu = nn.ReLU(inplace=True) if relu is True else None

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x

    def reset_parameters(self):
        """Kaiming-normal init for the conv weight."""
        nn.init.kaiming_normal_(self.conv.weight)
class PAA_kernel(nn.Module):
    """Parallel axial attention kernel: a 1x1 bottleneck, separable 1xk and
    kx1 convs, height/width self-attention, and a dilated 3x3 fusion conv."""

    def __init__(self, in_channel, out_channel, receptive_size, stage_size=None):
        super(PAA_kernel, self).__init__()
        self.conv0 = Conv2d(in_channel, out_channel, 1)
        self.conv1 = Conv2d(out_channel, out_channel, kernel_size=(1, receptive_size))
        self.conv2 = Conv2d(out_channel, out_channel, kernel_size=(receptive_size, 1))
        self.conv3 = Conv2d(out_channel, out_channel, 3, dilation=receptive_size)
        self.Hattn = SelfAttention(out_channel, 'h', stage_size[0] if stage_size is not None else None)
        self.Wattn = SelfAttention(out_channel, 'w', stage_size[1] if stage_size is not None else None)

    def forward(self, x):
        feat = self.conv2(self.conv1(self.conv0(x)))
        return self.conv3(self.Hattn(feat) + self.Wattn(feat))
class PAA_e(nn.Module):
    """Encoder context block: four parallel branches (a 1x1 conv plus three
    PAA kernels with growing receptive fields) fused by a 3x3 conv, with a
    1x1 residual projection and ReLU."""

    def __init__(self, in_channel, out_channel, base_size=None, stage=None):
        super(PAA_e, self).__init__()
        self.relu = nn.ReLU(True)
        # fixed spatial size of this pyramid stage when base_size/stage given
        if base_size is not None and stage is not None:
            self.stage_size = (base_size[0] // (2**stage), base_size[1] // (2**stage))
        else:
            self.stage_size = None

        self.branch0 = Conv2d(in_channel, out_channel, 1)
        self.branch1 = PAA_kernel(in_channel, out_channel, 3, self.stage_size)
        self.branch2 = PAA_kernel(in_channel, out_channel, 5, self.stage_size)
        self.branch3 = PAA_kernel(in_channel, out_channel, 7, self.stage_size)

        self.conv_cat = Conv2d(4 * out_channel, out_channel, 3)
        self.conv_res = Conv2d(in_channel, out_channel, 1)

    def forward(self, x):
        branches = [self.branch0(x), self.branch1(x), self.branch2(x), self.branch3(x)]
        fused = self.conv_cat(torch.cat(branches, 1))
        return self.relu(fused + self.conv_res(x))
class PAA_d(nn.Module):
    """Decoder block: upsamples and concatenates pyramid features, applies
    axial self-attention and a conv stack; returns (features, logits)."""

    def __init__(self, in_channel, out_channel=1, depth=64, base_size=None, stage=None):
        super(PAA_d, self).__init__()
        self.conv1 = Conv2d(in_channel, depth, 3)
        self.conv2 = Conv2d(depth, depth, 3)
        self.conv3 = Conv2d(depth, depth, 3)
        self.conv4 = Conv2d(depth, depth, 3)
        self.conv5 = Conv2d(depth, out_channel, 3, bn=False)  # prediction head, no BN
        self.base_size = base_size
        self.stage = stage
        if base_size is not None and stage is not None:
            self.stage_size = (base_size[0] // (2**stage), base_size[1] // (2**stage))
        else:
            self.stage_size = [None, None]
        self.Hattn = SelfAttention(depth, 'h', self.stage_size[0])
        self.Wattn = SelfAttention(depth, 'w', self.stage_size[1])
        self.upsample = lambda img, size: F.interpolate(img, size=size, mode='bilinear', align_corners=True)

    def forward(self, fs):  # f3 f4 f5 -> f3 f2 f1
        # bring every deeper feature up to the spatial size of fs[0]
        target = fs[0]
        for i in range(1, len(fs)):
            fs[i] = self.upsample(fs[i], target.shape[-2:])
        feat = self.conv1(torch.cat(fs[::-1], dim=1))
        feat = self.conv2(self.Hattn(feat) + self.Wattn(feat))
        feat = self.conv4(self.conv3(feat))
        return feat, self.conv5(feat)
class SICA(nn.Module):
    """Saliency-guided context attention module.

    Splits a (sigmoid) saliency map into foreground / background / confusion
    probability maps — plus foreground/background of an optional Laplacian
    map when ``lmap_in`` is True — pools the input feature into one context
    vector per probability map, and attends from every pixel to those context
    vectors to refine the feature. Returns ``(refined_feature, logits)``.
    """

    def __init__(self, in_channel, out_channel=1, depth=64, base_size=None, stage=None, lmap_in=False):
        super(SICA, self).__init__()
        self.in_channel = in_channel
        self.depth = depth
        self.lmap_in = lmap_in
        # fixed working resolution of this pyramid stage (None -> use input size)
        if base_size is not None and stage is not None:
            self.stage_size = (base_size[0] // (2**stage), base_size[1] // (2**stage))
        else:
            self.stage_size = None

        self.conv_query = nn.Sequential(Conv2d(in_channel, depth, 3, relu=True), Conv2d(depth, depth, 3, relu=True))
        self.conv_key = nn.Sequential(Conv2d(in_channel, depth, 1, relu=True), Conv2d(depth, depth, 1, relu=True))
        self.conv_value = nn.Sequential(Conv2d(in_channel, depth, 1, relu=True), Conv2d(depth, depth, 1, relu=True))

        # number of probability/context maps: fg/bg/confusion (+ laplacian fg/bg)
        if self.lmap_in is True:
            self.ctx = 5
        else:
            self.ctx = 3

        self.conv_out1 = Conv2d(depth, depth, 3, relu=True)
        self.conv_out2 = Conv2d(in_channel + depth, depth, 3, relu=True)
        self.conv_out3 = Conv2d(depth, depth, 3, relu=True)
        self.conv_out4 = Conv2d(depth, out_channel, 1)

        # learnable decision thresholds for the saliency / laplacian maps
        self.threshold = Parameter(torch.tensor([0.5]))
        if self.lmap_in is True:
            self.lthreshold = Parameter(torch.tensor([0.5]))

    def forward(self, x, smap, lmap: Optional[torch.Tensor] = None):
        # assert not xor(self.lmap_in is True, lmap is not None)
        b, c, h, w = x.shape

        # compute class probability
        smap = F.interpolate(smap, size=x.shape[-2:], mode='bilinear', align_corners=False)
        smap = torch.sigmoid(smap)
        p = smap - self.threshold

        fg = torch.clip(p, 0, 1)  # foreground
        bg = torch.clip(-p, 0, 1)  # background
        cg = self.threshold - torch.abs(p)  # confusion area

        if self.lmap_in is True and lmap is not None:
            lmap = F.interpolate(lmap, size=x.shape[-2:], mode='bilinear', align_corners=False)
            lmap = torch.sigmoid(lmap)
            lp = lmap - self.lthreshold
            fp = torch.clip(lp, 0, 1)  # foreground
            bp = torch.clip(-lp, 0, 1)  # background
            prob = [fg, bg, cg, fp, bp]
        else:
            prob = [fg, bg, cg]
        prob = torch.cat(prob, dim=1)

        # reshape feature & prob to the stage's working resolution
        if self.stage_size is not None:
            shape = self.stage_size
            shape_mul = self.stage_size[0] * self.stage_size[1]
        else:
            shape = (h, w)
            shape_mul = h * w
        # NOTE(review): this view reinterprets (b, c, hw) memory as
        # (b, hw, c) WITHOUT a transpose — kept as-is from upstream InSPyReNet.
        f = F.interpolate(x, size=shape, mode='bilinear', align_corners=False).view(b, shape_mul, -1)
        prob = F.interpolate(prob, size=shape, mode='bilinear', align_corners=False).view(b, self.ctx, shape_mul)

        # compute context vector: one probability-weighted pooled vector per map
        context = torch.bmm(prob, f).permute(0, 2, 1).unsqueeze(3)  # b, 3, c

        # k q v compute: queries per pixel, keys/values per context vector
        query = self.conv_query(x).view(b, self.depth, -1).permute(0, 2, 1)
        key = self.conv_key(context).view(b, self.depth, -1)
        value = self.conv_value(context).view(b, self.depth, -1).permute(0, 2, 1)

        # compute similarity map (scaled dot-product, softmax over contexts)
        sim = torch.bmm(query, key)  # b, hw, c x b, c, 2
        sim = (self.depth**-.5) * sim
        sim = F.softmax(sim, dim=-1)

        # compute refined feature and fuse it back with the input
        context = torch.bmm(sim, value).permute(0, 2, 1).contiguous().view(b, -1, h, w)
        context = self.conv_out1(context)

        x = torch.cat([x, context], dim=1)
        x = self.conv_out2(x)
        x = self.conv_out3(x)
        out = self.conv_out4(x)
        return x, out
class ImagePyramid:
    """Gaussian/Laplacian image pyramid helper.

    Not an nn.Module: ``kernel`` is a plain tensor, hence the explicit
    ``cuda()`` method used by InSPyReNet.
    """

    def __init__(self, ksize=7, sigma=1, channels=1):
        self.ksize = ksize
        self.sigma = sigma
        self.channels = channels
        # 2D Gaussian kernel = outer product of two 1D kernels, replicated
        # per channel for a depthwise (grouped) convolution
        k = cv2.getGaussianKernel(ksize, sigma)
        k = np.outer(k, k)
        k = torch.tensor(k).float()
        self.kernel = k.repeat(channels, 1, 1, 1)

    def cuda(self):
        self.kernel = self.kernel.cuda()
        return self

    def expand(self, x):
        """Upsample x by 2: zero-insertion via pixel_shuffle, then Gaussian blur."""
        z = torch.zeros_like(x)
        x = torch.cat([x, z, z, z], dim=1)
        x = F.pixel_shuffle(x, 2)
        x = F.pad(x, (self.ksize // 2, ) * 4, mode='reflect')
        # x4 compensates for the three zero channels inserted above
        x = F.conv2d(x, self.kernel * 4, groups=self.channels)
        return x

    def reduce(self, x):
        """Downsample x by 2: Gaussian blur, then stride-2 subsampling."""
        x = F.pad(x, (self.ksize // 2, ) * 4, mode='reflect')
        x = F.conv2d(x, self.kernel, groups=self.channels)
        x = x[:, :, ::2, ::2]
        return x

    def deconstruct(self, x):
        """Split x into (reduced, laplacian) so that reconstruct() inverts it."""
        reduced_x = self.reduce(x)
        expanded_reduced_x = self.expand(reduced_x)
        if x.shape != expanded_reduced_x.shape:
            expanded_reduced_x = F.interpolate(expanded_reduced_x, x.shape[-2:])
        laplacian_x = x - expanded_reduced_x
        return reduced_x, laplacian_x

    def reconstruct(self, x, laplacian_x):
        """Upsample x by 2 and add the Laplacian detail back."""
        expanded_x = self.expand(x)
        # Bug fix: was `laplacian_x.shape != expanded_x` (a Size compared to a
        # Tensor), which is always True and forced a redundant bilinear
        # interpolation on every call, even when the shapes already matched.
        if laplacian_x.shape != expanded_x.shape:
            laplacian_x = F.interpolate(laplacian_x, expanded_x.shape[-2:], mode='bilinear', align_corners=True)
        return expanded_x + laplacian_x
class Transition:
    """Morphological transition-band detector.

    Marks pixels where dilation and erosion of the sigmoid map differ by more
    than 0.5, i.e. the uncertain band around object boundaries.
    """

    def __init__(self, k=3):
        ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
        self.kernel = torch.tensor(ellipse).float()

    def cuda(self):
        self.kernel = self.kernel.cuda()
        return self

    def __call__(self, x):
        prob = torch.sigmoid(x)
        grown = dilation(prob, self.kernel)
        shrunk = erosion(prob, self.kernel)
        return ((grown - shrunk) > .5).float()
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/saliency_detection/swin_transformer.py | Python | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu, Yutong Lin, Yixuan Wei
# --------------------------------------------------------
import collections.abc
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import warnings
from itertools import repeat
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill ``tensor`` in place from a truncated normal via inverse-CDF sampling.

    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    Cut & paste from PyTorch official master until it's in a few official releases - RW
    Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """

    def norm_cdf(x):
        # standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
            'The distribution of values may be incorrect.',
            stacklevel=2)

    with torch.no_grad():
        # Map the truncation bounds through the standard-normal CDF, draw
        # uniformly between them, then invert with erfinv.
        lower = norm_cdf((a - mean) / std)
        upper = norm_cdf((b - mean) / std)
        tensor.uniform_(2 * lower - 1, 2 * upper - 1)
        tensor.erfinv_()
        # scale/shift from standard normal to N(mean, std^2)
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)
        # clamp to guarantee values stay inside [a, b]
        tensor.clamp_(min=a, max=b)
        return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""In-place truncated-normal initializer.

    Fills ``tensor`` with values effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` restricted to the interval
    :math:`[a, b]`. The method works best when
    :math:`a \leq \text{mean} \leq b`.
    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Returns:
        The same tensor, filled in place.

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
# From PyTorch
def _ntuple(n):
    """Return a converter that broadcasts a scalar to an n-tuple and passes
    iterables through unchanged."""

    def parse(x):
        return x if isinstance(x, collections.abc.Iterable) else tuple(repeat(x, n))

    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize
    # rescale kept samples so the expected value is unchanged
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # delegate to the functional form; nn.Module.training gates the drop
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Two-layer MLP: linear -> activation -> dropout -> linear -> dropout.

    Hidden/output widths default to ``in_features`` when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a (B, H, W, C) tensor into non-overlapping square windows.

    Args:
        x: (B, H, W, C); H and W must be divisible by ``window_size``.
        window_size (int): side length of each window.

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    grid = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into (B, H, W, C).

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): side length of each window.
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
class WindowAttention(nn.Module):
    """ Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        # define a parameter table of relative position bias: one learned bias
        # per head for every possible 2D offset between two window positions
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # row-major flattening of the 2D offset into a single table index
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer('relative_position_index', relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """ Forward function.
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # single matmul produces q, k, v: (3, B_, num_heads, N, head_dim)
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # look up the learned bias for every token pair and add it to the logits
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # shifted-window mask: same mask for every sample in the batch
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock(nn.Module):
    """ Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self,
                 dim,
                 num_heads,
                 window_size=7,
                 shift_size=0,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size  # 0 -> W-MSA, >0 -> shifted (SW-MSA)
        self.mlp_ratio = mlp_ratio
        assert 0 <= self.shift_size < self.window_size, 'shift_size must in 0-window_size'

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        # spatial size of the current feature map; expected to be assigned by
        # the enclosing layer before forward() is called (not set in this class)
        self.H = None
        self.W = None

    def forward(self, x, mask_matrix):
        """ Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
            mask_matrix: Attention mask for cyclic shift.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, 'input feature has wrong size'

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape

        # cyclic shift; the mask hides attention across the wrap-around seam
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        # drop the padding added above
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        # FFN: both residual branches wrapped in stochastic depth
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x
class PatchMerging(nn.Module):
    """ Patch Merging Layer

    Downsamples a (B, H*W, C) feature map by 2x in each spatial dimension:
    the four channels of every 2x2 neighbourhood are concatenated (4*C),
    normalised, and linearly projected down to 2*C channels.

    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x, H, W):
        """Merge 2x2 patches.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.

        Returns:
            Tensor of shape (B, ceil(H/2)*ceil(W/2), 2*C).
        """
        B, L, C = x.shape
        assert L == H * W, 'input feature has wrong size'
        x = x.view(B, H, W, C)
        # Zero-pad odd spatial dims on the bottom/right so 2x2 grouping works.
        if H % 2 or W % 2:
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
        # The four interleaved sub-grids of every 2x2 neighbourhood,
        # concatenated in the canonical Swin order.
        quadrants = (
            x[:, 0::2, 0::2, :],  # top-left      B H/2 W/2 C
            x[:, 1::2, 0::2, :],  # bottom-left   B H/2 W/2 C
            x[:, 0::2, 1::2, :],  # top-right     B H/2 W/2 C
            x[:, 1::2, 1::2, :],  # bottom-right  B H/2 W/2 C
        )
        merged = torch.cat(quadrants, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Stacks ``depth`` SwinTransformerBlocks, alternating regular (shift 0)
    and shifted (shift ``window_size // 2``) window attention, and optionally
    downsamples at the end of the stage.

    Args:
        dim (int): Number of feature channels
        depth (int): Depths of this stage.
        num_heads (int): Number of attention head.
        window_size (int): Local window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self,
                 dim,
                 depth,
                 num_heads,
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 downsample=None,
                 use_checkpoint=False):
        super().__init__()
        self.window_size = window_size
        self.shift_size = window_size // 2
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even-indexed blocks use W-MSA, odd-indexed use SW-MSA
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(
                dim=dim,
                num_heads=num_heads,
                window_size=window_size,
                shift_size=0 if (i % 2 == 0) else window_size // 2,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop,
                attn_drop=attn_drop,
                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                norm_layer=norm_layer) for i in range(depth)
        ])
        # patch merging layer (halves the spatial resolution between stages)
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, H, W):
        """ Forward function.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.

        Returns:
            (x, H, W, x_down, Wh, Ww): the stage output at (H, W) plus the
            downsampled output at (Wh, Ww); when there is no downsample
            layer the same tensor/resolution is returned twice.
        """
        # calculate attention mask for SW-MSA; recomputed per call because
        # the padded resolution Hp/Wp depends on the current H/W
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
        h_slices = (slice(0, -self.window_size), slice(-self.window_size,
                                                       -self.shift_size), slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size), slice(-self.window_size,
                                                       -self.shift_size), slice(-self.shift_size, None))
        # label each of the 3x3 shifted regions with a distinct id
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        # position pairs belonging to different regions get -100 added to
        # their attention logits (effectively masked out), same-region pairs 0
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        for blk in self.blocks:
            # blocks read the resolution from attributes (see SwinTransformerBlock)
            blk.H, blk.W = H, W
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, attn_mask)
            else:
                x = blk(x, attn_mask)
        if self.downsample is not None:
            x_down = self.downsample(x, H, W)
            Wh, Ww = (H + 1) // 2, (W + 1) // 2
            return x, H, W, x_down, Wh, Ww
        else:
            return x, H, W, x, H, W
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Splits an image into non-overlapping patches and projects each patch to
    ``embed_dim`` channels with a strided convolution, optionally followed
    by a normalization layer.

    Args:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        """Project ``x`` (B, C, H, W) to patch embeddings (B, embed_dim, Wh, Ww)."""
        _, _, height, width = x.size()
        ph, pw = self.patch_size
        # Zero-pad right/bottom so both spatial dims divide the patch size.
        if width % pw != 0:
            x = F.pad(x, (0, pw - width % pw))
        if height % ph != 0:
            x = F.pad(x, (0, 0, 0, ph - height % ph))
        x = self.proj(x)  # B C Wh Ww
        if self.norm is not None:
            # Normalise over channels in (B, Wh*Ww, C) layout, then restore.
            wh, ww = x.size(2), x.size(3)
            x = self.norm(x.flatten(2).transpose(1, 2))
            x = x.transpose(1, 2).view(-1, self.embed_dim, wh, ww)
        return x
class SwinTransformer(nn.Module):
    """ Swin Transformer backbone.
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
      https://arxiv.org/pdf/2103.14030

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self,
                 pretrain_img_size=224,
                 patch_size=4,
                 in_chans=3,
                 embed_dim=96,
                 depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24],
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 ape=False,
                 patch_norm=True,
                 out_indices=(0, 1, 2, 3),
                 frozen_stages=-1,
                 use_checkpoint=False):
        super().__init__()
        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        # absolute position embedding (sized for the pretrain resolution;
        # interpolated to the actual resolution in forward)
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: rate increases linearly over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build layers; stage i works at embed_dim * 2**i channels
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2**i_layer),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint)
            self.layers.append(layer)
        num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
        self.num_features = num_features
        # add a norm layer for each output stage (registered as norm{i})
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f'norm{i_layer}'
            self.add_module(layer_name, layer)
        self._freeze_stages()

    def _freeze_stages(self):
        # Freeze the patch embedding, then (progressively) the position
        # embedding, the positional dropout, and stages 0..frozen_stages-2.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False
        if self.frozen_stages >= 1 and self.ape:
            self.absolute_pos_embed.requires_grad = False
        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """

        def _init_weights(m):
            # truncated-normal for linears, unit-affine for LayerNorms
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

        if isinstance(pretrained, str):
            self.apply(_init_weights)
            # NOTE(review): checkpoint loading is disabled here; weights are
            # expected to be loaded by the factory functions instead.
            # logger = get_root_logger()
            # load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function.

        Returns a tuple of feature maps: the raw patch-embedding output
        followed by one (B, C_i, H_i, W_i) map per index in out_indices.
        """
        x = self.patch_embed(x)
        Wh, Ww = x.size(2), x.size(3)
        if self.ape:
            # interpolate the position embedding to the corresponding size
            absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
            x = (x + absolute_pos_embed)  # still B C Wh Ww at this point
        # the stem feature itself is the first backbone output
        outs = [x.contiguous()]
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
            if i in self.out_indices:
                norm_layer = getattr(self, f'norm{i}')
                x_out = norm_layer(x_out)
                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                outs.append(out)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(SwinTransformer, self).train(mode)
        self._freeze_stages()
def SwinT(pretrained=True):
    """Construct a Swin-Tiny backbone.

    Args:
        pretrained (bool): When exactly ``True``, load the local checkpoint
            (non-strict) from ``data/backbone_ckpt``.

    Returns:
        SwinTransformer: the configured backbone.
    """
    model = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7)
    if pretrained is True:
        ckpt = torch.load('data/backbone_ckpt/swin_tiny_patch4_window7_224.pth')
        model.load_state_dict(ckpt['model'], strict=False)
    return model
def SwinS(pretrained=True):
    """Construct a Swin-Small backbone.

    Args:
        pretrained (bool): When exactly ``True``, load the local checkpoint
            (non-strict) from ``data/backbone_ckpt``.

    Returns:
        SwinTransformer: the configured backbone.
    """
    model = SwinTransformer(embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7)
    if pretrained is True:
        ckpt = torch.load('data/backbone_ckpt/swin_small_patch4_window7_224.pth')
        model.load_state_dict(ckpt['model'], strict=False)
    return model
def SwinB(pretrained=True):
    """Construct a Swin-Base backbone (384px, 22k->1k pretraining).

    Args:
        pretrained (bool): When exactly ``True``, load the local checkpoint
            (non-strict) from ``data/backbone_ckpt``.

    Returns:
        SwinTransformer: the configured backbone.
    """
    model = SwinTransformer(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12)
    if pretrained is True:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # NOTE(review): unlike the other factories, this checkpoint is loaded
        # without indexing ['model'] — presumably the shipped file stores the
        # raw state dict directly; confirm against the checkpoint on disk.
        state = torch.load('data/backbone_ckpt/swin_base_patch4_window12_384_22kto1k.pth', map_location=device)
        model.load_state_dict(state, strict=False)
    return model
def SwinL(pretrained=True):
    """Construct a Swin-Large backbone (384px, 22k->1k pretraining).

    Args:
        pretrained (bool): When exactly ``True``, load the local checkpoint
            (non-strict) from ``data/backbone_ckpt``.

    Returns:
        SwinTransformer: the configured backbone.
    """
    model = SwinTransformer(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12)
    if pretrained is True:
        ckpt = torch.load('data/backbone_ckpt/swin_large_patch4_window12_384_22kto1k.pth')
        model.load_state_dict(ckpt['model'], strict=False)
    return model
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/utils/__init__.py | Python | from .misc import img2tensor, load_file_from_url, scandir, tensor2img_fast
# Public API of the utils package (re-exported from .misc).
__all__ = ['load_file_from_url', 'img2tensor', 'scandir', 'tensor2img_fast']
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/utils/misc.py | Python | import cv2
import os
import os.path as osp
import torch
from torch.hub import download_url_to_file, get_dir
from urllib.parse import urlparse
# Absolute path to the repository root: three dirname() hops up from this file.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write an image array to disk, optionally creating parent directories.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Destination image file path.
        params (None | list): Encoding parameters forwarded to ``cv2.imwrite``.
        auto_mkdir (bool): If True, create the parent folder of ``file_path``
            when it does not exist yet.

    Returns:
        bool: Success flag as reported by OpenCV.
    """
    if auto_mkdir:
        parent_dir = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(parent_dir, exist_ok=True)
    return cv2.imwrite(file_path, img, params)
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert numpy image(s) to torch tensor(s).

    Args:
        imgs (list[ndarray] | ndarray): Input image(s), HWC layout.
        bgr2rgb (bool): Whether to change bgr to rgb (only applied to
            3-channel images).
        float32 (bool): Whether to cast the result to float32.

    Returns:
        list[tensor] | tensor: CHW tensor image(s). A single input yields a
        single tensor; a list yields a list.
    """

    def _convert(img):
        if bgr2rgb and img.shape[2] == 3:
            # cv2.cvtColor rejects float64 input, so downcast first.
            if img.dtype == 'float64':
                img = img.astype('float32')
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        tensor = torch.from_numpy(img.transpose(2, 0, 1))
        return tensor.float() if float32 else tensor

    if isinstance(imgs, list):
        return [_convert(img) for img in imgs]
    return _convert(imgs)
def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
    """Convert a (1, c, h, w) tensor to a uint8 HWC image array.

    This implementation is slightly faster than tensor2img.
    It now only supports torch tensor with shape (1, c, h, w).

    Args:
        tensor (Tensor): Now only support torch tensor with (1, c, h, w).
        rgb2bgr (bool): Whether to change rgb to bgr. Default: True.
        min_max (tuple[int]): min and max values for clamp.

    Returns:
        ndarray: uint8 image of shape (h, w, c).
    """
    # Bug fix: the previous in-place ``clamp_`` ran on a ``.detach()``-ed
    # view, which shares storage with the input and silently mutated the
    # caller's tensor. The out-of-place ``clamp`` produces the same output
    # without the side effect.
    output = tensor.squeeze(0).detach().clamp(*min_max).permute(1, 2, 0)
    output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255
    output = output.type(torch.uint8).cpu().numpy()
    if rgb2bgr:
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    return output
def load_file_from_url(url, model_dir=None, progress=True, file_name=None, save_dir=None):
    """Download a file from ``url`` into a local cache and return its path.

    Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py

    Args:
        url (str): URL to download.
        model_dir (str | None): Cache subdirectory; defaults to torch hub's
            ``checkpoints`` directory.
        progress (bool): Show a download progress bar.
        file_name (str | None): Override the filename derived from the URL.
        save_dir (str | None): Full save directory; defaults to
            ``ROOT_DIR/model_dir``.

    Returns:
        str: Absolute path of the cached file.
    """
    if model_dir is None:
        model_dir = os.path.join(get_dir(), 'checkpoints')
    if save_dir is None:
        save_dir = os.path.join(ROOT_DIR, model_dir)
    os.makedirs(save_dir, exist_ok=True)
    # Prefer the explicit name; otherwise take the last path segment of the URL.
    filename = file_name if file_name is not None else os.path.basename(urlparse(url).path)
    cached_file = os.path.abspath(os.path.join(save_dir, filename))
    if not os.path.exists(cached_file):
        print(f'Downloading: "{url}" to {cached_file}\n')
        download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
    return cached_file
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
    """Scan a directory to find the interested files.

    Hidden entries (names starting with '.') are never yielded; hidden
    directories are still descended into when ``recursive`` is True.

    Args:
        dir_path (str): Path of the directory.
        suffix (str | tuple(str), optional): File suffix that we are
            interested in. Default: None.
        recursive (bool, optional): If set to True, recursively scan the
            directory. Default: False.
        full_path (bool, optional): If set to True, include the dir_path.
            Default: False.

    Returns:
        A generator for all the interested files with relative paths.
    """
    if (suffix is not None) and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')
    root = dir_path

    def _scandir(dir_path, suffix, recursive):
        for entry in os.scandir(dir_path):
            if not entry.name.startswith('.') and entry.is_file():
                if full_path:
                    return_path = entry.path
                else:
                    return_path = osp.relpath(entry.path, root)
                if suffix is None:
                    yield return_path
                elif return_path.endswith(suffix):
                    yield return_path
            elif entry.is_dir() and recursive:
                # Bug fix: only recurse into real directories. The previous
                # version recursed into *any* non-yielded entry, so a hidden
                # file (e.g. '.DS_Store') made os.scandir raise
                # NotADirectoryError during recursive scans.
                yield from _scandir(entry.path, suffix=suffix, recursive=recursive)

    return _scandir(dir_path, suffix=suffix, recursive=recursive)
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.