code stringlengths 17 6.64M |
|---|
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert Cityscapes polygon annotations to COCO instance-seg JSON.

    Walks each annotation directory under ``data_dir``, collects per-image
    polygon instances for the Cityscapes "instance-only" categories, and
    writes one COCO-style JSON file per data set into ``out_dir``.
    """
    sets = ['gtFine_val']
    ann_dirs = ['gtFine_trainvaltest/gtFine/val']
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}
    category_instancesonly = [
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle',
    ]
    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        # Hoist values that are invariant over the directory walk (the
        # original recomputed them for every file).
        prefix = data_set.split('_')[0]
        suffix = ends_in % prefix
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if not filename.endswith(suffix):
                    continue
                if len(images) % 50 == 0:
                    print('Processed %s images, %s annotations' % (
                        len(images), len(annotations)))
                # Context manager so the handle is closed promptly; the
                # original json.load(open(...)) leaked it.
                with open(os.path.join(root, filename)) as f:
                    json_ann = json.load(f)
                image = {}
                image['id'] = img_id
                img_id += 1
                image['width'] = json_ann['imgWidth']
                image['height'] = json_ann['imgHeight']
                stem = filename[:-len(suffix)]
                image['file_name'] = stem + 'leftImg8bit.png'
                image['seg_file_name'] = stem + '%s_instanceIds.png' % prefix
                images.append(image)
                fullname = os.path.join(root, image['seg_file_name'])
                objects = cs.instances2dict_with_polygons(
                    [fullname], verbose=False)[fullname]
                for object_cls in objects:
                    if object_cls not in category_instancesonly:
                        continue
                    for obj in objects[object_cls]:
                        if obj['contours'] == []:
                            print('Warning: empty contours.')
                            continue
                        len_p = [len(p) for p in obj['contours']]
                        if min(len_p) <= 4:
                            # Degenerate polygon (fewer than 3 points).
                            print('Warning: invalid contours.')
                            continue
                        ann = {}
                        ann['id'] = ann_id
                        ann_id += 1
                        ann['image_id'] = image['id']
                        ann['segmentation'] = obj['contours']
                        if object_cls not in category_dict:
                            category_dict[object_cls] = cat_id
                            cat_id += 1
                        ann['category_id'] = category_dict[object_cls]
                        ann['iscrowd'] = 0
                        ann['area'] = obj['pixelCount']
                        ann['bbox'] = bboxs_util.xyxy_to_xywh(
                            segms_util.polys_to_boxes(
                                [ann['segmentation']])).tolist()[0]
                        annotations.append(ann)
        ann_dict['images'] = images
        categories = [{'id': category_dict[name], 'name': name}
                      for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print('Num categories: %s' % len(categories))
        print('Num images: %s' % len(images))
        print('Num annotations: %s' % len(annotations))
        # json.dumps returns str: open in text mode ('wb' requires bytes and
        # fails under Python 3).
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
|
def parse_args():
    """Parse command-line options; print help and exit(1) when no args given."""
    parser = argparse.ArgumentParser(
        description='Convert a COCO pre-trained model for use with Cityscapes')
    parser.add_argument(
        '--coco_model', dest='coco_model_file_name',
        help='Pretrained network weights file path', default=None, type=str)
    parser.add_argument(
        '--convert_func', dest='convert_func',
        help='Blob conversion function', default='cityscapes_to_coco',
        type=str)
    parser.add_argument(
        '--output', dest='out_file_name',
        help='Output file path', default=None, type=str)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
|
def convert_coco_blobs_to_cityscape_blobs(model_dict, convert_func=None):
    """Convert class-indexed COCO blobs in ``model_dict`` to Cityscapes, in place.

    Blobs whose leading dimension is NUM_COCO_CLS (class scores) or
    4 * NUM_COCO_CLS (bbox regression) are converted.

    convert_func: name of the class-id mapping function. Defaults to the
    module-level ``args.convert_func`` for backward compatibility (the
    original read the global ``args`` implicitly, which broke any caller
    that had not populated it via parse_args()).
    """
    if convert_func is None:
        convert_func = args.convert_func
    for k, v in model_dict['blobs'].items():
        if v.shape[0] == NUM_COCO_CLS or v.shape[0] == 4 * NUM_COCO_CLS:
            coco_blob = model_dict['blobs'][k]
            print('Converting COCO blob {} with shape {}'.format(
                k, coco_blob.shape))
            cs_blob = convert_coco_blob_to_cityscapes_blob(
                coco_blob, convert_func)
            print(' -> converted shape {}'.format(cs_blob.shape))
            model_dict['blobs'][k] = cs_blob
|
def convert_coco_blob_to_cityscapes_blob(coco_blob, convert_func):
    """Remap a COCO class-indexed blob to Cityscapes classes.

    Rows with a COCO counterpart (per ``convert_func``) are copied over; the
    remaining rows are filled with Gaussian noise matching the COCO blob's
    mean/std.
    """
    coco_shape = coco_blob.shape
    leading_factor = int(coco_shape[0] / NUM_COCO_CLS)
    tail_shape = list(coco_shape[1:])
    assert leading_factor in (1, 4)
    # View the blob as (num_coco_classes, leading_factor, *tail).
    coco_blob = coco_blob.reshape([NUM_COCO_CLS, -1] + tail_shape)
    std = coco_blob.std()
    mean = coco_blob.mean()
    cs_shape = [NUM_CS_CLS] + list(coco_blob.shape[1:])
    cs_blob = (np.random.randn(*cs_shape) * std + mean).astype(np.float32)
    mapper = getattr(cs, convert_func)
    for cs_cls_id in range(NUM_CS_CLS):
        coco_cls_id = mapper(cs_cls_id)
        if coco_cls_id >= 0:
            cs_blob[cs_cls_id] = coco_blob[coco_cls_id]
    return cs_blob.reshape([NUM_CS_CLS * leading_factor] + tail_shape)
|
def remove_momentum(model_dict):
    """Delete all '*_momentum' solver-state blobs from ``model_dict`` in place.

    Iterates over a snapshot of the keys: deleting entries while iterating
    the live ``.keys()`` view raises RuntimeError under Python 3.
    """
    for k in list(model_dict['blobs'].keys()):
        if k.endswith('_momentum'):
            del model_dict['blobs'][k]
|
def load_and_convert_coco_model(args):
    """Load a pickled COCO model, strip momentum blobs, convert to Cityscapes.

    Returns the converted model dict.
    """
    # Pickle files are binary: 'rb' is required ('r' breaks on Python 3 and
    # on platforms with newline translation). The context manager also
    # guarantees the handle is closed.
    with open(args.coco_model_file_name, 'rb') as f:
        model_dict = pickle.load(f)
    remove_momentum(model_dict)
    convert_coco_blobs_to_cityscape_blobs(model_dict)
    return model_dict
|
def factory(k):
    """Return dataset path info for Visual Genome keys ('vg*'); None otherwise."""
    if not k.startswith('vg'):
        return None
    ds_name = k[:k.find('_')]
    return {
        IM_DIR: _DATA_DIR + '/vg/images/',
        ANN_FN: _DATA_DIR + '/%s/instances_%s.json' % (ds_name, k),
    }
|
def get_coco_dataset():
    """A dummy COCO dataset that includes only the 'classes' field."""
    classes = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle',
        'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
        'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
        'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase',
        'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
        'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife',
        'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli',
        'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
        'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
        'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush',
    ]
    ds = AttrDict()
    # Map contiguous class index -> class name (0 is background).
    ds.classes = dict(enumerate(classes))
    return ds
|
def evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir, use_matlab=False):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection (the latter two only when enabled
    in the config).
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab)
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        mask_results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(mask_results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    if cfg.MODEL.KEYPOINTS_ON:
        keyp_results = evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir)
        all_results[dataset.name].update(keyp_results[dataset.name])
        logger.info('Evaluating keypoints is done!')
    return all_results
|
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection.

    COCO-style datasets — and, with a warning, Cityscapes and Visual Genome —
    are scored by the COCO json evaluator; PASCAL VOC uses its own evaluator.
    Raises NotImplementedError for anything else.
    """
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    # The original duplicated the identical COCO-evaluator call in three
    # branches; keep the branch order (and warnings) but evaluate once.
    if _use_json_dataset_evaluator(dataset):
        use_coco_eval = True
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        use_coco_eval = True
    elif _use_vg_evaluator(dataset):
        logger.warn('Visual Genome bbox evaluated using COCO metrics/conversions')
        use_coco_eval = True
    elif _use_voc_evaluator(dataset):
        use_coco_eval = False
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name))
    if use_coco_eval:
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp,
            cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    else:
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab)
        box_results = _voc_eval_to_box_results(voc_eval)
    return OrderedDict([(dataset.name, box_results)])
|
def evaluate_masks(dataset, all_boxes, all_segms, output_dir):
    """Evaluate instance segmentation."""
    logger.info('Evaluating segmentations')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        mask_results = _coco_eval_to_mask_results(
            json_dataset_evaluator.evaluate_masks(
                dataset, all_boxes, all_segms, output_dir,
                use_salt=not_comp, cleanup=not_comp))
    elif _use_cityscapes_evaluator(dataset):
        mask_results = _cs_eval_to_mask_results(
            cs_json_dataset_evaluator.evaluate_masks(
                dataset, all_boxes, all_segms, output_dir,
                use_salt=not_comp, cleanup=not_comp))
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name))
    return OrderedDict([(dataset.name, mask_results)])
|
def evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir):
    """Evaluate human keypoint detection (i.e., 2D pose estimation)."""
    logger.info('Evaluating detections')
    assert dataset.name.startswith('keypoints_coco_'), \
        'Only COCO keypoints are currently supported'
    not_comp = not cfg.TEST.COMPETITION_MODE
    coco_eval = json_dataset_evaluator.evaluate_keypoints(
        dataset, all_boxes, all_keyps, output_dir,
        use_salt=not_comp, cleanup=not_comp)
    keypoint_results = _coco_eval_to_keypoint_results(coco_eval)
    return OrderedDict([(dataset.name, keypoint_results)])
|
def evaluate_box_proposals(dataset, roidb):
    """Evaluate bounding box object proposals (average recall at 100/1000)."""
    res = _empty_box_proposal_results()
    areas = {'all': '', 'small': 's', 'medium': 'm', 'large': 'l'}
    for limit in (100, 1000):
        for area, suffix in areas.items():
            stats = json_dataset_evaluator.evaluate_box_proposals(
                dataset, roidb, area=area, limit=limit)
            res['box_proposal']['AR{}@{:d}'.format(suffix, limit)] = stats['ar']
    return OrderedDict([(dataset.name, res)])
|
def log_box_proposal_results(results):
    """Log bounding box proposal results, one aligned line per metric."""
    for dataset, dataset_results in results.items():
        proposals = dataset_results['box_proposal']
        # Pad metric names so the values line up in the log.
        pad = max(len(name) for name in proposals)
        logger.info(dataset)
        for name, value in proposals.items():
            logger.info('{}: {:.3f}'.format(name.ljust(pad), value))
|
def log_copy_paste_friendly_results(results):
    """Log results in a format that makes it easy to copy-and-paste in a
    spreadsheet. Lines are prefixed with 'copypaste: ' to make grepping easy.
    """
    for dataset, tasks in results.items():
        logger.info('copypaste: Dataset: {}'.format(dataset))
        for task, metrics in tasks.items():
            logger.info('copypaste: Task: {}'.format(task))
            # One CSV line of metric names, then one of their values.
            logger.info('copypaste: ' + ','.join(metrics.keys()))
            logger.info('copypaste: ' + ','.join(
                '{:.4f}'.format(v) for v in metrics.values()))
|
def check_expected_results(results, atol=0.005, rtol=0.1):
    """Check actual results against expected results stored in
    cfg.EXPECTED_RESULTS. Optionally email if the match exceeds the specified
    tolerance.

    Expected results should take the form of a list of expectations, each
    specified by four elements: [dataset, task, metric, expected value]. For
    example: [['coco_2014_minival', 'box_proposal', 'AR@1000', 0.387], ...].
    """
    # Nothing to check when no expectations are configured.
    if (len(cfg.EXPECTED_RESULTS) == 0):
        return
    for (dataset, task, metric, expected_val) in cfg.EXPECTED_RESULTS:
        assert (dataset in results), 'Dataset {} not in results'.format(dataset)
        assert (task in results[dataset]), 'Task {} not in results'.format(task)
        assert (metric in results[dataset][task]), 'Metric {} not in results'.format(metric)
        actual_val = results[dataset][task][metric]
        # Combined absolute + relative tolerance, numpy.isclose style.
        err = abs((actual_val - expected_val))
        tol = (atol + (rtol * abs(expected_val)))
        msg = '{} > {} > {} sanity check (actual vs. expected): {:.3f} vs. {:.3f}, err={:.3f}, tol={:.3f}'.format(dataset, task, metric, actual_val, expected_val, err, tol)
        if (err > tol):
            msg = ('FAIL: ' + msg)
            logger.error(msg)
            # Optionally notify by email, including job identifiers from the
            # environment when available.
            if (cfg.EXPECTED_RESULTS_EMAIL != ''):
                subject = 'Detectron end-to-end test failure'
                job_name = (os.environ['DETECTRON_JOB_NAME'] if ('DETECTRON_JOB_NAME' in os.environ) else '<unknown>')
                job_id = (os.environ['WORKFLOW_RUN_ID'] if ('WORKFLOW_RUN_ID' in os.environ) else '<unknown>')
                body = ['Name:', job_name, 'Run ID:', job_id, 'Failure:', msg, 'Config:', pprint.pformat(cfg), 'Env:', pprint.pformat(dict(os.environ))]
                send_email(subject, '\n\n'.join(body), cfg.EXPECTED_RESULTS_EMAIL)
        else:
            msg = ('PASS: ' + msg)
            logger.info(msg)
|
def _use_json_dataset_evaluator(dataset):
'Check if the dataset uses the general json dataset evaluator.'
return (dataset.name.startswith('coco') or cfg.TEST.FORCE_JSON_DATASET_EVAL)
|
def _use_cityscapes_evaluator(dataset):
'Check if the dataset uses the Cityscapes dataset evaluator.'
return (dataset.name.find('cityscapes_') > (- 1))
|
def _use_vg_evaluator(dataset):
'Check if the dataset uses the Cityscapes dataset evaluator.'
return dataset.name.startswith('vg')
|
def _use_voc_evaluator(dataset):
'Check if the dataset uses the PASCAL VOC dataset evaluator.'
return (dataset.name[:4] == 'voc_')
|
def _coco_eval_to_box_results(coco_eval):
    """Copy the six COCO box AP stats into an empty results dict."""
    res = _empty_box_results()
    if coco_eval is not None:
        stats = coco_eval.stats
        box = res['box']
        for key, idx in (('AP', COCO_AP), ('AP50', COCO_AP50),
                         ('AP75', COCO_AP75), ('APs', COCO_APS),
                         ('APm', COCO_APM), ('APl', COCO_APL)):
            box[key] = stats[idx]
    return res
|
def _coco_eval_to_mask_results(coco_eval):
    """Copy the six COCO mask AP stats into an empty results dict."""
    res = _empty_mask_results()
    if coco_eval is not None:
        stats = coco_eval.stats
        mask = res['mask']
        for key, idx in (('AP', COCO_AP), ('AP50', COCO_AP50),
                         ('AP75', COCO_AP75), ('APs', COCO_APS),
                         ('APm', COCO_APM), ('APl', COCO_APL)):
            mask[key] = stats[idx]
    return res
|
def _coco_eval_to_keypoint_results(coco_eval):
    """Copy the five COCO keypoint AP stats into an empty results dict."""
    res = _empty_keypoint_results()
    if coco_eval is not None:
        stats = coco_eval.stats
        keypoint = res['keypoint']
        for key, idx in (('AP', COCO_AP), ('AP50', COCO_AP50),
                         ('AP75', COCO_AP75), ('APm', COCO_KPS_APM),
                         ('APl', COCO_KPS_APL)):
            keypoint[key] = stats[idx]
    return res
|
def _voc_eval_to_box_results(voc_eval):
    # Placeholder: VOC metrics are logged by the evaluator itself; nothing
    # is copied into the results dict, so all entries stay at -1.
    return _empty_box_results()
|
def _cs_eval_to_mask_results(cs_eval):
    # Placeholder: Cityscapes mask metrics are reported by the Cityscapes
    # evaluator itself; the results dict keeps its -1 sentinel values.
    return _empty_mask_results()
|
def _empty_box_results():
return OrderedDict({'box': OrderedDict([('AP', (- 1)), ('AP50', (- 1)), ('AP75', (- 1)), ('APs', (- 1)), ('APm', (- 1)), ('APl', (- 1))])})
|
def _empty_mask_results():
return OrderedDict({'mask': OrderedDict([('AP', (- 1)), ('AP50', (- 1)), ('AP75', (- 1)), ('APs', (- 1)), ('APm', (- 1)), ('APl', (- 1))])})
|
def _empty_keypoint_results():
return OrderedDict({'keypoint': OrderedDict([('AP', (- 1)), ('AP50', (- 1)), ('AP75', (- 1)), ('APm', (- 1)), ('APl', (- 1))])})
|
def _empty_box_proposal_results():
return OrderedDict({'box_proposal': OrderedDict([('AR@100', (- 1)), ('ARs@100', (- 1)), ('ARm@100', (- 1)), ('ARl@100', (- 1)), ('AR@1000', (- 1)), ('ARs@1000', (- 1)), ('ARm@1000', (- 1)), ('ARl@1000', (- 1))])})
|
def clean_string(string):
    """Normalize a predicate string and map it through the relation alias dict."""
    predicate = sentence_preprocess(string)
    # Fall back to the normalized predicate itself when it has no alias.
    return rel_alias_dict.get(predicate, predicate)
|
def sentence_preprocess(phrase):
    """Preprocess a sentence: lowercase, clean up weird chars, remove punctuation."""
    # Replace a handful of troublesome unicode characters before stripping
    # punctuation.
    replacements = {'½': 'half', '—': '-', '™': '', '¢': 'cent', 'ç': 'c',
                    'û': 'u', 'é': 'e', '°': ' degree', 'è': 'e', '…': ''}
    # The original round-tripped through UTF-8 bytes and used the
    # Python 2-only str.translate(None, ...) API, which breaks on Python 3;
    # operate on str directly instead.
    phrase = phrase.strip(' ')
    for old, new in replacements.items():
        phrase = phrase.replace(old, new)
    return phrase.lower().translate(str.maketrans('', '', string.punctuation))
|
def preprocess_predicates(data, alias_dict=None):
    """Normalize every relationship predicate in ``data``, in place.

    alias_dict maps normalized predicates to canonical aliases. The original
    used a mutable default argument ({}); ``None`` avoids sharing state
    across calls while remaining call-compatible.
    """
    if alias_dict is None:
        alias_dict = {}
    for img in data:
        for relation in img['relationships']:
            predicate = sentence_preprocess(relation['predicate'])
            # Canonicalize through the alias dict when an alias exists.
            relation['predicate'] = alias_dict.get(predicate, predicate)
|
def make_alias_dict(dict_file):
    """Create an alias dictionary from a file.

    Each line of ``dict_file`` is a comma-separated alias group whose first
    entry is canonical. Returns (alias -> canonical name dict, list of
    canonical names, one per line).
    """
    out_dict = {}
    vocab = []
    # Context manager so the file is closed (the original leaked the handle
    # returned by open()).
    with open(dict_file, 'r') as f:
        for line in f:
            alias = line.strip('\n').strip('\r').split(',')
            # If the canonical name was itself seen earlier as an alias,
            # chain to its target so all spellings share one canonical form.
            alias_target = (alias[0] if (alias[0] not in out_dict)
                            else out_dict[alias[0]])
            for a in alias:
                out_dict[a] = alias_target
            vocab.append(alias_target)
    return (out_dict, vocab)
|
def clean_relations(string):
    """Clean a relation string; return [cleaned] or [] when nothing remains."""
    cleaned = clean_string(string)
    return [cleaned] if cleaned else []
|
def get_synset_embedding(synset, word_vectors, get_vector):
    """Average word embeddings over a WordNet synset's lemma names.

    Returns None when no lemma yields a usable (non-zero) embedding.
    """
    lemmas = wn.synset(synset).lemma_names()
    class_name = ', '.join(name.replace('_', ' ') for name in lemmas).lower()
    feat = np.zeros(feat_len)
    cnt_word = 0
    for option in class_name.split(','):
        now_feat = get_embedding(option.strip(), word_vectors, get_vector)
        # Only count lemmas that produced a non-zero embedding.
        if np.abs(now_feat.sum()) > 0:
            cnt_word += 1
            feat += now_feat
    if cnt_word > 0:
        feat = feat / cnt_word
    return None if np.abs(feat.sum()) == 0 else feat
|
def get_embedding(entity_str, word_vectors, get_vector):
    """Look up an embedding for ``entity_str``, falling back to averaging the
    embeddings of its space/hyphen/underscore-separated tokens.

    Tokens with no embedding are skipped; if none match, an all-zero vector
    of length ``feat_len`` is returned.
    """
    # The original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt and SystemExit; narrow them to Exception.
    try:
        return get_vector(word_vectors, entity_str)
    except Exception:
        pass
    feat = np.zeros(feat_len)
    cnt_word = 0
    for token in filter(None, re.split('[ \\-_]+', entity_str)):
        try:
            feat = feat + get_vector(word_vectors, token)
            cnt_word = cnt_word + 1
        except Exception:
            continue
    if cnt_word > 0:
        feat = feat / cnt_word
    return feat
|
def get_vector(word_vectors, word):
    """Return ``word``'s embedding as a numpy array.

    Raises NotImplementedError for out-of-vocabulary words.
    """
    if word not in word_vectors.stoi:
        raise NotImplementedError
    return word_vectors[word].numpy()
|
def filter_annotations(ds, func):
    """Return a deep copy of ``ds`` with its 'annotations' passed through ``func``."""
    filtered = copy.deepcopy(ds)
    filtered['annotations'] = func(filtered['annotations'])
    return filtered
|
def clean_string(string):
    """Lowercase and strip a string, dropping a single trailing period."""
    string = string.lower().strip()
    if string.endswith('.'):
        return string[:-1].strip()
    return string
|
def clean_relations(string):
    """Wrap a cleaned relation string in a single-element list; empty -> []."""
    cleaned = clean_string(string)
    if not cleaned:
        return []
    return [cleaned]
|
def get_synset_embedding(synset, word_vectors, get_vector):
    """Average the embeddings of a synset's lemma names (None if all miss)."""
    names = [n.replace('_', ' ') for n in wn.synset(synset).lemma_names()]
    class_name = ', '.join(names).lower()
    feat = np.zeros(feat_len)
    hits = 0
    for candidate in class_name.split(','):
        emb = get_embedding(candidate.strip(), word_vectors, get_vector)
        # Skip candidates whose embedding came back all-zero.
        if np.abs(emb.sum()) > 0:
            hits += 1
            feat += emb
    if hits > 0:
        feat = feat / hits
    if np.abs(feat.sum()) == 0:
        return None
    return feat
|
def get_embedding(entity_str, word_vectors, get_vector):
    """Look up an embedding for ``entity_str``; on failure, average the
    embeddings of its space/hyphen/underscore-separated tokens.

    Tokens with no embedding are skipped; if none match, an all-zero vector
    of length ``feat_len`` is returned.
    """
    # The original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt and SystemExit; narrow them to Exception.
    try:
        return get_vector(word_vectors, entity_str)
    except Exception:
        pass
    feat = np.zeros(feat_len)
    cnt_word = 0
    for token in filter(None, re.split('[ \\-_]+', entity_str)):
        try:
            feat = feat + get_vector(word_vectors, token)
            cnt_word = cnt_word + 1
        except Exception:
            continue
    if cnt_word > 0:
        feat = feat / cnt_word
    return feat
|
def get_vector(word_vectors, word):
    """Fetch ``word``'s embedding as a numpy array; OOV words raise.

    Raises NotImplementedError when ``word`` is not in the vocabulary.
    """
    if word in word_vectors.stoi:
        vec = word_vectors[word]
        return vec.numpy()
    raise NotImplementedError
|
def filter_annotations(ds, func):
    """Deep-copy ``ds``, replacing 'annotations' with ``func(annotations)``."""
    result = copy.deepcopy(ds)
    result.update({'annotations': func(result['annotations'])})
    return result
|
def evaluate_boxes(json_dataset, all_boxes, output_dir, use_salt=True, cleanup=True, use_matlab=False):
    """Write VOC-format results files and run the VOC evaluation.

    Result files are optionally salted with a UUID to avoid collisions; with
    ``cleanup`` they are copied into ``output_dir`` and removed from the
    devkit tree. Returns None (VOC metrics are logged, not returned).
    """
    salt = '_{}'.format(str(uuid.uuid4())) if use_salt else ''
    filenames = _write_voc_results_files(json_dataset, all_boxes, salt)
    _do_python_eval(json_dataset, salt, output_dir)
    if use_matlab:
        _do_matlab_eval(json_dataset, salt, output_dir)
    if cleanup:
        for filename in filenames:
            shutil.copy(filename, output_dir)
            os.remove(filename)
    return None
|
def _write_voc_results_files(json_dataset, all_boxes, salt):
    """Write one VOC-format detection results file per class.

    ``all_boxes`` is indexed [class][image]; each entry is either an empty
    list (no detections) or an ndarray whose rows end with the score,
    preceded by [x1, y1, x2, y2]. Returns the list of file paths written.
    """
    filenames = []
    image_set_path = voc_info(json_dataset)['image_set_path']
    assert os.path.exists(image_set_path), 'Image set path does not exist: {}'.format(image_set_path)
    with open(image_set_path, 'r') as f:
        image_index = [x.strip() for x in f.readlines()]
    # Sanity check: the roidb order must match the image-set file order,
    # since all_boxes is indexed by position.
    roidb = json_dataset.get_roidb()
    for (i, entry) in enumerate(roidb):
        index = os.path.splitext(os.path.split(entry['image'])[1])[0]
        assert (index == image_index[i])
    for (cls_ind, cls) in enumerate(json_dataset.classes):
        if (cls == '__background__'):
            continue
        logger.info('Writing VOC results for: {}'.format(cls))
        filename = _get_voc_results_file_template(json_dataset, salt).format(cls)
        filenames.append(filename)
        assert (len(all_boxes[cls_ind]) == len(image_index))
        with open(filename, 'wt') as f:
            for (im_ind, index) in enumerate(image_index):
                dets = all_boxes[cls_ind][im_ind]
                if (type(dets) == list):
                    # An empty list means "no detections for this image".
                    assert (len(dets) == 0), 'dets should be numpy.ndarray or empty list'
                    continue
                # VOC expects 1-based pixel coordinates; detections are
                # 0-based, hence the +1 on every box coordinate. The last
                # column of each row is the detection score.
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index, dets[(k, (- 1))], (dets[(k, 0)] + 1), (dets[(k, 1)] + 1), (dets[(k, 2)] + 1), (dets[(k, 3)] + 1)))
    return filenames
|
def _get_voc_results_file_template(json_dataset, salt):
    """Build the VOC results file path template; '{:s}' is filled with the class."""
    info = voc_info(json_dataset)
    filename = 'comp4{}_det_{}_{{:s}}.txt'.format(salt, info['image_set'])
    return os.path.join(info['devkit_path'], 'results',
                        'VOC' + info['year'], 'Main', filename)
|
def _do_python_eval(json_dataset, salt, output_dir='output'):
    """Run the (unofficial) Python VOC evaluation over every class.

    Logs per-class AP and mean AP, and pickles each class's
    precision/recall curve into ``output_dir``.
    """
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC AP metric changed in 2010 (11-point sampling before).
    use_07_metric = (True if (int(year) < 2010) else False)
    logger.info(('VOC07 metric? ' + ('Yes' if use_07_metric else 'No')))
    if (not os.path.isdir(output_dir)):
        os.mkdir(output_dir)
    for (_, cls) in enumerate(json_dataset.classes):
        if (cls == '__background__'):
            continue
        filename = _get_voc_results_file_template(json_dataset, salt).format(cls)
        (rec, prec, ap) = voc_eval(filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        # Persist the raw precision/recall curve per class for later analysis.
        res_file = os.path.join(output_dir, (cls + '_pr.pkl'))
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
|
def _do_matlab_eval(json_dataset, salt, output_dir='output'):
    """Run the official MATLAB VOC evaluation by shelling out to MATLAB."""
    import subprocess
    logger.info('-----------------------------------------------------')
    logger.info('Computing results with the official MATLAB eval code.')
    logger.info('-----------------------------------------------------')
    info = voc_info(json_dataset)
    path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets', 'VOCdevkit-matlab-wrapper')
    cmd = 'cd {} && '.format(path)
    cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
    cmd += '-r "dbstop if error; '
    cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"'.format(info['devkit_path'], ('comp4' + salt), info['image_set'], output_dir)
    logger.info('Running:\n{}'.format(cmd))
    # NOTE(review): the command runs through the shell (shell=True) with
    # paths interpolated into the string. The paths come from the config
    # here, but this pattern would be unsafe with untrusted input.
    subprocess.call(cmd, shell=True)
|
def voc_info(json_dataset):
    """Derive VOC paths/metadata from a dataset named like 'voc_<year>_<set>'."""
    name = json_dataset.name
    # Name layout: 'voc_' + 4-char year + '_' + image set.
    year = name[4:8]
    image_set = name[9:]
    devkit_path = DATASETS[name][DEVKIT_DIR]
    assert os.path.exists(devkit_path), \
        'Devkit directory {} not found'.format(devkit_path)
    voc_root = os.path.join(devkit_path, 'VOC' + year)
    return dict(
        year=year,
        image_set=image_set,
        devkit_path=devkit_path,
        anno_path=os.path.join(voc_root, 'Annotations', '{:s}.xml'),
        image_set_path=os.path.join(voc_root, 'ImageSets', 'Main',
                                    image_set + '.txt'),
    )
|
class _ROIAlign(Function):
    """Autograd wrapper around the C/CUDA RoIAlign forward/backward kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        # Stash everything backward() needs to invoke the native backward
        # kernel: the rois tensor plus the pooling hyperparameters.
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = _C.roi_align_forward(input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (rois,) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        # The original input shape is needed to size the gradient tensor.
        (bs, ch, h, w) = ctx.input_shape
        grad_input = _C.roi_align_backward(grad_output, rois, spatial_scale, output_size[0], output_size[1], bs, ch, h, w, sampling_ratio)
        # Only `input` gets a gradient; the remaining forward() arguments
        # (roi, output_size, spatial_scale, sampling_ratio) do not.
        return (grad_input, None, None, None, None)
|
class ROIAlign(nn.Module):
    """nn.Module wrapper for RoIAlign pooling."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        return roi_align(input, rois, self.output_size, self.spatial_scale,
                         self.sampling_ratio)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={}, sampling_ratio={})'.format(
            self.__class__.__name__, self.output_size, self.spatial_scale,
            self.sampling_ratio)
|
class _ROIPool(Function):
    """Autograd wrapper around the C/CUDA RoIPool forward/backward kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale):
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        # The native kernel also returns the argmax indices, which the
        # backward kernel needs to route gradients.
        (output, argmax) = _C.roi_pool_forward(input, roi, spatial_scale, output_size[0], output_size[1])
        ctx.save_for_backward(input, roi, argmax)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, rois, argmax) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        # Original input shape is needed to size the gradient tensor.
        (bs, ch, h, w) = ctx.input_shape
        grad_input = _C.roi_pool_backward(grad_output, input, rois, argmax, spatial_scale, output_size[0], output_size[1], bs, ch, h, w)
        # Only `input` gets a gradient; roi/output_size/spatial_scale do not.
        return (grad_input, None, None, None)
|
class ROIPool(nn.Module):
    """nn.Module wrapper for RoI max pooling."""

    def __init__(self, output_size, spatial_scale):
        super(ROIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={})'.format(
            self.__class__.__name__, self.output_size, self.spatial_scale)
|
class MobileNet_v1_conv12_body(nn.Module):
    """MobileNet-v1 backbone truncated after conv block 12 (stride 1/16)."""

    def __init__(self):
        super().__init__()
        (self.conv, self.dim_out) = mobilenet_base(V1_CONV_DEFS[:12])
        self.conv = nn.Sequential(*self.conv)
        self.spatial_scale = 1 / 16
        self._init_modules()

    def _init_modules(self):
        assert 0 <= cfg.MOBILENET.FREEZE_AT <= 12
        # Bug fix: the freeze loop read cfg.RESNETS.FREEZE_AT while the
        # assert above and train() below use cfg.MOBILENET.FREEZE_AT; use
        # the MobileNet setting consistently.
        for i in range(cfg.MOBILENET.FREEZE_AT):
            freeze_params(self.conv[i])
        self.apply(freeze_bn)

    def train(self, mode=True):
        # Keep frozen stages in eval mode and BN layers frozen while training.
        nn.Module.train(self, mode)
        for i in range(cfg.MOBILENET.FREEZE_AT):
            self.conv[i].eval()
        self.apply(freeze_bn)

    def forward(self, x):
        return self.conv(x)
|
class MobileNet_v2_conv14_body(nn.Module):
    """MobileNet-v2 backbone truncated after conv stage 14 (stride 1/16)."""

    def __init__(self):
        super().__init__()
        (self.conv, self.dim_out) = mobilenet_base(V2_CONV_DEFS[:6])
        self.conv = nn.Sequential(*self.conv)
        self.spatial_scale = 1 / 16
        self._init_modules()

    def _init_modules(self):
        assert 0 <= cfg.MOBILENET.FREEZE_AT <= 14
        # Bug fix: the freeze loop read cfg.RESNETS.FREEZE_AT while the
        # assert above and train() below use cfg.MOBILENET.FREEZE_AT; use
        # the MobileNet setting consistently.
        for i in range(cfg.MOBILENET.FREEZE_AT):
            freeze_params(self.conv[i])
        self.apply(freeze_bn)

    def train(self, mode=True):
        # Keep frozen stages in eval mode and BN layers frozen while training.
        nn.Module.train(self, mode)
        for i in range(cfg.MOBILENET.FREEZE_AT):
            self.conv[i].eval()
        self.apply(freeze_bn)

    def forward(self, x):
        return self.conv(x)
|
class MobileNet_roi_conv_head(nn.Module):
    """Shared RoI conv-head base; subclasses must create ``self.conv``.

    forward() applies the RoI transform, the subclass-provided conv stack,
    and global average pooling.
    """

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        # First-conv stride scaled so the RoI feature map reaches 7x7 before
        # the average pool below.
        self.stride_init = (cfg.FAST_RCNN.ROI_XFORM_RESOLUTION // 7)
        self.avgpool = nn.AvgPool2d(7)

    def _init_modules(self):
        self.apply(freeze_bn)

    def train(self, mode=True):
        # BN layers stay frozen even when the module is put in train mode.
        nn.Module.train(self, mode)
        self.apply(freeze_bn)

    def forward(self, x, rpn_ret):
        x = self.roi_xform(x, rpn_ret, blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION, spatial_scale=self.spatial_scale, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO)
        # self.conv is built by the subclass __init__ (v1/v2 heads below).
        feat = self.conv(x)
        x = self.avgpool(feat)
        if (cfg.MODEL.SHARE_RES5 and self.training):
            # Also return the pre-pool features when they are shared with
            # another branch during training.
            return (x, feat)
        else:
            return x
|
class MobileNet_v1_roi_conv_head(MobileNet_roi_conv_head):
    """RoI head built from the tail of MobileNet v1 (conv blocks 12+)."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__(dim_in, roi_xform_func, spatial_scale)
        # Slicing copies the list, so mutating tmp_conv_def below does not
        # touch the module-level V1_CONV_DEFS.
        tmp_conv_def = V1_CONV_DEFS[12:]
        tmp_conv_def[0] = tmp_conv_def[0]._replace(stride=self.stride_init)
        (self.conv, self.dim_out) = mobilenet_base(tmp_conv_def, in_channels=dim_in)
        # Name the submodules '12', '13', ... so checkpoint keys line up
        # with the full-network layer numbering.
        self.conv = nn.Sequential(OrderedDict(zip([str(_) for _ in range(12, (12 + len(self.conv)))], self.conv)))
        self._init_modules()
|
class MobileNet_v2_roi_conv_head(MobileNet_roi_conv_head):
    """RoI head built from the tail of MobileNet v2 (conv stages 6+)."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__(dim_in, roi_xform_func, spatial_scale)
        # Slicing copies the list, so mutating tmp_conv_def below does not
        # touch the module-level V2_CONV_DEFS.
        tmp_conv_def = V2_CONV_DEFS[6:]
        tmp_conv_def[0] = tmp_conv_def[0]._replace(stride=self.stride_init)
        (self.conv, self.dim_out) = mobilenet_base(tmp_conv_def, in_channels=dim_in)
        # Name the submodules '14', '15', ... so checkpoint keys line up
        # with the full-network layer numbering.
        self.conv = nn.Sequential(OrderedDict(zip([str(_) for _ in range(14, (14 + len(self.conv)))], self.conv)))
        self._init_modules()
|
def freeze_bn(m):
    """Put BatchNorm modules into eval mode and freeze their parameters."""
    if 'BatchNorm' in m.__class__.__name__:
        m.eval()
        freeze_params(m)
|
class Conv2d_tf(nn.Conv2d):
    """Conv2d with TensorFlow-style 'SAME'/'VALID' padding.

    'SAME' pads asymmetrically (extra pixel on the bottom/right when the
    total padding is odd) so that out = ceil(in / stride), matching TF.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the original passed padding='SAME'/'VALID' straight
        # through to nn.Conv2d.__init__ (which rejects these strings) and
        # only zeroed kwargs['padding'] AFTER the super() call, where it had
        # no effect. Pop the TF-style mode first and hand the parent a
        # numeric padding of 0; forward() computes the real padding itself.
        padding = kwargs.pop('padding', 'SAME')
        if len(args) < 5:
            # Only inject a numeric padding when it was not already passed
            # positionally (positional arg 5 of nn.Conv2d).
            kwargs['padding'] = 0
        super(Conv2d_tf, self).__init__(*args, **kwargs)
        self.padding = padding
        if not isinstance(self.stride, Iterable):
            self.stride = (self.stride, self.stride)
        if not isinstance(self.dilation, Iterable):
            self.dilation = (self.dilation, self.dilation)

    def forward(self, input):
        if self.padding == 'VALID':
            # No padding at all; output shrinks by the effective filter size.
            return F.conv2d(input, self.weight, self.bias, self.stride,
                            padding=0, dilation=self.dilation,
                            groups=self.groups)
        # Compute TF 'SAME' padding per spatial dimension.
        input_rows = input.size(2)
        filter_rows = self.weight.size(2)
        effective_filter_size_rows = (filter_rows - 1) * self.dilation[0] + 1
        out_rows = (input_rows + self.stride[0] - 1) // self.stride[0]
        padding_rows = max(0, (out_rows - 1) * self.stride[0] +
                           effective_filter_size_rows - input_rows)
        rows_odd = (padding_rows % 2) != 0
        input_cols = input.size(3)
        filter_cols = self.weight.size(3)
        effective_filter_size_cols = (filter_cols - 1) * self.dilation[1] + 1
        out_cols = (input_cols + self.stride[1] - 1) // self.stride[1]
        padding_cols = max(0, (out_cols - 1) * self.stride[1] +
                           effective_filter_size_cols - input_cols)
        cols_odd = (padding_cols % 2) != 0
        if rows_odd or cols_odd:
            # F.pad order is (left, right, top, bottom): the odd extra pixel
            # goes on the right/bottom, as in TensorFlow.
            input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        padding=(padding_rows // 2, padding_cols // 2),
                        dilation=self.dilation, groups=self.groups)
|
def _make_divisible(v, divisor, min_value=None):
if (min_value is None):
min_value = divisor
new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
if (new_v < (0.9 * v)):
new_v += divisor
return new_v
|
def depth_multiplier_v2(depth, multiplier, divisible_by=8, min_depth=8):
    """Scale a MobileNet-v2 channel count, keeping it divisible and >= min_depth."""
    return _make_divisible(depth * multiplier, divisible_by, min_depth)
|
class _conv_bn(nn.Module):
    """Standard conv -> BatchNorm -> ReLU6 block."""

    def __init__(self, inp, oup, kernel, stride):
        super(_conv_bn, self).__init__()
        layers = [
            Conv2d(inp, oup, kernel, stride, 1, bias=False),
            nn.BatchNorm2d(oup),
            nn.ReLU6(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)
        # Output channel count, used by mobilenet_base bookkeeping.
        self.depth = oup

    def forward(self, x):
        return self.conv(x)
|
class _conv_dw(nn.Module):
    """Depthwise-separable conv: depthwise 3x3 then pointwise 1x1, BN+ReLU6 each."""

    def __init__(self, inp, oup, stride):
        super(_conv_dw, self).__init__()
        depthwise = nn.Sequential(
            Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
            nn.BatchNorm2d(inp),
            nn.ReLU6(inplace=True),
        )
        pointwise = nn.Sequential(
            Conv2d(inp, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
            nn.ReLU6(inplace=True),
        )
        # Keep the nested Sequential layout so state_dict keys are unchanged.
        self.conv = nn.Sequential(depthwise, pointwise)
        self.depth = oup

    def forward(self, x):
        return self.conv(x)
|
class _inverted_residual_bottleneck(nn.Module):
    """MobileNet-v2 inverted residual: expand 1x1 -> depthwise 3x3 -> project 1x1."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(_inverted_residual_bottleneck, self).__init__()
        # Residual connection only when spatial size and channels are preserved.
        self.use_res_connect = (stride == 1 and inp == oup)
        mid = inp * expand_ratio
        if expand_ratio > 1:
            expand = nn.Sequential(
                Conv2d(inp, mid, 1, 1, 0, bias=False),
                nn.BatchNorm2d(mid),
                nn.ReLU6(inplace=True),
            )
        else:
            # No expansion stage; empty Sequential keeps the submodule
            # numbering (and thus state_dict keys) identical.
            expand = nn.Sequential()
        depthwise = nn.Sequential(
            Conv2d(mid, mid, 3, stride, 1, groups=mid, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU6(inplace=True),
        )
        project = nn.Sequential(
            Conv2d(mid, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.conv = nn.Sequential(expand, depthwise, project)
        self.depth = oup

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
|
def mobilenet_base(conv_defs=V1_CONV_DEFS, depth=(lambda x: x), in_channels=3):
    """Build the MobileNet feature-extractor layers described by `conv_defs`.

    `depth` maps a nominal channel count to the actual one (multiplier
    logic).  Returns (list of layer modules, final output channel count).
    """
    layers = []
    for conv_def in conv_defs:
        if isinstance(conv_def, Conv):
            out_ch = depth(conv_def.depth)
            layers.append(_conv_bn(in_channels, out_ch, conv_def.kernel, conv_def.stride))
            in_channels = out_ch
        elif isinstance(conv_def, DepthSepConv):
            out_ch = depth(conv_def.depth)
            layers.append(_conv_dw(in_channels, out_ch, conv_def.stride))
            in_channels = out_ch
        elif isinstance(conv_def, InvertedResidual):
            out_ch = depth(conv_def.depth)
            # Only the first block of a group may downsample; the rest use stride 1.
            for n in range(conv_def.num):
                stride = conv_def.stride if n == 0 else 1
                layers.append(_inverted_residual_bottleneck(in_channels, out_ch, stride, conv_def.t))
                in_channels = out_ch
    return layers, in_channels
|
class MobileNet(nn.Module):
    """MobileNet v1/v2 classifier.

    `num_classes` defaults to 1001, matching TF-slim checkpoints that
    include a background class.
    """
    def __init__(self, version='1', depth_multiplier=1.0, min_depth=8, num_classes=1001, dropout=0.2):
        super(MobileNet, self).__init__()
        self.dropout = dropout
        conv_defs = (V1_CONV_DEFS if (version == '1') else V2_CONV_DEFS)
        if (version == '1'):
            # V1 rule: multiply then floor, with a hard minimum depth.
            depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
            (self.features, out_channels) = mobilenet_base(conv_defs=conv_defs, depth=depth)
        else:
            # V2 rule: divisible-by-8 rounding.  The last conv def is built
            # separately with the multiplier clamped to >= 1.0, so the final
            # feature width is never narrowed below its nominal size.
            depth = (lambda d: depth_multiplier_v2(d, depth_multiplier, min_depth=min_depth))
            (self.features, out_channels) = mobilenet_base(conv_defs=conv_defs[:(- 1)], depth=depth)
            depth = (lambda d: depth_multiplier_v2(d, max(depth_multiplier, 1.0), min_depth=min_depth))
            (tmp, out_channels) = mobilenet_base(conv_defs=conv_defs[(- 1):], in_channels=out_channels, depth=depth)
            self.features = (self.features + tmp)
        self.features = nn.Sequential(*self.features)
        # 1x1 conv acts as the fully-connected classifier head.
        self.classifier = nn.Conv2d(out_channels, num_classes, 1)
        # Override BatchNorm hyper-parameters on every BN submodule.
        # NOTE(review): eps=0.001 matches TF-slim; momentum=0.003 presumably
        # mirrors TF's decay setting -- confirm against the TF definition.
        for m in self.modules():
            if ('BatchNorm' in m.__class__.__name__):
                m.eps = 0.001
                m.momentum = 0.003
    def forward(self, x):
        x = self.features(x)
        # Global average pooling over the two spatial dimensions.
        x = x.mean(2, keepdim=True).mean(3, keepdim=True)
        x = F.dropout(x, self.dropout, self.training)
        x = self.classifier(x)
        # Drop the 1x1 spatial dims -> (batch, num_classes).
        x = x.squeeze(3).squeeze(2)
        return x
|
def ResNet50_conv4_body():
    """ResNet-50 body truncated after conv4."""
    stage_blocks = (3, 4, 6)
    return ResNet_convX_body(stage_blocks)
|
def ResNet50_conv5_body():
    """Full ResNet-50 body (through conv5)."""
    stage_blocks = (3, 4, 6, 3)
    return ResNet_convX_body(stage_blocks)
|
def ResNet101_conv4_body():
    """ResNet-101 body truncated after conv4."""
    stage_blocks = (3, 4, 23)
    return ResNet_convX_body(stage_blocks)
|
def ResNet101_conv5_body():
    """Full ResNet-101 body (through conv5)."""
    stage_blocks = (3, 4, 23, 3)
    return ResNet_convX_body(stage_blocks)
|
def ResNet152_conv5_body():
    """Full ResNet-152 body (through conv5)."""
    stage_blocks = (3, 8, 36, 3)
    return ResNet_convX_body(stage_blocks)
|
class ResNet_convX_body(nn.Module):
    """ResNet/ResNeXt body ending at conv4 (3 block counts) or conv5
    (4 block counts), built from cfg-selected stem/shortcut/transform
    functions looked up in this module's globals().

    Args:
        block_counts: residual block counts for res2..res4(/res5),
            e.g. (3, 4, 6, 3) for ResNet-50.
    """
    def __init__(self, block_counts):
        super().__init__()
        self.block_counts = block_counts
        # Index of the last stage present (the res1 stem counts as stage 1).
        self.convX = (len(block_counts) + 1)
        # Detectron-style total layer count (e.g. 50 for ResNet-50).
        # NOTE(review): the `3 * (self.convX == 4)` term appears to add back
        # the res5 blocks for conv4-truncated bodies -- confirm.
        self.num_layers = (((sum(block_counts) + (3 * (self.convX == 4))) * 3) + 2)
        self.res1 = globals()[cfg.RESNETS.STEM_FUNC]()
        dim_in = 64
        # ResNeXt-style bottleneck width: groups * width-per-group.
        dim_bottleneck = (cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP)
        (self.res2, dim_in) = add_stage(dim_in, 256, dim_bottleneck, block_counts[0], dilation=1, stride_init=1)
        (self.res3, dim_in) = add_stage(dim_in, 512, (dim_bottleneck * 2), block_counts[1], dilation=1, stride_init=2)
        (self.res4, dim_in) = add_stage(dim_in, 1024, (dim_bottleneck * 4), block_counts[2], dilation=1, stride_init=2)
        if (len(block_counts) == 4):
            # When res5 is dilated, drop its stride to preserve resolution.
            stride_init = (2 if (cfg.RESNETS.RES5_DILATION == 1) else 1)
            (self.res5, dim_in) = add_stage(dim_in, 2048, (dim_bottleneck * 8), block_counts[3], cfg.RESNETS.RES5_DILATION, stride_init)
            self.spatial_scale = ((1 / 32) * cfg.RESNETS.RES5_DILATION)
        else:
            self.spatial_scale = (1 / 16)
        self.dim_out = dim_in
        self._init_modules()
    def _init_modules(self):
        # Freeze the stem and early stages up to cfg.RESNETS.FREEZE_AT.
        assert (cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5])
        assert (cfg.RESNETS.FREEZE_AT <= self.convX)
        for i in range(1, (cfg.RESNETS.FREEZE_AT + 1)):
            freeze_params(getattr(self, ('res%d' % i)))
        # Affine channels (frozen-BN replacements) are never trained.
        self.apply((lambda m: (freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)))
    def detectron_weight_mapping(self):
        """Map this module's parameter names to Detectron blob names."""
        if cfg.RESNETS.USE_GN:
            mapping_to_detectron = {'res1.conv1.weight': 'conv1_w', 'res1.gn1.weight': 'conv1_gn_s', 'res1.gn1.bias': 'conv1_gn_b'}
            orphan_in_detectron = ['pred_w', 'pred_b']
        else:
            mapping_to_detectron = {'res1.conv1.weight': 'conv1_w', 'res1.bn1.weight': 'res_conv1_bn_s', 'res1.bn1.bias': 'res_conv1_bn_b'}
            orphan_in_detectron = ['conv1_b', 'fc1000_w', 'fc1000_b']
        for res_id in range(2, (self.convX + 1)):
            stage_name = ('res%d' % res_id)
            (mapping, orphans) = residual_stage_detectron_mapping(getattr(self, stage_name), stage_name, self.block_counts[(res_id - 2)], res_id)
            mapping_to_detectron.update(mapping)
            orphan_in_detectron.extend(orphans)
        return (mapping_to_detectron, orphan_in_detectron)
    def train(self, mode=True):
        # Override nn.Module.train: only switch mode on the non-frozen
        # stages, keeping frozen stages in their current state.
        # NOTE(review): unlike nn.Module.train, this does not return self.
        self.training = mode
        for i in range((cfg.RESNETS.FREEZE_AT + 1), (self.convX + 1)):
            getattr(self, ('res%d' % i)).train(mode)
    def forward(self, x):
        # Run res1..resX in order.
        for i in range(self.convX):
            x = getattr(self, ('res%d' % (i + 1)))(x)
        return x
|
class ResNet_roi_conv5_head(nn.Module):
    """Fast R-CNN box head built from the ResNet res5 stage: RoI transform,
    res5 blocks, then 7x7 average pooling to a 2048-d feature."""
    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        dim_bottleneck = (cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP)
        # Stride 2 when the RoI resolution is 14 so res5 outputs 7x7.
        stride_init = (cfg.FAST_RCNN.ROI_XFORM_RESOLUTION // 7)
        (self.res5, self.dim_out) = add_stage(dim_in, 2048, (dim_bottleneck * 8), 3, dilation=1, stride_init=stride_init)
        self.avgpool = nn.AvgPool2d(7)
        self._init_modules()
    def _init_modules(self):
        # Affine channels (frozen-BN replacements) are never trained.
        self.apply((lambda m: (freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)))
    def detectron_weight_mapping(self):
        (mapping_to_detectron, orphan_in_detectron) = residual_stage_detectron_mapping(self.res5, 'res5', 3, 5)
        return (mapping_to_detectron, orphan_in_detectron)
    def forward(self, x, rpn_ret):
        x = self.roi_xform(x, rpn_ret, blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION, spatial_scale=self.spatial_scale, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO)
        res5_feat = self.res5(x)
        x = self.avgpool(res5_feat)
        if (cfg.MODEL.SHARE_RES5 and self.training):
            # Also return the pre-pool feature so the mask branch can share it.
            return (x, res5_feat)
        else:
            return x
|
def add_stage(inplanes, outplanes, innerplanes, nblocks, dilation=1, stride_init=2):
    """Make a stage consisting of `nblocks` residual blocks.

    Returns:
        - stage module: an nn.Sequential of residual blocks
        - final output dimension
    """
    # Only the first block may downsample; later blocks keep stride 1 and
    # already have matching input/output channels.
    strides = [stride_init if i == 0 else 1 for i in range(nblocks)]
    res_blocks = []
    for s in strides:
        res_blocks.append(add_residual_block(inplanes, outplanes, innerplanes, dilation, s))
        inplanes = outplanes
    return nn.Sequential(*res_blocks), outplanes
|
def add_residual_block(inplanes, outplanes, innerplanes, dilation, stride):
    """Return a residual block module, including its shortcut connection."""
    needs_projection = stride != 1 or inplanes != outplanes
    if needs_projection:
        # Projection shortcut (1x1 conv) when the output shape changes.
        shortcut_func = globals()[cfg.RESNETS.SHORTCUT_FUNC]
        downsample = shortcut_func(inplanes, outplanes, stride)
    else:
        downsample = None
    trans_func = globals()[cfg.RESNETS.TRANS_FUNC]
    return trans_func(inplanes, outplanes, innerplanes, stride,
                      dilation=dilation, group=cfg.RESNETS.NUM_GROUPS,
                      downsample=downsample)
|
def basic_bn_shortcut(inplanes, outplanes, stride):
    """1x1 projection shortcut followed by a frozen-BN affine transform."""
    proj = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False)
    return nn.Sequential(proj, mynn.AffineChannel2d(outplanes))
|
def basic_gn_shortcut(inplanes, outplanes, stride):
    """1x1 projection shortcut followed by GroupNorm."""
    proj = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False)
    norm = nn.GroupNorm(net_utils.get_group_gn(outplanes), outplanes, eps=cfg.GROUP_NORM.EPSILON)
    return nn.Sequential(proj, norm)
|
def basic_bn_stem():
    """ResNet stem: 7x7/2 conv, frozen-BN affine, ReLU, 3x3/2 max-pool."""
    layers = [
        ('conv1', nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
        ('bn1', mynn.AffineChannel2d(64)),
        ('relu', nn.ReLU(inplace=True)),
        ('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    ]
    return nn.Sequential(OrderedDict(layers))
|
def basic_gn_stem():
    """ResNet stem with GroupNorm: 7x7/2 conv, GN, ReLU, 3x3/2 max-pool."""
    layers = [
        ('conv1', nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
        ('gn1', nn.GroupNorm(net_utils.get_group_gn(64), 64, eps=cfg.GROUP_NORM.EPSILON)),
        ('relu', nn.ReLU(inplace=True)),
        ('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    ]
    return nn.Sequential(OrderedDict(layers))
|
class bottleneck_transformation(nn.Module):
    ' Bottleneck Residual Block '
    def __init__(self, inplanes, outplanes, innerplanes, stride=1, dilation=1, group=1, downsample=None):
        super().__init__()
        # Put the stride on the 1x1 conv (Caffe/Detectron style) or on the
        # 3x3 conv (torchvision style), per cfg.RESNETS.STRIDE_1X1.
        (str1x1, str3x3) = ((stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride))
        self.stride = stride
        self.conv1 = nn.Conv2d(inplanes, innerplanes, kernel_size=1, stride=str1x1, bias=False)
        self.bn1 = mynn.AffineChannel2d(innerplanes)  # frozen-BN affine
        self.conv2 = nn.Conv2d(innerplanes, innerplanes, kernel_size=3, stride=str3x3, bias=False, padding=(1 * dilation), dilation=dilation, groups=group)
        self.bn2 = mynn.AffineChannel2d(innerplanes)
        self.conv3 = nn.Conv2d(innerplanes, outplanes, kernel_size=1, stride=1, bias=False)
        self.bn3 = mynn.AffineChannel2d(outplanes)
        self.downsample = downsample  # optional projection shortcut
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            # Project the input so shapes match for the residual addition.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
|
class bottleneck_gn_transformation(nn.Module):
    """Bottleneck residual block using GroupNorm instead of frozen BN."""
    expansion = 4
    def __init__(self, inplanes, outplanes, innerplanes, stride=1, dilation=1, group=1, downsample=None):
        super().__init__()
        # Put the stride on the 1x1 conv (Caffe/Detectron style) or on the
        # 3x3 conv (torchvision style), per cfg.RESNETS.STRIDE_1X1.
        (str1x1, str3x3) = ((stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride))
        self.stride = stride
        self.conv1 = nn.Conv2d(inplanes, innerplanes, kernel_size=1, stride=str1x1, bias=False)
        self.gn1 = nn.GroupNorm(net_utils.get_group_gn(innerplanes), innerplanes, eps=cfg.GROUP_NORM.EPSILON)
        self.conv2 = nn.Conv2d(innerplanes, innerplanes, kernel_size=3, stride=str3x3, bias=False, padding=(1 * dilation), dilation=dilation, groups=group)
        self.gn2 = nn.GroupNorm(net_utils.get_group_gn(innerplanes), innerplanes, eps=cfg.GROUP_NORM.EPSILON)
        self.conv3 = nn.Conv2d(innerplanes, outplanes, kernel_size=1, stride=1, bias=False)
        self.gn3 = nn.GroupNorm(net_utils.get_group_gn(outplanes), outplanes, eps=cfg.GROUP_NORM.EPSILON)
        self.downsample = downsample  # optional projection shortcut
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.gn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.gn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.gn3(out)
        if (self.downsample is not None):
            # Project the input so shapes match for the residual addition.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
|
def residual_stage_detectron_mapping(module_ref, module_name, num_blocks, res_id):
    """Construct weight mapping relation for a residual stage with
    `num_blocks` residual blocks, given the stage id `res_id`.

    Returns (mapping_to_detectron, orphan_in_detectron): a dict from this
    module's parameter names to Detectron blob names, plus Detectron blobs
    (conv biases) that have no counterpart here.
    """
    if cfg.RESNETS.USE_GN:
        norm_suffix = '_gn'
    else:
        norm_suffix = '_bn'
    mapping_to_detectron = {}
    orphan_in_detectron = []
    for blk_id in range(num_blocks):
        detectron_prefix = ('res%d_%d' % (res_id, blk_id))
        my_prefix = ('%s.%d' % (module_name, blk_id))
        # branch1 is Detectron's name for the projection shortcut.
        if getattr(module_ref[blk_id], 'downsample'):
            dtt_bp = (detectron_prefix + '_branch1')
            mapping_to_detectron[(my_prefix + '.downsample.0.weight')] = (dtt_bp + '_w')
            orphan_in_detectron.append((dtt_bp + '_b'))
            mapping_to_detectron[(my_prefix + '.downsample.1.weight')] = ((dtt_bp + norm_suffix) + '_s')
            mapping_to_detectron[(my_prefix + '.downsample.1.bias')] = ((dtt_bp + norm_suffix) + '_b')
        # branch2a/b/c are the three convs of the bottleneck.
        for (i, c) in zip([1, 2, 3], ['a', 'b', 'c']):
            dtt_bp = ((detectron_prefix + '_branch2') + c)
            mapping_to_detectron[(my_prefix + ('.conv%d.weight' % i))] = (dtt_bp + '_w')
            orphan_in_detectron.append((dtt_bp + '_b'))
            mapping_to_detectron[(((my_prefix + '.') + norm_suffix[1:]) + ('%d.weight' % i))] = ((dtt_bp + norm_suffix) + '_s')
            mapping_to_detectron[(((my_prefix + '.') + norm_suffix[1:]) + ('%d.bias' % i))] = ((dtt_bp + norm_suffix) + '_b')
    return (mapping_to_detectron, orphan_in_detectron)
|
def freeze_params(m):
    """Freeze all weights of module `m` by disabling gradient tracking."""
    for param in m.parameters():
        param.requires_grad = False
|
def vgg_detectron_weight_mapping(model):
    """Map each parameter name in `model.state_dict()` to its Detectron
    blob name: 'x.weight' -> 'x_w' and 'x.bias' -> 'x_b'."""
    mapping_to_detectron = {}
    for key in model.state_dict():
        if '.weight' in key:
            mapping_to_detectron[key] = key.replace('.weight', '_w')
        if '.bias' in key:
            mapping_to_detectron[key] = key.replace('.bias', '_b')
    # No Detectron-only blobs for the VGG nets.
    return mapping_to_detectron, []
|
class VGG16_conv5_body(nn.Module):
    """VGG-16 feature body up to conv5_3 (no pool5); 1/16 spatial scale."""
    def __init__(self):
        super().__init__()
        # Per-stage channel configuration; 'M' marks a 2x2 max-pool.
        # NOTE: this local `cfg` shadows the module-level config object.
        cfg = [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 'M'], [512, 512, 512]]
        dim_in = 3
        for i in range(len(cfg)):
            for j in range(len(cfg[i])):
                if (cfg[i][j] == 'M'):
                    setattr(self, ('pool%d' % (i + 1)), nn.MaxPool2d(kernel_size=2, stride=2))
                else:
                    setattr(self, ('conv%d_%d' % ((i + 1), (j + 1))), nn.Conv2d(dim_in, cfg[i][j], kernel_size=3, padding=1))
                    setattr(self, ('relu%d_%d' % ((i + 1), (j + 1))), nn.ReLU(inplace=True))
                    dim_in = cfg[i][j]
        self.spatial_scale = (1.0 / 16.0)
        self.dim_out = dim_in
        self._init_modules()
    def _init_modules(self):
        # Freeze the first 10 children: conv1_1..pool2, i.e. the first two
        # stages (5 modules each as registered above).
        for (i, m) in enumerate(self.children()):
            if (i < 10):
                freeze_params(m)
    def detectron_weight_mapping(self):
        return vgg_detectron_weight_mapping(self)
    def forward(self, x):
        # Children were registered in execution order, so just chain them.
        for m in self.children():
            x = m(x)
        return x
|
class VGG16_roi_fc_head(nn.Module):
    """VGG-16 RoI head: 7x7 RoI transform followed by fc6/fc7 with ReLU."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.fc6 = nn.Linear(dim_in * 7 * 7, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.dim_out = 4096

    def detectron_weight_mapping(self):
        return vgg_detectron_weight_mapping(self)

    def forward(self, x, rpn_ret):
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='rois',
            method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
            resolution=7,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        )
        flat = x.view(x.size(0), -1)
        x = F.relu(self.fc6(flat), inplace=True)
        x = F.relu(self.fc7(x), inplace=True)
        return x
|
class SpatialCrossMapLRN(nn.Module):
    """Local response normalization, either across neighbouring channels
    (Caffe-style) or within each channel map."""

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        pad = int((local_size - 1.0) / 2)
        if ACROSS_CHANNELS:
            # 3d average pool over the channel axis (spatial kernel 1x1).
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(pad, 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=pad)
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, x):
        squared = x.pow(2)
        if self.ACROSS_CHANNELS:
            # Insert/remove a dummy dim so AvgPool3d slides along channels.
            div = self.average(squared.unsqueeze(1)).squeeze(1)
        else:
            div = self.average(squared)
        div = div.mul(self.alpha).add(self.k).pow(self.beta)
        return x.div(div)
|
class LambdaBase(nn.Sequential):
    """Base for Lambda modules: holds a callable plus optional child
    modules whose outputs are gathered by `forward_prepare`."""

    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        # Run every child on the same input; with no children, pass the
        # input through unchanged.
        outputs = [module(input) for module in self._modules.values()]
        return outputs if outputs else input
|
class Lambda(LambdaBase):
    """Apply the stored callable to the prepared child outputs (or to the
    raw input when there are no children)."""

    def forward(self, input):
        prepared = self.forward_prepare(input)
        return self.lambda_func(prepared)
|
class VGGM_conv5_body(nn.Module):
    """VGG-M (CNN-M) feature body up to conv5, with cross-channel LRN after
    conv1 and conv2; 1/16 spatial scale."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 96, (7, 7), (2, 2))
        self.relu1 = nn.ReLU(True)
        self.norm1 = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
        self.pool1 = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        self.conv2 = nn.Conv2d(96, 256, (5, 5), (2, 2), (1, 1))
        self.relu2 = nn.ReLU(True)
        self.norm2 = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
        self.pool2 = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        self.conv3 = nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1))
        self.relu3 = nn.ReLU(True)
        self.conv4 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.relu4 = nn.ReLU(True)
        self.conv5 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.relu5 = nn.ReLU(True)
        self.spatial_scale = (1.0 / 16.0)
        self.dim_out = 512
        self._init_modules()
    def _init_modules(self):
        # Only the first conv layer is frozen for VGG-M.
        freeze_params(self.conv1)
    def detectron_weight_mapping(self):
        return vgg_detectron_weight_mapping(self)
    def forward(self, x):
        # Children were registered in execution order, so just chain them.
        for m in self.children():
            x = m(x)
        return x
|
class VGGM_roi_fc_head(nn.Module):
    """VGG-M RoI head: 6x6 RoI transform followed by fc6/fc7 with ReLU."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.fc6 = nn.Linear(dim_in * 6 * 6, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.dim_out = 4096

    def detectron_weight_mapping(self):
        return vgg_detectron_weight_mapping(self)

    def forward(self, x, rpn_ret):
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='rois',
            method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
            resolution=6,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        )
        flat = x.view(x.size(0), -1)
        x = F.relu(self.fc6(flat), inplace=True)
        x = F.relu(self.fc7(x), inplace=True)
        return x
|
class keypoint_outputs(nn.Module):
    'Mask R-CNN keypoint specific outputs: keypoint heatmaps.'
    def __init__(self, dim_in):
        super().__init__()
        self.upsample_heatmap = (cfg.KRCNN.UP_SCALE > 1)
        if cfg.KRCNN.USE_DECONV:
            # Optional intermediate stride-2 deconv before classification.
            self.deconv = nn.ConvTranspose2d(dim_in, cfg.KRCNN.DECONV_DIM, cfg.KRCNN.DECONV_KERNEL, 2, padding=(int((cfg.KRCNN.DECONV_KERNEL / 2)) - 1))
            dim_in = cfg.KRCNN.DECONV_DIM
        if cfg.KRCNN.USE_DECONV_OUTPUT:
            # Predict heatmaps with a stride-2 deconv ...
            self.classify = nn.ConvTranspose2d(dim_in, cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.DECONV_KERNEL, 2, padding=int(((cfg.KRCNN.DECONV_KERNEL / 2) - 1)))
        else:
            # ... or with a plain 1x1 conv at the input resolution.
            self.classify = nn.Conv2d(dim_in, cfg.KRCNN.NUM_KEYPOINTS, 1, 1, padding=0)
        if self.upsample_heatmap:
            # Fixed bilinear upsampling of the predicted heatmaps.
            self.upsample = mynn.BilinearInterpolation2d(cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.UP_SCALE)
        self._init_weights()
    def _init_weights(self):
        if cfg.KRCNN.USE_DECONV:
            init.normal_(self.deconv.weight, std=0.01)
            init.constant_(self.deconv.bias, 0)
        if (cfg.KRCNN.CONV_INIT == 'GaussianFill'):
            init.normal_(self.classify.weight, std=0.001)
        elif (cfg.KRCNN.CONV_INIT == 'MSRAFill'):
            mynn.init.MSRAFill(self.classify.weight)
        else:
            raise ValueError(cfg.KRCNN.CONV_INIT)
        init.constant_(self.classify.bias, 0)
    def detectron_weight_mapping(self):
        detectron_weight_mapping = {}
        if cfg.KRCNN.USE_DECONV:
            detectron_weight_mapping.update({'deconv.weight': 'kps_deconv_w', 'deconv.bias': 'kps_deconv_b'})
        if self.upsample_heatmap:
            # Bilinear upsampling weights are fixed, not loaded (mapped to None).
            blob_name = 'kps_score_lowres'
            detectron_weight_mapping.update({'upsample.upconv.weight': None, 'upsample.upconv.bias': None})
        else:
            blob_name = 'kps_score'
        detectron_weight_mapping.update({'classify.weight': (blob_name + '_w'), 'classify.bias': (blob_name + '_b')})
        return (detectron_weight_mapping, [])
    def forward(self, x):
        if cfg.KRCNN.USE_DECONV:
            x = F.relu(self.deconv(x), inplace=True)
        x = self.classify(x)
        if self.upsample_heatmap:
            x = self.upsample(x)
        return x
|
def keypoint_losses(kps_pred, keypoint_locations_int32, keypoint_weights, keypoint_loss_normalizer=None):
    'Mask R-CNN keypoint specific losses.'
    # Run the loss on the same GPU as the predictions.
    device_id = kps_pred.get_device()
    # Targets are flattened heatmap indices, one per keypoint.
    kps_target = Variable(torch.from_numpy(keypoint_locations_int32.astype('int64'))).cuda(device_id)
    keypoint_weights = Variable(torch.from_numpy(keypoint_weights)).cuda(device_id)
    # Softmax cross-entropy over heatmap locations; `reduce=False` keeps the
    # per-keypoint losses (legacy torch API, kept for compatibility).
    loss = F.cross_entropy(kps_pred.view((- 1), (cfg.KRCNN.HEATMAP_SIZE ** 2)), kps_target, reduce=False)
    # Weighted mean over keypoints.
    loss = (torch.sum((loss * keypoint_weights)) / torch.sum(keypoint_weights))
    loss *= cfg.KRCNN.LOSS_WEIGHT
    if (not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS):
        # Rescale by the externally supplied normalizer instead.
        # NOTE(review): `keypoint_loss_normalizer` must not be None here.
        loss *= keypoint_loss_normalizer.item()
    return loss
|
class roi_pose_head_v1convX(nn.Module):
    """Mask R-CNN keypoint head. v1convX design: X * (conv).

    Stacks cfg.KRCNN.NUM_STACKED_CONVS conv+ReLU layers on top of the RoI
    features pooled from `keypoint_rois`.
    """

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        hidden_dim = cfg.KRCNN.CONV_HEAD_DIM
        kernel_size = cfg.KRCNN.CONV_HEAD_KERNEL
        pad_size = (kernel_size // 2)  # 'same' padding for odd kernels
        module_list = []
        for _ in range(cfg.KRCNN.NUM_STACKED_CONVS):
            module_list.append(nn.Conv2d(dim_in, hidden_dim, kernel_size, 1, pad_size))
            module_list.append(nn.ReLU(inplace=True))
            dim_in = hidden_dim
        self.conv_fcn = nn.Sequential(*module_list)
        self.dim_out = hidden_dim
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Conv2d):
            if (cfg.KRCNN.CONV_INIT == 'GaussianFill'):
                init.normal_(m.weight, std=0.01)
            elif (cfg.KRCNN.CONV_INIT == 'MSRAFill'):
                mynn.init.MSRAFill(m.weight)
            else:
                # BUG FIX: the ValueError was previously constructed but not
                # raised, silently leaving weights at their default init for
                # unknown CONV_INIT settings (cf. keypoint_outputs, which raises).
                raise ValueError('Unexpected cfg.KRCNN.CONV_INIT: {}'.format(cfg.KRCNN.CONV_INIT))
            init.constant_(m.bias, 0)

    def detectron_weight_mapping(self):
        # conv_fcn children alternate Conv2d/ReLU, hence the `2 * i` index.
        detectron_weight_mapping = {}
        orphan_in_detectron = []
        for i in range(cfg.KRCNN.NUM_STACKED_CONVS):
            detectron_weight_mapping[('conv_fcn.%d.weight' % (2 * i))] = ('conv_fcn%d_w' % (i + 1))
            detectron_weight_mapping[('conv_fcn.%d.bias' % (2 * i))] = ('conv_fcn%d_b' % (i + 1))
        return (detectron_weight_mapping, orphan_in_detectron)

    def forward(self, x, rpn_ret):
        x = self.roi_xform(x, rpn_ret, blob_rois='keypoint_rois', method=cfg.KRCNN.ROI_XFORM_METHOD, resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION, spatial_scale=self.spatial_scale, sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO)
        x = self.conv_fcn(x)
        return x
|
class mask_rcnn_outputs(nn.Module):
    'Mask R-CNN specific outputs: either mask logits or probs.'
    def __init__(self, dim_in):
        super().__init__()
        self.dim_in = dim_in
        # One mask per class, or a single class-agnostic mask.
        n_classes = (cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1)
        if cfg.MRCNN.USE_FC_OUTPUT:
            # Predict the full flattened mask with a fully-connected layer.
            self.classify = nn.Linear(dim_in, (n_classes * (cfg.MRCNN.RESOLUTION ** 2)))
        else:
            self.classify = nn.Conv2d(dim_in, n_classes, 1, 1, 0)
        if (cfg.MRCNN.UPSAMPLE_RATIO > 1):
            self.upsample = mynn.BilinearInterpolation2d(n_classes, n_classes, cfg.MRCNN.UPSAMPLE_RATIO)
        self._init_weights()
    def _init_weights(self):
        # MSRAFill only applies to the conv, class-specific configuration.
        if ((not cfg.MRCNN.USE_FC_OUTPUT) and cfg.MRCNN.CLS_SPECIFIC_MASK and (cfg.MRCNN.CONV_INIT == 'MSRAFill')):
            weight_init_func = mynn.init.MSRAFill
        else:
            weight_init_func = partial(init.normal_, std=0.001)
        weight_init_func(self.classify.weight)
        init.constant_(self.classify.bias, 0)
    def detectron_weight_mapping(self):
        mapping = {'classify.weight': 'mask_fcn_logits_w', 'classify.bias': 'mask_fcn_logits_b'}
        if hasattr(self, 'upsample'):
            # Bilinear upsampling weights are fixed, not loaded (mapped to None).
            mapping.update({'upsample.upconv.weight': None, 'upsample.upconv.bias': None})
        orphan_in_detectron = []
        return (mapping, orphan_in_detectron)
    def forward(self, x):
        x = self.classify(x)
        if (cfg.MRCNN.UPSAMPLE_RATIO > 1):
            x = self.upsample(x)
        if (not self.training):
            # Logits during training (BCE-with-logits loss); probs at test time.
            x = F.sigmoid(x)
        return x
|
def mask_rcnn_losses(masks_pred, masks_int32):
    'Mask R-CNN specific losses.'
    (n_rois, n_classes, _, _) = masks_pred.size()
    # Run the loss on the same GPU as the predictions.
    device_id = masks_pred.get_device()
    masks_gt = Variable(torch.from_numpy(masks_int32.astype('float32'))).cuda(device_id)
    # Labels of -1 mark elements to ignore; weight them out of the loss.
    weight = (masks_gt > (- 1)).float()
    # `size_average=False` (legacy torch API) sums the per-element losses.
    loss = F.binary_cross_entropy_with_logits(masks_pred.view(n_rois, (- 1)), masks_gt, weight, size_average=False)
    # Normalize by the number of contributing elements.
    loss /= weight.sum()
    return (loss * cfg.MRCNN.WEIGHT_LOSS_MASK)
|
def mask_rcnn_fcn_head_v1up4convs(dim_in, roi_xform_func, spatial_scale):
    'v1up design: 4 * (conv 3x3), convT 2x2.'
    num_convs = 4
    return mask_rcnn_fcn_head_v1upXconvs(dim_in, roi_xform_func, spatial_scale, num_convs)
|
def mask_rcnn_fcn_head_v1up4convs_gn(dim_in, roi_xform_func, spatial_scale):
    'v1up design: 4 * (conv 3x3), convT 2x2, with GroupNorm'
    num_convs = 4
    return mask_rcnn_fcn_head_v1upXconvs_gn(dim_in, roi_xform_func, spatial_scale, num_convs)
|
# NOTE: the following three lines are web-page extraction residue, not source
# code; commented out so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.