code stringlengths 17 6.64M |
|---|
def test_nms_device_and_dtypes_cpu():
    """Check CPU NMS preserves the input dtype for numpy and torch inputs.

    CommandLine:
        xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_cpu
    """
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    # Same check for every supported container/dtype combination.
    # (Also fixes the inconsistent 'supressed'/'surpressed' spellings.)
    for dets in (base_dets.astype(np.float32), torch.FloatTensor(base_dets),
                 base_dets.astype(np.float64), torch.DoubleTensor(base_dets)):
        suppressed, inds = nms(dets, iou_thr)
        # NMS must not silently change the dtype of its input
        assert dets.dtype == suppressed.dtype
        # the two overlapping high-score boxes collapse to one -> 3 remain
        assert len(inds) == len(suppressed) == 3
|
def test_nms_device_and_dtypes_gpu():
    """Check GPU NMS preserves dtype on every available CUDA device.

    CommandLine:
        xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_gpu
    """
    if not torch.cuda.is_available():
        import pytest
        pytest.skip('test requires GPU and torch+cuda')
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    for device_id in range(torch.cuda.device_count()):
        print('Run NMS on device_id = {!r}'.format(device_id))
        # numpy input: the target device is passed explicitly
        # (fixes the misspelled 'supressed' variable)
        dets = base_dets.astype(np.float32)
        suppressed, inds = nms(dets, iou_thr, device_id)
        assert dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
        # torch input: the tensor already lives on the target device
        dets = torch.FloatTensor(base_dets).to(device_id)
        suppressed, inds = nms(dets, iou_thr)
        assert dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
|
def test_random_sampler():
    """RandomSampler should return one box per sampled index."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # positive and negative samples must pair up with their indices
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sampler_empty_gt():
    """Sampling must still work when there are no ground-truth boxes."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # zero ground-truth boxes and labels
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0).long()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sampler_empty_pred():
    """Sampling must still work when there are no predicted boxes."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    # zero proposals
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def _context_for_ohem():
    # Build a detector instance to serve as the `context` argument of
    # OHEMSampler (presumably the sampler uses it to rank example losses
    # — confirm against OHEMSampler's implementation).
    try:
        from test_forward import _get_detector_cfg
    except ImportError:
        # When invoked from outside the tests directory, sibling test
        # modules are not importable; add this file's directory and retry.
        import sys
        from os.path import dirname
        sys.path.insert(0, dirname(__file__))
        from test_forward import _get_detector_cfg
    (model, train_cfg, test_cfg) = _get_detector_cfg('faster_rcnn_ohem_r50_fpn_1x.py')
    # avoid any attempt to fetch pretrained weights during the test
    model['pretrained'] = None
    # use the torchvision RoI layer so no compiled CUDA ops are required
    model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
    from mmdet.models import build_detector
    context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
    return context
|
def test_ohem_sampler():
    """OHEMSampler with a detector context should yield paired samples."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    # pyramid of random feature maps with 64/32/16/8/4 spatial sizes
    feats = [torch.rand(1, 256, 2 ** lvl, 2 ** lvl) for lvl in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_ohem_sampler_empty_gt():
    """OHEM sampling must still work when there are no ground-truth boxes."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # empty ground truth: no boxes, labels, or ignore regions
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, 2 ** lvl, 2 ** lvl) for lvl in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_ohem_sampler_empty_pred():
    """OHEM sampling must still work when there are no predicted boxes."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    # zero proposals
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, 2 ** lvl, 2 ** lvl) for lvl in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sample_result():
    """Smoke-test SamplingResult.random over several gt/pred counts."""
    from mmdet.core.bbox.samplers.sampling_result import SamplingResult
    cases = [(0, 0), (0, 3), (3, 3), (0, 3), (7, 7), (7, 64), (24, 3)]
    for num_gts, num_preds in cases:
        SamplingResult.random(num_gts=num_gts, num_preds=num_preds)
    # also exercise explicit rng seeds
    for seed in range(3):
        SamplingResult.random(rng=seed)
|
def test_soft_nms_device_and_dtypes_cpu():
    """
    CommandLine:
        xdoctest -m tests/test_soft_nms.py test_soft_nms_device_and_dtypes_cpu
    """
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    # soft-NMS decays scores instead of discarding boxes, so all 4 survive;
    # the input dtype must be preserved for each container/dtype combination
    for dets in (base_dets.astype(np.float32), torch.FloatTensor(base_dets),
                 base_dets.astype(np.float64), torch.DoubleTensor(base_dets)):
        new_dets, inds = soft_nms(dets, iou_thr)
        assert dets.dtype == new_dets.dtype
        assert len(inds) == len(new_dets) == 4
|
def test_params_to_string():
    """params_to_string should pick human-friendly units (M, k, or raw)."""
    assert params_to_string(1000000000.0) == '1000.0 M'
    assert params_to_string(200000.0) == '200.0 k'
    assert params_to_string(3e-09) == '3e-09'
|
def cal_train_time(log_dicts, args):
    """Print per-epoch iteration-time statistics for each parsed json log."""
    for i, log_dict in enumerate(log_dicts):
        print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i], '-' * 5))
        all_times = []
        for epoch in log_dict.keys():
            times = log_dict[epoch]['time']
            # the first iteration of an epoch carries warm-up overhead and is
            # dropped unless outliers are explicitly requested
            all_times.append(times if args.include_outliers else times[1:])
        all_times = np.array(all_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print('slowest epoch {}, average time is {:.4f}'.format(
            slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
        print('fastest epoch {}, average time is {:.4f}'.format(
            fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
        print('time std over epochs is {:.4f}'.format(std_over_epoch))
        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
        print()
|
def plot_curve(log_dicts, args):
    """Plot the requested metrics of each parsed json log with matplotlib.

    One curve is drawn per (log file, metric) pair. Metrics whose name
    contains 'mAP' are plotted against epoch numbers; all other metrics are
    plotted against a global iteration index. The figure is shown
    interactively, or saved when ``args.out`` is given.
    """
    if (args.backend is not None):
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # default legend entries: "<log file>_<metric>" for every pair
    legend = args.legend
    if (legend is None):
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append('{}_{}'.format(json_log, metric))
    assert (len(legend) == (len(args.json_logs) * len(args.keys)))
    metrics = args.keys
    num_metrics = len(metrics)
    for (i, log_dict) in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for (j, metric) in enumerate(metrics):
            print('plot curve of {}, metric is {}'.format(args.json_logs[i], metric))
            if (metric not in log_dict[epochs[0]]):
                raise KeyError('{} does not contain metric {}'.format(args.json_logs[i], metric))
            if ('mAP' in metric):
                # mAP-style metrics: one value per epoch -> epoch axis
                xs = np.arange(1, (max(epochs) + 1))
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[((i * num_metrics) + j)], marker='o')
            else:
                # per-iteration metrics: offset each epoch's iter numbers by
                # the epoch length to get a continuous global iteration axis
                xs = []
                ys = []
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][(- 1)]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    if (log_dict[epoch]['mode'][(- 1)] == 'val'):
                        # drop the trailing val record (no train-metric value)
                        iters = iters[:(- 1)]
                    xs.append((np.array(iters) + ((epoch - 1) * num_iters_per_epoch)))
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(xs, ys, label=legend[((i * num_metrics) + j)], linewidth=0.5)
            plt.legend()
            if (args.title is not None):
                plt.title(args.title)
    if (args.out is None):
        plt.show()
    else:
        print('save curve to: {}'.format(args.out))
        plt.savefig(args.out)
    plt.cla()
|
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command and its CLI options."""
    parser_plt = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    parser_plt.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_plt.add_argument(
        '--keys',
        type=str,
        nargs='+',
        default=['bbox_mAP'],
        help='the metric that you want to plot')
    parser_plt.add_argument('--title', type=str, help='title of figure')
    parser_plt.add_argument(
        '--legend',
        type=str,
        nargs='+',
        default=None,
        help='legend of each plot')
    parser_plt.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    parser_plt.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    parser_plt.add_argument('--out', type=str, default=None)
|
def add_time_parser(subparsers):
    """Register the ``cal_train_time`` sub-command and its CLI options."""
    parser_time = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    parser_time.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_time.add_argument(
        '--include-outliers',
        action='store_true',
        help='include the first value of every epoch when computing the average time')
|
def parse_args():
    """Parse CLI arguments, dispatching between the registered sub-commands."""
    parser = argparse.ArgumentParser(description='Analyze Json Log')
    subparsers = parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(subparsers)
    add_time_parser(subparsers)
    return parser.parse_args()
|
def load_json_logs(json_logs):
    """Load training logs (one json dict per line) into per-epoch lists.

    Returns one dict per input file, each mapping
    epoch -> {metric name -> list of values in iteration order}.
    Lines without an 'epoch' key (e.g. environment info) are skipped.
    """
    log_dicts = [dict() for _ in json_logs]
    for json_log, log_dict in zip(json_logs, log_dicts):
        with open(json_log, 'r') as log_file:
            for line in log_file:
                record = json.loads(line.strip())
                if 'epoch' not in record:
                    continue
                epoch = record.pop('epoch')
                epoch_dict = log_dict.setdefault(epoch, defaultdict(list))
                for key, value in record.items():
                    epoch_dict[key].append(value)
    return log_dicts
|
def main():
    """Entry point: parse args and run the selected log-analysis task."""
    args = parse_args()
    json_logs = args.json_logs
    for json_log in json_logs:
        assert json_log.endswith('.json')
    log_dicts = load_json_logs(json_logs)
    # Dispatch explicitly instead of eval(): avoids executing arbitrary
    # strings and makes the supported tasks obvious. The keys match the
    # sub-command names registered in parse_args().
    tasks = {'plot_curve': plot_curve, 'cal_train_time': cal_train_time}
    tasks[args.task](log_dicts, args)
|
def parse_args():
    """Parse CLI arguments for browsing a dataset."""
    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--skip-type',
        type=str,
        nargs='+',
        default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='If there is no display interface, you can save it')
    parser.add_argument('--not-show', default=False, action='store_true')
    parser.add_argument(
        '--show-interval',
        type=int,
        default=999,
        help='the interval of show (ms)')
    return parser.parse_args()
|
def retrieve_data_cfg(config_path, skip_type):
    """Load a config and drop train-pipeline steps listed in ``skip_type``."""
    cfg = Config.fromfile(config_path)
    train_data_cfg = cfg.data.train
    kept_steps = [
        step for step in train_data_cfg.pipeline
        if step['type'] not in skip_type
    ]
    train_data_cfg['pipeline'] = kept_steps
    return cfg
|
def main():
    """Iterate the train dataset and show/save each image with its gt boxes."""
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type)
    dataset = build_dataset(cfg.data.train)
    progress_bar = mmcv.ProgressBar(len(dataset))
    for item in dataset:
        # save under output-dir with the original file name, or display only
        filename = (os.path.join(args.output_dir, Path(item['filename']).name) if (args.output_dir is not None) else None)
        # labels are shifted by -1 before drawing — presumably the dataset's
        # gt_labels are 1-based while class_names is 0-indexed; confirm
        mmcv.imshow_det_bboxes(item['img'], item['gt_bboxes'], (item['gt_labels'] - 1), class_names=dataset.CLASSES, show=(not args.not_show), out_file=filename, wait_time=args.show_interval)
        progress_bar.update()
|
def parse_xml(args):
    """Parse one PASCAL VOC xml annotation into mmdetection's dict format.

    Args:
        args (tuple): ``(xml_path, img_path)`` pair.

    Returns:
        dict: filename/width/height plus an 'ann' dict holding bboxes and
        labels, with 'difficult' objects split into the ignore arrays.
    """
    xml_path, img_path = args
    root = ET.parse(xml_path).getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    bboxes, labels = [], []
    bboxes_ignore, labels_ignore = [], []
    for obj in root.findall('object'):
        label = label_ids[obj.find('name').text]
        difficult = int(obj.find('difficult').text)
        bnd_box = obj.find('bndbox')
        bbox = [
            int(bnd_box.find(tag).text)
            for tag in ('xmin', 'ymin', 'xmax', 'ymax')
        ]
        # 'difficult' objects are kept separately so training can ignore them
        if difficult:
            bboxes_ignore.append(bbox)
            labels_ignore.append(label)
        else:
            bboxes.append(bbox)
            labels.append(label)
    if not bboxes:
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0,))
    else:
        # VOC coordinates are 1-based; shift to 0-based
        bboxes = np.array(bboxes, ndmin=2) - 1
        labels = np.array(labels)
    if not bboxes_ignore:
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0,))
    else:
        bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
        labels_ignore = np.array(labels_ignore)
    return {
        'filename': img_path,
        'width': width,
        'height': height,
        'ann': {
            'bboxes': bboxes.astype(np.float32),
            'labels': labels.astype(np.int64),
            'bboxes_ignore': bboxes_ignore.astype(np.float32),
            'labels_ignore': labels_ignore.astype(np.int64),
        },
    }
|
def cvt_annotations(devkit_path, years, split, out_file):
    """Convert VOC xml annotations for the given years/split into one file."""
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
        if not osp.isfile(filelist):
            # this split does not exist for the year: abort without dumping
            print('filelist does not exist: {}, skip voc{} {}'.format(
                filelist, year, split))
            return
        img_names = mmcv.list_from_file(filelist)
        # build (xml annotation path, relative image path) work items
        tasks = [
            (osp.join(devkit_path,
                      'VOC{}/Annotations/{}.xml'.format(year, name)),
             'VOC{}/JPEGImages/{}.jpg'.format(year, name))
            for name in img_names
        ]
        annotations.extend(mmcv.track_progress(parse_xml, tasks))
    mmcv.dump(annotations, out_file)
    return annotations
|
def parse_args():
    """Parse CLI arguments for the VOC annotation converter."""
    parser = argparse.ArgumentParser(
        description='Convert PASCAL VOC annotations to mmdetection format')
    parser.add_argument('devkit_path', help='pascal voc devkit path')
    parser.add_argument('-o', '--out-dir', help='output path')
    return parser.parse_args()
|
def main():
    """Convert all present VOC2007/VOC2012 annotations to mmdet pkl files."""
    args = parse_args()
    devkit_path = args.devkit_path
    out_dir = (args.out_dir if args.out_dir else devkit_path)
    mmcv.mkdir_or_exist(out_dir)
    # collect the years actually present in the devkit
    years = []
    if osp.isdir(osp.join(devkit_path, 'VOC2007')):
        years.append('2007')
    if osp.isdir(osp.join(devkit_path, 'VOC2012')):
        years.append('2012')
    if (('2007' in years) and ('2012' in years)):
        # when both are available, also produce a combined 07+12 dataset
        years.append(['2007', '2012'])
    if (not years):
        raise IOError('The devkit path {} contains neither "VOC2007" nor "VOC2012" subfolder'.format(devkit_path))
    for year in years:
        if (year == '2007'):
            prefix = 'voc07'
        elif (year == '2012'):
            prefix = 'voc12'
        elif (year == ['2007', '2012']):
            prefix = 'voc0712'
        for split in ['train', 'val', 'trainval']:
            dataset_name = ((prefix + '_') + split)
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, split, osp.join(out_dir, (dataset_name + '.pkl')))
        # only single years get a test split converted (not the combined set)
        if (not isinstance(year, list)):
            dataset_name = (prefix + '_test')
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, 'test', osp.join(out_dir, (dataset_name + '.pkl')))
    print('Done!')
|
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Map a detectron affine-BN blob pair onto pytorch BN parameters.

    Only scale ('_s') and shift ('_b') blobs are read; the running stats are
    filled with identity values (mean 0, var 1). The consumed blob names are
    recorded in ``converted_names``.
    """
    weight = torch.from_numpy(blobs[caffe_name + '_s'])
    bias = torch.from_numpy(blobs[caffe_name + '_b'])
    state_dict[torch_name + '.weight'] = weight
    state_dict[torch_name + '.bias'] = bias
    bn_size = weight.size()
    state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
    state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
    converted_names.add(caffe_name + '_b')
    converted_names.add(caffe_name + '_s')
|
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Copy a detectron conv/fc weight (and optional bias) into state_dict."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    bias_key = caffe_name + '_b'
    # bias blobs are optional (e.g. convs followed by BN have none)
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
|
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): path of the detectron weight file (loaded via mmcv).
        dst (str): output path for the converted pytorch checkpoint.
        depth (int): ResNet depth; must be a key of ``arch_settings``.

    Raises:
        ValueError: if ``depth`` is not in ``arch_settings``.
    """
    if (depth not in arch_settings):
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    caffe_model = mmcv.load(src, encoding='latin1')
    # some dumps nest the weights under a 'blobs' key
    blobs = (caffe_model['blobs'] if ('blobs' in caffe_model) else caffe_model)
    state_dict = OrderedDict()
    converted_names = set()
    # stem: conv1 + bn1
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    # residual stages: detectron res2..res5 map to pytorch layer1..layer4
    for i in range(1, (len(block_nums) + 1)):
        for j in range(block_nums[(i - 1)]):
            if (j == 0):
                # first block of each stage carries the projection shortcut
                # (branch1 -> downsample)
                convert_conv_fc(blobs, state_dict, 'res{}_{}_branch1'.format((i + 1), j), 'layer{}.{}.downsample.0'.format(i, j), converted_names)
                convert_bn(blobs, state_dict, 'res{}_{}_branch1_bn'.format((i + 1), j), 'layer{}.{}.downsample.1'.format(i, j), converted_names)
            # the three bottleneck convs: branch2a/b/c -> conv1/2/3 (+ bn)
            for (k, letter) in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict, 'res{}_{}_branch2{}'.format((i + 1), j, letter), 'layer{}.{}.conv{}'.format(i, j, (k + 1)), converted_names)
                convert_bn(blobs, state_dict, 'res{}_{}_branch2{}_bn'.format((i + 1), j, letter), 'layer{}.{}.bn{}'.format(i, j, (k + 1)), converted_names)
    # report any source blobs that were not mapped to a pytorch key
    for key in blobs:
        if (key not in converted_names):
            print('Not Convert: {}'.format(key))
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
|
def main():
    """CLI entry: convert a detectron ResNet checkpoint to pytorch format."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument('depth', type=int, help='ResNet model depth')
    cli = parser.parse_args()
    convert(cli.src, cli.dst, cli.depth)
|
def fuse_conv_bn(conv, bn):
    """Fold a BatchNorm layer into the preceding conv for inference.

    In eval mode BN applies a fixed per-channel affine transform, so it can
    be merged into the conv's weight and bias to save computation. The conv
    module is modified in place and returned.
    """
    # per-channel scale applied by BN in eval mode
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    if conv.bias is not None:
        old_bias = conv.bias
    else:
        old_bias = torch.zeros_like(bn.running_mean)
    new_weight = conv.weight * scale.reshape([conv.out_channels, 1, 1, 1])
    new_bias = (old_bias - bn.running_mean) * scale + bn.bias
    conv.weight = nn.Parameter(new_weight)
    conv.bias = nn.Parameter(new_bias)
    return conv
|
def fuse_module(m):
    """Recursively fuse each Conv2d + directly-following BatchNorm in ``m``.

    Each fused BN is replaced with nn.Identity so the module structure stays
    intact. Returns the (mutated) module.
    """
    pending_conv = None
    pending_name = None
    for name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            if pending_conv is None:
                # BN with no directly preceding conv: leave untouched
                continue
            m._modules[pending_name] = fuse_conv_bn(pending_conv, child)
            m._modules[name] = nn.Identity()
            pending_conv = None
        elif isinstance(child, nn.Conv2d):
            pending_conv = child
            pending_name = name
        else:
            # descend into containers / other layers
            fuse_module(child)
    return m
|
def parse_args():
    """Parse CLI arguments for the conv-bn fusion tool."""
    parser = argparse.ArgumentParser(
        description='fuse Conv and BN layers in a model')
    parser.add_argument('config', help='config file path')
    parser.add_argument('checkpoint', help='checkpoint file path')
    parser.add_argument('out', help='output path of the converted model')
    return parser.parse_args()
|
def main():
    """CLI entry: fuse conv+bn in a detector and save the result."""
    opts = parse_args()
    detector = init_detector(opts.config, opts.checkpoint)
    fused = fuse_module(detector)
    save_checkpoint(fused, opts.out)
|
def parse_args():
    """Parse CLI arguments for the FLOPs-counting tool."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    return parser.parse_args()
|
def main():
    """Compute and print FLOPs/params of the model defined by a config."""
    args = parse_args()
    if len(args.shape) == 1:
        # a single value means a square input
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3,) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
    model.eval()
    # the FLOPs counter needs a single-tensor forward entry point
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # BUGFIX: message previously read 'currently not currently supported'
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
        split_line, input_shape, flops, params))
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
|
def parse_args():
    """Parse CLI arguments for the checkpoint-publishing tool."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
|
def process_checkpoint(in_file, out_file):
    """Strip optimizer state and publish a checkpoint with a sha256 suffix.

    Saves the stripped checkpoint to ``out_file`` and then renames it to
    ``<out_file stem>-<first 8 sha256 hex chars>.pth``.
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # the optimizer state is useless for deployment and inflates file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    torch.save(checkpoint, out_file)
    # hash in-process instead of shelling out to sha256sum (portable, and
    # avoids the race of the original fire-and-forget `mv` subprocess)
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # BUGFIX: str.rstrip('.pth') strips any trailing '.', 'p', 't', 'h'
    # characters (e.g. 'depth.pth' -> 'de'); remove the suffix explicitly.
    if out_file.endswith('.pth'):
        stem = out_file[:-len('.pth')]
    else:
        stem = out_file
    final_file = '{}-{}.pth'.format(stem, sha[:8])
    os.rename(out_file, final_file)
|
def main():
    """CLI entry: strip and publish a checkpoint."""
    cli = parse_args()
    process_checkpoint(cli.in_file, cli.out_file)
|
def export_onnx_model(model, inputs, passes):
    """Trace and export a model to onnx format. Modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        passes (None or list[str]): the optimization passed for ONNX model

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    def _check_eval(module):
        # exporting in train mode would trace training-only behavior
        assert (not module.training)

    model.apply(_check_eval)
    with torch.no_grad():
        with io.BytesIO() as f:
            # ATEN fallback keeps ops without a native ONNX export working
            torch.onnx.export(model, inputs, f, operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())
    if (passes is not None):
        all_passes = optimizer.get_available_passes()
        assert all(((p in all_passes) for p in passes)), 'Only {} are supported'.format(all_passes)
        onnx_model = optimizer.optimize(onnx_model, passes)
    return onnx_model
|
def parse_args():
    """Parse CLI arguments for the ONNX conversion tool."""
    parser = argparse.ArgumentParser(
        description='MMDet pytorch model conversion to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--out', type=str, required=True, help='output ONNX filename')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    parser.add_argument(
        '--passes', type=str, nargs='+', help='ONNX optimization passes')
    return parser.parse_args()
|
def main():
    """Convert an mmdet model (config + checkpoint) to an ONNX file."""
    args = parse_args()
    if not args.out.endswith('.onnx'):
        raise ValueError('The output file must be a onnx file.')
    if len(args.shape) == 1:
        # a single value means a square input
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3,) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None  # weights come from the checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.cpu().eval()
    # switch RoI layers to torchvision implementations — presumably the
    # custom compiled ops cannot be traced for ONNX export
    for m in model.modules():
        if isinstance(m, (RoIPool, RoIAlign)):
            m.use_torchvision = True
    # export needs a single-tensor forward entry point
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # BUGFIX: message previously read 'currently not currently supported'
        raise NotImplementedError(
            'ONNX conversion is currently not supported with {}'.format(
                model.__class__.__name__))
    input_data = torch.empty(
        (1, *input_shape),
        dtype=next(model.parameters()).dtype,
        device=next(model.parameters()).device)
    onnx_model = export_onnx_model(model, (input_data,), args.passes)
    onnx.helper.printable_graph(onnx_model.graph)
    print('saving model in {}'.format(args.out))
    onnx.save(onnx_model, args.out)
|
class MultipleKVAction(argparse.Action):
    """
    argparse action to split an argument into KEY=VALUE form
    on the first = and append to a dictionary. List options should
    be passed as comma separated values, i.e KEY=V1,V2,V3
    """

    def _parse_int_float_bool(self, val):
        """Best-effort coercion of a string to int, float, bool, or str."""
        for cast in (int, float):
            try:
                return cast(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for pair in values:
            # split only on the first '=' so values may contain '='
            key, raw = pair.split('=', maxsplit=1)
            parsed = [self._parse_int_float_bool(item) for item in raw.split(',')]
            # a single value is stored as a scalar, not a one-element list
            options[key] = parsed[0] if len(parsed) == 1 else parsed
        setattr(namespace, self.dest, options)
|
def parse_args():
    """Build and parse the CLI for the test/eval tool.

    Also mirrors --local_rank into the LOCAL_RANK environment variable for
    compatibility with torch.distributed launchers.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse_conv_bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    parser.add_argument(
        '--format_only',
        action='store_true',
        help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--gpu_collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
    parser.add_argument(
        '--options', nargs='+', action=MultipleKVAction, help='custom options')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
|
def main():
    """Entry point for testing and/or evaluating a trained detector."""
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show), 'Please specify at least one operation (save/eval/format/show the results) with the argument "--out", "--eval", "--format_only" or "--show"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None  # weights come from the checkpoint instead
    cfg.data.test.test_mode = True
    # init the distributed environment before anything that depends on it
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader: one image per GPU, no shuffling for evaluation
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    # build the model and load the checkpoint (optionally fp16 / fused)
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # prefer class names stored in the checkpoint; fall back to the dataset's
    if ('CLASSES' in checkpoint['meta']):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # only rank 0 writes, formats, or evaluates the gathered results
    if (rank == 0):
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.options is None) else args.options)
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
|
def coco_eval_with_return(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    """Run COCO evaluation and return the metrics instead of only printing.

    Args:
        result_files (dict): result type -> path of the json result file.
        result_types (list[str]): subset of
            ['proposal', 'bbox', 'segm', 'keypoints'].
        coco (COCO | str): COCO api object or path to the annotation file.
        max_dets (tuple): proposal numbers used for 'proposal' evaluation.

    Returns:
        dict: result type -> metrics; a named dict for 'bbox'/'segm',
        otherwise the raw ``cocoEval.stats`` array.
    """
    for res_type in result_types:
        assert (res_type in ['proposal', 'bbox', 'segm', 'keypoints'])
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # proposals are scored with the bbox IoU machinery
        iou_type = ('bbox' if (res_type == 'proposal') else res_type)
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if (res_type == 'proposal'):
            cocoEval.params.useCats = 0  # class-agnostic proposal evaluation
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if ((res_type == 'segm') or (res_type == 'bbox')):
            # name the 12 standard COCO summary statistics
            metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl']
            eval_results[res_type] = {metric_names[i]: cocoEval.stats[i] for i in range(len(metric_names))}
        else:
            eval_results[res_type] = cocoEval.stats
    return eval_results
|
def voc_eval_with_return(result_file, dataset, iou_thr=0.5, logger='print', only_ap=True):
    """Evaluate PASCAL-VOC-style mAP for a pickled detection result file.

    Args:
        result_file (str): path to the pickled per-image detections.
        dataset: dataset object providing ``get_ann_info`` and ``CLASSES``.
        iou_thr (float): IoU threshold for a true positive.
        logger: passed through to ``eval_map`` ('print' to print tables).
        only_ap (bool): keep only the 'ap' entry of each per-class result.

    Returns:
        tuple: (mean_ap, eval_results)
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 uses the 11-point metric; other datasets are identified by class names.
    if getattr(dataset, 'year', None) == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results, annotations, scale_ranges=None, iou_thr=iou_thr,
        dataset=dataset_name, logger=logger)
    if only_ap:
        eval_results = [{'ap': res['ap']} for res in eval_results]
    return mean_ap, eval_results
|
def single_gpu_test(model, data_loader, show=False):
    """Run inference over a dataset on a single GPU.

    Args:
        model: wrapped detector (e.g. MMDataParallel).
        data_loader: test-mode data loader.
        show (bool): visualize each result instead of rescaling boxes back
            to the original image scale.

    Returns:
        list: per-image results in dataset order.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            # When showing, keep boxes in the displayed (network input) scale.
            result = model(return_loss=False, rescale=not show, **data)
        results.append(result)
        if show:
            model.module.show_result(data, result, dataset.img_norm_cfg)
        # Advance the bar once per image in this batch.
        for _ in range(data['img'][0].size(0)):
            prog_bar.update()
    return results
|
def multi_gpu_test(model, data_loader, tmpdir=None):
    """Run distributed inference; results are gathered to rank 0.

    Args:
        model: wrapped detector (e.g. MMDistributedDataParallel).
        data_loader: test-mode data loader with a distributed sampler.
        tmpdir (str | None): directory used by ``collect_results`` for
            cross-rank result exchange; auto-created when None.

    Returns:
        list | None: merged results on rank 0, None on other ranks.
    """
    model.eval()
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    results = []
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)
        if rank == 0:
            # Every rank processed one batch; advance by the global batch size.
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()
    return collect_results(results, len(dataset), tmpdir)
|
def collect_results(result_part, size, tmpdir=None):
    """Gather per-rank result lists through a shared temp directory.

    Each rank pickles its partial results; rank 0 reloads all parts,
    interleaves them back into dataset order, trims sampler padding, and
    removes the temp directory.

    Args:
        result_part (list): this rank's results.
        size (int): total dataset size (used to drop padded duplicates).
        tmpdir (str | None): shared directory; when None, rank 0 creates one
            and broadcasts its path to the other ranks.

    Returns:
        list | None: merged results on rank 0, None elsewhere.
    """
    rank, world_size = get_dist_info()
    if tmpdir is None:
        # Broadcast a space-padded, fixed-length encoding of the tmpdir path.
        MAX_LEN = 512
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            encoded = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(encoded)] = encoded
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Dump this rank's partial results, then wait for every rank to finish.
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    if rank != 0:
        return None
    part_list = [
        mmcv.load(osp.join(tmpdir, 'part_{}.pkl'.format(i)))
        for i in range(world_size)
    ]
    # The distributed sampler deals images round-robin, so interleaving the
    # parts restores dataset order; slicing drops padded duplicates.
    ordered_results = []
    for res in zip(*part_list):
        ordered_results.extend(res)
    ordered_results = ordered_results[:size]
    shutil.rmtree(tmpdir)
    return ordered_results
|
def parse_args():
    """Parse command-line arguments for corruption-benchmark testing."""
    corruption_choices = [
        'all', 'benchmark', 'noise', 'blur', 'weather', 'digital', 'holdout',
        'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
        'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
        'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
        'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
        'spatter', 'saturate'
    ]
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--corruptions', type=str, nargs='+',
                        default='benchmark', choices=corruption_choices,
                        help='corruptions')
    parser.add_argument('--severities', type=int, nargs='+',
                        default=[0, 1, 2, 3, 4, 5],
                        help='corruption severity levels')
    parser.add_argument('--eval', type=str, nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        help='eval types')
    parser.add_argument('--iou-thr', type=float, default=0.5,
                        help='IoU threshold for pascal voc evaluation')
    parser.add_argument('--summaries', type=bool, default=False,
                        help='Print summaries for every corruption and severity')
    parser.add_argument('--workers', type=int, default=32,
                        help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--final-prints', type=str, nargs='+',
                        choices=['P', 'mPC', 'rPC'], default='mPC',
                        help='corruption benchmark metric to print at the end')
    parser.add_argument('--final-prints-aggregate', type=str,
                        choices=['all', 'benchmark'], default='benchmark',
                        help='aggregate all results or only those for benchmark corruptions')
    args = parser.parse_args()
    # Make single-process runs compatible with torch.distributed launchers.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
|
def _resolve_corruptions(args):
    """Expand a corruption group name into the concrete corruption list.

    Side effect: for the 'None' group, ``args.severities`` is forced to [0].
    """
    noise = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    blur = ['defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur']
    weather = ['snow', 'frost', 'fog', 'brightness']
    digital = ['contrast', 'elastic_transform', 'pixelate', 'jpeg_compression']
    holdout = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    benchmark = noise + blur + weather + digital
    if 'all' in args.corruptions:
        return benchmark + holdout
    if 'benchmark' in args.corruptions:
        return benchmark
    if 'noise' in args.corruptions:
        return noise
    if 'blur' in args.corruptions:
        return blur
    if 'weather' in args.corruptions:
        return weather
    if 'digital' in args.corruptions:
        return digital
    if 'holdout' in args.corruptions:
        return holdout
    if 'None' in args.corruptions:
        args.severities = [0]
        return ['None']
    return args.corruptions

def main():
    """Benchmark a detector's robustness: test under image corruptions at
    multiple severities, evaluate, and aggregate/print the results.
    """
    args = parse_args()
    assert (args.out or args.show), (
        'Please specify at least one operation (save or show the results) '
        'with the argument "--out" or "--show"')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)
    if args.seed is not None:
        set_random_seed(args.seed)
    corruptions = _resolve_corruptions(args)
    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for corruption_severity in args.severities:
            if corr_i > 0 and corruption_severity == 0:
                # Severity 0 is the clean image and identical for every
                # corruption; reuse the first corruption's clean result.
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue
            test_data_cfg = copy.deepcopy(cfg.data.test)
            if corruption_severity > 0:
                # Insert the corruption right after the image-loading step.
                corruption_trans = dict(
                    type='Corrupt', corruption=corruption,
                    severity=corruption_severity)
                test_data_cfg['pipeline'].insert(1, corruption_trans)
            print('\nTesting {} at severity {}'.format(corruption, corruption_severity))
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset, imgs_per_gpu=1, workers_per_gpu=args.workers,
                dist=distributed, shuffle=False)
            model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
            if cfg.get('fp16', None) is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
            # Prefer class names stored with the checkpoint.
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES
            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(), device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)
            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' + osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                # NOTE(review): `datasets` is not defined in this
                                # function -- presumably the mmdet.datasets module;
                                # confirm the import at the top of the file.
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = voc_eval_with_return(
                                    args.out, test_dataset, args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation is supported for pascal voc')
                elif eval_types:
                    print('Starting evaluate {}'.format(' and '.join(eval_types)))
                    if eval_types == ['proposal_fast']:
                        result_file = args.out
                    elif not isinstance(outputs[0], dict):
                        result_files = dataset.results2json(outputs, args.out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            # BUGFIX: the per-task result file name was split
                            # across a broken statement (a bare unary `+` on a
                            # string); build it in one expression instead.
                            result_file = args.out + '.{}'.format(name)
                            result_files = dataset.results2json(outputs_, result_file)
                    eval_results = coco_eval_with_return(
                        result_files, eval_types, dataset.coco)
                    aggregated_results[corruption][corruption_severity] = eval_results
                else:
                    print('\nNo task was selected for evaluation;'
                          '\nUse --eval to select a task')
                # Persist the (possibly partial) aggregate after each run.
                mmcv.dump(aggregated_results, eval_results_filename)
    if rank == 0:
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate
        if cfg.dataset_type == 'VOCDataset':
            get_results(eval_results_filename, dataset='voc',
                        prints=prints, aggregate=aggregate)
        else:
            get_results(eval_results_filename, dataset='coco',
                        prints=prints, aggregate=aggregate)
|
def parse_args():
    """Parse command-line arguments for detector training."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument('--resume_from', help='the checkpoint file to resume from')
    parser.add_argument('--validate', action='store_true',
                        help='whether to evaluate the checkpoint during training')
    # --gpus (a count) and --gpu-ids (explicit ids) are mutually exclusive.
    gpus_group = parser.add_mutually_exclusive_group()
    gpus_group.add_argument('--gpus', type=int,
                            help='number of gpus to use (only applicable to non-distributed training)')
    gpus_group.add_argument('--gpu-ids', type=int, nargs='+',
                            help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--autoscale-lr', action='store_true',
                        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    # Make single-process runs compatible with torch.distributed launchers.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
|
def main():
    """Training entry point: build config, logger, model and datasets, then
    hand off to ``train_detector``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Command-line options take precedence over config values.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    elif args.gpus is None:
        cfg.gpu_ids = range(1)
    else:
        cfg.gpu_ids = range(args.gpus)
    if args.autoscale_lr:
        # Linear scaling rule with 8 GPUs as the reference batch size.
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    meta = dict()
    env_info = '\n'.join(
        '{}: {}'.format(k, v) for k, v in collect_env().items())
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # Workflow includes a validation phase; apply the training pipeline
        # to the validation data.
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # Record mmdet version, config text and class names in checkpoints.
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed,
                   validate=args.validate, timestamp=timestamp, meta=meta)
|
def convert(in_file, out_file):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): path of the checkpoint to upgrade.
        out_file (str): path to write the upgraded checkpoint to.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    # Old layout: '(cls_convs|reg_convs).<idx>.(weight|bias)'
    # New layout: '(cls_convs|reg_convs).<idx>.conv.(weight|bias)'
    # BUGFIX: the old pattern used unescaped '.' (matches any char) and a
    # single-digit '\d', which skipped conv indices >= 10.
    pattern = re.compile(r'(cls_convs|reg_convs)\.\d+\.(weight|bias)')
    for key, val in in_state_dict.items():
        m = pattern.search(key)
        if m is not None:
            # Insert 'conv.' before the parameter name.
            param = m.group(2)
            new_key = key.replace(param, 'conv.{}'.format(param))
            out_state_dict[new_key] = val
        else:
            out_state_dict[key] = val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
|
def main():
    """Command-line entry point: upgrade a checkpoint to the current key layout."""
    parser = argparse.ArgumentParser(description='Upgrade model version')
    for arg_name, arg_help in (('in_file', 'input checkpoint file'),
                               ('out_file', 'output checkpoint file')):
        parser.add_argument(arg_name, help=arg_help)
    args = parser.parse_args()
    convert(args.in_file, args.out_file)
|
@HEADS.register_module
class SepcFreeAnchorRetinaHead(FreeAnchorRetinaHead):
    """FreeAnchor retina head whose per-level input may be a
    [cls_feat, reg_feat] pair (presumably from the SEPC neck) instead of a
    single shared feature map.
    """

    def forward_single(self, x):
        # A plain tensor feeds both towers; a list supplies one feature
        # map per tower.
        if isinstance(x, list):
            cls_feat, reg_feat = x[0], x[1]
        else:
            cls_feat = reg_feat = x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
|
@HEADS.register_module
class SepcRetinaHead(RetinaHead):
    """Retina head variant accepting either a shared feature map or a
    [cls_feat, reg_feat] pair per level.
    """

    def forward_single(self, x):
        # Split list inputs into per-tower features; duplicate otherwise.
        cls_feat, reg_feat = (x[0], x[1]) if isinstance(x, list) else (x, x)
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.retina_cls(cls_feat)
        bbox_pred = self.retina_reg(reg_feat)
        return cls_score, bbox_pred
|
@NECKS.register_module
class SEPC(nn.Module):
    """Scale-Equalizing Pyramid Convolution neck.

    Applies a stack of pyramid convolutions (``PConvModule``) across feature
    levels, then a final localization ('l') / classification ('c') conv pair
    shared over levels, optionally with integrated batch norm (``iBN``)
    computed jointly across levels.

    Args:
        in_channels (list[int] | None): channels of each input level;
            defaults to five 256-channel levels.
        out_channels (int): channels of each output level.
        num_outs (int): number of output levels; must be 5.
        pconv_deform (bool): use deformable conv inside the pyramid convs.
        lcconv_deform (bool): use deformable conv in the final lconv/cconv.
        iBN (bool): normalize jointly across pyramid levels.
        Pconv_num (int): number of stacked PConvModules.
    """

    def __init__(self, in_channels=None, out_channels=256, num_outs=5,
                 pconv_deform=False, lcconv_deform=False, iBN=False,
                 Pconv_num=4):
        super(SEPC, self).__init__()
        if in_channels is None:
            # Avoid a shared mutable default argument.
            in_channels = [256] * 5
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        assert num_outs == 5
        self.fp16_enabled = False
        self.iBN = iBN
        self.Pconvs = nn.ModuleList(
            PConvModule(in_channels[i], out_channels, iBN=self.iBN,
                        part_deform=pconv_deform)
            for i in range(Pconv_num))
        # Final task-specific convs operate on PConv outputs, which are
        # out_channels wide (was hard-coded to 256).
        self.lconv = sepc_conv(out_channels, out_channels, kernel_size=3,
                               dilation=1, part_deform=lcconv_deform)
        self.cconv = sepc_conv(out_channels, out_channels, kernel_size=3,
                               dilation=1, part_deform=lcconv_deform)
        self.relu = nn.ReLU()
        if self.iBN:
            self.lbn = nn.BatchNorm2d(out_channels)
            self.cbn = nn.BatchNorm2d(out_channels)
        self.init_weights()

    def init_weights(self):
        """Initialize lconv/cconv with N(0, 0.01) weights and zero bias."""
        # Renamed loop variable: the original shadowed the builtin `str`.
        for prefix in ['l', 'c']:
            m = getattr(self, prefix + 'conv')
            init.normal_(m.weight.data, 0, 0.01)
            if m.bias is not None:
                m.bias.data.zero_()

    @auto_fp16()
    def forward(self, inputs):
        """Run the pyramid convs, then emit [cls_feat, loc_feat] per level."""
        assert len(inputs) == len(self.in_channels)
        x = inputs
        for pconv in self.Pconvs:
            x = pconv(x)
        cls = [self.cconv(level, item) for level, item in enumerate(x)]
        loc = [self.lconv(level, item) for level, item in enumerate(x)]
        if self.iBN:
            # Joint normalization across all pyramid levels.
            cls = iBN(cls, self.cbn)
            loc = iBN(loc, self.lbn)
        outs = [[self.relu(s), self.relu(l)] for s, l in zip(cls, loc)]
        return tuple(outs)
|
class PConvModule(nn.Module):
    """One pyramid-convolution stage: each level is fused with its
    neighbouring levels using stride/upsample-matched convolutions.

    Pconv[0] is applied to the next (coarser) level and upsampled;
    Pconv[1] is applied to the current level;
    Pconv[2] is a stride-2 conv applied to the previous (finer) level.

    Args:
        in_channels (int): input channels per level.
        out_channels (int): output channels per level.
        kernel_size / dilation / groups (sequence of 3 ints): per-branch
            conv parameters (defaults are now tuples to avoid shared
            mutable default arguments).
        iBN (bool): normalize jointly across levels after fusion.
        part_deform (bool): use deformable conv for upper levels.
    """

    def __init__(self, in_channels=256, out_channels=256,
                 kernel_size=(3, 3, 3), dilation=(1, 1, 1), groups=(1, 1, 1),
                 iBN=False, part_deform=False):
        super(PConvModule, self).__init__()
        self.iBN = iBN
        self.Pconv = nn.ModuleList()
        for i in range(3):
            # 'same'-style padding for the given kernel/dilation.
            padding = (kernel_size[i] + (dilation[i] - 1) * 2) // 2
            extra = dict(stride=2) if i == 2 else {}
            self.Pconv.append(
                sepc_conv(in_channels, out_channels,
                          kernel_size=kernel_size[i], dilation=dilation[i],
                          groups=groups[i], padding=padding,
                          part_deform=part_deform, **extra))
        if self.iBN:
            # Width must match the conv output (was hard-coded to 256).
            self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """N(0, 0.01) weights and zero bias for every pyramid conv."""
        for m in self.Pconv:
            init.normal_(m.weight.data, 0, 0.01)
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        """Fuse each level in the list `x` with its neighbours."""
        next_x = []
        for level, feature in enumerate(x):
            fused = self.Pconv[1](level, feature)
            if level > 0:
                # Previous (finer) level comes down via the stride-2 conv.
                fused = fused + self.Pconv[2](level, x[level - 1])
            if level < len(x) - 1:
                # Next (coarser) level comes up via bilinear upsampling.
                fused = fused + F.upsample_bilinear(
                    self.Pconv[0](level, x[level + 1]),
                    size=[fused.size(2), fused.size(3)])
            next_x.append(fused)
        if self.iBN:
            next_x = iBN(next_x, self.bn)
        return [self.relu(item) for item in next_x]
|
def iBN(fms, bn):
    """Apply one BatchNorm2d jointly over a list of feature maps.

    Every level is flattened to (n, c, 1, -1) and concatenated along the
    last axis, so the norm statistics are shared across levels; afterwards
    each level is restored to its original spatial shape.

    Args:
        fms (list[Tensor]): per-level maps, all (n, c, h_i, w_i).
        bn (nn.BatchNorm2d): the shared norm layer.

    Returns:
        list[Tensor]: normalized maps with the original shapes.
    """
    spatial_sizes = [fm.shape[2:] for fm in fms]
    n, c = fms[0].shape[0], fms[0].shape[1]
    flat = torch.cat([fm.view(n, c, 1, -1) for fm in fms], dim=-1)
    flat = bn(flat)
    chunks = torch.split(flat, [s[0] * s[1] for s in spatial_sizes], dim=-1)
    return [chunk.view(n, c, s[0], s[1]) for chunk, s in zip(chunks, spatial_sizes)]
|
class sepc_conv(DeformConv):
    """Conv layer that is deformable only for upper pyramid levels.

    ``forward`` takes a pyramid level index ``i``: for ``i < start_level``
    (or when ``part_deform`` is off) it behaves as a plain conv2d using the
    inherited weight; otherwise it predicts per-location offsets and applies
    deformable convolution.
    """
    def __init__(self, *args, part_deform=False, **kwargs):
        super(sepc_conv, self).__init__(*args, **kwargs)
        self.part_deform = part_deform
        if self.part_deform:
            # Offset-prediction conv: 2 offsets (x, y) per kernel sample per
            # deformable group, matching the main conv's geometry.
            self.conv_offset = nn.Conv2d(self.in_channels, (((self.deformable_groups * 2) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), bias=True)
            self.init_offset()
        # Explicit bias parameter added here (the parent DeformConv presumably
        # has none -- TODO confirm against the DeformConv implementation).
        self.bias = nn.Parameter(torch.zeros(self.out_channels))
        self.start_level = 1
    def init_offset(self):
        # Zero-init so the layer starts out as a regular (undeformed) conv.
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()
    def forward(self, i, x):
        # i: pyramid level index; x: feature map of that level.
        if ((i < self.start_level) or (not self.part_deform)):
            # Plain convolution path for the lowest level(s) or when
            # deformation is disabled.
            return torch.nn.functional.conv2d(x, self.weight, bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
        offset = self.conv_offset(x)
        # Deformable path; the bias is broadcast over the spatial dims.
        return (deform_conv(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) + self.bias.unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)))
|
class MultipleKVAction(argparse.Action):
    """argparse action that collects KEY=VALUE pairs into a dict.

    Each value is split on the first '='. Comma-separated values become
    lists, i.e. KEY=V1,V2,V3, and every scalar is coerced to int, float or
    bool when possible, falling back to the raw string.
    """

    def _parse_int_float_bool(self, val):
        """Coerce a string to int, float or bool; return it unchanged otherwise."""
        for cast in (int, float):
            try:
                return cast(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for kv in values:
            key, raw = kv.split('=', maxsplit=1)
            parsed = [self._parse_int_float_bool(v) for v in raw.split(',')]
            # Single values are stored as scalars, not one-element lists.
            options[key] = parsed[0] if len(parsed) == 1 else parsed
        setattr(namespace, self.dest, options)
|
def parse_args():
    """Parse command-line arguments for single/multi-GPU model testing."""
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument('--fuse_conv_bn', action='store_true',
                        help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    parser.add_argument('--format_only', action='store_true',
                        help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+',
                        help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--gpu_collect', action='store_true',
                        help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir',
                        help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
    parser.add_argument('--options', nargs='+', action=MultipleKVAction,
                        help='custom options')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Make single-process runs compatible with torch.distributed launchers.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
|
def main():
    """Testing entry point: run inference, then save, format and/or
    evaluate the results depending on the given flags.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show, (
        'Please specify at least one operation (save/eval/format/show the '
        'results) with the argument "--out", "--eval", "--format_only" or "--show"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed, shuffle=False)
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    if cfg.get('fp16', None) is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # Prefer class names saved with the checkpoint; fall back to the dataset's.
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(
            model.cuda(), device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        # NOTE(review): this passes a gpu_collect flag -- confirm the
        # matching multi_gpu_test helper accepts a 4th argument.
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = args.options if args.options is not None else {}
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
|
def parse_args():
    """Parse the training script's command-line arguments."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument('--resume_from', help='the checkpoint file to resume from')
    parser.add_argument('--validate', action='store_true',
                        help='whether to evaluate the checkpoint during training')
    # A GPU count and an explicit list of GPU ids cannot both be given.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int,
                            help='number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+',
                            help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--autoscale-lr', action='store_true',
                        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        # Keep single-process runs compatible with torch.distributed launchers.
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
|
def main():
    """Train a detector: merge CLI overrides into the config, set up
    logging/seeding, build the model and datasets, and start training.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI overrides win over config-file values.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    if args.autoscale_lr:
        # Linear lr scaling rule, referenced to an 8-GPU setup.
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    logger = get_root_logger(
        log_file=osp.join(cfg.work_dir, '{}.log'.format(timestamp)),
        log_level=cfg.log_level)
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join(
        ['{}: {}'.format(k, v) for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # A two-stage workflow includes validation using the train pipeline.
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # Stamp checkpoints with the mmdet version, config and class names.
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed,
                   validate=args.validate, timestamp=timestamp, meta=meta)
|
def squash(cap_input):
    """Squashing nonlinearity that keeps capsule lengths between 0 and 1.

    Args:
        cap_input: total input of capsules,
            shape [None, h, w, c] or [None, n, d].

    Returns:
        Tensor of the same shape as ``cap_input``, rescaled per capsule by
        |v|^2 / (1 + |v|^2) / |v|.
    """
    with tf.name_scope('squash'):
        norm_sq = tf.reduce_sum(
            tf.square(cap_input), axis=(- 1), keep_dims=True)
        scale = (norm_sq / (1.0 + norm_sq)) / tf.sqrt(norm_sq)
        return cap_input * scale
|
class CapsNet(object):
def __init__(self, mnist):
'initial class with mnist dataset'
self._mnist = mnist
self._dim = 28
self._num_caps = [0]
def _capsule(self, input, i_c, o_c, idx):
'\n compute a capsule,\n conv op with kernel: 9x9, stride: 2,\n padding: VALID, output channels: 8 per capsule.\n As described in the paper.\n :arg\n input: input for computing capsule, shape: [None, w, h, c]\n i_c: input channels\n o_c: output channels\n idx: index of the capsule about to create\n\n :return\n capsule: computed capsule\n '
with tf.variable_scope(('cap_' + str(idx))):
w = tf.get_variable('w', shape=[9, 9, i_c, o_c], dtype=tf.float32)
cap = tf.nn.conv2d(input, w, [1, 2, 2, 1], padding='VALID', name='cap_conv')
if cfg.USE_BIAS:
b = tf.get_variable('b', shape=[o_c], dtype=tf.float32, initializer=self._b_initializer)
cap = (cap + b)
capsule = squash(cap)
capsule = tf.expand_dims(capsule, axis=1)
return capsule
def _dynamic_routing(self, primary_caps, layer_index):
'"\n dynamic routing between capsules\n :arg\n primary_caps: primary capsules with shape [None, 1, 32 x 6 x 6, 1, 8]\n layer_index: index of the current capsule layer, i.e. the input layer for routing\n :return\n digit_caps: the output of digit capsule layer output, with shape: [None, 10, 16]\n '
num_caps = self._num_caps[layer_index]
cap_ws = tf.get_variable('cap_w', shape=[10, num_caps, 8, 16], dtype=tf.float32)
fn_init = tf.zeros([10, num_caps, 1, 16])
cap_predicts = tf.scan((lambda ac, x: tf.matmul(x, cap_ws)), tf.tile(primary_caps, [1, 10, 1, 1, 1]), initializer=fn_init, name='cap_predicts')
cap_predictions = tf.squeeze(cap_predicts, axis=[3])
log_prior = tf.get_variable('log_prior', shape=[10, num_caps], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=cfg.PRIOR_TRAINING)
if (cfg.ROUTING_WAY == 'static'):
digit_caps = self._dynamic_routingV1(log_prior, cap_predictions)
elif (cfg.ROUTING_WAY == 'dynamic'):
digit_caps = self._dynamic_routingV2(log_prior, cap_predictions, num_caps)
else:
raise NotImplementedError
return digit_caps
    def _dynamic_routingV2(self, prior, cap_predictions, num_caps):
        """Dynamic routing implemented with ``tf.while_loop``.

        Args:
            prior: log prior for scaling, shape [10, num_caps].
            cap_predictions: predictions from the layer below,
                shape [None, 10, num_caps, 16].
            num_caps: number of input capsules.

        Returns:
            digit_caps: digit capsules, shape [None, 10, 16].
        """
        init_cap = tf.reduce_sum(cap_predictions, (- 2))
        iters = tf.constant(cfg.ROUTING_ITERS)
        # Add a broadcastable batch dimension to the shared prior.
        prior = tf.expand_dims(prior, 0)
        def body(i, prior, cap_out):
            # Coupling coefficients from the current routing logits.
            c = tf.nn.softmax(prior, dim=1)
            c_expand = tf.expand_dims(c, axis=(- 1))
            s_t = tf.multiply(cap_predictions, c_expand)
            s = tf.reduce_sum(s_t, axis=[2])
            cap_out = squash(s)
            # Agreement between outputs and predictions updates the logits.
            delta_prior = tf.reduce_sum(tf.multiply(tf.expand_dims(cap_out, axis=2), cap_predictions), axis=[(- 1)])
            prior = (prior + delta_prior)
            return [(i - 1), prior, cap_out]
        condition = (lambda i, proir, cap_out: (i > 0))
        # prior's batch dim grows from 1 via broadcasting inside the loop,
        # so it needs a loose shape invariant.
        (_, prior, digit_caps) = tf.while_loop(condition, body, [iters, prior, init_cap], shape_invariants=[iters.get_shape(), tf.TensorShape([None, 10, num_caps]), init_cap.get_shape()])
        return digit_caps
def _dynamic_routingV1(self, prior, cap_predictions):
'\n doing dynamic routing with for loop as static implementation\n :arg\n proir: log prior for scaling with shape [10, num_caps]\n cap_prediction: predictions from layer below with shape [None, 10, num_caps, 16]\n :return\n digit_caps: digit capsules with shape [None, 10, 16]\n '
prior = tf.expand_dims(prior, 0)
for idx in xrange(cfg.ROUTING_ITERS):
with tf.name_scope(('routing_%s' % idx)):
c = tf.nn.softmax(prior, dim=1)
c_t = tf.expand_dims(c, axis=(- 1))
s_t = tf.multiply(cap_predictions, c_t)
s = tf.reduce_sum(s_t, axis=[2])
digit_caps = squash(s)
delta_prior = tf.reduce_sum(tf.multiply(tf.expand_dims(digit_caps, axis=2), cap_predictions), axis=[(- 1)])
prior = (prior + delta_prior)
return digit_caps
def _reconstruct(self, digit_caps):
'\n reconstruct from digit capsules with 3 fully connected layer\n :param\n digit_caps: digit capsules with shape [None, 10, 16]\n :return:\n out: out of reconstruction\n '
with tf.name_scope('reconstruct'):
y_ = tf.expand_dims(self._y_, axis=2)
target_cap = (y_ * digit_caps)
target_cap = tf.reduce_sum(target_cap, axis=1)
fc = slim.fully_connected(target_cap, 512, weights_initializer=self._w_initializer)
fc = slim.fully_connected(fc, 1024, weights_initializer=self._w_initializer)
fc = slim.fully_connected(fc, 784, weights_initializer=self._w_initializer, activation_fn=None)
out = tf.sigmoid(fc)
return out
def _add_loss(self, digit_caps):
    """Add the margin loss and reconstruction loss.

    Args:
        digit_caps: output of digit capsule layer, shape [None, 10, 16].
    Returns:
        total_loss: scalar margin loss plus weighted reconstruction loss.
    """
    with tf.name_scope('loss'):
        # Capsule length (L2 norm) acts as the class-presence probability.
        self._digit_caps_norm = tf.norm(digit_caps, ord=2, axis=2, name='digit_caps_norm')
        with tf.name_scope('pos_loss'):
            # max(0, M_POS - ||v_true||)^2: push the true capsule above M_POS.
            pos_loss = tf.maximum(0.0, (cfg.M_POS - tf.reduce_sum((self._digit_caps_norm * self._y_), axis=1)), name='pos_max')
            pos_loss = tf.square(pos_loss, name='pos_square')
            pos_loss = tf.reduce_mean(pos_loss)
            tf.summary.scalar('pos_loss', pos_loss)
        y_negs = (1.0 - self._y_)
        with tf.name_scope('neg_loss'):
            # max(0, ||v_wrong|| - M_NEG)^2, down-weighted by LAMBDA.
            neg_loss = tf.maximum(0.0, ((self._digit_caps_norm * y_negs) - cfg.M_NEG))
            neg_loss = (tf.reduce_sum(tf.square(neg_loss), axis=(- 1)) * cfg.LAMBDA)
            neg_loss = tf.reduce_mean(neg_loss)
            tf.summary.scalar('neg_loss', neg_loss)
        reconstruct = self._reconstruct(digit_caps)
        with tf.name_scope('l2_loss'):
            # Pixel-wise squared error between the input and its reconstruction.
            reconstruct_loss = tf.reduce_sum(tf.square((self._x - reconstruct)), axis=(- 1))
            reconstruct_loss = tf.reduce_mean(reconstruct_loss)
            tf.summary.scalar('reconstruct_loss', reconstruct_loss)
        total_loss = ((pos_loss + neg_loss) + (cfg.RECONSTRUCT_W * reconstruct_loss))
        # Kept on the instance so attack scripts can reuse the term
        # (see the least-likely-class main() below).
        self.reconstruct_loss = reconstruct_loss
        tf.summary.scalar('loss', total_loss)
    return total_loss
def creat_architecture(self):
    """Create the whole network: placeholders, graph, optimizer, summaries.

    (Name kept as-is — "creat" — for caller compatibility.)
    """
    self._x = tf.placeholder(tf.float32, [None, 784])
    self._y_ = tf.placeholder(tf.float32, [None, 10])
    self._w_initializer = tf.truncated_normal_initializer(stddev=0.1)
    self._b_initializer = tf.zeros_initializer()
    with tf.variable_scope('CapsNet', initializer=self._w_initializer):
        self._build_net()
    self._global_step = tf.Variable(0, trainable=False)
    # Staircase-decayed learning rate driven by the global step.
    lr = tf.train.exponential_decay(cfg.LR, self._global_step, cfg.STEP_SIZE, cfg.DECAY_RATIO, staircase=True)
    tf.summary.scalar('learning rate', lr)
    self._optimizer = tf.train.AdamOptimizer(lr)
    grads_and_vars = self._optimizer.compute_gradients(self._loss)
    tf.summary.scalar('grad_norm', tf.global_norm(grads_and_vars))
    self._train_op = self._optimizer.apply_gradients(grads_and_vars, global_step=self._global_step)
    self._accuracy()
    self._summary_op = tf.summary.merge_all()
    self.saver = tf.train.Saver()
    self.train_writer = tf.summary.FileWriter((cfg.TB_DIR + '/train'))
    self.val_writer = tf.summary.FileWriter((cfg.TB_DIR + '/val'))
def _build_net(self):
    """Build the CapsNet graph: conv1 -> PrimaryCaps -> DigitCaps -> loss.

    Side effects: updates self._dim (spatial-size tracker — assumed to start
    at 28, the MNIST width; TODO confirm against __init__), appends to
    self._num_caps, and sets self._digit_caps and self._loss.
    """
    with tf.name_scope('x_reshape'):
        # Flat 784-vector back to a 28x28 single-channel image.
        x_image = tf.reshape(self._x, [(- 1), 28, 28, 1])
    with tf.variable_scope('conv1'):
        # 9x9 conv, stride 1, VALID padding: 28 -> 20.
        w = tf.get_variable('w', shape=[9, 9, 1, 256], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
        conv1 = tf.nn.conv2d(x_image, w, [1, 1, 1, 1], padding='VALID', name='conv1')
        if cfg.USE_BIAS:
            b = tf.get_variable('b', shape=[256], dtype=tf.float32, initializer=self._b_initializer)
            conv1 = tf.nn.relu((conv1 + b))
        else:
            conv1 = tf.nn.relu(conv1)
        self._dim = (((self._dim - 9) // 1) + 1)
        # Fixed message typo ("mapshould" -> "map should").
        assert (self._dim == 20), 'after conv1, dimensions of feature map should be 20x20'
    with tf.variable_scope('PrimaryCaps'):
        # 9x9 conv, stride 2, VALID padding: 20 -> 6.
        self._dim = (((self._dim - 9) // 2) + 1)
        self._num_caps.append(((self._dim ** 2) * cfg.PRIMARY_CAPS_CHANNELS))
        assert (self._dim == 6), 'dims for primary caps grid should be 6x6.'
        # NOTE: an earlier implementation built cfg.PRIMARY_CAPS_CHANNELS
        # separate _capsule() convolutions and concatenated them; the single
        # 32*8-channel conv below is the equivalent fused form.
        primary_caps = slim.conv2d(conv1, (32 * 8), 9, 2, padding='VALID', activation_fn=None)
        # [None, 1, num_caps, 1, 8] — the singleton axes broadcast in routing.
        primary_caps = tf.reshape(primary_caps, [(- 1), 1, self._num_caps[1], 1, 8])
        primary_caps = squash(primary_caps)
    with tf.variable_scope('digit_caps'):
        self._digit_caps = self._dynamic_routing(primary_caps, 1)
    self._loss = self._add_loss(self._digit_caps)
def _accuracy(self):
    """Attach an accuracy node comparing predicted and true classes."""
    with tf.name_scope('accuracy'):
        # Predicted class = digit capsule with the largest norm.
        hits = tf.equal(tf.argmax(self._y_, 1), tf.argmax(self._digit_caps_norm, 1))
        hits = tf.cast(hits, tf.float32)
        self.accuracy = tf.reduce_mean(hits)
        tf.summary.scalar('accuracy', self.accuracy)
def train_with_summary(self, sess, batch_size=100, iters=0):
    """Run one training step; periodically log, validate and checkpoint."""
    (images, labels) = self._mnist.train.next_batch(batch_size)
    fetches = [self._loss, self._train_op, self.accuracy, self._summary_op]
    (loss, _, train_acc, train_summary) = sess.run(fetches, feed_dict={self._x: images, self._y_: labels})
    past_warmup = (iters > 0)
    if (past_warmup and ((iters % cfg.PRINT_EVERY) == 0)):
        # Log training progress and evaluate one validation batch.
        val_batch = self._mnist.validation.next_batch(batch_size)
        self.train_writer.add_summary(train_summary, iters)
        self.train_writer.flush()
        print(('iters: %d / %d, loss ==> %.4f ' % (iters, cfg.MAX_ITERS, loss)))
        print(('train accuracy: %.4f' % train_acc))
        (test_acc, test_summary) = sess.run([self.accuracy, self._summary_op], feed_dict={self._x: val_batch[0], self._y_: val_batch[1]})
        print(('val accuracy: %.4f' % test_acc))
        self.val_writer.add_summary(test_summary, iters)
        self.val_writer.flush()
    if (past_warmup and ((iters % cfg.SAVE_EVERY) == 0)):
        self.snapshot(sess, iters=iters)
        self.test(sess)
def snapshot(self, sess, iters=0):
    """Checkpoint model parameters under cfg.TRAIN_DIR at step ``iters``."""
    self.saver.save(sess, (cfg.TRAIN_DIR + '/capsnet'), iters)
def test(self, sess, set='validation'):
    """Measure accuracy over a whole split in fixed batches of 100.

    ``set`` (kept for caller compatibility despite shadowing the builtin)
    selects 'test'; anything else means the validation split.
    """
    if (set == 'test'):
        split = self._mnist.test
    else:
        split = self._mnist.validation
    x = split.images
    y_ = split.labels
    scores = []
    for i in tqdm(xrange((len(x) // 100)), desc=('calculating %s accuracy' % set)):
        (lo, hi) = ((i * 100), ((i + 1) * 100))
        scores.append(sess.run(self.accuracy, feed_dict={self._x: x[lo:hi], self._y_: y_[lo:hi]}))
    print('whole {} accuracy: {}'.format(set, np.mean(np.array(scores))))
def adv_validation(self, sess, set, x_adv, max_iter, fname=None):
    """Generate adversarial examples for the first batch of a split and save them.

    Applies the single-step graph op ``x_adv`` ``max_iter`` times to a batch
    of 100 images, writes the result with image_save(), and returns.

    NOTE(review): the accuracy bookkeeping after the early return is
    unreachable leftover code, preserved verbatim.
    """
    # BUG FIX: the 'train' branch previously read the *validation* split.
    if (set == 'test'):
        x = self._mnist.test.images
        y_ = self._mnist.test.labels
    if (set == 'validation'):
        x = self._mnist.validation.images
        y_ = self._mnist.validation.labels
    if (set == 'train'):
        x = self._mnist.train.images
        y_ = self._mnist.train.labels
    acc = []
    for i in tqdm(xrange((len(x) // 100)), desc=('calculating %s accuracy' % set)):
        x_i = x[(i * 100):((i + 1) * 100)]
        y_i = y_[(i * 100):((i + 1) * 100)]
        # Iteratively apply the one-step adversarial update.
        for j in range(max_iter):
            x_i = sess.run(x_adv, feed_dict={self._x: x_i, self._y_: y_i})
        image_save(x_i, fname)
        return
        # --- unreachable below (kept from the original) ---
        ac = sess.run(self.accuracy, feed_dict={self._x: x_i, self._y_: y_i})
        acc.append(ac)
    all_ac = np.mean(np.array(acc))
    print('whole {} accuracy: {}'.format(set, all_ac))
|
def model_test():
    """Smoke test: build the full graph without any dataset attached."""
    net = CapsNet(None)
    net.creat_architecture()
    print('pass')
|
def main(_):
    """FGSM attack: perturb test inputs along the gradient sign of the loss."""
    # Per-step epsilon so max_iter steps total FLAGS.max_epsilon/256.
    eps = (((1.0 * FLAGS.max_epsilon) / 256.0) / FLAGS.max_iter)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    tf.reset_default_graph()
    caps_net = CapsNet(mnist)
    caps_net.creat_architecture()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    ckpt = tf.train.get_checkpoint_state(cfg.TRAIN_DIR)
    # Ascend the loss: x_adv = clip(x + eps * sign(dL/dx), 0, 1).
    (dy_dx,) = tf.gradients(caps_net._loss, caps_net._x)
    x_adv = tf.stop_gradient((caps_net._x + ((1 * eps) * tf.sign(dy_dx))))
    x_adv = tf.clip_by_value(x_adv, 0.0, 1.0)
    with tf.Session(config=config) as sess:
        if (ckpt and cfg.USE_CKPT):
            print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
            caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Created model with fresh paramters.')
            sess.run(tf.global_variables_initializer())
        print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
        caps_net.train_writer.add_graph(sess.graph)
        out_name = ((('samples/gsm_' + str(FLAGS.max_iter)) + ('_' + str(FLAGS.max_epsilon))) + '.PNG')
        caps_net.adv_validation(sess, 'test', x_adv, FLAGS.max_iter, out_name)
|
def model_test():
    """Build the architecture with no dataset to check the graph constructs."""
    capsnet = CapsNet(None)
    capsnet.creat_architecture()
    print('pass')
|
def main(_):
    """Least-likely-class attack on a trained CapsNet.

    Builds a margin loss that *targets* the class with the smallest capsule
    norm, then performs iterative gradient descent on that loss (note the
    minus sign on the update), saving adversarial samples for the test split.
    """
    # Per-step epsilon: the total budget max_epsilon/256 split over max_iter steps.
    eps = (((1.0 * FLAGS.max_epsilon) / 256.0) / FLAGS.max_iter)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    tf.reset_default_graph()
    caps_net = CapsNet(mnist)
    caps_net.creat_architecture()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    train_dir = cfg.TRAIN_DIR
    ckpt = tf.train.get_checkpoint_state(train_dir)
    # Target = one-hot of the class with the *smallest* capsule norm.
    target = tf.one_hot(tf.argmin(caps_net._digit_caps_norm, 1), 10, on_value=1.0, off_value=0.0)
    # Margin loss identical in form to CapsNet._add_loss, but against `target`.
    pos_loss = tf.maximum(0.0, (cfg.M_POS - tf.reduce_sum((caps_net._digit_caps_norm * target), axis=1)), name='pos_max')
    pos_loss = tf.square(pos_loss, name='pos_square')
    pos_loss = tf.reduce_mean(pos_loss)
    y_negs = (1.0 - target)
    neg_loss = tf.maximum(0.0, ((caps_net._digit_caps_norm * y_negs) - cfg.M_NEG))
    neg_loss = (tf.reduce_sum(tf.square(neg_loss), axis=(- 1)) * cfg.LAMBDA)
    neg_loss = tf.reduce_mean(neg_loss)
    # Reuses the reconstruction term cached by _add_loss on the instance.
    loss = ((pos_loss + neg_loss) + (cfg.RECONSTRUCT_W * caps_net.reconstruct_loss))
    # Descend the targeted loss to make the least-likely class more likely.
    (dy_dx,) = tf.gradients(loss, caps_net._x)
    x_adv = tf.stop_gradient((caps_net._x - ((1 * eps) * tf.sign(dy_dx))))
    x_adv = tf.clip_by_value(x_adv, 0.0, 1.0)
    with tf.Session(config=config) as sess:
        if (ckpt and cfg.USE_CKPT):
            print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
            caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Created model with fresh paramters.')
            sess.run(tf.global_variables_initializer())
        print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
        caps_net.train_writer.add_graph(sess.graph)
        caps_net.adv_validation(sess, 'test', x_adv, FLAGS.max_iter, (((('samples/llcm_' + str(FLAGS.max_iter)) + '_') + str(FLAGS.max_epsilon)) + '.PNG'))
|
def model_test():
    """Construct CapsNet without data; succeeds if graph building works."""
    m = CapsNet(None)
    m.creat_architecture()
    print('pass')
|
def main(_):
    """Train CapsNet on MNIST, logging progress and checkpointing at the end."""
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    tf.reset_default_graph()
    caps_net = CapsNet(mnist)
    caps_net.creat_architecture()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    ckpt = tf.train.get_checkpoint_state(cfg.TRAIN_DIR)
    with tf.Session(config=config) as sess:
        if (ckpt and cfg.USE_CKPT):
            print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
            caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Created model with fresh paramters.')
            sess.run(tf.global_variables_initializer())
        print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
        caps_net.train_writer.add_graph(sess.graph)
        # Guard: keeps `iters` bound for the final snapshot if MAX_ITERS == 0.
        iters = 0
        tic = time.time()
        for iters in xrange(cfg.MAX_ITERS):
            sys.stdout.write(('>>> %d / %d \r' % ((iters % cfg.PRINT_EVERY), cfg.PRINT_EVERY)))
            sys.stdout.flush()
            caps_net.train_with_summary(sess, batch_size=100, iters=iters)
            if (((iters % cfg.PRINT_EVERY) == 0) and (iters > 0)):
                toc = time.time()
                print(('average time: %.2f secs' % (toc - tic)))
                tic = time.time()
        caps_net.snapshot(sess, iters)
        caps_net.test(sess, 'test')
|
class CocoDet(CocoDataset):
    """COCO detection wrapped as region-level VQA.

    For each ground-truth box the model is asked, one ``<bbox>`` region per
    turn, to answer the object's category name.
    """

    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, vis_root=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=100):
        # max_gt_per_img caps how many GT boxes become QA turns per image.
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.vis_root = vis_root
        self.vis_processor = vis_processor
        self.max_gt_per_img = max_gt_per_img
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP pixel statistics scaled to the 0-255 range the pipeline works in.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        # mmdet-style pipelines; both resize every image to a square 224x224.
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), dict(type='FilterAnnotations', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotations', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        if test_mode:
            ann_file = f'{self.vis_root}/annotations/instances_val2017.json'
            img_prefix = (self.vis_root + '/val2017')
        else:
            ann_file = f'{self.vis_root}/annotations/instances_train2017.json'
            img_prefix = (self.vis_root + '/train2017')
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # NOTE: super(CocoDataset, self) deliberately *skips* CocoDataset.__init__
        # and initializes its base class directly with the assembled config.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        # System-style prefix listing every category name; prepended to the
        # first question of each conversation.
        begin_str = '<image>\nIn the conversation below, you simply answer the category name based on what you see in the imagery inside a particular region.I will give you only one region each time. Categories Containing '
        class_str = ', '.join(self.CLASSES)
        self.begin_str = ((begin_str + class_str) + '.\n')

    def train_process_test(self, data_item):
        """Turn one pipeline output into tokenized conversation + boxes."""
        image = data_item['img'].data
        ori_labels = data_item['gt_labels'].data
        ori_bboxes = data_item['gt_bboxes'].data
        # Shuffle GT and cap at max_gt_per_img.
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        ori_bboxes = ori_bboxes[shuffle_ids]
        ori_labels = ori_labels[shuffle_ids]
        sources = dict()
        sources['conversations'] = []
        # One (question, category-name answer) round per selected box.
        for i in range(len(ori_labels)):
            question = random.choice(QUESTIONS).strip()
            question = question.replace('<spi_descript>', '<bbox>')
            if (i == 0):
                question = (self.begin_str + question)
            answer = self.CLASSES[ori_labels[i]]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Number of visual tokens; assumes a ViT patch size of 14 — TODO confirm.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` is the loop variable above — this is always true
        # once the loop ran, and raises NameError if the image had no labels.
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        # Normalize boxes to [0, 1] by the (square) image side.
        ori_bboxes = (copy.deepcopy(ori_bboxes) / image.shape[1])
        data_dict['bboxes'] = ori_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def process_text(self, data_item):
        # Some pipelines wrap each field in a one-element list; unwrap it.
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def tokenize(self, text):
        """Tokenize instruction+answer, append EOS, and mask the instruction."""
        res = self.tokenizer((text['instruction'] + text['answer']), return_tensors=None, padding='do_not_pad', truncation=True, max_length=512)
        if ((res['input_ids'][(- 1)] != self.tokenizer.eos_token_id) and (len(res['input_ids']) < 512) and self.add_eos):
            res['input_ids'].append(self.tokenizer.eos_token_id)
            res['attention_mask'].append(1)
        labels = copy.deepcopy(res['input_ids'])
        if self.ignore_instruction:
            # Index [1] presumably skips a BOS token in encode() output — verify.
            bbox_index = labels.index(self.tokenizer.encode('<bbox>')[1])
            labels[:bbox_index] = ([(- 100)] * bbox_index)
        res.update(labels=labels)
        return res

    def __getitem__(self, idx):
        data_item = super().__getitem__(idx)
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
@dataclass
class DataCollatorForDetDataset(object):
    """Collate per-sample dicts into one padded training batch."""
    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances):
        keys = ('input_ids', 'labels', 'img_metas', 'bboxes')
        (input_ids, labels, img_metas, bboxes) = ([inst.get(key, None) for inst in instances] for key in keys)
        # Right-pad ids with the pad token; labels with IGNORE_INDEX.
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        batch = dict(input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), img_metas=img_metas, bboxes=bboxes)
        if ('image' in instances[0]):
            images = [inst['image'] for inst in instances]
            uniform = all((((img is not None) and (img.shape == images[0].shape)) for img in images))
            # Stack into a single tensor only when shapes agree.
            batch['images'] = (torch.stack(images) if uniform else images)
        return batch
|
def make_multitask_data_module(tokenizer, data_args):
    """Make dataset and collator for supervised fine-tuning.

    Args:
        tokenizer: tokenizer shared by the datasets and the collator.
        data_args: arguments object; ``dataset_config`` must point at an
            mmcv-style config file that defines ``spi_datasets``.
    Returns:
        dict with train_dataset, eval_dataset (always None) and data_collator.
    Raises:
        ValueError: if data_args.dataset_config is None (previously this
            surfaced later as an opaque unbound-variable NameError).
    """
    if (data_args.dataset_config is None):
        raise ValueError('data_args.dataset_config must be provided')
    dataset_config = Config.fromfile(data_args.dataset_config)
    multimodal_cfg = dict(is_multimodal=data_args.is_multimodal, sep_image_conv_front=data_args.sep_image_conv_front, image_token_len=data_args.image_token_len, image_aspect_ratio=data_args.image_aspect_ratio, use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False), image_processor=getattr(data_args, 'image_processor', None))
    train_dataset = build_spi_dataset(dataset_config.spi_datasets, tokenizer=tokenizer, multimodal_cfg=multimodal_cfg)
    data_collator = DataCollatorForDetDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
|
def build_spi_dataset(dataset_config, tokenizer=None, multimodal_cfg=None, **kwargs):
    """Recursively build one dataset, or a ConcatDataset from a list of configs.

    Each config dict must carry a ``type`` key naming the dataset class and
    may carry ``ratio`` in (0, 1] to randomly subsample the built dataset.

    Raises:
        NotImplementedError: if ``type`` names an unknown dataset.
    """
    if isinstance(dataset_config, list):
        datasets = [build_spi_dataset(cfg, tokenizer=tokenizer, multimodal_cfg=multimodal_cfg, **kwargs) for cfg in dataset_config]
        type_string = [type(item) for item in datasets]
        print(('#' * 20), type_string, ('#' * 20))
        for dataset in datasets:
            print(('#' * 20), type(dataset), f'len = {len(dataset)}', ('#' * 20))
        return ConcatDataset(datasets)
    dataset_type = dataset_config.pop('type')
    ratio = dataset_config.pop('ratio', 1)
    # Dispatch table replaces the former if/elif chain (same mapping).
    registry = {'coco_det': CocoDet, 'flickr30k': Flickr30k, 'VGDATA': VGDATA, 'det_llava': DetLLava, 'vcr': VCRDataset, 'single_vcr': SingleVCRDataset, 'multi_vcr': MultiVCRDataset, 'RefCOCO': RefCOCO, 'RefCOCOP': RefCOCOP, 'RefCOCOG': RefCOCOG}
    if (dataset_type not in registry):
        raise NotImplementedError
    dataset = registry[dataset_type](**dataset_config, tokenizer=tokenizer, multimodal_cfg=multimodal_cfg, **kwargs)
    if (ratio < 1):
        # Subsample but keep the underlying dataset's collater.
        print(f'randomly sample {ratio} of the dataset {dataset_type}: {int((ratio * len(dataset)))}')
        random_indices = np.random.choice(len(dataset), int((ratio * len(dataset))), replace=False)
        subsample_dataset = torch.utils.data.Subset(dataset, random_indices)
        subsample_dataset.collater = dataset.collater
        return subsample_dataset
    return dataset
|
class ConcatDataset(ConcatDataset):
    """Concatenation wrapper whose collater keeps only the keys that every
    sample in the batch has in common."""

    def __init__(self, datasets):
        super().__init__(datasets)

    def collater(self, samples):
        # Intersect the key sets of all samples in the batch.
        if samples:
            shared_keys = set(samples[0])
            for sample in samples[1:]:
                shared_keys &= set(sample)
        else:
            shared_keys = set()
        # Drop non-shared keys, preserving each sample's own key order.
        trimmed = [{key: value for (key, value) in sample.items() if (key in shared_keys)} for sample in samples]
        # Delegate the actual batching to the first child dataset.
        return self.datasets[0].collater(trimmed)
|
class Flickr30k(CocoDataset):
    """Flickr30k Entities as a grounding/captioning conversation dataset.

    Labels are *text phrases* (caption spans from ``tokens_positive``), not
    class ids; hence the single dummy class below.
    """
    CLASSES = ('object',)

    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=150):
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.ann_file = ann_file
        self.img_prefix = img_prefix
        self.vis_processor = vis_processor
        self.max_gt_per_img = max_gt_per_img
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP pixel statistics scaled to the 0-255 range.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        # Note: train pipeline pads with size_divisor=1 (no padding effect),
        # test pipeline with 224.
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        # Both branches use the same files; kept for symmetry with CocoDet.
        if test_mode:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        else:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # Skips CocoDataset.__init__ and initializes its base class directly.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        # file_name -> full image caption, filled in _parse_ann_info.
        self.id_cap_dict = dict()
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def _filter_imgs(self, min_size=32):
        'Filter images too small or without ground truths.'
        valid_inds = []
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        # Keep img_ids aligned with the surviving data_infos.
        self.img_ids = valid_img_ids
        return valid_inds

    def load_annotations(self, ann_file):
        """Load annotation from a COCO-style annotation file.

        Args:
            ann_file (str): Path of annotation file.
        Returns:
            list[dict]: Annotation info from the COCO api.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            info['height'] = int(info['height'])
            info['width'] = int(info['width'])
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert (len(set(total_ann_ids)) == len(total_ann_ids)), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Labels are the caption phrases referenced by each annotation's
        ``tokens_positive`` character offsets (first phrase only).

        Args:
            ann_info (list[dict]): Annotation info of an image.
        Returns:
            dict with keys: bboxes, labels, caption, bboxes_ignore, masks,
            seg_map. "masks" are raw annotations, not decoded binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        # Remember the full caption for this image (used as the first answer).
        self.id_cap_dict[img_info['file_name']] = img_info['caption']
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Clip against the image; drop boxes fully outside.
            inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
            inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
            if ((inter_w * inter_h) == 0):
                continue
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] in self.cat_ids):
                pass
            else:
                raise ValueError('category_id not in self.cat_ids')
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                # Caption substrings for this box; only the first is kept.
                gt_list = [img_info['caption'][atp[0]:atp[1]] for atp in ann['tokens_positive']]
                gt_labels.append(gt_list[0])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, caption=img_info['caption'], bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
        return ann

    def process_text(self, data_item):
        # Some pipelines wrap each field in a one-element list; unwrap it.
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def train_process_test(self, data_item):
        """Build the conversation: one caption round over all regions, then
        one phrase-answer round per (shuffled) region."""
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        question = random.choice(FINAL_QUESTIONS).strip()
        # First turn enumerates every region and is answered with the caption.
        s_bbox_string = ''
        num_bboxes = min(len(ori_labels), self.max_gt_per_img)
        for id in range(num_bboxes):
            s_bbox_string = (s_bbox_string + f'region{(id + 1)} <bbox>,')
        question = question.replace('<spi_descript>', s_bbox_string)
        sources['conversations'].append({'from': 'human', 'value': question})
        sources['conversations'].append({'from': 'gpt', 'value': self.id_cap_dict[data_item['img_metas'].data['filename'].split('/')[(- 1)]]})
        # Then one phrase question per selected region (same count as above).
        shuffle_ids = torch.randperm(len(ori_labels))
        shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        for i in range(len(select_labels)):
            question = random.choice(REGION_QUESTIONS).strip()
            question = question.replace('<spi_descript>', f'region {(i + 1)}')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # Visual token count; assumes a ViT patch size of 14 — TODO confirm.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` is the loop variable above — always true when the
        # region loop ran; NameError if select_labels was empty.
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        select_bboxes = torch.cat([select_bboxes], dim=0)
        # Normalize boxes to [0, 1] by the (square) image side.
        select_bboxes = (copy.deepcopy(select_bboxes) / image.shape[1])
        data_dict['bboxes'] = select_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def __getitem__(self, idx):
        data_item = super().__getitem__(idx)
        # Resample (up to 10 tries) until we get an image with GT labels.
        max_loops = 10
        i = 0
        while True:
            if (i > max_loops):
                raise ValueError('No gt_labels')
            i += 1
            if (len(data_item['gt_labels']) == 0):
                idx = random.randint(0, (len(self) - 1))
                data_item = super().__getitem__(idx)
            else:
                break
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
class RefCOCO(CocoDataset):
CLASSES = ('object',)
def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=15):
self.multimodal_cfg = multimodal_cfg
self.tokenizer = tokenizer
self.ann_file = ann_file
self.img_prefix = img_prefix
self.vis_processor = vis_processor
self.max_gt_per_img = max_gt_per_img
self.add_eos = add_eos
self.ignore_instruction = ignore_instruction
self.filter_small = filter_small
self.test_mode = test_mode
img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
if test_mode:
pipeline = test_pipeline
else:
pipeline = train_pipeline
if test_mode:
ann_file = self.ann_file
img_prefix = self.img_prefix
else:
ann_file = self.ann_file
img_prefix = self.img_prefix
train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
super(CocoDataset, self).__init__(**train)
self.num_classes = len(self.CLASSES)
self.id_cap_dict = dict()
self.begin_str = "<image>\n I will provide you with only one region containing only one object, although there may be other objects present in the image. It is recommended that you describe the object's relative position with respect to other objects in the image, as well as its position within the image and its basic attributes."
def _filter_imgs(self, min_size=32):
'Filter images too small or without ground truths.'
valid_inds = []
valid_img_ids = []
for (i, img_info) in enumerate(self.data_infos):
img_id = self.img_ids[i]
if (min(img_info['width'], img_info['height']) >= min_size):
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def load_annotations(self, ann_file):
'Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n '
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
num_remove_images = 0
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
if (len(info['caption'].split(' ')) < 3):
num_remove_images += 1
continue
info['filename'] = info['file_name'].split('_')[(- 1)]
info['height'] = int(info['height'])
info['width'] = int(info['width'])
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert (len(set(total_ann_ids)) == len(total_ann_ids)), f"Annotation ids in '{ann_file}' are not unique!"
print(f'Filtered {num_remove_images} from {self.ann_file} ')
return data_infos
def _parse_ann_info(self, img_info, ann_info):
'Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n '
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
img_path = os.path.join(self.img_prefix, img_info['file_name'].split('_')[(- 1)])
self.id_cap_dict[img_info['file_name'].split('_')[(- 1)]] = img_info['caption']
for (i, ann) in enumerate(ann_info):
if ann.get('ignore', False):
continue
(x1, y1, w, h) = ann['bbox']
inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
if ((inter_w * inter_h) == 0):
continue
if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
continue
bbox = [x1, y1, (x1 + w), (y1 + h)]
gt_bboxes.append(bbox)
gt_labels.append(img_info['caption'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(bboxes=gt_bboxes, labels=gt_labels, caption=img_info['caption'], bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
return ann
def process_text(self, data_item):
    """Unwrap batched pipeline output (if any) and build the text sample."""
    item = data_item
    if isinstance(item['img'], list):
        # Batched output: keep only the first element of every field.
        item = {key: value[0] for key, value in item.items()}
    return self.train_process_test(item)
def train_process_test(self, data_item):
    """Build one region-captioning training sample.

    Pairs each (shuffled, capped) ground-truth box with a templated
    question/answer round, tokenizes the conversation, and returns the
    tensors the trainer consumes.

    Args:
        data_item (dict): Pipeline output with ``img``, ``gt_labels``,
            ``gt_bboxes`` and ``img_metas`` entries.

    Returns:
        dict: ``input_ids``, ``labels``, ``image``, ``bboxes`` (divided by
        the image side length) and ``img_metas``.
    """
    image = data_item['img'].data
    ori_labels = data_item['gt_labels']
    ori_bboxes = data_item['gt_bboxes'].data
    sources = {'conversations': []}
    # Randomly shuffle the GT instances and keep at most max_gt_per_img.
    shuffle_ids = torch.randperm(len(ori_labels))
    if (len(shuffle_ids) > self.max_gt_per_img):
        shuffle_ids = shuffle_ids[:self.max_gt_per_img]
    select_bboxes = ori_bboxes[shuffle_ids]
    select_labels = [ori_labels[i] for i in shuffle_ids]
    # One human/gpt round per selected region.
    for i in range(len(select_labels)):
        question = random.choice(QUESTIONS).strip()
        question = question.replace('<spi_descript>', '<bbox>')
        answer = select_labels[i]
        sources['conversations'].append({'from': 'human', 'value': question})
        sources['conversations'].append({'from': 'gpt', 'value': answer})
    # Prepend the dataset-specific instruction to the first question only.
    sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
    # Vision-token count for a /14 patch grid; the image must be square.
    cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
    assert (image.shape[1] == image.shape[2])
    sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
    data_dict = preprocess(sources, self.tokenizer)
    # NOTE(review): `i` leaks from the loop above, so this check is True
    # whenever at least one region was selected; with zero regions it would
    # raise NameError -- presumably guarded by __getitem__'s resampling.
    if isinstance(i, int):
        data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
    data_dict['image'] = image
    ori_bboxes = select_bboxes
    # Normalize coordinates by the (square) image side length.
    ori_bboxes = (copy.deepcopy(ori_bboxes) / image.shape[1])
    data_dict['bboxes'] = ori_bboxes
    data_dict['img_metas'] = data_item['img_metas'].data
    return data_dict
def __getitem__(self, idx):
    """Fetch a sample, resampling random indices while gt_labels is empty."""
    data_item = super().__getitem__(idx)
    max_loops = 10
    # Up to max_loops + 1 resamples before giving up, as in the original
    # counter-based loop.
    for _ in range(max_loops + 1):
        if len(data_item['gt_labels']) != 0:
            break
        idx = random.randint(0, len(self) - 1)
        data_item = super().__getitem__(idx)
    else:
        raise ValueError('No gt_labels')
    return self.process_text(data_item=data_item)
|
class RefCOCOP(RefCOCO):
    """Variant of :class:`RefCOCO` (RefCOCO+ by its name) that differs only
    in the instruction prepended to the first conversation turn."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Runtime prompt string; kept verbatim (including the 'attibuts'
        # typo) since trained checkpoints may expect this exact text.
        self.begin_str = "<image>\n I will provide you with only one region containing only one object, although there may be other objects present in the image. It is recommended that you describe the object's relative position with respect to other objects in the image and its basic attibuts, you should not give its position within the image"
|
class RefCOCOG(RefCOCO):
    """Variant of :class:`RefCOCO` (RefCOCOg by its name).

    Differs from the parent in the opening instruction and in the question
    template: questions come from REFG_QUESTIONS and each region is named
    ``region{i}`` in the prompt.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Instruction prepended to the first conversation turn.
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def train_process_test(self, data_item):
        """Build one training sample from an image and its region captions.

        Returns:
            dict: ``input_ids``, ``labels``, ``image``, ``bboxes``
            (divided by the image side length) and ``img_metas``.
        """
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        # Shuffle regions and cap at max_gt_per_img.
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        # One human/gpt round per region, numbered region1, region2, ...
        for i in range(len(select_labels)):
            question = random.choice(REFG_QUESTIONS).strip()
            question = question.replace('<spi_descript>', f'region{(i + 1)} <bbox>')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Prepend the instruction to the first question only.
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # Vision-token count for a /14 patch grid; the image must be square.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` leaks from the loop above; this is True whenever
        # at least one region exists (NameError otherwise).
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        ori_bboxes = select_bboxes
        # Normalize coordinates by the (square) image side length.
        ori_bboxes = (copy.deepcopy(ori_bboxes) / image.shape[1])
        data_dict['bboxes'] = ori_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict
|
class VGDATA(CocoDataset):
    """Visual Genome region-description dataset wrapped as a COCO dataset.

    Every annotation carries a free-form ``caption`` used as the answer
    text; the single detection class is ``'object'``.
    """

    CLASSES = ('object',)

    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=15):
        """Store configuration and initialize the underlying dataset.

        Args:
            tokenizer: Text tokenizer used when building samples.
            multimodal_cfg (dict): Multimodal preprocessing options.
            vis_processor: Stored; not used within this class.
            ann_file (str): COCO-style annotation file path.
            img_prefix (str): Image root directory.
            max_gt_per_img (int): Max regions per conversation.
        """
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.ann_file = ann_file
        self.img_prefix = img_prefix
        self.vis_processor = vis_processor
        self.max_gt_per_img = max_gt_per_img
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP pixel statistics scaled to the 0-255 range.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        # The two pipelines differ only in Pad size_divisor (1 vs 224).
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        # NOTE(review): both branches below are identical -- likely left
        # over from a version with separate train/test annotation paths.
        if test_mode:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        else:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # Bypass CocoDataset.__init__ and call its base class directly.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        # Instruction prepended to the first conversation turn.
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def _filter_imgs(self, min_size=32):
        """Filter images too small; keeps img_ids aligned with data_infos."""
        valid_inds = []
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            info['height'] = int(info['height'])
            info['width'] = int(info['width'])
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert (len(set(total_ann_ids)) == len(total_ann_ids)), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox annotations; region captions become the labels.

        Args:
            img_info (dict): Image info dict.
            ann_info (list[dict]): Annotation dicts for this image.

        Returns:
            dict: bboxes, labels (caption strings), bboxes_ignore, masks
            (raw, not decoded) and seg_map.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Drop boxes that lie entirely outside the image.
            inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
            inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
            if ((inter_w * inter_h) == 0):
                continue
            # Drop degenerate / sub-pixel boxes.
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] not in self.cat_ids):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]  # xywh -> xyxy
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                # The per-region description is the label text.
                gt_labels.append(ann['caption'])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE: replaces every 'jpg' occurrence, not only the extension.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
        return ann

    def process_text(self, data_item):
        """Unwrap batched pipeline output (if any) and build the sample."""
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def train_process_test(self, data_item):
        """Build one training sample pairing regions with QA rounds.

        Returns:
            dict: ``input_ids``, ``labels``, ``image``, ``bboxes``
            (divided by the image side length) and ``img_metas``.
        """
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        # Shuffle regions and cap at max_gt_per_img.
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        # One human/gpt round per region, numbered region1, region2, ...
        for i in range(len(select_labels)):
            question = random.choice(FINAL_QUESTIONS).strip()
            question = question.replace('<spi_descript>', f'region{(i + 1)} <bbox>')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Prepend the instruction to the first question only.
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # Vision-token count for a /14 patch grid; the image must be square.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` leaks from the loop above; this is True whenever
        # at least one region exists (NameError otherwise).
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        # Normalize coordinates by the (square) image side length.
        select_bboxes = (copy.deepcopy(select_bboxes) / image.shape[1])
        data_dict['bboxes'] = select_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def __getitem__(self, idx):
        """Fetch a sample, resampling randomly while gt_labels is empty."""
        data_item = super().__getitem__(idx)
        max_loops = 10
        i = 0
        while True:
            if (i > max_loops):
                raise ValueError('No gt_labels')
            i += 1
            if (len(data_item['gt_labels']) == 0):
                # Empty after filtering: retry with a random other index.
                idx = random.randint(0, (len(self) - 1))
                data_item = super().__getitem__(idx)
            else:
                break
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
def forward(self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Flash-attention drop-in replacement for LlamaAttention.forward.

    Input shape: Batch x Time x Channel.

    attention_mask: [bsz, q_len] key-padding mask consumed by
    ``unpad_input`` (not an additive causal mask; causality is applied
    inside the flash kernel via ``causal=True``).

    Returns ``(attn_output, None, None)``: attention weights and KV cache
    are not supported by this implementation (asserted below).
    """
    (bsz, q_len, _) = hidden_states.size()
    # Project to multi-head Q/K/V: (bsz, num_heads, q_len, head_dim).
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    kv_seq_len = key_states.shape[(- 2)]
    offset = 0
    if (past_key_value is not None):
        offset = past_key_value[0].shape[(- 2)]
        kv_seq_len += offset
    # Rotary position embedding, shifted by the cached prefix length.
    (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, offset=offset)
    # The flash path cannot return weights or use/produce a KV cache.
    assert (not output_attentions), 'output_attentions is not supported'
    assert (not use_cache), 'use_cache is not supported'
    assert (past_key_value is None), 'past_key_value is not supported'
    # Pack Q/K/V: stack -> (bsz, heads, 3, q_len, dim); transpose(1, 3)
    # -> (bsz, q_len, 3, heads, dim), the layout the flash kernel expects.
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)
    key_padding_mask = attention_mask
    if (key_padding_mask is None):
        # No padding: flatten batch and time into one packed dimension.
        qkv = rearrange(qkv, 'b s ... -> (b s) ...')
        max_s = q_len
        # Cumulative sequence lengths for the varlen kernel; every
        # sequence has exactly q_len tokens.
        cu_q_lens = torch.arange(0, ((bsz + 1) * q_len), step=q_len, dtype=torch.int32, device=qkv.device)
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
    else:
        # Padded batch: drop pad tokens, run the varlen kernel, re-pad.
        nheads = qkv.shape[(- 2)]
        x = rearrange(qkv, 'b s three h d -> b s (three h d)')
        (x_unpad, indices, cu_q_lens, max_s) = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
        output_unpad = flash_attn_unpadded_qkvpacked_func(x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, bsz, q_len), 'b s (h d) -> b s h d', h=nheads)
    # Merge heads and apply the output projection.
    return (self.o_proj(rearrange(output, 'b s h d -> b s (h d)')), None, None)
|
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
return attention_mask
|
def replace_llama_attn_with_flash_attn():
    """Monkey-patch HF Llama to use the flash-attention forward above."""
    llama_mod = transformers.models.llama.modeling_llama
    # Skip causal-mask materialization; the flash kernel handles causality.
    llama_mod.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    llama_mod.LlamaAttention.forward = forward
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Strip distributed-training containers from *model*.

    Follows ``.module`` attributes (as set by wrappers such as DDP) until
    reaching an object without one, and returns that innermost model.

    Args:
        model (`torch.nn.Module`): The model to unwrap.
    """
    unwrapped = model
    while hasattr(unwrapped, 'module'):
        unwrapped = unwrapped.module
    return unwrapped
|
class LLaVATrainer(Trainer):
    """HF Trainer with adapter-only checkpointing and env-var-driven
    optimizer parameter grouping (ONLY_SPI / PROJ)."""

    def _save(self, output_dir: Optional[str]=None, state_dict=None):
        """Additionally save projector/embedding weights on their own when
        only the multimodal MLP adapter is tuned, then defer to Trainer."""
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            _state_dict = state_dict
            if (_state_dict is None):
                # Save the bare model, not any distributed wrapper.
                model_to_save = unwrap_model(self.model)
                _state_dict = model_to_save.state_dict()
            # Keep only projector and token-embedding tensors.
            weight_to_save = {}
            keys_to_match = ['mm_projector', 'embed_tokens', 'embed_in']
            for (k, v) in _state_dict.items():
                if any(((key_match in k) for key_match in keys_to_match)):
                    weight_to_save[k] = v
            current_folder = output_dir.split('/')[(- 1)]
            parent_folder = os.path.dirname(output_dir)
            if current_folder.startswith('checkpoint-'):
                # Collect per-checkpoint adapters under one shared folder.
                mm_projector_folder = os.path.join(parent_folder, 'mm_projector')
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
        super(LLaVATrainer, self)._save(output_dir, state_dict)

    def create_optimizer(self):
        """Set up the optimizer.

        With ONLY_SPI set (and PROJ unset), only 'spi_module' parameters
        train; everything else gets lr=0. With both ONLY_SPI and PROJ set,
        'spi_module' and 'proj' parameters train. Otherwise the standard
        HF grouping (no weight decay on biases/LayerNorm) applies.
        """
        opt_model = (self.model_wrapped if is_sagemaker_mp_enabled() else self.model)
        if (self.optimizer is None):
            # Standard HF recipe: exclude biases and LayerNorm from decay.
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if ('bias' not in name)]
            train_str = 'spi_module'
            if (os.environ.get('ONLY_SPI', None) and (not os.environ.get('PROJ', None))):
                # Train spi_module only; freeze the rest via lr=0.
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if ((train_str in n) and p.requires_grad)], 'weight_decay': 0.01}, {'params': [p for (n, p) in opt_model.named_parameters() if ((train_str not in n) and p.requires_grad)], 'weight_decay': 0.0, 'lr': 0.0}]
            elif (os.environ.get('ONLY_SPI', None) and os.environ.get('PROJ', None)):
                proj_train_str = 'proj'
                spi_train_str = 'spi_module'
                print('Only training SPI and PROJ')
                # Train spi_module + proj; freeze the rest via lr=0.
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if (((spi_train_str in n) or (proj_train_str in n)) and p.requires_grad)], 'weight_decay': 0.0}, {'params': [p for (n, p) in opt_model.named_parameters() if (((proj_train_str not in n) and (spi_train_str not in n)) and p.requires_grad)], 'weight_decay': 0.0, 'lr': 0.0}]
            else:
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if ((n in decay_parameters) and p.requires_grad)], 'weight_decay': self.args.weight_decay}, {'params': [p for (n, p) in opt_model.named_parameters() if ((n not in decay_parameters) and p.requires_grad)], 'weight_decay': 0.0}]
            (optimizer_cls, optimizer_kwargs) = Trainer.get_optimizer_cls_and_kwargs(self.args)
            if (self.sharded_ddp == ShardedDDPOption.SIMPLE):
                if is_fairscale_available():
                    from fairscale.optim import OSS
                else:
                    raise ImportError()
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
                if (optimizer_cls.__name__ == 'Adam8bit'):
                    import bitsandbytes
                    manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
                    skipped = 0
                    for module in opt_model.modules():
                        if isinstance(module, nn.Embedding):
                            # Keep embedding optimizer state in fp32.
                            skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                            print(f'skipped {module}: {(skipped / (2 ** 20))}M params')
                            manager.register_module_override(module, 'weight', {'optim_bits': 32})
                            logger.debug(f'bitsandbytes: will optimize {module} in fp32')
                    print(f'skipped: {(skipped / (2 ** 20))}M params')
        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)
        return self.optimizer
|
@dataclass
class ModelArguments:
    """Options selecting and configuring the model to train."""

    # Base LM checkpoint: HF hub id or local path.
    model_name_or_path: Optional[str] = 'facebook/opt-125m'
    # Conversation/template version tag.
    version: Optional[str] = 'v0'
    freeze_backbone: bool = False
    # Train only the multimodal MLP adapter.
    tune_mm_mlp_adapter: bool = False
    vision_tower: Optional[str] = None
    # Vision-tower layer to take features from (-1 = last).
    mm_vision_select_layer: Optional[int] = -1
    pretrain_mm_mlp_adapter: Optional[str] = None
    mm_use_im_start_end: bool = False
    with_spi: bool = True
    load_from: Optional[str] = None
|
@dataclass
class DataArguments:
    """Options controlling dataset loading and preprocessing."""

    lazy_preprocess: bool = False
    is_multimodal: bool = False
    # Put the '<image>' token in front of the first conversation turn.
    sep_image_conv_front: bool = False
    image_token_len: int = 0
    image_aspect_ratio: str = 'square'
    # field() kept here: the metadata feeds the argument parser's help text.
    dataset_config: Optional[str] = field(default='./gpt4roi/configs/stage1.py', metadata={'help': 'Path to the dataset config file.'})
|
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """Project-specific additions to HF TrainingArguments."""

    cache_dir: Optional[str] = field(default=None)
    # Optimizer implementation name passed through to the HF Trainer.
    optim: str = field(default='adamw_torch')
    # Keep all dataset columns (HF would otherwise drop non-signature keys).
    remove_unused_columns: bool = field(default=False)
    freeze_mm_mlp_adapter: bool = field(default=False)
    force_fsdp: bool = field(default=False)
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
|
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collect the model state dict on CPU and save it via the trainer."""
    state_dict = trainer.model.state_dict()
    if not trainer.args.should_save:
        return
    # Copy every tensor to CPU first, then drop the original references.
    cpu_state_dict = {name: tensor.cpu() for name, tensor in state_dict.items()}
    del state_dict
    trainer._save(output_dir, state_dict=cpu_state_dict)
|
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Add special tokens and resize model embeddings to match.

    New input and output embedding rows are initialized to the mean of the
    pre-existing rows instead of random values.

    Note: This is the unoptimized version that may make your embedding size
    not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if (num_new_tokens > 0):
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data
        # Mean of the original rows (all but the freshly appended ones).
        input_embeddings_avg = input_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)
        # Initialize the appended rows in place.
        input_embeddings[(- num_new_tokens):] = input_embeddings_avg
        output_embeddings[(- num_new_tokens):] = output_embeddings_avg
|
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize each string separately and record its unpadded length."""
    encodings = [
        tokenizer(
            text,
            return_tensors='pt',
            padding='longest',
            max_length=tokenizer.model_max_length,
            truncation=True,
        )
        for text in strings
    ]
    # input_ids and labels deliberately alias the same tensor list.
    token_ids = [enc.input_ids[0] for enc in encodings]
    # Count non-pad tokens to obtain the true sequence lengths.
    lengths = [enc.input_ids.ne(tokenizer.pad_token_id).sum().item() for enc in encodings]
    return dict(input_ids=token_ids, labels=token_ids, input_ids_lens=lengths, labels_lens=lengths)
|
def _mask_targets(target, tokenized_lens, speakers):
    """Mask non-assistant spans of *target* in place with IGNORE_INDEX.

    The first segment (header) is always masked; each subsequent 'human'
    segment is masked except its first two tokens.
    """
    offset = tokenized_lens[0]
    target[:offset] = IGNORE_INDEX
    for seg_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == 'human':
            target[offset + 2:offset + seg_len] = IGNORE_INDEX
        offset += seg_len
|
def _add_speaker_and_signal(header, source, get_conversation=True):
'Add speaker and start/end signal on each round.'
BEGIN_SIGNAL = '### '
END_SIGNAL = '\n'
conversation = header
for sentence in source:
from_str = sentence['from']
if (from_str.lower() == 'human'):
from_str = conversation_lib.default_conversation.roles[0]
elif (from_str.lower() == 'gpt'):
from_str = conversation_lib.default_conversation.roles[1]
else:
from_str = 'unknown'
sentence['value'] = ((((BEGIN_SIGNAL + from_str) + ': ') + sentence['value']) + END_SIGNAL)
if get_conversation:
conversation += sentence['value']
conversation += BEGIN_SIGNAL
return conversation
|
def preprocess_multimodal(sources: Sequence[str], multimodal_cfg: dict, cur_token_len: int) -> Dict:
    """Expand each '<image>' placeholder into *cur_token_len* patch tokens.

    No-op (returns *sources* unchanged) when multimodal mode is off.
    Mutates the sentence dicts in place and returns *sources*.
    """
    if not multimodal_cfg['is_multimodal']:
        return sources
    num_patches = cur_token_len
    for source in sources:
        if multimodal_cfg['sep_image_conv_front']:
            # Move the image token to the very front of the first turn.
            assert DEFAULT_IMAGE_TOKEN in source[0]['value']
            stripped = source[0]['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
            source[0]['value'] = (DEFAULT_IMAGE_TOKEN
                                  + conversation_lib.default_conversation.sep
                                  + conversation_lib.default_conversation.roles[0]
                                  + ': ' + stripped)
        # The replacement string is identical for every sentence.
        patch_tokens = DEFAULT_IMAGE_PATCH_TOKEN * num_patches
        if multimodal_cfg['use_im_start_end']:
            patch_tokens = DEFAULT_IM_START_TOKEN + patch_tokens + DEFAULT_IM_END_TOKEN
        for sentence in source:
            sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, patch_tokens)
    return sources
|
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Apply the v1 conversation template, tokenize, and mask labels so the
    loss is computed only on assistant (gpt) replies.

    Args:
        sources: List of conversations, each a list of {'from', 'value'}
            turns alternating human/gpt.
        tokenizer: Tokenizer; its pad token delimits the real length.

    Returns:
        dict: ``input_ids`` and ``labels`` (IGNORE_INDEX everywhere except
        assistant replies).
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading turn that does not start with the human role.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.TWO)
    # Separator that immediately precedes each assistant reply.
    sep = ((conv.sep + conv.roles[1]) + ': ')
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        # Split the rendered prompt into rounds on the round separator.
        rounds = conversation.split(conv.sep2)
        cur_len = 1
        # Mask the leading (BOS) position.
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            round_len = len(tokenizer(rou).input_ids)
            # NOTE(review): the -2 presumably compensates for tokens the
            # tokenizer adds when re-encoding the fragment in isolation --
            # confirm against the tokenizer in use.
            instruction_len = (len(tokenizer(parts[0]).input_ids) - 2)
            # Mask this round's instruction; leave the reply unmasked.
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        # Mask any padding tail past the accumulated length.
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            if (cur_len != total_len):
                # Length bookkeeping drifted: ignore the whole sample.
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.