code stringlengths 17 6.64M |
|---|
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, precision@k in percent, one per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (maxk, batch) top-k class indices, one column per sample.
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: use reshape(-1) instead of view(-1); the sliced tensor
            # can be non-contiguous on recent PyTorch, where view() raises
            # "view size is not compatible with input tensor's size and stride".
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_((100.0 / batch_size)))
        return res
|
def test_imagenet_zero(fc_file_pred, has_train=1):
    """Zero-shot evaluation on ImageNet features using predicted classifiers.

    Args:
        fc_file_pred: pickle file holding the predicted fc weights, one row
            per entry in the classids file.
        has_train: when truthy, keep every class with a non-negative id as a
            candidate; otherwise keep only the zero-shot (test) classes.

    Returns:
        hit_count: (len(topKs), len(top_retrv)) array of hit rates.

    Relies on module globals classids_file_retrain, word2vec_file,
    vallist_folder, feat_folder, fc_dim and the Dummy dataset; requires CUDA.
    """
    with open(classids_file_retrain) as fp:
        classids = json.load(fp)
    with open(word2vec_file, 'rb') as fp:
        word2vec_feat = pkl.load(fp)
    # Collect test feature files and their (>= 1000, i.e. unseen) labels.
    testlist = []
    testlabels = []
    with open(vallist_folder) as fp:
        for line in fp:
            (fname, lbl) = line.split()
            assert (int(lbl) >= 1000)
            feat_name = os.path.join(feat_folder, fname.replace('.JPEG', '.npy'))
            testlist.append(feat_name)
            testlabels.append(int(lbl))
    with open(fc_file_pred, 'rb') as fp:
        fc_layers_pred = pkl.load(fp)
    fc_layers_pred = np.array(fc_layers_pred)
    print('fc output', fc_layers_pred.shape)
    # Mark which of the 22k wnids are valid candidate (zero-shot) classes.
    valid_clss = np.zeros(22000)
    cnt_zero_wv = 0
    for j in range(len(classids)):
        if (classids[j][1] == 1):
            twv = word2vec_feat[j]
            # NOTE(review): this normalized vector is never used afterwards.
            twv = (twv / (np.linalg.norm(twv) + 1e-06))
            valid_clss[classids[j][0]] = 1
            # NOTE(review): cnt_zero_wv is reset but never incremented, so the
            # "skip candidate class" report below always prints 0.
            cnt_zero_wv = 0
    # Build the candidate classifier matrix fc_now and its label list.
    (labels_train, word2vec_train) = ([], [])
    fc_now = []
    w2v_1k = [None for _ in range(1000)]
    for j in range(len(classids)):
        tfc = fc_layers_pred[j]
        if (classids[j][1] == 0):
            assert (classids[j][0] < 1000)
            w2v_1k[classids[j][0]] = word2vec_feat[j]
        if has_train:
            if (classids[j][0] < 0):
                continue
        elif (classids[j][1] == 0):
            continue
        if (classids[j][0] >= 0):
            twv = word2vec_feat[j]
            labels_train.append(classids[j][0])
            word2vec_train.append(twv)
            # Keep only the trailing fc_dim entries of the predicted vector.
            feat_len = len(tfc)
            tfc = tfc[(feat_len - fc_dim):feat_len]
            fc_now.append(tfc)
    fc_now = torch.from_numpy(np.array(fc_now)).float().cuda()
    w2v_1k = torch.from_numpy(np.array(w2v_1k)).float().cuda()
    print(('skip candidate class due to no word embedding: %d / %d:' % (cnt_zero_wv, (len(labels_train) + cnt_zero_wv))))
    print('candidate class shape: ', fc_now.shape)
    fc_now = fc_now.t()
    labels_train = np.array(labels_train)
    print('train + test class: ', len(labels_train))
    topKs = [1]
    top_retrv = [1, 2, 5, 10, 20]
    hit_count = np.zeros((len(topKs), len(top_retrv)), dtype=np.float32)
    cnt_valid = 0
    t = time.time()
    dataset = Dummy(testlist, testlabels, valid_clss, labels_train)
    # collate_fn drops samples the dataset failed to load (returned None).
    loader = torch.utils.data.DataLoader(dataset, 1000, shuffle=False, num_workers=4, collate_fn=(lambda x: torch.utils.data.dataloader.default_collate([_ for _ in x if (_ is not None)])))
    for (i, (matfeat, label)) in enumerate(loader):
        (matfeat, label) = (matfeat.cuda(), label.cuda())
        cnt_valid += matfeat.size(0)
        scores = torch.matmul(matfeat, fc_now).squeeze()
        # accuracy() returns percentages; convert back to hit counts.
        tmp = accuracy(scores, label, top_retrv)
        for k in range(len(topKs)):
            for k2 in range(len(top_retrv)):
                hit_count[k][k2] = (hit_count[k][k2] + float(((tmp[k2] / 100) * matfeat.size(0))))
        if ((cnt_valid % 1) == 0):
            inter = (time.time() - t)
            print(('processing %d / %d ' % (cnt_valid, len(dataset))), ', Estimated time: ', ((inter / (i + 1)) * ((len(loader) - i) - 1)))
    print((hit_count / cnt_valid))
    hit_count = (hit_count / cnt_valid)
    fout = open((fc_file_pred + '_result_pred_zero.txt'), 'w')
    for j in range(len(topKs)):
        outstr = ''
        for k in range(len(top_retrv)):
            outstr = ((outstr + ' ') + str(hit_count[j][k]))
        print(outstr)
        # BUG FIX: interpolate with %; the original passed cnt_valid as a
        # second positional argument to print(), emitting "total: %d <n>".
        print('total: %d' % cnt_valid)
        fout.write((outstr + '\n'))
    fout.close()
    return hit_count
|
def download(vid_file):
    # Download and unpack one ImageNet synset tarball per wnid listed in
    # vid_file. A "<wnid>.lock" directory acts as a per-synset mutex so that
    # concurrent workers skip entries another process is already fetching.
    # Relies on module globals: args (user/key credentials), scratch_dir.
    # NOTE(review): urllib.URLopener exists only on Python 2; under Python 3
    # this line raises AttributeError -- confirm the intended interpreter.
    with open(vid_file) as fp:
        vid_list = [line.strip() for line in fp]
    url_list = 'http://www.image-net.org/download/synset?wnid='
    url_key = ('&username=%s&accesskey=%s&release=latest&src=stanford' % (args.user, args.key))
    testfile = urllib.URLopener()
    for i in range(len(vid_list)):
        wnid = vid_list[i]
        url_acc = ((url_list + wnid) + url_key)
        save_dir = os.path.join(scratch_dir, wnid)
        lockname = (save_dir + '.lock')
        # Skip synsets already downloaded or currently locked by another worker.
        if os.path.exists(save_dir):
            continue
        if os.path.exists(lockname):
            continue
        try:
            os.makedirs(lockname)
        except:
            # Another process created the lock between the check and here.
            continue
        tar_file = os.path.join(scratch_dir, (wnid + '.tar'))
        try:
            testfile.retrieve(url_acc, tar_file)
            print(('Downloading %s' % wnid))
        except:
            # Best-effort: report and move on to the next synset.
            print('!!! Error when downloading', wnid)
            continue
        if (not os.path.exists(os.path.join(scratch_dir, wnid))):
            os.makedirs(os.path.join(scratch_dir, wnid))
        # Unpack into the per-synset directory, then remove tarball and lock.
        cmd = ((('tar -xf ' + tar_file) + ' --directory ') + save_dir)
        os.system(cmd)
        cmd = ('rm ' + os.path.join(tar_file))
        os.system(cmd)
        cmd = ('rm -r %s' % lockname)
        os.system(cmd)
        if ((i % 10) == 0):
            print(('%d / %d' % (i, len(vid_list))))
|
def make_image_list(list_file, image_dir, name, offset=1000):
    """Write "<wnid>/<file> <label>" lines for every JPEG under each wnid.

    Labels are the wnid's position in list_file plus `offset`. Output goes to
    <data_dir>/list/img-<name>.txt (data_dir is a module-level global).
    """
    with open(list_file) as fp:
        wnids = [ln.strip() for ln in fp]
    save_file = os.path.join(data_dir, 'list', ('img-%s.txt' % name))
    with open(save_file, 'w') as out:
        for pos, wnid in enumerate(wnids):
            jpegs = glob.glob(os.path.join(image_dir, wnid, '*.JPEG'))
            label = pos + offset
            for jpeg_path in jpegs:
                rel_index = os.path.join(wnid, os.path.basename(jpeg_path))
                out.write('%s %d\n' % (rel_index, label))
            if not jpegs:
                # Warn so missing downloads are easy to spot in the log.
                print(('Warning: does not have class %s. Do you forgot to download the picture??' % wnid))
|
def rm_empty(vid_file):
    """Delete synset directories under scratch_dir that hold fewer than 10 JPEGs.

    Prints each removed wnid and, finally, the number of removals.
    """
    with open(vid_file) as fp:
        wnids = [ln.strip() for ln in fp]
    removed = 0
    for wnid in wnids:
        class_dir = os.path.join(scratch_dir, wnid)
        if len(glob.glob(class_dir + '/*.JPEG')) < 10:
            print(wnid)
            os.system('rm -r %s ' % class_dir)
            removed += 1
    print(removed)
|
def down_sample(list_file, image_dir, size=256):
    """Shrink every listed image in place so its shorter side is at most `size`.

    Each line of list_file starts with a relative image path; images are
    resized via downsample_image(), written to a temporary "...copy.JPEG"
    file, then moved over the original.
    """
    with open(list_file) as fp:
        rel_paths = [ln.split()[0] for ln in fp]
    for pos, rel_path in enumerate(rel_paths):
        img_file = os.path.join(image_dir, rel_path)
        if not os.path.exists(img_file):
            print('not exist:', img_file)
            continue
        img = downsample_image(img_file, size)
        if img is None:
            # Unreadable image; skip it.
            continue
        stem = os.path.basename(img_file).split('.')[0] + 'copy'
        save_file = os.path.join(os.path.dirname(img_file), stem) + '.JPEG'
        cv2.imwrite(save_file, img)
        os.system('mv %s %s' % (save_file, img_file))
        if pos % 1000 == 0:
            print(pos, len(rel_paths), rel_path)
|
def downsample_image(img_file, target_size):
    """Load an image and scale it so the shorter side equals `target_size`.

    Never upscales (scale is capped at 1). Returns None when the file cannot
    be decoded.
    """
    img = cv2.imread(img_file)
    if img is None:
        return None
    shorter_side = np.min(img.shape[0:2])
    scale = min(1, float(target_size) / float(shorter_side))
    return cv2.resize(img, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
|
def parse_arg():
    """Parse command-line options for the ImageNet downloader.

    Warns (but does not abort) when --save_dir is left unset.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--hop', type=str, default='2', help='choice of test difficulties: 2,3,all')
    parser.add_argument('--save_dir', type=str, default=None, help='path to save images')
    parser.add_argument('--user', type=str, help='your username', required=True)
    parser.add_argument('--key', type=str, help='your access key', required=True)
    parsed = parser.parse_args()
    if parsed.save_dir is None:
        # Warn only; the caller decides how to handle a missing directory.
        print('Please set directory to save images')
    return parsed
|
def my_make_dataset(dir, class_to_idx, extensions):
    """Build the (path, class_index) sample list for an image folder tree.

    The result is cached to a "cached_<dir>.pth" file in the working
    directory; subsequent calls load the cache instead of re-walking.
    """
    cached_fn = 'cached_' + dir.replace('/', '_').strip('_') + '.pth'
    if os.path.isfile(cached_fn):
        print('Load from cached file list: ' + cached_fn)
        return torch.load(cached_fn)
    samples = []
    root_dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(root_dir)):
        class_dir = os.path.join(root_dir, target)
        if not os.path.isdir(class_dir):
            continue
        for walk_root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if has_file_allowed_extension(fname, extensions):
                    samples.append((os.path.join(walk_root, fname), class_to_idx[target]))
    torch.save(samples, cached_fn)
    return samples
|
class MyDatasetFolder(DatasetFolder):
    """DatasetFolder variant whose sample list comes from the cached
    my_make_dataset() walk instead of torchvision's default scan."""

    def __init__(self, root, loader, extensions, transform=None, target_transform=None):
        classes, class_to_idx = find_classes(root)
        samples = my_make_dataset(root, class_to_idx, extensions)
        if not samples:
            raise RuntimeError('Found 0 files in subfolders of: ' + root + '\nSupported extensions are: ' + ','.join(extensions))
        self.root = root
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.transform = transform
        self.target_transform = target_transform
|
class MyImageFolder(MyDatasetFolder):
    """ImageFolder that also returns the file path and tolerates unreadable
    images by yielding None (paired with a collate_fn that drops Nones)."""

    def __init__(self, root, transform=None, target_transform=None, loader=default_loader):
        super(MyImageFolder, self).__init__(root, loader, IMG_EXTENSIONS, transform=transform, target_transform=target_transform)
        self.imgs = self.samples

    def __getitem__(self, index):
        """Return (image, target, path) for `index`, or None if loading fails.

        Args:
            index (int): Index.
        """
        path, target = self.imgs[index]
        try:
            img = self.loader(path)
        except:
            # Deliberately swallow decode errors; the collate_fn drops Nones.
            return None
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img, target, path)
|
def main():
    """Entry point: build the model, optionally resume, then train/validate.

    Relies on module globals `parser`, `best_prec1`, and helpers defined
    elsewhere in the file (train, validate, adjust_learning_rate,
    save_checkpoint, MyImageFolder).
    """
    global args, best_prec1
    args = parser.parse_args()
    args.distributed = (args.world_size > 1)
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)
    # Build the backbone, either pretrained from the torchvision zoo or fresh.
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    if args.pretrained_weight:
        print("=> using pre-trained weight '{}'".format(args.pretrained_weight))
        model.load_state_dict(torch.load(args.pretrained_weight))
    if args.arch.startswith('resnet'):
        # Rearrange resnet strides and replace fc so the forward pass returns
        # (logits, pre-fc features) -- validate() saves those features.
        print('=> modify resnet')
        model.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        for i in range(2, 5):
            getattr(model, ('layer%d' % i))[0].conv1.stride = (2, 2)
            getattr(model, ('layer%d' % i))[0].conv2.stride = (1, 1)
        model.fc_net = model.fc
        del model.fc
        import types
        model.fc = types.MethodType((lambda self, x: (self.fc_net(x), x)), model)
    if (not args.distributed):
        if (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        # Optionally resume full training state from a checkpoint file.
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.data, '')
    valdir = os.path.join(args.data, '')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if args.distributed:
        # NOTE(review): `train_dataset` is never defined in this file as shown;
        # this branch raises NameError. The training-set construction appears
        # to have been removed -- verify against the original repository.
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    def my_collate(batch):
        # Drop samples the loader failed to read (MyImageFolder returns None).
        batch = [_ for _ in batch if (_ is not None)]
        return torch.utils.data.dataloader.default_collate(batch)
    val_loader = torch.utils.data.DataLoader(MyImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, collate_fn=my_collate)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)
        # NOTE(review): `train_loader` is also undefined here as shown --
        # likely the train DataLoader construction is missing; confirm.
        train(train_loader, model, criterion, optimizer, epoch)
        prec1 = validate(val_loader, model, criterion)
        is_best = (prec1 > best_prec1)
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict()}, is_best)
|
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch, logging timing/loss/top-1/top-5 every
    args.print_freq batches."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    tick = time.time()
    for step, (images, labels, _paths) in enumerate(train_loader):
        data_time.update(time.time() - tick)
        labels = labels.cuda(non_blocking=True)
        # Forward / loss / metrics.
        logits = model(images)
        loss = criterion(logits, labels)
        prec1, prec5 = accuracy(logits, labels, topk=(1, 5))
        n = images.size(0)
        losses.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        # Backward + SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - tick)
        tick = time.time()
        if step % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, step, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
|
def validate(val_loader, model, criterion):
    """Run evaluation: dump per-image features under args.out_feat_dir and,
    when the dataset's class count matches the model's output width, also
    report loss/top-1/top-5.

    Returns:
        top1.avg (0.0 when the metrics were never updated).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    with torch.no_grad():
        end = time.time()
        for (i, (input, target, paths)) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            # Model returns (logits, features); see the resnet fc patch in main().
            (output, feats) = model(input)
            for j in range(output.size(0)):
                # Save each sample's features as <out_feat_dir>/<class>/<name>.npy.
                folder_name = os.path.join(args.out_feat_dir, paths[j].split('/')[(- 2)])
                file_name = (os.path.splitext(paths[j].split('/')[(- 1)])[0] + '.npy')
                if (not os.path.isdir(folder_name)):
                    os.makedirs(folder_name)
                np.save(os.path.join(folder_name, file_name), feats[j].cpu().numpy())
            # Accuracy is only meaningful when the logits cover this dataset's classes.
            if (len(val_loader.dataset.classes) == output.shape[1]):
                loss = criterion(output, target)
                (prec1, prec5) = accuracy(output, target, topk=(1, 5))
                losses.update(loss.item(), input.size(0))
                top1.update(prec1.item(), input.size(0))
                top5.update(prec5.item(), input.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % args.print_freq) == 0):
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist a training checkpoint; mirror it to model_best.pth.tar when it
    is the best seen so far."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
|
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate to the initial args.lr decayed by 10x every 30 epochs."""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
|
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, precision@k in percent, one per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (maxk, batch) top-k class indices, one column per sample.
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: use reshape(-1) instead of view(-1); the sliced tensor
            # can be non-contiguous on recent PyTorch, where view() raises.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_((100.0 / batch_size)))
        return res
|
def embed_text_file(text_file, word_vectors, get_vector, save_file):
    """Embed every class name in a JSON list and pickle the resulting matrix.

    Each entry may contain comma-separated synonyms, whose embeddings are
    averaged. A row is appended for every class -- all-zero when no synonym
    resolves -- so row indices stay aligned with the input list.

    Relies on module global `feat_len` and the sibling helper `get_embedding`.
    """
    with open(text_file) as fp:
        text_list = json.load(fp)
    all_feats = []
    has = 0
    cnt_missed = 0
    missed_list = []
    for i in range(len(text_list)):
        class_name = text_list[i].lower()
        if ((i % 500) == 0):
            print(('%d / %d : %s' % (i, len(text_list), class_name)))
        feat = np.zeros(feat_len)
        # Average the embeddings of the comma-separated synonyms that resolve
        # to a non-zero vector.
        options = class_name.split(',')
        cnt_word = 0
        for j in range(len(options)):
            now_feat = get_embedding(options[j].strip(), word_vectors, get_vector)
            if (np.abs(now_feat.sum()) > 0):
                cnt_word += 1
                feat += now_feat
        if (cnt_word > 0):
            feat = (feat / cnt_word)
        if (np.abs(feat.sum()) == 0):
            # No synonym produced a usable vector: record the miss but still
            # append the zero row to keep class indices aligned.
            print(('cannot find word ' + class_name))
            cnt_missed = (cnt_missed + 1)
            missed_list.append(class_name)
        else:
            has += 1
        all_feats.append(feat)
    all_feats = np.array(all_feats)
    for each in missed_list:
        print(each)
    print('does not have semantic embedding: ', cnt_missed, 'has: ', has)
    if (not os.path.exists(os.path.dirname(save_file))):
        os.makedirs(os.path.dirname(save_file))
        print(('## Make Directory: %s' % save_file))
    with open(save_file, 'wb') as fp:
        pkl.dump(all_feats, fp)
    print(('save to : %s' % save_file))
|
def get_embedding(entity_str, word_vectors, get_vector):
    """Look up a phrase embedding, falling back to averaging per-token vectors.

    Args:
        entity_str: phrase to embed, possibly containing spaces/hyphens/underscores.
        word_vectors: embedding store passed through to `get_vector`.
        get_vector: callable (word_vectors, word) -> vector; raises on OOV words.

    Returns:
        The phrase embedding, or the mean of its tokens' embeddings when the
        full phrase is out-of-vocabulary (all-zeros if no token is known).
    """
    try:
        return get_vector(word_vectors, entity_str)
    except Exception:
        pass
    # Fallback: split on spaces/hyphens/underscores and average token vectors.
    # BUG FIX: filter() returns an iterator on Python 3; the original called
    # len() and indexed it, raising TypeError. Materialize it as a list.
    tokens = list(filter(None, re.split('[ \\-_]+', entity_str)))
    feat = np.zeros(feat_len)  # feat_len is a module-level global
    cnt_word = 0
    for token in tokens:
        try:
            feat = feat + get_vector(word_vectors, token)
            cnt_word = cnt_word + 1
        except Exception:
            # Unknown token; simply skip it.
            continue
    if cnt_word > 0:
        feat = feat / cnt_word
    return feat
|
def get_glove_dict(txt_dir):
    """Load glove.6B.300d embeddings from `txt_dir` into a word -> ndarray dict.

    Asserts each line has exactly feat_len (module global) float components.
    """
    print('load glove word embedding')
    txt_file = os.path.join(txt_dir, 'glove.6B.300d.txt')
    word_dict = {}
    with open(txt_file) as fp:
        for line in fp:
            tokens = line.split()
            assert len(tokens) - 1 == feat_len
            vec = np.zeros(feat_len)
            for idx in range(feat_len):
                vec[idx] = float(tokens[idx + 1])
            word_dict[tokens[0]] = vec
    print('loaded to dict!')
    return word_dict
|
def glove_google(word_vectors, word):
    """Plain dictionary-style lookup used for GloVe / Google word2vec stores."""
    vec = word_vectors[word]
    return vec
|
def fasttext(word_vectors, word):
    """Lookup for a fastText model, which exposes get_word_vector()."""
    return word_vectors.get_word_vector(word)
|
def get_vector(word_vectors, word):
    """Return the numpy embedding for `word` from a torchtext-style store.

    Raises NotImplementedError when `word` has no entry in `stoi`.
    """
    if word not in word_vectors.stoi:
        raise NotImplementedError
    return word_vectors[word].numpy()
|
def parse_arg():
    """Parse word-embedding options (--wv embedding type, --path to model)."""
    parser = argparse.ArgumentParser(description='word embeddign type')
    parser.add_argument('--wv', type=str, default='glove', help='word embedding type: [glove, google, fasttext]')
    parser.add_argument('--path', type=str, default='', help='path to pretrained word embedding model')
    return parser.parse_args()
|
def test(image_file, fc, feat_dir):
    """Evaluate hit@{1,5} of classifier matrix `fc` over saved .npz features.

    Args:
        image_file: text file of "<index> <label>" lines.
        fc: (feat_dim, num_classes) classifier weights for np.matmul.
        feat_dir: directory holding one "<index>.npz" (key 'feat') per sample.
    """
    index_list, label_list = [], []
    with open(image_file) as fp:
        for line in fp:
            index, l = line.split()
            index_list.append(index.split('.')[0])
            label_list.append(int(l))
    top_retrv = [1, 5]
    hit_count = np.zeros(len(top_retrv))
    cnt_valid = len(index_list)
    for i, index in enumerate(index_list):
        feat_path = os.path.join(feat_dir, index + '.npz')
        feat = np.load(feat_path)['feat']
        scores = np.matmul(feat, fc)
        if i % 10000 == 0:
            print(i, len(index_list))
        # Classes sorted by descending score; count a hit if the true label
        # appears within the first k entries.
        ids = np.argsort(-scores)
        for k2 in range(len(top_retrv)):
            current_len = top_retrv[k2]
            for sort_id in range(current_len):
                lbl = ids[sort_id]
                if lbl == label_list[i]:
                    hit_count[k2] = hit_count[k2] + 1
                    break
    hit_count = hit_count / cnt_valid
    outstr = ''
    for k in range(len(top_retrv)):
        outstr = outstr + ' ' + str(hit_count[k])
    print(outstr)
    # BUG FIX: interpolate with %; the original passed cnt_valid as a second
    # positional argument to print(), emitting "total: %d <n>".
    print('total: %d' % cnt_valid)
|
def get_imdb(name):
    """Get an imdb (image database) by name, instantiating it from the
    module-level __sets registry."""
    try:
        factory = __sets[name]
    except KeyError:
        raise KeyError('Unknown dataset: {}'.format(name))
    return factory()
|
def list_imdbs():
    """List the names of all registered imdbs."""
    return [key for key in __sets]
|
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """Generate all shifted anchors for a (height, width) feature map.

    Wraps `generate_anchors` (defined elsewhere), tiling its A base anchors
    over every feature-map cell at `feat_stride`-pixel steps in image space.

    Returns:
        (anchors, length): float32 array of shape (K * A, 4) in image
        coordinates, and the int32 total anchor count K * A.
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    # Pixel offsets of every feature-map cell; duplicated (x, y, x, y) so they
    # shift both corners of each box.
    shift_x = (np.arange(0, width) * feat_stride)
    shift_y = (np.arange(0, height) * feat_stride)
    (shift_x, shift_y) = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # Broadcast-add the K shifts to the A base anchors -> (K, A, 4).
    anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
    anchors = anchors.reshape(((K * A), 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])
    return (anchors, length)
|
def get_output_dir(imdb, weights_filename):
    """Return the directory where experimental artifacts are placed,
    creating it if needed.

    The canonical path is <ROOT_DIR>/output/<EXP_DIR>/<imdb.name>/<weights>,
    with 'default' substituted when weights_filename is None.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
    leaf = weights_filename if weights_filename is not None else 'default'
    outdir = osp.join(outdir, leaf)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return outdir
|
def get_output_tb_dir(imdb, weights_filename):
    """Return the directory where tensorflow summaries are placed,
    creating it if needed.

    The canonical path is <ROOT_DIR>/tensorboard/<EXP_DIR>/<imdb.name>/<weights>,
    with 'default' substituted when weights_filename is None.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
    leaf = weights_filename if weights_filename is not None else 'default'
    outdir = osp.join(outdir, leaf)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return outdir
|
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Mutates `b` in place. Keys present in `a` but absent from `b` are
    rejected; list values are coerced to b's ndarray dtype when possible, and
    nested edicts are merged recursively.
    """
    if (type(a) is not edict):
        # Nothing mergeable (e.g. an empty/absent section) -- leave b as-is.
        return
    for (k, v) in a.items():
        if (k not in b):
            raise KeyError('{} is not a valid config key'.format(k))
        old_type = type(b[k])
        if (old_type is not type(v)):
            # Allow ndarray defaults to absorb sequence values of the same dtype.
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
        if (type(v) is edict):
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                # Log the offending key for easier debugging, then re-raise.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
|
def cfg_from_file(filename):
    """Load a config file and merge it into the default options (__C)."""
    import yaml
    with open(filename, 'r') as f:
        # BUG FIX: yaml.load() without an explicit Loader is unsafe on
        # untrusted files and raises TypeError on PyYAML >= 6; safe_load
        # handles plain config files identically.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
|
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    `cfg_list` alternates dotted key paths and literal values; each value is
    literal_eval'd when possible and must match the existing value's type.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for key_path, raw_value in zip(cfg_list[0::2], cfg_list[1::2]):
        parts = key_path.split('.')
        node = __C
        # Walk down to the parent of the leaf key.
        for part in parts[:-1]:
            assert part in node
            node = node[part]
        leaf = parts[-1]
        assert leaf in node
        try:
            value = literal_eval(raw_value)
        except:
            # Not a Python literal; keep the raw string.
            value = raw_value
        assert type(value) == type(node[leaf]), 'type {} does not match original type {}'.format(type(value), type(node[leaf]))
        node[leaf] = value
|
def scale_lr(optimizer, scale):
    """Multiply every param group's learning rate by `scale`."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * scale
|
class SolverWrapper(object):
    """A wrapper class for the training process: handles snapshotting and
    restoring of model + data-layer state, optimizer construction, LR
    scheduling, and the main training loop.
    """

    def __init__(self, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.valroidb = valroidb
        self.output_dir = output_dir
        self.tbdir = tbdir
        # Validation summaries go to a sibling "<tbdir>_val" directory.
        self.tbvaldir = (tbdir + '_val')
        if (not os.path.exists(self.tbvaldir)):
            os.makedirs(self.tbvaldir)
        self.pretrained_model = pretrained_model

    def snapshot(self, iter):
        """Save model weights (.pth) and RNG/data-layer state (.pkl) at `iter`.

        Returns (weights_path, state_path).
        """
        net = self.net
        if (not os.path.exists(self.output_dir)):
            os.makedirs(self.output_dir)
        filename = ((cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter)) + '.pth')
        filename = os.path.join(self.output_dir, filename)
        torch.save(self.net.state_dict(), filename)
        print('Wrote snapshot to: {:s}'.format(filename))
        nfilename = ((cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter)) + '.pkl')
        nfilename = os.path.join(self.output_dir, nfilename)
        # Persist the numpy RNG state plus both data layers' cursors and
        # permutations so a resumed run replays an identical data stream.
        st0 = np.random.get_state()
        cur = self.data_layer._cur
        perm = self.data_layer._perm
        cur_val = self.data_layer_val._cur
        perm_val = self.data_layer_val._perm
        with open(nfilename, 'wb') as fid:
            pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
        return (filename, nfilename)

    def from_snapshot(self, sfile, nfile):
        """Restore weights from `sfile` and RNG/data-layer state from `nfile`.

        Returns the iteration at which the snapshot was taken.
        """
        print('Restoring model snapshots from {:s}'.format(sfile))
        self.net.load_state_dict(torch.load(str(sfile)))
        print('Restored.')
        with open(nfile, 'rb') as fid:
            # Loads must mirror the dump order used in snapshot().
            st0 = pickle.load(fid)
            cur = pickle.load(fid)
            perm = pickle.load(fid)
            cur_val = pickle.load(fid)
            perm_val = pickle.load(fid)
            last_snapshot_iter = pickle.load(fid)
            np.random.set_state(st0)
            self.data_layer._cur = cur
            self.data_layer._perm = perm
            self.data_layer_val._cur = cur_val
            self.data_layer_val._perm = perm_val
        return last_snapshot_iter

    def construct_graph(self):
        """Build the network, the per-parameter SGD optimizer, and the
        train/val tensorboard writers. Returns (lr, optimizer)."""
        torch.manual_seed(cfg.RNG_SEED)
        self.net.create_architecture(self.imdb.num_classes, tag='default', anchor_scales=cfg.ANCHOR_SCALES, anchor_ratios=cfg.ANCHOR_RATIOS)
        lr = cfg.TRAIN.LEARNING_RATE
        params = []
        for (key, value) in dict(self.net.named_parameters()).items():
            if value.requires_grad:
                if ('bias' in key):
                    # Biases optionally get doubled LR and no weight decay,
                    # per the classic Caffe Faster R-CNN convention.
                    params += [{'params': [value], 'lr': (lr * (cfg.TRAIN.DOUBLE_BIAS + 1)), 'weight_decay': ((cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY) or 0)}]
                else:
                    params += [{'params': [value], 'lr': lr, 'weight_decay': getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)}]
        self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
        self.writer = tb.writer.FileWriter(self.tbdir)
        self.valwriter = tb.writer.FileWriter(self.tbvaldir)
        return (lr, self.optimizer)

    def find_previous(self):
        """Locate existing snapshot (.pth) and state (.pkl) files, oldest
        first, excluding the ones written right after an LR step.

        Returns (count, state_files, snapshot_files).
        """
        sfiles = os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth'))
        sfiles = glob.glob(sfiles)
        sfiles.sort(key=os.path.getmtime)
        # Snapshots taken at stepsize+1 are the pre-decay ones; skip them.
        redfiles = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            redfiles.append(os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.pth'.format((stepsize + 1)))))
        sfiles = [ss for ss in sfiles if (ss not in redfiles)]
        nfiles = os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl'))
        nfiles = glob.glob(nfiles)
        nfiles.sort(key=os.path.getmtime)
        redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
        nfiles = [nn for nn in nfiles if (nn not in redfiles)]
        lsf = len(sfiles)
        assert (len(nfiles) == lsf)
        return (lsf, nfiles, sfiles)

    def initialize(self):
        """Fresh start: load pretrained backbone weights and default schedule."""
        np_paths = []
        ss_paths = []
        print('Loading initial model weights from {:s}'.format(self.pretrained_model))
        self.net.load_pretrained_cnn(torch.load(self.pretrained_model))
        print('Loaded.')
        last_snapshot_iter = 0
        lr = cfg.TRAIN.LEARNING_RATE
        stepsizes = list(cfg.TRAIN.STEPSIZE)
        return (lr, last_snapshot_iter, stepsizes, np_paths, ss_paths)

    def restore(self, sfile, nfile):
        """Resume from a snapshot, replaying GAMMA decay for every stepsize
        the snapshot iteration has already passed."""
        np_paths = [nfile]
        ss_paths = [sfile]
        last_snapshot_iter = self.from_snapshot(sfile, nfile)
        lr_scale = 1
        stepsizes = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            if (last_snapshot_iter > stepsize):
                lr_scale *= cfg.TRAIN.GAMMA
            else:
                stepsizes.append(stepsize)
        scale_lr(self.optimizer, lr_scale)
        lr = (cfg.TRAIN.LEARNING_RATE * lr_scale)
        return (lr, last_snapshot_iter, stepsizes, np_paths, ss_paths)

    def remove_snapshot(self, np_paths, ss_paths):
        """Delete the oldest snapshots beyond cfg.TRAIN.SNAPSHOT_KEPT,
        mutating both path lists in place."""
        to_remove = (len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT)
        for c in range(to_remove):
            nfile = np_paths[0]
            os.remove(str(nfile))
            np_paths.remove(nfile)
        to_remove = (len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT)
        for c in range(to_remove):
            sfile = ss_paths[0]
            os.remove(str(sfile))
            ss_paths.remove(sfile)

    def train_model(self, max_iters):
        """Main training loop: resume if snapshots exist, decay LR at the
        configured stepsizes, write periodic summaries, and snapshot every
        cfg.TRAIN.SNAPSHOT_ITERS iterations."""
        self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
        self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
        (lr, train_op) = self.construct_graph()
        (lsf, nfiles, sfiles) = self.find_previous()
        if (lsf == 0):
            (lr, last_snapshot_iter, stepsizes, np_paths, ss_paths) = self.initialize()
        else:
            (lr, last_snapshot_iter, stepsizes, np_paths, ss_paths) = self.restore(str(sfiles[(- 1)]), str(nfiles[(- 1)]))
        iter = (last_snapshot_iter + 1)
        last_summary_time = time.time()
        # Consume stepsizes smallest-first by popping from the reversed list;
        # max_iters acts as a sentinel so pop() never empties the list early.
        stepsizes.append(max_iters)
        stepsizes.reverse()
        next_stepsize = stepsizes.pop()
        self.net.train()
        self.net.to(self.net._device)
        while (iter < (max_iters + 1)):
            if (iter == (next_stepsize + 1)):
                # Snapshot just before decaying the learning rate.
                self.snapshot(iter)
                lr *= cfg.TRAIN.GAMMA
                scale_lr(self.optimizer, cfg.TRAIN.GAMMA)
                next_stepsize = stepsizes.pop()
            utils.timer.timer.tic()
            blobs = self.data_layer.forward()
            now = time.time()
            if ((iter == 1) or ((now - last_summary_time) > cfg.TRAIN.SUMMARY_INTERVAL)):
                # Periodic step that also emits train + val summaries.
                (rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary) = self.net.train_step_with_summary(blobs, self.optimizer)
                for _sum in summary:
                    self.writer.add_summary(_sum, float(iter))
                blobs_val = self.data_layer_val.forward()
                summary_val = self.net.get_summary(blobs_val)
                for _sum in summary_val:
                    self.valwriter.add_summary(_sum, float(iter))
                last_summary_time = now
            else:
                (rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss) = self.net.train_step(blobs, self.optimizer)
            utils.timer.timer.toc()
            if ((iter % cfg.TRAIN.DISPLAY) == 0):
                print(('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n >>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % (iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr)))
                print('speed: {:.3f}s / iter'.format(utils.timer.timer.average_time()))
            if ((iter % cfg.TRAIN.SNAPSHOT_ITERS) == 0):
                last_snapshot_iter = iter
                (ss_path, np_path) = self.snapshot(iter)
                np_paths.append(np_path)
                ss_paths.append(ss_path)
                if (len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT):
                    self.remove_snapshot(np_paths, ss_paths)
            iter += 1
        if (last_snapshot_iter != (iter - 1)):
            # Final snapshot when the loop didn't end exactly on a snapshot iter.
            self.snapshot((iter - 1))
        self.writer.close()
        self.valwriter.close()
|
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training.

    Optionally augments with horizontally-flipped examples, then enriches the
    entries via rdl_roidb.prepare_roidb().
    """
    if cfg.TRAIN.USE_FLIPPED:
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')
    print('Preparing training data...')
    rdl_roidb.prepare_roidb(imdb)
    print('done')
    return imdb.roidb
|
def filter_roidb(roidb):
    """Remove roidb entries that have no usable RoIs."""

    def _has_usable_rois(entry):
        # Usable when at least one foreground RoI (overlap >= FG_THRESH) or
        # one background RoI (overlap in [BG_THRESH_LO, BG_THRESH_HI)) exists.
        overlaps = entry['max_overlaps']
        has_fg = len(np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]) > 0
        has_bg = len(np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]) > 0
        return has_fg or has_bg

    num = len(roidb)
    kept = [entry for entry in roidb if _has_usable_rois(entry)]
    print('Filtered {} roidb entries: {} -> {}'.format(num - len(kept), num, len(kept)))
    return kept
|
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir, pretrained_model=None, max_iters=40000):
    """Train a Faster R-CNN network."""
    # Drop entries without usable RoIs before handing data to the solver.
    roidb = filter_roidb(roidb)
    valroidb = filter_roidb(valroidb)
    solver = SolverWrapper(network, imdb, roidb, valroidb, output_dir, tb_dir, pretrained_model=pretrained_model)
    print('Solving...')
    solver.train_model(max_iters)
    print('done solving')
|
def mobilenet_v1_base(final_endpoint='Conv2d_13_pointwise', min_depth=8, depth_multiplier=1.0, conv_defs=None, output_stride=None):
    """Build the MobileNet-v1 feature extractor up to `final_endpoint`.

    Args:
        final_endpoint: name of the last layer to include; one of
            'Conv2d_0' or 'Conv2d_{1..13}_pointwise'.
        min_depth: lower bound on every layer's channel count; only an
            active constraint when depth_multiplier < 1.
        depth_multiplier: multiplier (> 0) applied to every layer's
            channel count.
        conv_defs: optional list of Conv / DepthSepConv namedtuples
            describing the architecture; defaults to _CONV_DEFS.
        output_stride: if set (8, 16 or 32), once the cumulative stride
            reaches this value the remaining layers use stride 1 with
            atrous (dilated) convolutions so resolution is preserved.

    Returns:
        nn.Sequential containing the layers up to `final_endpoint`.

    Raises:
        ValueError: unknown final_endpoint, depth_multiplier <= 0, or a
            disallowed output_stride.
    """
    # Scale channel counts, but never below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    end_points = OrderedDict()
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    if (conv_defs is None):
        conv_defs = _CONV_DEFS
    if ((output_stride is not None) and (output_stride not in [8, 16, 32])):
        raise ValueError('Only allowed output_stride values are 8, 16, 32.')
    # Standard conv + BN + ReLU6.
    def conv_bn(in_channels, out_channels, kernel_size=3, stride=1):
        return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride, ((kernel_size - 1) // 2), bias=False), nn.BatchNorm2d(out_channels), nn.ReLU6(inplace=True))
    # Depthwise conv (groups == channels) + BN + ReLU6.
    # NOTE(review): padding ignores `dilation`, so for dilation > 1 the
    # spatial size shrinks slightly -- confirm intended when output_stride
    # is used.
    def conv_dw(in_channels, kernel_size=3, stride=1, dilation=1):
        return nn.Sequential(nn.Conv2d(in_channels, in_channels, kernel_size, stride, ((kernel_size - 1) // 2), groups=in_channels, dilation=dilation, bias=False), nn.BatchNorm2d(in_channels), nn.ReLU6(inplace=True))
    # Pointwise (1x1) conv + BN + ReLU6.
    def conv_pw(in_channels, out_channels, kernel_size=3, stride=1, dilation=1):
        return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride, 0, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU6(inplace=True))
    current_stride = 1  # cumulative output stride so far
    rate = 1            # current atrous rate
    in_channels = 3
    for (i, conv_def) in enumerate(conv_defs):
        end_point_base = ('Conv2d_%d' % i)
        if ((output_stride is not None) and (current_stride == output_stride)):
            # Target stride reached: stop striding, dilate instead.
            layer_stride = 1
            layer_rate = rate
            rate *= conv_def.stride
        else:
            layer_stride = conv_def.stride
            layer_rate = 1
            current_stride *= conv_def.stride
        out_channels = depth(conv_def.depth)
        if isinstance(conv_def, Conv):
            end_point = end_point_base
            end_points[end_point] = conv_bn(in_channels, out_channels, conv_def.kernel, stride=conv_def.stride)
            if (end_point == final_endpoint):
                return nn.Sequential(end_points)
        elif isinstance(conv_def, DepthSepConv):
            # Depthwise (possibly strided/dilated) followed by 1x1 pointwise.
            end_points[end_point_base] = nn.Sequential(OrderedDict([('depthwise', conv_dw(in_channels, conv_def.kernel, stride=layer_stride, dilation=layer_rate)), ('pointwise', conv_pw(in_channels, out_channels, 1, stride=1))]))
            if ((end_point_base + '_pointwise') == final_endpoint):
                return nn.Sequential(end_points)
        else:
            raise ValueError(('Unknown convolution type %s for layer %d' % (conv_def.ltype, i)))
        in_channels = out_channels
    raise ValueError(('Unknown final endpoint %s' % final_endpoint))
|
class mobilenetv1(Network):
    """MobileNet-v1 backbone for Faster R-CNN (head = blocks 0-11, tail = 12+)."""
    def __init__(self):
        Network.__init__(self)
        self._feat_stride = [16]  # total stride of the conv head
        self._feat_compress = [(1.0 / float(self._feat_stride[0]))]
        self._depth_multiplier = cfg.MOBILENET.DEPTH_MULTIPLIER
        self._net_conv_channels = 512   # head output channels (at multiplier 1)
        self._fc7_channels = 1024       # tail output channels after pooling
    def init_weights(self):
        """Randomly initialize backbone and RPN/RCNN head weights."""
        def normal_init(m, mean, stddev, truncated=False):
            """Weight initializer for Conv layers: truncated or plain normal."""
            if (m.__class__.__name__.find('Conv') == (- 1)):
                return
            if truncated:
                # Sample N(0,1), clamp to (-2, 2) via fmod, then scale and
                # shift: an inexpensive truncated-normal approximation.
                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
            else:
                m.weight.data.normal_(mean, stddev)
            if (m.bias is not None):
                m.bias.data.zero_()
        self.mobilenet.apply((lambda m: normal_init(m, 0, 0.09, True)))
        normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)
    def _image_to_head(self):
        # Run the conv head; keep the activation for TensorBoard summaries.
        net_conv = self._layers['head'](self._image)
        self._act_summaries['conv'] = net_conv
        return net_conv
    def _head_to_tail(self, pool5):
        # Tail conv blocks followed by global average pooling over W then H.
        fc7 = self._layers['tail'](pool5)
        fc7 = fc7.mean(3).mean(2)
        return fc7
    def _init_head_tail(self):
        """Build MobileNet and split it into a (partly frozen) head and a tail."""
        self.mobilenet = mobilenet_v1_base()
        assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
        # Freeze the first FIXED_LAYERS blocks entirely.
        for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
            for p in m.parameters():
                p.requires_grad = False
        def set_bn_fix(m):
            # All BatchNorm affine parameters stay fixed during training.
            classname = m.__class__.__name__
            if (classname.find('BatchNorm') != (- 1)):
                for p in m.parameters():
                    p.requires_grad = False
        self.mobilenet.apply(set_bn_fix)
        def l2_regularizer(m, wd, regu_depth):
            # Tag each Conv weight with its decay; depthwise (grouped) convs
            # are exempt unless regu_depth is set.
            if (m.__class__.__name__.find('Conv') != (- 1)):
                if (regu_depth or (m.groups == 1)):
                    m.weight.weight_decay = wd
                else:
                    m.weight.weight_decay = 0
        self.mobilenet.apply((lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY, cfg.MOBILENET.REGU_DEPTH)))
        # Head = first 12 blocks; tail = the remainder.
        self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])
        self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])
    def train(self, mode=True):
        """Enter train mode, keeping frozen blocks and all BatchNorms in eval."""
        nn.Module.train(self, mode)
        if mode:
            for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
                m.eval()
            def set_bn_eval(m):
                classname = m.__class__.__name__
                if (classname.find('BatchNorm') != (- 1)):
                    m.eval()
            self.mobilenet.apply(set_bn_eval)
    def load_pretrained_cnn(self, state_dict):
        """Load whatever 'features.'-prefixed keys the checkpoint provides."""
        print('Warning: No available pretrained model yet')
        self.mobilenet.load_state_dict({k: state_dict[('features.' + k)] for k in list(self.mobilenet.state_dict()) if (('features.' + k) in state_dict)})
|
class ResNet(torchvision.models.resnet.ResNet):
    """torchvision ResNet with stride adjustments for Faster R-CNN.

    The first block of layer2/layer3 downsamples in conv1 instead of
    conv2, and layer4 keeps stride 1 (main path and shortcut) so the
    feature stride stays 16 for RoI pooling. The classification head
    (avgpool, fc) is removed.
    """
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__(block, layers, num_classes)
        for i in range(2, 4):
            # Move the stride-2 downsampling from conv2 to conv1 in the
            # first block of layer2 and layer3.
            getattr(self, ('layer%d' % i))[0].conv1.stride = (2, 2)
            getattr(self, ('layer%d' % i))[0].conv2.stride = (1, 1)
        # Keep layer4 at stride 1 so the total stride remains 16.
        self.layer4[0].conv2.stride = (1, 1)
        self.layer4[0].downsample[0].stride = (1, 1)
        # The detection heads replace the classifier; drop the unused modules.
        del self.avgpool, self.fc
|
def resnet18(pretrained=False):
    """Construct a ResNet-18 model (custom stride-16 variant).

    Args:
        pretrained (bool): If True, loads ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2])
    if pretrained:
        # BUG FIX: the custom ResNet deletes `avgpool` and `fc`, so the zoo
        # checkpoint contains `fc.*` keys this model no longer owns and a
        # strict load_state_dict would raise on the unexpected keys. Filter
        # the checkpoint to the model's own keys (same pattern as
        # resnetv1.load_pretrained_cnn).
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        own_keys = set(model.state_dict())
        model.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own_keys)})
    return model
|
def resnet34(pretrained=False):
    """Construct a ResNet-34 model (custom stride-16 variant).

    Args:
        pretrained (bool): If True, loads ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3])
    if pretrained:
        # BUG FIX: filter out checkpoint keys (avgpool/fc) that the custom
        # ResNet deleted; a strict load would otherwise raise.
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        own_keys = set(model.state_dict())
        model.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own_keys)})
    return model
|
def resnet50(pretrained=False):
    """Construct a ResNet-50 model (custom stride-16 variant).

    Args:
        pretrained (bool): If True, loads ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    if pretrained:
        # BUG FIX: filter out checkpoint keys (avgpool/fc) that the custom
        # ResNet deleted; a strict load would otherwise raise.
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        own_keys = set(model.state_dict())
        model.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own_keys)})
    return model
|
def resnet101(pretrained=False):
    """Construct a ResNet-101 model (custom stride-16 variant).

    Args:
        pretrained (bool): If True, loads ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3])
    if pretrained:
        # BUG FIX: filter out checkpoint keys (avgpool/fc) that the custom
        # ResNet deleted; a strict load would otherwise raise.
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        own_keys = set(model.state_dict())
        model.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own_keys)})
    return model
|
def resnet152(pretrained=False):
    """Construct a ResNet-152 model (custom stride-16 variant).

    Args:
        pretrained (bool): If True, loads ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3])
    if pretrained:
        # BUG FIX: filter out checkpoint keys (avgpool/fc) that the custom
        # ResNet deleted; a strict load would otherwise raise.
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        own_keys = set(model.state_dict())
        model.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own_keys)})
    return model
|
class resnetv1(Network):
    """ResNet backbone for Faster R-CNN (head = conv1..layer3, tail = layer4)."""
    def __init__(self, num_layers=50):
        Network.__init__(self)
        self._feat_stride = [16]  # total stride of the conv head
        self._feat_compress = [(1.0 / float(self._feat_stride[0]))]
        self._num_layers = num_layers   # one of 50, 101, 152
        self._net_conv_channels = 1024  # layer3 output channels
        self._fc7_channels = 2048       # layer4 output channels
    def _crop_pool_layer(self, bottom, rois):
        # Delegate, adding the ResNet-specific max-pool toggle.
        return Network._crop_pool_layer(self, bottom, rois, cfg.RESNET.MAX_POOL)
    def _image_to_head(self):
        # Run the conv head; keep the activation for summaries.
        net_conv = self._layers['head'](self._image)
        self._act_summaries['conv'] = net_conv
        return net_conv
    def _head_to_tail(self, pool5):
        # layer4 followed by global average pooling over W then H.
        fc7 = self.resnet.layer4(pool5).mean(3).mean(2)
        return fc7
    def _init_head_tail(self):
        """Instantiate the backbone and freeze the configured stages."""
        if (self._num_layers == 50):
            self.resnet = resnet50()
        elif (self._num_layers == 101):
            self.resnet = resnet101()
        elif (self._num_layers == 152):
            self.resnet = resnet152()
        else:
            raise NotImplementedError
        # The stem (conv1/bn1) is always frozen.
        for p in self.resnet.bn1.parameters():
            p.requires_grad = False
        for p in self.resnet.conv1.parameters():
            p.requires_grad = False
        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        # Freeze residual stages bottom-up, FIXED_BLOCKS of them.
        if (cfg.RESNET.FIXED_BLOCKS >= 3):
            for p in self.resnet.layer3.parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 2):
            for p in self.resnet.layer2.parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 1):
            for p in self.resnet.layer1.parameters():
                p.requires_grad = False
        def set_bn_fix(m):
            # BatchNorm affine parameters stay fixed everywhere.
            classname = m.__class__.__name__
            if (classname.find('BatchNorm') != (- 1)):
                for p in m.parameters():
                    p.requires_grad = False
        self.resnet.apply(set_bn_fix)
        self._layers['head'] = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu, self.resnet.maxpool, self.resnet.layer1, self.resnet.layer2, self.resnet.layer3)
    def train(self, mode=True):
        """Enter train mode, but keep frozen stages and all BatchNorms in eval."""
        nn.Module.train(self, mode)
        if mode:
            # Everything to eval first, then re-enable trainable stages.
            self.resnet.eval()
            if (cfg.RESNET.FIXED_BLOCKS <= 3):
                self.resnet.layer4.train()
            if (cfg.RESNET.FIXED_BLOCKS <= 2):
                self.resnet.layer3.train()
            if (cfg.RESNET.FIXED_BLOCKS <= 1):
                self.resnet.layer2.train()
            if (cfg.RESNET.FIXED_BLOCKS == 0):
                self.resnet.layer1.train()
            def set_bn_eval(m):
                classname = m.__class__.__name__
                if (classname.find('BatchNorm') != (- 1)):
                    m.eval()
            self.resnet.apply(set_bn_eval)
    def load_pretrained_cnn(self, state_dict):
        """Load only the checkpoint entries this model actually has."""
        self.resnet.load_state_dict({k: v for (k, v) in state_dict.items() if (k in self.resnet.state_dict())})
|
class vgg16(Network):
    """VGG-16 backbone for Faster R-CNN (conv head + fc6/fc7 tail)."""

    def __init__(self):
        Network.__init__(self)
        self._feat_stride = [16]
        self._feat_compress = [(1.0 / float(self._feat_stride[0]))]
        self._net_conv_channels = 512
        self._fc7_channels = 4096

    def _init_head_tail(self):
        """Build VGG16, drop its last classifier layer, freeze conv1/conv2."""
        self.vgg = models.vgg16()
        # Remove the final 1000-way classification layer.
        classifier_layers = list(self.vgg.classifier._modules.values())
        self.vgg.classifier = nn.Sequential(*classifier_layers[:(- 1)])
        # The first 10 feature layers stay frozen.
        for layer_idx in range(10):
            for param in self.vgg.features[layer_idx].parameters():
                param.requires_grad = False
        # Head = all feature layers except the last pooling layer.
        feature_layers = list(self.vgg.features._modules.values())
        self._layers['head'] = nn.Sequential(*feature_layers[:(- 1)])

    def _image_to_head(self):
        """Run the conv head and record the activation for summaries."""
        net_conv = self._layers['head'](self._image)
        self._act_summaries['conv'] = net_conv
        return net_conv

    def _head_to_tail(self, pool5):
        """Flatten pooled RoIs and push them through the fc layers."""
        flattened = pool5.view(pool5.size(0), (- 1))
        return self.vgg.classifier(flattened)

    def load_pretrained_cnn(self, state_dict):
        """Load only the checkpoint entries this model actually has."""
        own = self.vgg.state_dict()
        self.vgg.load_state_dict({k: v for (k, v) in state_dict.items() if (k in own)})
|
def prepare_roidb(imdb):
    """Enrich the imdb's roidb with derived quantities used in training.

    Precomputes, per RoI, the maximum ground-truth overlap and the class
    achieving it; also records image path and (for non-COCO sets) image
    dimensions.
    """
    roidb = imdb.roidb
    is_coco = imdb.name.startswith('coco')
    if not is_coco:
        # COCO stores sizes already; other datasets read them from disk once.
        sizes = [PIL.Image.open(imdb.image_path_at(i)).size for i in range(imdb.num_images)]
    for i in range(len(imdb.image_index)):
        entry = roidb[i]
        entry['image'] = imdb.image_path_at(i)
        if not is_coco:
            entry['width'] = sizes[i][0]
            entry['height'] = sizes[i][1]
        gt_overlaps = entry['gt_overlaps'].toarray()
        max_overlaps = gt_overlaps.max(axis=1)
        max_classes = gt_overlaps.argmax(axis=1)
        entry['max_classes'] = max_classes
        entry['max_overlaps'] = max_overlaps
        # Sanity checks: zero-overlap boxes must be background (class 0),
        # positive-overlap boxes must be foreground.
        zero_inds = np.where(max_overlaps == 0)[0]
        assert all(max_classes[zero_inds] == 0)
        nonzero_inds = np.where(max_overlaps > 0)[0]
        assert all(max_classes[nonzero_inds] != 0)
|
class Timer(object):
    """A simple named wall-clock timer with per-name totals and averages."""

    def __init__(self):
        # Per-name bookkeeping: totals, call counts, start stamps,
        # last intervals, and running averages.
        self._total_time = {}
        self._calls = {}
        self._start_time = {}
        self._diff = {}
        self._average_time = {}

    def tic(self, name='default'):
        """Start (or restart) the timer `name`."""
        if torch.cuda.is_available():
            # Make queued GPU work observable in the wall-clock interval.
            torch.cuda.synchronize()
        self._start_time[name] = time.time()

    def toc(self, name='default', average=True):
        """Stop timer `name`; return the running average or last interval."""
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        elapsed = time.time() - self._start_time[name]
        self._diff[name] = elapsed
        self._total_time[name] = self._total_time.get(name, 0.0) + elapsed
        self._calls[name] = self._calls.get(name, 0) + 1
        self._average_time[name] = self._total_time[name] / self._calls[name]
        return self._average_time[name] if average else elapsed

    def average_time(self, name='default'):
        """Mean interval recorded so far for `name`."""
        return self._average_time[name]

    def total_time(self, name='default'):
        """Sum of all intervals recorded so far for `name`."""
        return self._total_time[name]
|
def add_path(path):
    """Prepend `path` to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
|
def parse_args():
    """Parse command-line arguments for result re-evaluation."""
    parser = argparse.ArgumentParser(description='Re-evaluate results')
    parser.add_argument('output_dir', nargs=1, type=str, help='results directory')
    parser.add_argument('--imdb', dest='imdb_name', type=str, default='voc_2007_test', help='dataset to re-evaluate')
    parser.add_argument('--matlab', dest='matlab_eval', action='store_true', help='use matlab for evaluation')
    parser.add_argument('--comp', dest='comp_mode', action='store_true', help='competition mode')
    parser.add_argument('--nms', dest='apply_nms', action='store_true', help='apply nms')
    # With no arguments at all, show usage and exit with an error code.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
|
def from_dets(imdb_name, output_dir, args):
    """Re-evaluate previously saved detections for `imdb_name`.

    Loads `<output_dir>/detections.pkl`, optionally applies NMS, and
    runs the dataset's evaluation routine.
    """
    imdb = get_imdb(imdb_name)
    imdb.competition_mode(args.comp_mode)
    imdb.config['matlab_eval'] = args.matlab_eval
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'rb') as f:
        dets = pickle.load(f)
    if args.apply_nms:
        print('Applying NMS to all detections')
        nms_dets = apply_nms(dets, cfg.TEST.NMS)
    else:
        nms_dets = dets
    print('Evaluating detections')
    imdb.evaluate_detections(nms_dets, output_dir)
|
def parse_args():
    """Parse command-line arguments for testing a Fast R-CNN network."""
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None, help='optional config file')
    parser.add_argument('--model', dest='model', type=str, default=None, help='model to test')
    parser.add_argument('--imdb', dest='imdb_name', type=str, default='voc_2007_test', help='dataset to test')
    parser.add_argument('--comp', dest='comp_mode', action='store_true', help='competition mode')
    parser.add_argument('--num_dets', dest='max_per_image', type=int, default=100, help='max number of detections per image')
    parser.add_argument('--tag', dest='tag', type=str, default='', help='tag of the model')
    parser.add_argument('--net', dest='net', type=str, default='res50', help='vgg16, res50, res101, res152, mobile')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set config keys')
    # With no arguments at all, show usage and exit with an error code.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
|
def parse_args():
    """Parse command-line arguments for training a Fast R-CNN network."""
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None, help='optional config file')
    parser.add_argument('--weight', dest='weight', type=str, help='initialize with pretrained model weights')
    parser.add_argument('--imdb', dest='imdb_name', type=str, default='voc_2007_trainval', help='dataset to train on')
    parser.add_argument('--imdbval', dest='imdbval_name', type=str, default='voc_2007_test', help='dataset to validate on')
    parser.add_argument('--iters', dest='max_iters', type=int, default=70000, help='number of iterations to train')
    parser.add_argument('--tag', dest='tag', type=str, default=None, help='tag of the model')
    parser.add_argument('--net', dest='net', type=str, default='res50', help='vgg16, res50, res101, res152, mobile')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set config keys')
    # With no arguments at all, show usage and exit with an error code.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
|
def combined_roidb(imdb_names):
    """Combine the roidbs of one or more '+'-separated imdb names.

    Returns a (imdb, roidb) pair; with multiple datasets the roidbs are
    concatenated and a synthetic combined imdb is built.
    """
    def _load_roidb(name):
        imdb = get_imdb(name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        return get_training_roidb(imdb)

    names = imdb_names.split('+')
    roidbs = [_load_roidb(name) for name in names]
    roidb = roidbs[0]
    if len(roidbs) > 1:
        for extra in roidbs[1:]:
            roidb.extend(extra)
        # NOTE(review): the combined imdb takes its class list from the
        # *second* dataset, matching the original behavior.
        tmp = get_imdb(names[1])
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return (imdb, roidb)
|
def complex_flatten(real, imag):
    """Flatten the real and imaginary tensors independently to (batch, -1)."""
    flatten = tf.keras.layers.Flatten()
    return (flatten(real), flatten(imag))
|
def CReLU(real, imag):
    """CReLU: apply ReLU to the real and imaginary parts separately."""
    act = tf.keras.layers.ReLU()
    return (act(real), act(imag))
|
def zReLU(real, imag):
    """zReLU: keep a complex element only when both parts are positive.

    Both parts are rectified, then zeroed wherever either rectified part
    is zero (i.e. the original value was outside the first quadrant).
    """
    act = tf.keras.layers.ReLU()
    real = act(real)
    imag = act(imag)
    # 1.0 where the rectified part is non-zero, 0.0 otherwise.
    real_mask = tf.cast(tf.cast(real, tf.bool), tf.float32)
    imag_mask = tf.cast(tf.cast(imag, tf.bool), tf.float32)
    keep = real_mask * imag_mask
    return (tf.math.multiply(real, keep), tf.math.multiply(imag, keep))
|
def modReLU(real, imag):
    """modReLU activation for a complex tensor split into real/imag parts.

    Computes ReLU(|z| + b) * z / |z| with a learnable per-channel bias b
    (Arjovsky et al., 2016): the magnitude is rectified while the phase
    is preserved.
    """
    norm = tf.abs(tf.complex(real, imag))
    bias = tf.Variable(np.zeros([norm.get_shape()[(- 1)]]), trainable=True, dtype=tf.float32)
    relu = tf.nn.relu((norm + bias))
    # BUG FIX: the original scaled by ((relu / norm) + 100000.0), adding a
    # huge constant to the gain instead of stabilising the division. The
    # standard modReLU scale is relu / (|z| + eps) with a small epsilon
    # guarding against division by zero.
    scale = (relu / (norm + 1e-05))
    real = tf.math.multiply(scale, real)
    imag = tf.math.multiply(scale, imag)
    return (real, imag)
|
def CLeaky_ReLU(real, imag):
    """Leaky ReLU applied component-wise to real and imaginary parts."""
    return (tf.nn.leaky_relu(real), tf.nn.leaky_relu(imag))
|
def complex_tanh(real, imag):
    """tanh applied component-wise to real and imaginary parts."""
    return (tf.nn.tanh(real), tf.nn.tanh(imag))
|
def complex_softmax(real, imag):
    """Softmax over the magnitudes of the complex inputs (returns real)."""
    magnitude = tf.abs(tf.complex(real, imag))
    return tf.keras.layers.Softmax()(magnitude)
|
def update_params(lr, epoch):
    """One SGHMC-style manual parameter update over all of `net`'s parameters.

    Each parameter carries its own momentum buffer (`p.buf`); Gaussian
    noise is injected only during the last 5 epochs of each 50-epoch
    cycle (the sampling phase).
    """
    for p in net.parameters():
        if not hasattr(p, 'buf'):
            p.buf = torch.zeros(p.size()).cuda(device_id)
        grad = p.grad.data
        # Fold L2 regularization into the gradient.
        grad.add_(weight_decay, p.data)
        momentum = ((1 - args.alpha) * p.buf) - (lr * grad)
        if ((epoch % 50) + 1) > 45:
            # Langevin noise near the end of the cosine cycle.
            eps = torch.randn(p.size()).cuda(device_id)
            momentum += ((((2.0 * lr) * args.alpha) * args.temperature / datasize) ** 0.5) * eps
        p.data.add_(momentum)
        p.buf = momentum
|
def adjust_learning_rate(epoch, batch_idx):
    """Cosine-cycle learning-rate schedule (cSG-MCMC style).

    The lr decays from lr_0 to 0 over each of M cycles of length T // M
    (measured in optimization steps).
    """
    step = (epoch * num_batch) + batch_idx
    cycle_len = T // M
    inner = (np.pi * (step % cycle_len)) / cycle_len
    return 0.5 * (np.cos(inner) + 1) * lr_0
|
def train(epoch):
    """One SGHMC training epoch with manual (non-optimizer) updates."""
    print('\nEpoch: %d' % epoch)
    net.train()
    running_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
        net.zero_grad()
        lr = adjust_learning_rate(epoch, batch_idx)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        # Parameters are updated manually (SGHMC), not via an optimizer.
        update_params(lr, epoch)
        running_loss += loss.data.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if (batch_idx % 100) == 0:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (running_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
|
def test(epoch):
    """Evaluate `net` on the test set, printing running and final metrics."""
    global best_acc
    net.eval()
    loss_sum = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss_sum += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            if (batch_idx % 100) == 0:
                print('Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)' % (loss_sum / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(loss_sum / len(testloader), correct, total, (100.0 * correct.item()) / total))
|
def noise_loss(lr, alpha):
    """Gaussian inner-product term whose gradient injects SGLD noise.

    Adding this to the loss makes backprop add N(0, std) noise to every
    parameter gradient, with std = sqrt(2 * alpha / lr).
    """
    std = ((2 / lr) * alpha) ** 0.5
    total = 0.0
    for param in net.parameters():
        zero_mean = torch.zeros(param.size()).cuda(device_id)
        total += torch.sum(param * torch.normal(zero_mean, std=std).cuda(device_id))
    return total
|
def adjust_learning_rate(optimizer, epoch, batch_idx):
    """Cosine-cycle schedule; writes the new lr into `optimizer`, returns it."""
    step = (epoch * num_batch) + batch_idx
    cycle_len = T // M
    lr = 0.5 * (np.cos((np.pi * (step % cycle_len)) / cycle_len) + 1) * lr_0
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
|
def train(epoch):
    """One optimizer-based training epoch with cSGLD noise near cycle end."""
    print('\nEpoch: %d' % epoch)
    net.train()
    running_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, epoch, batch_idx)
        outputs = net(inputs)
        if ((epoch % 50) + 1) > 47:
            # Sampling phase: inject Langevin noise via the auxiliary loss.
            loss_noise = noise_loss(lr, args.alpha) * ((args.temperature / datasize) ** 0.5)
            loss = criterion(outputs, targets) + loss_noise
        else:
            loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.data.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if (batch_idx % 100) == 0:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (running_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
|
def test(epoch):
    """Evaluate `net` on the test set, printing running and final metrics."""
    global best_acc
    net.eval()
    loss_sum = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss_sum += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            if (batch_idx % 100) == 0:
                print('Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)' % (loss_sum / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(loss_sum / len(testloader), correct, total, (100.0 * correct.item()) / total))
|
def get_accuracy(truth, pred):
    """Return the fraction of positions where `truth` and `pred` agree.

    Args:
        truth: sequence of ground-truth labels.
        pred: sequence of predicted labels, same length as `truth`.

    Returns:
        float in [0, 1]. Raises ZeroDivisionError on empty input
        (unchanged from the original behavior).
    """
    assert len(truth) == len(pred)
    # Idiomatic: count matches with zip + sum instead of a manual index loop.
    right = sum(1.0 for t, p in zip(truth, pred) if t == p)
    return right / len(truth)
|
def test():
    """Evaluate `net`; return (stacked softmax predictions, truth labels)."""
    global best_acc
    net.eval()
    loss_sum = 0
    correct = 0
    total = 0
    pred_list = []
    truth_res = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            truth_res += list(targets.data)
            outputs = net(inputs)
            # Keep per-batch class probabilities for later ensembling.
            pred_list.append(F.softmax(outputs, dim=1))
            loss = criterion(outputs, targets)
            loss_sum += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(loss_sum / len(testloader), correct, total, (100.0 * correct.item()) / total))
    return (torch.cat(pred_list, 0), truth_res)
|
def update_params(lr, epoch):
    """SGHMC manual update of every parameter of the global `net`.

    Momentum buffers live on the parameters themselves (`p.buf`); noise
    is added only in the last 5 epochs of each 50-epoch cycle.
    """
    for p in net.parameters():
        if not hasattr(p, 'buf'):
            p.buf = torch.zeros(p.size()).cuda(device_id)
        grad = p.grad.data
        # Weight decay folded into the gradient.
        grad.add_(weight_decay, p.data)
        momentum = ((1 - args.alpha) * p.buf) - (lr * grad)
        if ((epoch % 50) + 1) > 45:
            noise = torch.randn(p.size()).cuda(device_id)
            momentum += ((((2.0 * lr) * args.alpha) * args.temperature / datasize) ** 0.5) * noise
        p.data.add_(momentum)
        p.buf = momentum
|
def adjust_learning_rate(epoch, batch_idx):
    """Cosine-cycle learning-rate schedule over T // M steps per cycle."""
    step = (epoch * num_batch) + batch_idx
    cycle_len = T // M
    phase = (np.pi * (step % cycle_len)) / cycle_len
    return 0.5 * (np.cos(phase) + 1) * lr_0
|
def train(epoch):
    """One SGHMC training epoch using manual parameter updates."""
    print('\nEpoch: %d' % epoch)
    net.train()
    epoch_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
        net.zero_grad()
        lr = adjust_learning_rate(epoch, batch_idx)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        # Updates happen manually (SGHMC), not through an optimizer.
        update_params(lr, epoch)
        epoch_loss += loss.data.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if (batch_idx % 100) == 0:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (epoch_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
|
def test(epoch):
    """Evaluate `net` on the test set and print loss/accuracy summaries."""
    global best_acc
    net.eval()
    epoch_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            epoch_loss += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            if (batch_idx % 100) == 0:
                print('Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)' % (epoch_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(epoch_loss / len(testloader), correct, total, (100.0 * correct.item()) / total))
|
def noise_loss(lr, alpha):
    """Auxiliary loss whose gradient adds Gaussian SGLD noise to every
    parameter gradient; std = sqrt(2 * alpha / lr)."""
    std = ((2 / lr) * alpha) ** 0.5
    accum = 0.0
    for param in net.parameters():
        zeros = torch.zeros(param.size()).cuda(device_id)
        accum += torch.sum(param * torch.normal(zeros, std=std).cuda(device_id))
    return accum
|
def adjust_learning_rate(optimizer, epoch, batch_idx):
    """Cosine-cycle schedule; stores the new lr in every param group."""
    step = (epoch * num_batch) + batch_idx
    cycle_len = T // M
    phase = (np.pi * (step % cycle_len)) / cycle_len
    lr = 0.5 * (np.cos(phase) + 1) * lr_0
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
|
def train(epoch):
    """One optimizer-based training epoch; cSGLD noise in the last 5
    epochs of each 50-epoch cycle."""
    print('\nEpoch: %d' % epoch)
    net.train()
    epoch_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, epoch, batch_idx)
        outputs = net(inputs)
        if ((epoch % 50) + 1) > 45:
            # Sampling phase: inject Langevin noise via the auxiliary loss.
            loss_noise = noise_loss(lr, args.alpha) * ((args.temperature / datasize) ** 0.5)
            loss = criterion(outputs, targets) + loss_noise
        else:
            loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.data.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if (batch_idx % 100) == 0:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (epoch_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
|
def test(epoch):
    """Evaluate `net` on the test set and print loss/accuracy summaries."""
    global best_acc
    net.eval()
    running_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            running_loss += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            if (batch_idx % 100) == 0:
                print('Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)' % (running_loss / (batch_idx + 1), (100.0 * correct.item()) / total, correct, total))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(running_loss / len(testloader), correct, total, (100.0 * correct.item()) / total))
|
def get_accuracy(truth, pred):
    """Return the fraction of positions where `truth` and `pred` agree.

    Args:
        truth: sequence of ground-truth labels.
        pred: sequence of predicted labels, same length as `truth`.

    Returns:
        float in [0, 1]. Raises ZeroDivisionError on empty input
        (unchanged from the original behavior).
    """
    assert len(truth) == len(pred)
    # Idiomatic: count matches with zip + sum instead of a manual index loop.
    right = sum(1.0 for t, p in zip(truth, pred) if t == p)
    return right / len(truth)
|
def test():
    """Evaluate `net`; return (stacked softmax predictions, truth labels)."""
    global best_acc
    net.eval()
    running_loss = 0
    correct = 0
    total = 0
    pred_list = []
    truth_res = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(device_id), targets.cuda(device_id)
            truth_res += list(targets.data)
            outputs = net(inputs)
            # Save per-batch class probabilities for later ensembling.
            pred_list.append(F.softmax(outputs, dim=1))
            loss = criterion(outputs, targets)
            running_loss += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(running_loss / len(testloader), correct, total, (100.0 * correct.item()) / total))
    return (torch.cat(pred_list, 0), truth_res)
|
def learning_rate(init, epoch):
    """Step-decay schedule: multiply `init` by 0.2 after epochs 60, 120, 160."""
    if epoch > 160:
        decay_steps = 3
    elif epoch > 120:
        decay_steps = 2
    elif epoch > 60:
        decay_steps = 1
    else:
        decay_steps = 0
    return init * math.pow(0.2, decay_steps)
|
def get_hms(seconds):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return (hours, minutes, secs)
|
def plot_result(data, title, range_limit, point=True, step=1, alpha=1.0):
    """Render a seaborn KDE joint plot of 2-D samples and save '<title>.pdf'.

    Args:
        data: array-like of shape (n, 2) with sample coordinates.
        title: output file stem ('.pdf' is appended).
        range_limit: (lo, hi) axis limits used for both axes.
        point: overlay a scatter of the raw samples when True.
        step: unused (kept for interface compatibility).
        alpha: scatter-point opacity.
    """
    bbox = [range_limit[0], range_limit[1], range_limit[0], range_limit[1]]
    frame = pd.DataFrame(data)
    fig = plt.figure(figsize=[5, 5])
    grid = sns.JointGrid(x=0, y=1, data=frame, xlim=range_limit, ylim=range_limit)
    grid.plot_joint(sns.kdeplot, cmap='Blues', n_levels=50, kernel='biw', bw='silverman', ratio=100, stat_func=None, joint_kws={'shade_lowest': True})
    if point:
        grid.plot_joint(plt.scatter, c='grey', s=5, linewidth=1, marker='o', alpha=alpha)
    grid.set_axis_labels('', '')
    plt.yticks([])
    plt.xticks([])
    plt.savefig(title + '.pdf', bbox_inches='tight')
    plt.show()
|
def gmm(x):
    """Evaluate an equally-weighted isotropic 2-D Gaussian-mixture pdf at x.

    Uses the module-level `num_mixtures`, `u_mean` (component means) and
    `std` (shared diagonal variance).
    """
    total = None
    for i in range(num_mixtures):
        component = st.multivariate_normal(u_mean[i], [[std, 0.0], [0.0, std]])
        weighted = component.pdf(x) / num_mixtures
        total = weighted if total is None else total + weighted
    return total
|
def evaluate_bivariate(range, npoints):
    """Build an npoints x npoints meshgrid over [range[0], range[1]]^2.

    NOTE: the parameter named `range` shadows the builtin; the name is
    kept for backward compatibility with existing callers.

    Returns:
        (z1, z2, zv): the two meshgrid coordinate arrays and an
        (npoints**2, 2) array of stacked grid points.
    """
    side = np.linspace(range[0], range[1], npoints)
    grid_x, grid_y = np.meshgrid(side, side)
    zv = np.column_stack((grid_x.ravel(), grid_y.ravel()))
    return (grid_x, grid_y, zv)
|
class Bottleneck(nn.Module):
    """DenseNet bottleneck unit: BN-ReLU-1x1conv then BN-ReLU-3x3conv,
    concatenating the `growth_rate` new feature maps with the input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inter_planes = 4 * growth_rate  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # Dense connectivity: new features stacked in front of the input.
        return torch.cat([new_features, x], 1)
|
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1conv channel compression followed
    by 2x2 average pooling to halve the spatial resolution."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
|
class DenseNet(nn.Module):
    """DenseNet classifier for CIFAR-sized (32x32) inputs.

    Four dense blocks; the first three are each followed by a Transition
    that compresses channels by `reduction` and halves the resolution.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        channels = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, channels, kernel_size=3, padding=1, bias=False)

        # Dense block 1 + transition.
        self.dense1 = self._make_dense_layers(block, channels, nblocks[0])
        channels += nblocks[0] * growth_rate
        compressed = int(math.floor(channels * reduction))
        self.trans1 = Transition(channels, compressed)
        channels = compressed

        # Dense block 2 + transition.
        self.dense2 = self._make_dense_layers(block, channels, nblocks[1])
        channels += nblocks[1] * growth_rate
        compressed = int(math.floor(channels * reduction))
        self.trans2 = Transition(channels, compressed)
        channels = compressed

        # Dense block 3 + transition.
        self.dense3 = self._make_dense_layers(block, channels, nblocks[2])
        channels += nblocks[2] * growth_rate
        compressed = int(math.floor(channels * reduction))
        self.trans3 = Transition(channels, compressed)
        channels = compressed

        # Final dense block, then BN + linear classifier.
        self.dense4 = self._make_dense_layers(block, channels, nblocks[3])
        channels += nblocks[3] * growth_rate
        self.bn = nn.BatchNorm2d(channels)
        self.linear = nn.Linear(channels, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` dense units; channels grow by growth_rate each time."""
        layers = []
        for _ in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # 4x4 average pool assumes 32x32 inputs (4x4 feature maps here).
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), (- 1))
        return self.linear(out)
|
def DenseNet121():
    """DenseNet-121: dense blocks of 6/12/24/16 layers, growth rate 32."""
    layers_per_block = [6, 12, 24, 16]
    return DenseNet(Bottleneck, layers_per_block, growth_rate=32)
|
def DenseNet169():
    """DenseNet-169: dense blocks of 6/12/32/32 layers, growth rate 32."""
    layers_per_block = [6, 12, 32, 32]
    return DenseNet(Bottleneck, layers_per_block, growth_rate=32)
|
def DenseNet201():
    """DenseNet-201: dense blocks of 6/12/48/32 layers, growth rate 32."""
    layers_per_block = [6, 12, 48, 32]
    return DenseNet(Bottleneck, layers_per_block, growth_rate=32)
|
def DenseNet161():
    """DenseNet-161: dense blocks of 6/12/36/24 layers, growth rate 48."""
    layers_per_block = [6, 12, 36, 24]
    return DenseNet(Bottleneck, layers_per_block, growth_rate=48)
|
def densenet_cifar():
    """Compact DenseNet for CIFAR: 6/12/24/16 layers, growth rate 12."""
    layers_per_block = [6, 12, 24, 16]
    return DenseNet(Bottleneck, layers_per_block, growth_rate=12)
|
def test_densenet():
    """Smoke test: push one random 32x32 RGB image through densenet_cifar.

    Fixed: dropped the deprecated `torch.autograd.Variable` wrapper -- since
    PyTorch 0.4 plain tensors are autograd-aware and `Variable(x)` is a
    no-op, so the behavior is unchanged on any supported torch version.
    """
    net = densenet_cifar()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y)
|
class Inception(nn.Module):
    """GoogLeNet Inception block with four parallel branches.

    Every conv is followed by BatchNorm + ReLU:
      b1: 1x1 conv (n1x1 channels)
      b2: 1x1 reduce (n3x3red) -> 3x3 conv (n3x3)
      b3: 1x1 reduce (n5x5red) -> two stacked 3x3 convs (n5x5), giving a
          5x5-equivalent receptive field
      b4: 3x3 max-pool -> 1x1 conv (pool_planes)
    Branch outputs are concatenated along the channel dimension.
    """

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5,
                 pool_planes):
        super(Inception, self).__init__()
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        branches = (self.b1, self.b2, self.b3, self.b4)
        return torch.cat([branch(x) for branch in branches], 1)
|
class GoogLeNet(nn.Module):
    """GoogLeNet adapted for CIFAR-sized (32x32) inputs, 10 classes.

    A 3x3 stem replaces the original 7x7/stride-2 stem; the stages are the
    standard inception-v1 configuration (a3..b5) with a single shared
    3x3/stride-2 max-pool used between stages.
    """

    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), nn.ReLU(True))
        # Inception args: (in, 1x1, 3x3red, 3x3, 5x5red, 5x5, pool-proj).
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        # Stage 3, downsample, stage 4, downsample, stage 5.
        for stage in (self.a3, self.b3, self.maxpool,
                      self.a4, self.b4, self.c4, self.d4, self.e4,
                      self.maxpool, self.a5, self.b5):
            out = stage(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
# NOTE(review): the lines that followed here ("Subsets and Splits", "No
# community queries yet", "The top public SQL queries from the community
# will appear here once available.") were dataset-viewer page boilerplate
# accidentally captured into this source file; they are not Python and
# would raise a SyntaxError, so they have been commented out.