text (string column; value lengths range from 1 to 93.6k characters)
# Re-initialize the classifier head for transfer learning: every Linear
# layer gets small Gaussian weights and a small positive bias, and the
# final scoring layer's bias is reset the same way.
# (Fix: the original used `for y, m in enumerate(...)` but never read the
# index `y`; plain iteration is the idiomatic form.)
for m in model.classifier.modules():
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0, 0.01)
        m.bias.data.fill_(0.1)
model.top_layer.bias.data.fill_(0.1)
|
if args.fc6_8:
    # Train only the classifier head: freeze every parameter of the
    # convolutional trunk.
    for frozen_param in model.features.parameters():
        frozen_param.requires_grad = False
    # Optionally keep training the batch-norm affine parameters even
    # though the trunk itself is frozen.
    if args.train_batchnorm:
        batchnorm_layers = (
            layer for layer in model.modules()
            if isinstance(layer, torch.nn.BatchNorm2d)
        )
        for bn in batchnorm_layers:
            for bn_param in bn.parameters():
                bn_param.requires_grad = True
|
# Build the optimizer over only the parameters left trainable by the
# freezing logic above.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(
    trainable_params,
    lr=args.lr,
    momentum=0.9,
    weight_decay=args.wd,
)
# reduction='none' keeps a per-element loss so downstream code can
# mask or weight individual labels before averaging.
criterion = nn.BCEWithLogitsLoss(reduction='none')
|
print('Start training')
loss_meter = AverageMeter()
iteration = 0
# `train` runs a chunk of optimization steps and returns the updated
# iteration count; loop until the iteration budget args.nit is spent.
while iteration < args.nit:
    iteration = train(
        loader,
        model,
        optimizer,
        criterion,
        args.fc6_8,
        loss_meter,
        it=iteration,
        total_iterations=args.nit,
        stepsize=args.stepsize,
    )
|
print('Evaluation')
# Two evaluation protocols: the default stacks ten deterministic crops
# per image; the alternative draws random resized crops (the caller,
# `evaluate`, presumably averages over repeated passes — confirm there).
if not args.eval_random_crops:
    transform_eval = [
        transforms.Resize(256),
        transforms.TenCrop(224),
        transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))
    ]
else:
    transform_eval = [
        transforms.RandomHorizontalFlip(),
        transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)),
        transforms.ToTensor(),
        normalize,
    ]
|
print('Train set')
# Score the training split. batch_size=1 because each dataset item may
# already be a stacked tensor of crops (see the TenCrop transform).
eval_train_set = VOC2007_dataset(
    args.vocdir, split=args.split, transform=transforms.Compose(transform_eval)
)
eval_train_loader = torch.utils.data.DataLoader(
    eval_train_set,
    batch_size=1,
    shuffle=False,
    num_workers=24,
    pin_memory=True,
)
evaluate(eval_train_loader, model, args.eval_random_crops)
|
print('Test set')
# Score the held-out split with the same loader settings as the train
# split above; shuffling is off so results are reproducible.
eval_test_set = VOC2007_dataset(
    args.vocdir, split=args.test, transform=transforms.Compose(transform_eval)
)
eval_test_loader = torch.utils.data.DataLoader(
    eval_test_set,
    batch_size=1,
    shuffle=False,
    num_workers=24,
    pin_memory=True,
)
evaluate(eval_test_loader, model, args.eval_random_crops)
|
def evaluate(loader, model, eval_random_crops):
|
model.eval()
|
gts = []
|
scr = []
|
for crop in range(9 * eval_random_crops + 1):
|
for i, (input, target) in enumerate(loader):
|
# move input to gpu and optionally reshape it
|
if len(input.size()) == 5:
|
bs, ncrops, c, h, w = input.size()
|
input = input.view(-1, c, h, w)
|
input = input.cuda(non_blocking=True)
|
# forward pass without grad computation
|
with torch.no_grad():
|
output = model(input)
|
if crop < 1 :
|
scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy())
|
gts.append(target)
|
else:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.