text
stringlengths
1
93.6k
def scatterplot(x: np.array, y: np.array, filename: str, xlabel: str, ylabel: str,
                xlim: Optional[Tuple[float, float]] = None,
                ylim: Optional[Tuple[float, float]] = None,
                calibration_line: bool = False):
    """Save a scatter plot of ``y`` against ``x`` to ``results/<filename>.png``.

    Args:
        x, y: coordinate arrays of equal length.
        filename: output file stem; the figure is written to
            ``results/<filename>.png`` (the ``results/`` directory must exist).
        xlabel, ylabel: axis label texts.
        xlim, ylim: optional ``(low, high)`` axis limits. When omitted, the
            corresponding axis margins are collapsed to zero instead.
        calibration_line: when True, draw a dashed gray reference line from
            ``(xlim[0], ylim[0])`` toward ``(xlim[1], ylim[1])``; requires
            both ``xlim`` and ``ylim`` to be given.

    Raises:
        ValueError: if ``calibration_line`` is True but ``xlim`` or ``ylim``
            is None (previously this crashed with an opaque TypeError).
    """
    sns.scatterplot(x=x, y=y, color='coral')
    if calibration_line:
        if xlim is None or ylim is None:
            raise ValueError('calibration_line=True requires both xlim and ylim')
        sns.lineplot(x=np.arange(xlim[0], xlim[1]),
                     y=np.arange(ylim[0], ylim[1]),
                     color='gray', linestyle='--')
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel(xlabel, fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
    if xlim is not None:
        plt.xlim(*xlim)
    else:
        plt.margins(x=0)
    if ylim is not None:
        plt.ylim(*ylim)
    else:
        plt.margins(y=0)
    plt.tight_layout()
    # Bug fix: the original hard-coded the output path and never used the
    # `filename` argument.
    plt.savefig(f'results/{filename}.png')
    plt.close()
# <FILESEP>
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import math
import time
import glob
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from sklearn import metrics
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from util import AverageMeter, load_model
from eval_linear import accuracy
# Command-line interface for fine-tuning/evaluating a pretrained model on
# the PASCAL VOC 2007 classification task.
# NOTE(review): boolean options (--fc6_8, --train_batchnorm,
# --eval_random_crops) are encoded as ints (0/1), not store_true flags.
parser = argparse.ArgumentParser()
# Data: dataset root and which split to train on (the eval split is derived
# from this in main()).
parser.add_argument('--vocdir', type=str, required=False, default='', help='pascal voc 2007 dataset')
parser.add_argument('--split', type=str, required=False, default='train', choices=['train', 'trainval'], help='training split')
# Checkpoint to load (passed to util.load_model in main()).
parser.add_argument('--model', type=str, required=False, default='',
                    help='evaluate this model')
# Optimization schedule and which parameters to train.
parser.add_argument('--nit', type=int, default=80000, help='Number of training iterations')
parser.add_argument('--fc6_8', type=int, default=1, help='If true, train only the final classifier')
parser.add_argument('--train_batchnorm', type=int, default=0, help='If true, train batch-norm layer parameters')
parser.add_argument('--eval_random_crops', type=int, default=1, help='If true, eval on 10 random crops, otherwise eval on 10 fixed crops')
parser.add_argument('--stepsize', type=int, default=5000, help='Decay step')
parser.add_argument('--lr', type=float, required=False, default=0.003, help='learning rate')
parser.add_argument('--wd', type=float, required=False, default=1e-6, help='weight decay')
# RandomResizedCrop scale range used for the training augmentation in main().
parser.add_argument('--min_scale', type=float, required=False, default=0.1, help='scale')
parser.add_argument('--max_scale', type=float, required=False, default=0.5, help='scale')
# Seed applied to torch, torch.cuda, and numpy in main().
parser.add_argument('--seed', type=int, default=31, help='random seed')
def main():
args = parser.parse_args()
print(args)
# fix random seeds
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# create model and move it to gpu
model = load_model(args.model)
model.top_layer = nn.Linear(model.top_layer.weight.size(1), 20)
model.cuda()
cudnn.benchmark = True
# what partition of the data to use
if args.split == 'train':
args.test = 'val'
elif args.split == 'trainval':
args.test = 'test'
# data loader
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = VOC2007_dataset(args.vocdir, split=args.split, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)),
transforms.ToTensor(),
normalize,
]))
loader = torch.utils.data.DataLoader(dataset,
batch_size=16, shuffle=False,
num_workers=24, pin_memory=True)
print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')
# re initialize classifier