Spaces:
Running
Running
File size: 6,065 Bytes
9c4b1c4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
import argparse
import os
import time
import util
import torch
#import models
#import data
class BaseOptions():
    """Defines command-line options shared by training and testing.

    Subclasses are expected to set ``self.isTrain`` (bool) before
    ``parse()`` is called; ``parse()`` copies it onto the option namespace.
    """

    def __init__(self):
        # True once initialize() has populated a parser with all options.
        self.initialized = False

    def initialize(self, parser):
        """Register every option on *parser* and return it.

        Called at most once per instance; sets ``self.initialized``.
        """
        parser.add_argument('--mode', default='binary')
        parser.add_argument('--arch', type=str, default='res50', help='architecture for binary classification')
        parser.add_argument("--task", type=str, help="Task: train/test")
        # data augmentation
        parser.add_argument('--rz_interp', default='bilinear')
        parser.add_argument('--blur_prob', type=float, default=0)
        parser.add_argument('--blur_sig', default='0.5')
        parser.add_argument('--jpg_prob', type=float, default=0)
        parser.add_argument('--jpg_method', default='cv2')
        parser.add_argument('--jpg_qual', default='75')
        # dataset location / split specification
        parser.add_argument("--split_file", type=str, help="Path to split json")
        parser.add_argument("--data_root", type=str, help="Path to dataset")
        parser.add_argument("--data_keys", type=str, help="Dataset specifications")
        parser.add_argument('--batch_size', type=int, default=64, help='input batch size')
        parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
        parser.add_argument('--cropSize', type=int, default=224, help='then crop to this size')
        parser.add_argument('--device', type=str, default='cpu', help='')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--num_threads', default=8, type=int, help='# threads for loading data')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--resize_or_crop', type=str, default='scale_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
        parser.add_argument('--delr_freq', type=int, default=10, help='frequency of changing lr')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build (once) and run the parser; return the parsed namespace.

        The parser is cached on ``self.parser`` the first time through.
        The original assigned ``self.parser`` from a local that was only
        bound inside the ``if not self.initialized`` branch, so a second
        call raised UnboundLocalError — the cache fixes that.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            self.parser = self.initialize(parser)
        # parse_known_args tolerates options added later by subclasses.
        opt, _ = self.parser.parse_known_args()
        return opt

    def print_options(self, opt):
        """Pretty-print all options (flagging non-defaults) and save them
        to checkpoint/<name>/opt.txt for the experiment record."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join('checkpoint', opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self, print_options=True):
        """Parse options, post-process comma-separated fields, and return
        the namespace (also cached on ``self.opt``).

        Raises:
            ValueError: if --jpg_qual has more than two comma-separated values.
        """
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test; set by subclass

        # Optional experiment-name suffix, formatted from the options themselves,
        # e.g. '{arch}_size{loadSize}'.
        if opt.suffix:
            opt.name = opt.name + '_' + opt.suffix.format(**vars(opt))

        if print_options:
            self.print_options(opt)

        # Expand comma-separated augmentation specs into lists.
        opt.rz_interp = opt.rz_interp.split(',')
        opt.blur_sig = [float(s) for s in opt.blur_sig.split(',')]
        opt.jpg_method = opt.jpg_method.split(',')
        opt.jpg_qual = [int(s) for s in opt.jpg_qual.split(',')]
        if len(opt.jpg_qual) == 2:
            # Two values denote an inclusive quality range.
            opt.jpg_qual = list(range(opt.jpg_qual[0], opt.jpg_qual[1] + 1))
        elif len(opt.jpg_qual) > 2:
            raise ValueError("Shouldn't have more than 2 values for --jpg_qual.")

        self.opt = opt
        return self.opt
|