def make_dataset(directory: str, class_to_idx: Optional[Dict[str, int]] = None, extensions: Optional[Tuple[str, ...]] = None, is_valid_file: Optional[Callable[[str], bool]] = None, class_num=10, target_list=[]) -> List[Tuple[str, int]]:
    """Generates a list of samples of the form (path_to_sample, class).

    See ...
class DatasetFolder(VisionDataset):
    """A generic data loader.

    This default directory structure can be customized by overriding the
    :meth:`find_classes` method.

    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        exten...
def pil_loader(path: str) -> Image.Image:
    # Open the path as a file object (rather than handing the path to
    # Image.open) to avoid ResourceWarnings from unclosed file handles.
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')

def accimage_loader(path: str) -> Any:
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Fall back to PIL if accimage fails to decode the file.
        return pil_loader(path)

def default_loader(path: str) -> Any:
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    else:
        return pil_loader(path)

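# Hedged usage sketch for the loaders above: write a temporary PNG, then
# read it back through default_loader (which dispatches to accimage or PIL;
# this demo assumes the default PIL backend).
import tempfile

with tempfile.TemporaryDirectory() as _tmp:
    _path = os.path.join(_tmp, 'sample.png')
    Image.new('RGB', (8, 8), 'red').save(_path)
    _img = default_loader(_path)
    print(_img.size)  # (8, 8); PIL reports (width, height)
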
class ImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way by default: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/[...]/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/[...]/asd932_.png

    This class inhe...
def fix_random_seeds(seed=31):
    """Fix random seeds."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

def get_logger(file_path_name):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    BASIC_FORMAT = '%(levelname)s:%(message)s'
    DATE_FORMAT = ''  # no timestamps in log lines
    formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT)
    chlr = logging.StreamHandler()  # console handler
    chlr.setFormatter(formatter)
    chlr.setLevel(logging.INFO)
    ...
def bool_flag(s):
    """Parse boolean arguments from the command line."""
    FALSY_STRINGS = {'off', 'false', '0'}
    TRUTHY_STRINGS = {'on', 'true', '1'}
    if s.lower() in FALSY_STRINGS:
        return False
    elif s.lower() in TRUTHY_STRINGS:
        return True
    else:
        raise argparse.A...
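# Hedged usage sketch: registering bool_flag as an argparse type lets a flag
# accept on/off, true/false, or 1/0. The parser and flag name are illustrative.
_demo_parser = argparse.ArgumentParser()
_demo_parser.add_argument('--use_fp16', type=bool_flag, default=True)
assert _demo_parser.parse_args(['--use_fp16', 'off']).use_fp16 is False
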
def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True

def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()

def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()

def is_main_process():
    return get_rank() == 0

def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)

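# Illustrative single-process behavior of the helpers above: without an
# initialized process group, get_world_size() is 1, get_rank() is 0, and
# save_on_master() behaves like a plain torch.save. The checkpoint name is
# a placeholder.
if is_main_process():
    print('world size:', get_world_size(), 'rank:', get_rank())
save_on_master({'epoch': 0}, 'checkpoint.pth')
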
def setup_for_distributed(is_master):
    """Disable printing when not in the master process."""
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_p...
def init_distributed_mode(args):
    if args.dist_on_itp:
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], ...
def init_distributed_mode2(args):
    if ('RANK' in os.environ) and ('WORLD_SIZE' in os.environ):
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ.get('LOCAL_RANK', 0))
        print('args.rank', args.rank, 'args.world_size', args....
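# Hedged sketch of the (truncated) tail of init_distributed_mode*: such
# functions conventionally end by pinning the GPU, creating the NCCL process
# group, and muting prints off the master rank, as in the DeiT/DINO pattern.
# The function name is mine; this is not the repository's exact code.
def _finish_distributed_init_sketch(args):
    torch.cuda.set_device(args.gpu)
    dist.init_process_group(backend='nccl', init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)
    dist.barrier()
    setup_for_distributed(args.rank == 0)
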
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = '{median:.4f} ({global_avg:.4f})'
        self.deque = deque(maxlen=wi...
class MetricLogger(object):
    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for (k, v) in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor...
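# Illustrative use of SmoothedValue/MetricLogger inside a training step:
# update() accepts floats (and, per the truncated branch, 0-d tensors) and
# keeps a smoothed window per metric name, so logs can show both the recent
# median and the global average.
_metric_logger = MetricLogger(delimiter='  ')
for _step in range(3):
    _metric_logger.update(loss=1.0 / (_step + 1), lr=1e-4)
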
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
    """Re-start from checkpoint."""
    if not os.path.isfile(ckp_path):
        return
    print('Found checkpoint at {}'.format(ckp_path))
    checkpoint = torch.load(ckp_path, map_location='cpu')
    for (key, value) in kwargs.items():
        ...
def _load_checkpoint_for_ema(model_ema, checkpoint):
    """Workaround for ModelEma._load_checkpoint to accept an already-loaded object."""
    mem_file = io.BytesIO()
    torch.save(checkpoint, mem_file)
    mem_file.seek(0)
    model_ema._load_checkpoint(mem_file)

def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location='cpu')
        if (checkpoint_key is not None) and (checkpoint_key in state_dict):
            print(f'Take key ...
def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'):
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadat...
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0, warmup_steps=-1):
    warmup_schedule = np.array([])
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print('Set warmup steps = %d' % warmu...
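# Hedged sketch (name and body are mine) of the per-iteration schedule the
# truncated cosine_scheduler builds: a linear warmup followed by a cosine
# decay from base_value to final_value, as in DINO-style training loops.
def cosine_scheduler_sketch(base_value, final_value, epochs, niter_per_ep,
                            warmup_epochs=0, start_warmup_value=0):
    warmup_iters = warmup_epochs * niter_per_ep
    warmup = np.linspace(start_warmup_value, base_value, warmup_iters)
    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    cosine = final_value + 0.5 * (base_value - final_value) * (
        1 + np.cos(np.pi * iters / len(iters)))
    schedule = np.concatenate((warmup, cosine))
    assert len(schedule) == epochs * niter_per_ep
    return schedule
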
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    output_dir = Path(args.output_dir)
    if args.resume:
        resume = os.path.join(output_dir, 'checkpoint.pth')
        if os.path.exists(resume):
            checkpoint = torch.load(resume, map_location='cpu')
            ...
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None, is_best=False):
    output_dir = Path(args.output_dir)
    if is_best:
        checkpoint_paths = [output_dir / 'checkpoint-best.pth', output_dir / 'checkpoint.pth']
    else:
        checkpoint_paths = [(outpu...
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)

def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    def norm_cdf(x):
        # CDF of the standard normal distribution.
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    if (mean < (a - 2 * std)) or (mean > (b + 2 * std)):
        warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be...
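# Typical use of trunc_normal_ for ViT-style weight init: fill the tensor
# in place from N(0, 0.02^2), truncated to [a, b] = [-2, 2].
_w = torch.empty(8, 8)
trunc_normal_(_w, std=0.02)
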
class INatDataset(ImageFolder):
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None, category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        path_json = os.path.join(root, f...
class CarsDataset(ImageFolder):
    def __init__(self, root, train=True, transform=None, target_transform=None, loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        data = scio.loadmat(os.path.join(root, 'cars_annos.mat'))['...
class FlwrsDataset(ImageFolder):
    def __init__(self, root, train=True, transform=None, target_transform=None, loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        data = np.array(sorted(os.listdir(os.path.join(root, 'jpg')...
def build_dataset(is_train, args):
    transform = build_transform(is_train, args)
    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 100
    elif args.data_set == 'CIFAR10':
        dataset = datasets.CIFAR...
def build_transform(is_train, args):
    resize_im = args.input_size > 32
    if is_train:
        transform = create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_cou...
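# Hedged sketch of the (truncated) evaluation branch of build_transform:
# DeiT-style pipelines usually resize with a 0.875 crop ratio, center-crop,
# and normalize with ImageNet statistics. The function name and the exact
# sizes/statistics below are common defaults, not necessarily this
# repository's values.
from torchvision import transforms
from torchvision.transforms import InterpolationMode

def eval_transform_sketch(input_size=224):
    return transforms.Compose([
        transforms.Resize(int(input_size / 0.875), interpolation=InterpolationMode.BICUBIC),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225)),
    ])
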
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed training,
    with repeated augmentation.
    It ensures that each augmented version of a sample will be visible to a
    different process (GPU).
    Heavily based on torch.utils.data.Di...
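# Illustrative wiring of RASampler into a DataLoader. The constructor
# signature is assumed to mirror torch.utils.data.DistributedSampler, as in
# the DeiT code it is based on; dataset and batch size are placeholders.
# sampler = RASampler(dataset, num_replicas=get_world_size(), rank=get_rank(), shuffle=True)
# loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=64,
#                                      num_workers=8, pin_memory=True, drop_last=True)
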
def get_args_parser():
    parser = argparse.ArgumentParser('Mugs', add_help=False)
    parser.add_argument('--arch', type=str, default='vit_small', choices=['vit_small', 'vit_base', 'vit_large'], help='Name of architecture to train.')
    parser.add_argument('--patch_size', type=int, default=16, help='Size in pixels...
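# Typical entry point implied by add_help=False above: compose this parser
# as a parent and launch training (a sketch of the usual __main__ block,
# not code shown in this dump).
# parser = argparse.ArgumentParser('Mugs', parents=[get_args_parser()])
# args = parser.parse_args()
# train_mugs(args)
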
def train_mugs(args):
    """Main training code for Mugs, including building the dataloader, models, losses, optimizers, etc."""
    logger = utils.get_logger(args.output_dir + '/train.log')
    logger.info(args)
    if args.output_dir and utils.is_main_process():
        with (Path(args.output_dir) / 'log.txt...
def train_one_epoch(student, teacher, teacher_without_ddp, all_losses, all_weights, data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule, epoch, fp16_scaler, student_mem, teacher_mem, logger, args):
    """Main training code for each epoch."""
    metric_logger = utils.MetricLogger(delimiter=' ...
def _interpolation(kwargs):
    interpolation = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(interpolation, (list, tuple)):
        return random.choice(interpolation)
    else:
        return interpolation

def _check_args_tf(kwargs):
    if ('fillcolor' in kwargs) and (_PIL_VER < (5, 0)):
        kwargs.pop('fillcolor')
    kwargs['resample'] = _interpolation(kwargs)

def shear_x(img, factor, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)

def shear_y(img, factor, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)

def translate_x_rel(img, pct, **kwargs):
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)

def translate_y_rel(img, pct, **kwargs):
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)

def translate_x_abs(img, pixels, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)

def translate_y_abs(img, pixels, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)

def rotate(img, degrees, **kwargs):
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        m...
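# Quick runnable demo of the affine ops above: _check_args_tf injects a
# default 'resample' filter and drops 'fillcolor' when PIL < 5.0. The image
# is synthetic, purely for illustration.
_demo_img = Image.new('RGB', (64, 64), 'white')
_sheared = shear_x(_demo_img, 0.3)
_shifted = translate_y_rel(_demo_img, 0.25)
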
def auto_contrast(img, **__):
    return ImageOps.autocontrast(img)

def invert(img, **__):
    return ImageOps.invert(img)

def equalize(img, **__):
    return ImageOps.equalize(img)

def solarize(img, thresh, **__):
    return ImageOps.solarize(img, thresh)

def solarize_add(img, add, thresh=128, **__):
    lut = []
    for i in range(256):
        if i < thresh:
            lut.append(min(255, i + add))
        else:
            lut.append(i)
    if img.mode in ('L', 'RGB'):
        if (img.mode == 'RGB') and (len(lut) == 256):
            lut = ((lut + lut) + l...
def posterize(img, bits_to_keep, **__):
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)

def contrast(img, factor, **__):
    return ImageEnhance.Contrast(img).enhance(factor)

def color(img, factor, **__):
    return ImageEnhance.Color(img).enhance(factor)

def brightness(img, factor, **__):
    return ImageEnhance.Brightness(img).enhance(factor)

def sharpness(img, factor, **__):
    return ImageEnhance.Sharpness(img).enhance(factor)

def _randomly_negate(v):
    """With 50% prob, negate the value."""
    return -v if random.random() > 0.5 else v

def _rotate_level_to_arg(level, _hparams):
    # level in [0, _MAX_LEVEL] maps to a rotation of up to +/- 30 degrees.
    level = (level / _MAX_LEVEL) * 30.0
    level = _randomly_negate(level)
    return (level,)

def _enhance_level_to_arg(level, _hparams):
    return ((level / _MAX_LEVEL) * 1.8 + 0.1,)

def _enhance_increasing_level_to_arg(level, _hparams):
    # A stronger level moves further from the identity enhancement (1.0).
    level = (level / _MAX_LEVEL) * 0.9
    level = 1.0 + _randomly_negate(level)
    return (level,)

def _shear_level_to_arg(level, _hparams):
    level = (level / _MAX_LEVEL) * 0.3
    level = _randomly_negate(level)
    return (level,)

def _translate_abs_level_to_arg(level, hparams):
    translate_const = hparams['translate_const']
    level = (level / _MAX_LEVEL) * float(translate_const)
    level = _randomly_negate(level)
    return (level,)

def _translate_rel_level_to_arg(level, hparams):
    translate_pct = hparams.get('translate_pct', 0.45)
    level = (level / _MAX_LEVEL) * translate_pct
    level = _randomly_negate(level)
    return (level,)

def _posterize_level_to_arg(level, _hparams):
    return (int((level / _MAX_LEVEL) * 4),)

def _posterize_increasing_level_to_arg(level, hparams):
    return (4 - _posterize_level_to_arg(level, hparams)[0],)

def _posterize_original_level_to_arg(level, _hparams):
    return (int((level / _MAX_LEVEL) * 4) + 4,)

def _solarize_level_to_arg(level, _hparams):
    return (int((level / _MAX_LEVEL) * 256),)

def _solarize_increasing_level_to_arg(level, _hparams):
    return (256 - _solarize_level_to_arg(level, _hparams)[0],)

def _solarize_add_level_to_arg(level, _hparams):
    return (int((level / _MAX_LEVEL) * 110),)

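# Worked example of the level-to-arg mapping, assuming the usual AutoAugment
# convention _MAX_LEVEL = 10.0: magnitude 9 yields a rotation of up to
# +/- 27 degrees and a shear of up to +/- 0.27.
(_deg,) = _rotate_level_to_arg(9, None)
(_shear,) = _shear_level_to_arg(9, None)
print(round(abs(_deg), 4), round(abs(_shear), 4))  # 27.0 0.27
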
def _select_rand_weights(weight_idx=0, transforms=None):
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one set of weights currently supported
    rand_weights = _RAND_CHOICE_WEIGHTS_0
    probs = [rand_weights[k] for k in transforms]
    probs /= np.sum(probs)
    return probs

class AugmentOp():
    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = hparams or _HPARAMS_DEFAULT
        self.name = name
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hp...
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    return [AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms]

class RandAugment():
    """Apply RandAug on an image."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        ops = np.random.choice(self.ops, self.num_layers...
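# Sketch of how the pieces above compose into a pipeline (the AugmentOp and
# RandAugment bodies are truncated here, so this is the expected usage, not
# verified against this dump; the image path is illustrative):
# ops = rand_augment_ops(magnitude=9)
# ra = RandAugment(ops, num_layers=2)
# augmented = ra(Image.open('sample.jpg'))
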
def rand_augment_transform(config_str, hparams):
    """Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
        dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand')....
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename en...

def is_image_file(filename: str) -> bool:
    """Checks if a file is an allowed image extension.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)

def find_classes(directory: str, class_num: int) -> Tuple[List[str], Dict[str, int]]:
    """Finds the class folders in a dataset.

    See :class:`DatasetFolder` for details.
    """
    classes = sorted(entry.name for entry in os.scandir(directory) if entry.is_dir())
    if not classes:
        raise FileNot...
def make_dataset(directory: str, class_to_idx: Optional[Dict[str, int]] = None, extensions: Optional[Tuple[str, ...]] = None, is_valid_file: Optional[Callable[[str], bool]] = None, class_num=10) -> List[Tuple[str, int]]:
    """Generates a list of samples of the form (path_to_sample, class).

    See :class:`DatasetF...