code
stringlengths
17
6.64M
class FeatureStandartization(): 'Normalize features of provided sample with given stats' def __init__(self, shift, scale): self.shift = torch.Tensor(shift) self.scale = torch.Tensor(scale) def __call__(self, sample): updated_sample = {} for (key, value) in sample.items():...
class GTtandartization(): 'Normalize features of provided sample with given stats\n * Supports multimodal gt represented as dictionary\n * For dictionary gts, only those values are updated for which the stats are provided\n ' def __init__(self, shift, scale): 'If ground truth is a di...
def crop(image, region):
    """Crop *image* to *region*, a (top, left, height, width) tuple.

    Thin convenience wrapper over ``F.crop`` that unpacks the region tuple.
    """
    return F.crop(image, *region)
def resize(image, size, max_size=None): def get_size_with_aspect_ratio(image_size, size, max_size=None): (w, h) = image_size if (max_size is not None): min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if (((max_original_size / min_o...
class RandomCrop(object):
    """Crop an image at a random location to a fixed (height, width) size."""

    def __init__(self, size):
        # size: target (height, width) of the crop.
        self.size = size

    def __call__(self, img):
        # Let torchvision sample the crop region, then apply the local helper.
        sampled_region = T.RandomCrop.get_params(img, self.size)
        return crop(img, sampled_region)
class RandomSizeCrop(object): def __init__(self, min_size: int, max_size: int): self.min_size = min_size self.max_size = max_size def __call__(self, img: PIL.Image.Image): w = random.randint(self.min_size, min(img.width, self.max_size)) h = random.randint(self.min_size, min(i...
class CenterCrop(object): def __init__(self, size): self.size = size def __call__(self, img): (image_width, image_height) = img.size (crop_height, crop_width) = self.size crop_top = int(round(((image_height - crop_height) / 2.0))) crop_left = int(round(((image_width -...
class Normalize(object):
    """Normalize an image tensor with per-channel mean and std."""

    def __init__(self, mean, std):
        # mean/std: per-channel statistics forwarded to F.normalize.
        self.mean = mean
        self.std = std

    def __call__(self, img):
        # BUG FIX: the original did `image = F.normalize(image, ...)`,
        # reading the undefined local `image` (NameError on every call).
        # The incoming argument `img` is what must be normalized.
        return F.normalize(img, mean=self.mean, std=self.std)
class Compose(object): def __init__(self, transforms): self.transforms = transforms def __call__(self, image): for t in self.transforms: image = t(image) return image def __repr__(self): format_string = (self.__class__.__name__ + '(') for t in self.tr...
def tv_make_color_img_transforms():
    """Photometric augmentation pipeline: gaussian blur then color jitter."""
    blur = T.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5))
    jitter = T.ColorJitter(brightness=0.5, hue=0.3)
    return T.Compose([blur, jitter])
def tv_make_geo_img_transforms(color=0):
    """Geometric augmentation pipeline, randomly applied as one group.

    Args:
        color: fill value for pixels exposed by perspective/rotation/affine
            warps (passed through to each transform's ``fill``).
    """
    # BUG FIX: removed leftover debug `print(color)` that spammed stdout
    # every time the pipeline was built.
    return T.RandomApply(transforms=[
        T.RandomPerspective(distortion_scale=0.2, p=0.8, fill=color),
        T.RandomRotation(degrees=(0, 45), fill=color),
        T.RandomAffine(degrees=(0, 0), translate=(0.2, 0.1), scale=(0.75, 1), fill=color),
        T.RandomPosterize(bits=2),
    ])
def tv_make_img_transforms():
    """Deterministic preprocessing: center-crop to 400, resize to 384, to tensor."""
    steps = [
        T.CenterCrop(400),
        T.Resize(384),
        T.ToTensor(),
    ]
    return T.Compose(steps)
def denormalize_img_transforms(mean=torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32), std=torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)):
    """Build a transform undoing channel normalization, then converting to PIL.

    Args:
        mean, std: per-channel stats that were used for normalization
            (defaults are the ImageNet statistics).

    Returns:
        A ``T.Compose`` that maps a normalized tensor back to a PIL image.
    """
    # BUG FIX: the original accepted mean/std but ignored them, applying only
    # ToPILImage — so "denormalize" never actually inverted the normalization.
    # x_norm = (x - mean) / std  =>  x = x_norm * std + mean, which is a
    # Normalize with mean' = -mean/std and std' = 1/std.
    inverse = T.Normalize(mean=(-mean / std).tolist(), std=(1.0 / std).tolist())
    return T.Compose([inverse, T.ToPILImage()])
class ExperimentWrappper(object): 'Class provides \n * a convenient way to store & load experiment info with integration to wandb \n * some functions & params shortcuts to access wandb functionality\n * for implemented functions, transparent workflow for finished and active (initialized) run ...
def eval_detr_metrics(model, criterion, data_warpper, rank=0, section='test'): device = ('cuda:{}'.format(rank) if torch.cuda.is_available() else 'cpu') model.to(device) model.eval() criterion.to(device) criterion.eval() criterion.with_quality_eval() data_warpper.dataset.set_training(False...
def _eval_detr_metrics_per_loader(model, criterion, loader, device): current_metrics = dict.fromkeys(['full_loss'], []) counter = 0 loader_iter = iter(loader) start_time = time.time() score_dict = {} collect_keys = ['st_f1s'] best_score_dict = {} while True: try: ba...
def eval_pad_vector(data_stats=None):
    """Compute the padding vector ``-shift / scale`` from dataset statistics.

    Args:
        data_stats: mapping with 'shift' and 'scale' sequences, or None/empty.

    Returns:
        torch.Tensor of the padding vector, or None when no stats are given.
    """
    # BUG FIX: replaced the mutable default argument `data_stats={}` with the
    # None sentinel. Behavior is unchanged ({} and None are both falsy), but
    # the shared-mutable-default pitfall is gone.
    if not data_stats:
        return None
    shift = torch.Tensor(data_stats['shift'])
    scale = torch.Tensor(data_stats['scale'])
    return -shift / scale
def build_model(args):
    """Dispatch model construction based on the configured model name."""
    # 'GarmentBackbone' selects the plain backbone; anything else builds
    # the transformer-style model.
    builder = build_backbone if args['NN']['model'] == 'GarmentBackbone' else build_former
    return builder(args)
class Transformer(nn.Module): def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False): super().__init__() encoder_layer = TransformerEncoderLayer(d_model, nhea...
class TransformerEncoder(nn.Module): def __init__(self, encoder_layer, num_layers, norm=None): super().__init__() self.layers = _get_clones(encoder_layer, num_layers) self.num_layers = num_layers self.norm = norm def forward(self, src, mask: Optional[Tensor]=None, src_key_pad...
class TransformerDecoder(nn.Module): def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): super().__init__() self.layers = _get_clones(decoder_layer, num_layers) self.num_layers = num_layers self.norm = norm self.return_intermediate = return...
class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feed...
class TransformerDecoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.multihead_attn = nn.MultiheadAttentio...
def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args): return Transformer(d_model=args['NN']['hidden_dim'], dropout=args['NN']['dropout'], nhead=args['NN']['nheads'], dim_feedforward=args['NN']['dim_feedforward'], num_encoder_layers=args['NN']['enc_layers'], num_decoder_layers=args['NN']['dec_layers'], normalize_before=args['NN']['pre_nor...
def _get_activation_fn(activation): 'Return an activation function given a string' if (activation == 'relu'): return F.relu if (activation == 'gelu'): return F.gelu if (activation == 'glu'): return F.glu raise RuntimeError(f'activation should be relu/gelu, not {activation}....
def get_values_from_args(): 'command line arguments to control the run for running wandb Sweeps!' parser = argparse.ArgumentParser() parser.add_argument('--config', '-c', help='YAML configuration file', type=str, default='./models/att/att.yaml') parser.add_argument('--test-only', '-t', action='store_t...
class Trainer(): def __init__(self, setup, experiment_tracker, dataset=None, data_split={}, with_norm=True, with_visualization=False): 'Initialize training and dataset split (if given)\n * with_visualization toggles image prediction logging to wandb board. Only works on custom garment datasets...
class TrainerDetr(Trainer): def __init__(self, setup, experiment_tracker, dataset=None, data_split={}, with_norm=True, with_visualization=False): super().__init__(setup, experiment_tracker, dataset=dataset, data_split=data_split, with_norm=with_norm, with_visualization=with_visualization) self.de...
class GradualWarmupScheduler(_LRScheduler): def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None): self.multiplier = multiplier self.total_epoch = total_epoch self.after_scheduler = after_scheduler self.finished = False super().__init__(optimizer) ...
def _parse_args(): (args_config, remaining) = config_parser.parse_known_args() if args_config.config: with open(args_config.config, 'r') as f: cfg = yaml.safe_load(f) parser.set_defaults(**cfg) args = parser.parse_args(remaining) args_text = yaml.safe_dump(args.__dict__...
def main(): setup_default_logging() (args, args_text) = _parse_args() args.prefetcher = (not args.no_prefetcher) args.distributed = False if ('WORLD_SIZE' in os.environ): args.distributed = (int(os.environ['WORLD_SIZE']) > 1) args.device = 'cuda:0' args.world_size = 1 args.rank...
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, optimizers=None): if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)): if (args.prefetcher and loader.mixup_en...
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''): batch_time_m = AverageMeter() losses_m = AverageMeter() top1_m = AverageMeter() top5_m = AverageMeter() model.eval() end = time.time() last_idx = (len(loader) - 1) with torch.no_grad(): for (batch...
class Decoder(): def __init__(self, labels, lm_path=None, alpha=1, beta=1.5, cutoff_top_n=40, cutoff_prob=0.99, beam_width=200, num_processes=24, blank_id=0): self.vocab_list = (['_'] + labels) self._decoder = CTCBeamDecoder((['_@'] + labels[1:]), lm_path, alpha, beta, cutoff_top_n, cutoff_prob, ...
class LipNet(nn.Module): def __init__(self, opt, vocab_size): super(LipNet, self).__init__() self.opt = opt self.conv = nn.Sequential(nn.Conv3d(3, 32, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)), nn.ReLU(True), nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)), nn.Drop...
class Exp(): def __init__(self, opt): self.trainset = GRIDDataset(opt, dset='train') self.trainset.load_data() self.testset = GRIDDataset(opt, dset='test') self.testset.load_data() self.trainloader = DataLoader(self.trainset, batch_size=opt.batch_size, shuffle=True, num_wo...
def train_class_batch(model, samples, target, criterion):
    """Forward a batch through *model* and score it with *criterion*.

    Returns:
        (loss, outputs) — the criterion value and the raw model outputs.
    """
    outputs = model(samples)
    return criterion(outputs, target), outputs
def get_loss_scale_for_deepspeed(model):
    """Read the current loss scale off a DeepSpeed engine's optimizer.

    Older DeepSpeed optimizers expose ``loss_scale``; newer ones expose
    ``cur_scale`` — prefer the former when present.
    """
    opt = model.optimizer
    if hasattr(opt, 'loss_scale'):
        return opt.loss_scale
    return opt.cur_scale
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, log_writer=None, start_steps=None, lr_schedule_values=N...
@torch.no_grad() def evaluate(data_loader, model, device): criterion = torch.nn.CrossEntropyLoss() metric_logger = utils_for_finetuning.MetricLogger(delimiter=' ') header = 'Test:' model.eval() for batch in metric_logger.log_every(data_loader, 10, header): images = batch[0] target...
def parse_args(): parser = argparse.ArgumentParser(description='This script extracts backbone weights from a checkpoint') parser.add_argument('--checkpoint', help='checkpoint file') parser.add_argument('--output', type=str, help='destination file name') parser.add_argument('--checkpoint_key', default=...
def main(): args = parse_args() assert args.output.endswith('.pth') ck = torch.load(args.checkpoint, map_location=torch.device('cpu')) output_dict = dict() has_backbone = False for (key, value) in ck[args.checkpoint_key].items(): if key.startswith('backbone'): output_dict[k...
def get_num_layer_for_vit(var_name, num_max_layer): if (var_name in ('cls_token', 'mask_token', 'pos_embed')): return 0 elif var_name.startswith('patch_embed'): return 0 elif var_name.startswith('rel_pos_bias'): return (num_max_layer - 1) elif var_name.startswith('blocks'): ...
def get_num_layer_for_swin(var_name, num_max_layer, depths): if (var_name in ('cls_token', 'mask_token', 'pos_embed')): return 0 elif var_name.startswith('patch_embed'): return 0 elif var_name.startswith('rel_pos_bias'): return (num_max_layer - 1) elif var_name.startswith('laye...
class LayerDecayValueAssigner(object): def __init__(self, values, is_swin=False, depths=None): self.values = values self.is_swin = is_swin self.depths = depths def get_scale(self, layer_id): return self.values[layer_id] def get_layer_id(self, var_name): return (g...
def get_parameter_groups(model, weight_decay=1e-05, skip_list=(), get_num_layer=None, get_layer_scale=None): parameter_group_names = {} parameter_group_vars = {} for (name, param) in model.named_parameters(): if (not param.requires_grad): continue if ((len(param.shape) == 1) or...
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None): opt_lower = args.opt.lower() weight_decay = args.weight_decay if (weight_decay and filter_bias_and_bn): skip = {} if (skip_list is not None): skip = skip_list ...
def fix_random_seeds(seed=31):
    """Seed numpy and torch (CPU + all CUDA devices) RNGs for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op on CPU-only builds, seeds every visible GPU otherwise.
    torch.cuda.manual_seed_all(seed)
def create_ds_config(args): args.deepspeed_config = os.path.join(args.output_dir, 'deepspeed_config.json') with open(args.deepspeed_config, mode='w') as writer: ds_config = {'train_batch_size': ((args.batch_size * args.update_freq) * get_world_size()), 'train_micro_batch_size_per_gpu': args.batch_size...
def bool_flag(s): '\n Parse boolean arguments from the command line.\n ' FALSY_STRINGS = {'off', 'false', '0'} TRUTHY_STRINGS = {'on', 'true', '1'} if (s.lower() in FALSY_STRINGS): return False elif (s.lower() in TRUTHY_STRINGS): return True else: raise argparse.A...
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """World size of the process group; 1 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process in the group; 0 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True for rank 0 (or for the single process in non-distributed runs)."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """``torch.save`` that runs only on the main process (avoids N-way writes)."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def setup_for_distributed(is_master): '\n This function disables printing when not in master process\n ' import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if (is_master or force): builtin_p...
def init_distributed_mode(args): if args.dist_on_itp: args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], ...
class TensorboardLogger(object): def __init__(self, log_dir): self.writer = SummaryWriter(logdir=log_dir) self.step = 0 def set_step(self, step=None): if (step is not None): self.step = step else: self.step += 1 def update(self, head='scalar', ste...
def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'): missing_keys = [] unexpected_keys = [] error_msgs = [] metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if (metadata is not None): state_dict._metadata = metadat...
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0, warmup_steps=(- 1)): warmup_schedule = np.array([]) warmup_iters = (warmup_epochs * niter_per_ep) if (warmup_steps > 0): warmup_iters = warmup_steps print(('Set warmup steps = %d' % warmu...
def _load_checkpoint_for_ema(model_ema, checkpoint): '\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n ' mem_file = io.BytesIO() torch.save(checkpoint, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): output_dir = Path(args.output_dir) if args.resume: resume = os.path.join(output_dir, 'checkpoint.pth') if os.path.exists(resume): checkpoint = torch.load(resume, map_location='cpu') ...
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None, is_best=False): output_dir = Path(args.output_dir) if (is_best == True): checkpoint_paths = [(output_dir / 'checkpoint-best.pth'), (output_dir / 'checkpoint.pth')] else: checkpoint_paths = [(outpu...
class SmoothedValue(object): 'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n ' def __init__(self, window_size=20, fmt=None): if (fmt is None): fmt = '{median:.4f} ({global_avg:.4f})' self.deque = deque(maxlen=wi...
class MetricLogger(object): def __init__(self, delimiter='\t'): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for (k, v) in kwargs.items(): if (v is None): continue if isinstance(v, torch.Tensor...
class NativeScalerWithGradNormCount(): state_dict_key = 'amp_scaler' def __init__(self): self._scaler = torch.cuda.amp.GradScaler() def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): self._scaler.scale(loss).backward(create_graph=c...
def get_grad_norm_(parameters, norm_type: float=2.0) -> torch.Tensor: if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = [p for p in parameters if (p.grad is not None)] norm_type = float(norm_type) if (len(parameters) == 0): return torch.tensor(0.0) dev...
def parse_args(): parser = argparse.ArgumentParser(description='This script extracts backbone weights from a checkpoint') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('output', type=str, help='destination file name') parser.add_argument('--checkpoint_key', default='sta...
def main(): args = parse_args() assert args.output.endswith('.pth') ck = torch.load(args.checkpoint, map_location=torch.device('cpu')) output_dict = dict(state_dict=dict()) has_backbone = False for (key, value) in ck[args.checkpoint_key].items(): if key.startswith('backbone'): ...
def get_num_layer_for_vit(var_name, num_max_layer): if (var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed')): return 0 elif var_name.startswith('backbone.patch_embed'): return 0 elif var_name.startswith('backbone.blocks'): layer_id = int(var_name.split('...
@OPTIMIZER_BUILDERS.register_module() class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor): def add_params(self, params, module, prefix='', is_dcn_module=None): "Add all parameters of module to the params list.\n The parameters of the given module will be added to the list of param\n ...
def save_checkpoint(model, filename, optimizer=None, meta=None): 'Save checkpoint to file.\n The checkpoint will have 4 fields: ``meta``, ``state_dict`` and\n ``optimizer``, ``amp``. By default ``meta`` will contain version\n and time info.\n Args:\n model (Module): Module whose params are to b...
@RUNNERS.register_module() class EpochBasedRunnerAmp(EpochBasedRunner): 'Epoch-based Runner with AMP support.\n This runner train models epoch by epoch.\n ' def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True): 'Save the checkpoin...
def parse_args(): parser = argparse.ArgumentParser(description='MMDet test (and eval) a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('--out', help='output result file in pickle format') parser.add_...
def main(): args = parse_args() assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"' if (args.eval and ...
def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the checkpoint file to resume from') ...
def main(): args = parse_args() cfg = Config.fromfile(args.config) if (args.cfg_options is not None): cfg.merge_from_dict(args.cfg_options) if cfg.get('custom_imports', None): from mmcv.utils import import_modules_from_strings import_modules_from_strings(**cfg['custom_imports']...
def parse_args(): parser = argparse.ArgumentParser(description='This script extracts backbone weights from a checkpoint') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('output', type=str, help='destination file name') parser.add_argument('--checkpoint_key', default='sta...
def main(): args = parse_args() assert args.output.endswith('.pth') ck = torch.load(args.checkpoint, map_location=torch.device('cpu')) output_dict = dict(state_dict=dict()) has_backbone = False for (key, value) in ck[args.checkpoint_key].items(): if key.startswith('backbone'): ...
@RUNNERS.register_module() class IterBasedRunnerAmp(IterBasedRunner): 'Iteration-based Runner with AMP support.\n\n This runner train models iteration by iteration.\n ' def save_checkpoint(self, out_dir, filename_tmpl='iter_{}.pth', meta=None, save_optimizer=True, create_symlink=False): "Save c...
def save_checkpoint(model, filename, optimizer=None, meta=None): 'Save checkpoint to file.\n\n The checkpoint will have 4 fields: ``meta``, ``state_dict`` and\n ``optimizer``, ``amp``. By default ``meta`` will contain version\n and time info.\n\n Args:\n model (Module): Module whose params are ...
@HOOKS.register_module() class DistOptimizerHook(OptimizerHook): 'Optimizer hook for distributed training.' def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), use_fp16=False): self.grad_clip = grad_clip self.coalesce = coalesce self.bucket_size_...
def get_num_layer_for_vit(var_name, num_max_layer): if (var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed')): return 0 elif var_name.startswith('backbone.patch_embed'): return 0 elif var_name.startswith('backbone.blocks'): layer_id = int(var_name.split('...
@OPTIMIZER_BUILDERS.register_module() class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor): def add_params(self, params, module, prefix='', is_dcn_module=None): "Add all parameters of module to the params list.\n The parameters of the given module will be added to the list of param\n ...
@PIPELINES.register_module() class SETR_Resize(object): 'Resize images & seg.\n\n This transform resizes the input image to some scale. If the input dict\n contains the key "scale", then the scale in the input dict is used,\n otherwise the specified scale in the init method is used.\n\n ``img_scale`` ...
@PIPELINES.register_module() class CenterCrop(PIPELINES.get('RandomCrop')): def __init__(self, **kwargs): super(CenterCrop, self).__init__(**kwargs) def get_crop_bbox(self, img): 'Get a center crop bounding box.' offset_h = ((max((img.shape[0] - self.crop_size[0]), 0) + 1) // 2) ...
@PIPELINES.register_module(force=True) class Collect(PIPELINES.get('Collect')): def __call__(self, results): 'Call function to collect keys in results. The keys in ``meta_keys``\n will be converted to :obj:mmcv.DataContainer.\n\n Args:\n results (dict): Result dict contains the d...
def concat_all_gather(tensor): '\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n ' tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather,...
class Dataset(Dataset):
    # NOTE(review): this class shadows its own base-class name `Dataset`
    # (presumably torch.utils.data.Dataset — confirm against the file's
    # imports); a distinct name would be less confusing.
    """Thin map-style wrapper exposing an indexable container as a dataset."""

    def __init__(self, data):
        # data: any object supporting __getitem__ and __len__.
        self.data = data

    def __getitem__(self, index):
        # Returns the raw item; no transform is applied here.
        return self.data[index]

    def __len__(self):
        return len(self.data)
def set_random_seed(seed, deterministic=False): 'Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn....
def fast_eval_wrapper(dataloaders, model, cfg, distributed): for (idx, dataloader) in enumerate(dataloaders): all_data = [] for data in tqdm(dataloader): new_data = data.copy() for (i, newd) in enumerate(new_data['img'].data): output = model.module.extract_f...
def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): 'Launch segmentor training.' logger = get_root_logger(cfg.log_level) dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset]) data_loaders = [build_dataloader(ds, cfg.data.samples_pe...
def parse_args(): parser = argparse.ArgumentParser(description='mmseg test (and eval) a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('--aug-test', action='store_true', help='Use Flip and Multi scale au...
def main(): args = parse_args() assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"' if (args.eval and ...
def parse_args(): parser = argparse.ArgumentParser(description='Train a segmentor') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--load-from', help='the checkpoint file to load weights from')...
def main(): args = parse_args() cfg = Config.fromfile(args.config) if (args.options is not None): cfg.merge_from_dict(args.options) if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True if (args.work_dir is not None): cfg.work_dir = args.work_dir e...
def has_file_allowed_extension(filename: str, extensions: Tuple[(str, ...)]) -> bool: 'Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename en...
def is_image_file(filename: str) -> bool:
    """Return True when *filename* ends with a known image extension.

    Args:
        filename: path to a file.

    Returns:
        bool: whether the file's extension is in ``IMG_EXTENSIONS``.
    """
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def find_classes(directory: str, class_num: int) -> Tuple[(List[str], Dict[(str, int)])]: 'Finds the class folders in a dataset.\n\n See :class:`DatasetFolder` for details.\n ' classes = sorted((entry.name for entry in os.scandir(directory) if entry.is_dir())) if (not classes): raise FileNot...