code
stringlengths
17
6.64M
def rlav5_resnet50(rla_channel=32):
    """Construct a RLAv5_ResNet-50 model.

    default:
    num_classes=1000, rla_channel=32, SE=False, ECA=None
    ECA: a list of kernel sizes in ECA

    Args:
        rla_channel: number of filters of the shared (recurrent) RLA conv.

    Returns:
        An RLAv5_ResNet model (randomly initialized).
    """
    print('Constructing rlav5_resnet50......')
    # Bug fix: rla_channel was accepted but never forwarded, so callers
    # could not actually change the RLA width. Forward it explicitly.
    model = RLAv5_ResNet(RLAv5_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation so spatial size is preserved
    at stride 1. No bias (a norm layer is expected to follow)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class RLAv6_Bottleneck(nn.Module):
    """ResNet bottleneck block augmented with a recurrent layer aggregation
    (RLA) hidden state ``h`` that is concatenated to the block input before
    conv1.

    forward(x, h) returns (out, y, h, identity):
        out:      block output after residual addition and ReLU
        y:        pre-residual branch output (used by the caller to update h)
        h:        RLA state, average-pooled when this block strides
        identity: the (possibly downsampled) shortcut tensor
    """

    expansion = 4  # bottleneck output-channel expansion factor

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 rla_channel=32, SE=False, ECA_size=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, reduction=16):
        super(RLAv6_Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv1 consumes the input concatenated with the RLA state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When this block strides, the RLA state must shrink to match.
        self.averagePooling = None
        if (downsample is not None) and (stride != 1):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = None
        if SE:
            self.se = SELayer(planes * self.expansion, reduction)
        self.eca = None
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if ECA_size is not None:
            self.eca = eca_layer(planes * self.expansion, int(ECA_size))

    def forward(self, x, h):
        identity = x
        x = torch.cat((x, h), dim=1)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.se is not None:
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)

        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)

        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
class RLAv6_ResNet(nn.Module):
    """ResNet backbone with recurrent layer aggregation (RLA).

    rla_channel: the number of filters of the shared (recurrent) conv in RLA
    SE: whether to use SE or not
    ECA: None: do not use ECA, or specify a 4-element list of kernel sizes
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32,
                 SE=False, ECA=None, zero_init_last_bn=True, groups=1,
                 width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(RLAv6_ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        # Kept for backward compatibility; the RLA state is now always
        # allocated on the input's device, so this flag no longer gates
        # device placement in _forward_impl.
        self.flops = False
        self.groups = groups
        self.base_width = width_per_group

        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)

        self.tanh = nn.Tanh()
        self.bn2 = norm_layer(rla_channel)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The classifier sees the last stage output concatenated with h.
        self.fc = nn.Linear(512 * block.expansion + rla_channel, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_last_bn:
            # Zero-init the last BN of each residual branch so every block
            # starts as an identity mapping.
            for m in self.modules():
                if isinstance(m, RLAv6_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        # Per-stage 1x1 projection of the shortcut into the RLA channel
        # count, and the shared (recurrent) 3x3 conv applied to h.
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        # One BN per block to renormalize the RLA state after each update.
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        batch, _, height, width = x.size()
        # Bug fix: the RLA state was created with device='cuda' unless
        # self.flops was set, which crashed on CPU inputs. Allocating on
        # x.device is correct for CPU, CUDA and FLOPs profiling alike.
        h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        for layers, bns, conv_out, recurrent_conv in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for layer, bn in zip(layers, bns):
                x, y, h, identity = layer(x, h)
                # RLA state update: normalize, squash, recur, then add the
                # projected shortcut before a final BN + ReLU.
                h = bn(h)
                h = self.tanh(h)
                h = recurrent_conv(h)
                identity_out = conv_out(identity)
                h = h + identity_out
                h = self.bn2(h)
                h = self.relu(h)
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def rlav6_resnet50(rla_channel=32):
    """Construct a RLAv6_ResNet-50 model.

    default:
    num_classes=1000, rla_channel=32, SE=False, ECA=None
    ECA: a list of kernel sizes in ECA

    Args:
        rla_channel: number of filters of the shared (recurrent) RLA conv.

    Returns:
        An RLAv6_ResNet model (randomly initialized).
    """
    print('Constructing rlav6_resnet50......')
    # Bug fix: rla_channel was accepted but never forwarded to the
    # constructor, so the argument had no effect.
    model = RLAv6_ResNet(RLAv6_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention: gates each channel by a
    value in (0, 1) computed from its global average."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Squeeze: global average pool to one scalar per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP ending in a sigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(b, c)
        gates = self.fc(squeezed).view(b, c, 1, 1)
        return x * gates.expand_as(x)
def main():
    """Profile the model named by --arch with thop and print both raw and
    human-readable FLOPs/parameter counts."""
    global args
    args = parser.parse_args()

    model = models.__dict__[args.arch]()
    print(model)

    dummy = torch.randn(1, 3, args.input_size, args.input_size)
    model.train()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    dummy = dummy.to(device)

    flops, params = profile(model, inputs=(dummy,))
    print('flops = ', flops)
    print('params = ', params)

    flops, params = clever_format([flops, params], '%.3f')
    print('flops = ', flops)
    print('params = ', params)
def clever_format(nums, format='%.2f'):
    """Format raw counts as human-readable strings with T/G/M/K/B suffixes.

    NOTE: thresholds are decimal (1e12, 1e9, ...) while divisors are binary
    (1024**k) — this mirrors the original thop behaviour exactly.

    Returns a single string for a one-element input, otherwise a tuple.
    """
    scales = (
        (1000000000000.0, 1024 ** 4, 'T'),
        (1000000000.0, 1024 ** 3, 'G'),
        (1000000.0, 1024 ** 2, 'M'),
        (1000.0, 1024, 'K'),
    )
    formatted = []
    for num in nums:
        for threshold, divisor, suffix in scales:
            if num > threshold:
                formatted.append((format % (num / divisor)) + suffix)
                break
        else:
            # Below every threshold: plain count with a 'B' (bare) suffix.
            formatted.append((format % num) + 'B')
    if len(formatted) == 1:
        return formatted[0]
    return tuple(formatted)
def main():
    """Training entry point: seed and warn as requested, resolve the world
    size, then launch one worker per GPU (multiprocessing-distributed) or a
    single worker."""
    global args
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    if args.dist_url == 'env://' and args.world_size == -1:
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = (args.world_size > 1) or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # One process per GPU on this node: the effective world size grows
        # accordingly before spawning.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training worker: builds the model, dataloaders, loss and
    optimizer, optionally resumes from a checkpoint, then runs the
    train/validate loop, logging and checkpointing from the first rank."""
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print('Use GPU: {} for training'.format(args.gpu))

    if args.distributed:
        if args.dist_url == 'env://' and args.rank == -1:
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)

    print("=> creating model '{}' for training".format(args.arch))
    if args.rla is None:
        model = models.__dict__[args.arch]()
    else:
        model = models.__dict__[args.arch](rla_channel=args.rla)

    if args.pretrained_conv1:
        # Copy (and freeze) the stem conv weights from the matching
        # torchvision model.
        pretrain_model = None
        arch = args.arch
        if arch.find('resnet50') > -1:
            pretrain_model = torchmodels.resnet50(pretrained=True)
        elif arch.find('resnet101') > -1:
            pretrain_model = torchmodels.resnet101(pretrained=True)
        elif arch.find('resnet152') > -1:
            pretrain_model = torchmodels.resnet152(pretrained=True)
        elif arch.find('resnext50_32x4d') > -1:
            pretrain_model = torchmodels.resnext50_32x4d(pretrained=True)
        elif arch.find('resnext101_32x4d') > -1:
            pretrain_model = torchmodels.resnext101_32x8d(pretrained=True)
            print('[!] resnext101_32x4d is not available in torchvision.models, proceed with resnext101_32x8d pretrained conv1')
        if pretrain_model is not None:
            model.conv1.weight = pretrain_model.conv1.weight
            model.conv1.weight.requires_grad = False
            print('[!] Using pretrained conv1')

    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the per-node batch and workers across this node's GPUs.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()

    print('Number of models parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map the checkpoint straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        eval_start = time.time()
        validate(val_loader, model, criterion, args)
        eval_end = time.time()
        print('evaluate_time (h): ', (eval_end - eval_start) / 3600)
        return

    if not os.path.exists(os.path.abspath(args.work_dir)):
        os.mkdir(os.path.abspath(args.work_dir))
    save_path = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    print('save checkpoint file and log in: ', save_path)

    loss_plot = {}
    train_acc1_plot = {}
    train_acc5_plot = {}
    val_acc1_plot = {}
    val_acc5_plot = {}
    for epoch in range(args.start_epoch, args.epochs):
        start_time = time.time()
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        loss_temp, train_acc1_temp, train_acc5_temp = train(train_loader, model, criterion, optimizer, epoch, args)
        loss_plot[epoch] = loss_temp
        train_acc1_plot[epoch] = train_acc1_temp
        train_acc5_plot[epoch] = train_acc5_temp

        acc1, acc5 = validate(val_loader, model, criterion, args)
        val_acc1_plot[epoch] = acc1
        val_acc5_plot[epoch] = acc5

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Only the globally-first process writes checkpoints and logs.
        if (not args.multiprocessing_distributed) or (args.multiprocessing_distributed and (args.rank % ngpus_per_node) == 0):
            save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best, args)
            data_save(save_path + 'loss_plot.txt', loss_plot)
            data_save(save_path + 'train_acc1.txt', train_acc1_plot)
            data_save(save_path + 'train_acc5.txt', train_acc5_plot)
            data_save(save_path + 'val_acc1.txt', val_acc1_plot)
            data_save(save_path + 'val_acc5.txt', val_acc5_plot)

        end_time = time.time()
        time_value = (end_time - start_time) / 3600
        print('-' * 80)
        print('epoch {} train_time (h): {}'.format(epoch, time_value))
        print('-' * 80)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch.

    Args:
        train_loader: iterable of (images, target) batches.
        model/criterion/optimizer: the usual training triple.
        epoch: current epoch index (used only for logging).
        args: namespace providing gpu, lr and print_freq.

    Returns:
        (avg_loss, avg_top1, avg_top5) over the epoch.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # Removed the dead local `losses_batch` (created but never used).
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))

    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)

        output = model(images)
        loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # Only rank 0 / single-GPU processes log.
        if (args.gpu is None) or (args.gpu == 0):
            if i % args.print_freq == 0:
                # Recompute the step-decayed LR purely for display.
                lr_temp = args.lr * (0.1 ** (epoch // 30))
                print('Epoch: [{0}][{1}/{2}]\tLR {3:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), lr_temp, batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
    return (losses.avg, top1.avg, top5.avg)
def validate(val_loader, model, criterion, args):
    """Evaluate `model` on the validation set; returns (avg_top1, avg_top5)."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')

    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)

            output = model(images)
            loss = criterion(output, target)

            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            if (args.gpu is None) or (args.gpu == 0):
                if i % args.print_freq == 0:
                    print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return (top1.avg, top5.avg)
def save_checkpoint(state, is_best, args, filename='checkpoint.pth.tar'):
    """Persist `state` in the run directory; mirror it to model_best.pth.tar
    whenever this checkpoint is the best so far."""
    save_dir = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    filepath = os.path.join(save_dir, filename)
    bestpath = os.path.join(save_dir, 'model_best.pth.tar')
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, bestpath)
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-printer for a set of AverageMeters during an epoch."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the prefix, a '[batch/total]' counter and every meter."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(m) for m in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # The counter field is as wide as the digit count of the total.
        width = len(str(num_batches))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Set the learning rate to the initial LR decayed by 10 every 30 epochs."""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified
    values of k; returns a list of percentages, one per k."""
    with torch.no_grad():
        maxk = max(topk)
        batch = target.size(0)
        # (batch, maxk) indices of the top predictions, transposed so row r
        # holds the r-th best guess for every sample.
        _, top_idx = output.topk(maxk, 1, True, True)
        hits = top_idx.t().eq(target.view(1, -1).expand(maxk, batch))
        results = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / batch))
        return results
def data_save(root, file):
    """Append per-epoch records to a log file, skipping epochs already logged.

    Args:
        root: path to the log file; created if missing.
        file: dict mapping epoch (int) -> value. Entries with epoch greater
            than the last logged epoch are appended as "<epoch> <value>".
    """
    if not os.path.exists(root):
        # Portable replacement for os.mknod (which can require privileges
        # on some platforms): create an empty file.
        open(root, 'a').close()
    # Context managers guarantee the handles are closed even on error.
    with open(root, 'r') as log:
        lines = log.readlines()
    if not lines:
        last_epoch = -1
    else:
        # Last line has the form "<epoch> <value>"; parse the epoch.
        last_epoch = int(lines[-1][:lines[-1].index(' ')])
    with open(root, 'a') as log:
        for epoch in file:
            if epoch > last_epoch:
                log.write(str(epoch) + ' ' + str(file[epoch]) + '\n')
def main():
    """Single-node training entry point: parse args, build the model and
    data loaders, optionally resume, then run the train/validate loop."""
    global args, best_acc1
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)

    print("=> creating model '{}' for training".format(args.arch))
    if args.rla is None:
        model = models.__dict__[args.arch]()
    else:
        model = models.__dict__[args.arch](rla_channel=args.rla)

    if args.pretrained_conv1:
        # Copy and freeze the stem conv weights from torchvision's model.
        pretrain_model = None
        if args.arch.find('mobilenetv2') > -1:
            pretrain_model = torchmodels.mobilenet_v2(pretrained=True)
        if pretrain_model is not None:
            model.conv1[0].weight = pretrain_model.features[0][0].weight
            model.conv1[0].weight.requires_grad = False
            print('[!] Using pretrained conv1')

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        # These architectures parallelize only the feature extractor.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    print('Number of models parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        eval_start = time.time()
        validate(val_loader, model, criterion)
        print('evaluate_time (h): ', (time.time() - eval_start) / 3600)
        return

    if not os.path.exists(os.path.abspath(args.work_dir)):
        os.mkdir(os.path.abspath(args.work_dir))
    directory = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    if not os.path.exists(directory):
        os.mkdir(directory)
    print('save checkpoint file and log in: ', directory)

    loss_plot = {}
    train_acc1_plot = {}
    train_acc5_plot = {}
    val_acc1_plot = {}
    val_acc5_plot = {}
    for epoch in range(args.start_epoch, args.epochs):
        start_time = time.time()
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        loss_temp, train_acc1_temp, train_acc5_temp = train(train_loader, model, criterion, optimizer, epoch)
        loss_plot[epoch] = loss_temp
        train_acc1_plot[epoch] = train_acc1_temp
        train_acc5_plot[epoch] = train_acc5_temp

        acc1, acc5 = validate(val_loader, model, criterion)
        val_acc1_plot[epoch] = acc1
        val_acc5_plot[epoch] = acc5

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({'epoch': epoch + 1, 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)

        data_save(directory + 'loss_plot.txt', loss_plot)
        data_save(directory + 'train_acc1.txt', train_acc1_plot)
        data_save(directory + 'train_acc5.txt', train_acc5_plot)
        data_save(directory + 'val_acc1.txt', val_acc1_plot)
        data_save(directory + 'val_acc5.txt', val_acc5_plot)

        time_value = (time.time() - start_time) / 3600
        print('-' * 80)
        print('epoch {} train_time (h): {}'.format(epoch, time_value))
        print('-' * 80)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch; returns (avg_loss, avg_top1, avg_top5).

    Uses the module-level `args` for device placement, LR display and
    print frequency.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Removed dead locals from the original: `losses_batch` (never written)
    # and `directory` (computed but never used).

    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

        output = model(input)
        loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if (args.gpu is None) or (args.gpu == 0):
            if i % args.print_freq == 0:
                # Recompute the exponentially-decayed LR purely for display.
                lr_temp = args.lr * (0.98 ** epoch)
                print('Epoch: [{0}][{1}/{2}]\tLR {3:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), lr_temp, batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
    return (losses.avg, top1.avg, top5.avg)
def validate(val_loader, model, criterion):
    """Evaluate `model` on the validation set; returns (avg_top1, avg_top5).

    Uses the module-level `args` for device placement and print frequency.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)

            output = model(input)
            loss = criterion(output, target)

            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return (top1.avg, top5.avg)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save `state` in the run directory; copy to model_best.pth.tar on a
    new best. Relies on the module-level `args` for work_dir/arch/action."""
    run_dir = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    target = run_dir + filename
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, run_dir + 'model_best.pth.tar')
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate to the initial LR decayed by 2% every epoch.

    (Docstring fix: the original claimed "decayed by 10 every 30 epochs",
    which describes the other script's step schedule, not this 0.98**epoch
    exponential schedule.) Uses the module-level `args` for the base LR.
    """
    lr = args.lr * (0.98 ** epoch)
    print('lr = ', lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy@k for the specified values of k; returns a list
    of percentages, one per k."""
    with torch.no_grad():
        maxk = max(topk)
        batch = target.size(0)
        # Row r of `hits` marks whether the r-th best guess matched.
        _, top_idx = output.topk(maxk, 1, True, True)
        hits = top_idx.t().eq(target.view(1, -1).expand(maxk, batch))
        results = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / batch))
        return results
def data_save(root, file):
    """Append per-epoch records to a log file, skipping epochs already logged.

    Args:
        root: path to the log file; created if missing.
        file: dict mapping epoch (int) -> value. Entries with epoch greater
            than the last logged epoch are appended as "<epoch> <value>".
    """
    if not os.path.exists(root):
        # Portable replacement for os.mknod (which can require privileges
        # on some platforms): create an empty file.
        open(root, 'a').close()
    # Context managers guarantee the handles are closed even on error.
    with open(root, 'r') as log:
        lines = log.readlines()
    if not lines:
        last_epoch = -1
    else:
        # Last line has the form "<epoch> <value>"; parse the epoch.
        last_epoch = int(lines[-1][:lines[-1].index(' ')])
    with open(root, 'a') as log:
        for epoch in file:
            if epoch > last_epoch:
                log.write(str(epoch) + ' ' + str(file[epoch]) + '\n')
def embed(params, data, policy, states, k=100):
    # Embed a policy by the actions it produces on a fixed set of probe states.
    # NOTE(review): only the 'a_s' (action-state) embedding mode is handled
    # here; for any other params['embedding'] value the function falls
    # through and implicitly returns None — presumably other modes are
    # handled by the caller or unused. `data` and `k` are unused in this
    # mode — TODO confirm against callers.
    if (params['embedding'] == 'a_s'):
        # Stack the policy's outputs over all probe states into one array
        # along axis 0.
        embedding = np.concatenate([policy.forward(x, eval=False) for x in states], axis=0)
        return embedding
def get_experiment(params):
    """Fill `params` with per-environment hyperparameter defaults.

    The dict is modified in place and also returned; unknown environments
    are returned unchanged.
    """
    # Values that differ per environment; everything else is shared.
    per_env = {
        'HalfCheetah-v2': {'h_dim': 32, 'steps': 1000},
        'HalfCheetah-v1': {'h_dim': 32, 'steps': 1000},
        'Walker2d-v2': {'h_dim': 32, 'steps': 1000},
        'Swimmer-v2': {'h_dim': 16, 'steps': 1000},
        'BipedalWalker-v2': {'h_dim': 32, 'steps': 1600},
        'point-v0': {'h_dim': 16, 'steps': 50},
    }
    env = params['env_name']
    if env in per_env:
        params.update({'layers': 2, 'sensings': 100, 'learning_rate': 0.05, 'sigma': 0.1})
        params.update(per_env[env])
    return params
class Learner(object):
    """Coordinates a population of ES-trained policies via Ray workers.

    Keeps, per agent: the policy, its Adam moment estimates, its behavioral
    embedding, and its reward history; chooses which agent to train next
    from a mixture of reward rank and behavioral-diversity rank.
    """

    def __init__(self, params):
        params['zeros'] = False
        # One policy per agent, each seeded differently.
        self.agents = {i: get_policy(params, (params['seed'] + (1000 * i))) for i in range(params['num_agents'])}
        self.timesteps = 0
        self.w_reward = 1
        self.w_size = 0
        self.dists = 0
        # Per-agent Adam state: [first moment m, second moment v].
        self.adam_params = {i: [0, 0] for i in range(params['num_agents'])}
        self.buffer = []
        self.states = []
        self.embeddings = {i: [] for i in range(params['num_agents'])}
        # -9999 is a sentinel for "no reward recorded yet".
        self.best = {i: (- 9999) for i in range(params['num_agents'])}
        self.reward = {i: [(- 9999)] for i in range(params['num_agents'])}
        self.min_dist = 0
        self.num_workers = params['num_workers']
        self.init_workers(params)

    def init_workers(self, params):
        """(Re)create the shared noise table and the pool of Ray workers."""
        # The noise table lives once in the object store; workers receive
        # only its object id.
        deltas_id = create_shared_noise.remote()
        self.deltas = SharedNoiseTable(ray.get(deltas_id), seed=(params['seed'] + 3))
        self.workers = [Worker.remote((params['seed'] + (7 * i)), env_name=params['env_name'], policy=params['policy'], h_dim=params['h_dim'], layers=params['layers'], deltas=deltas_id, rollout_length=params['steps'], delta_std=params['sigma'], num_evals=params['num_evals'], ob_filter=params['ob_filter']) for i in range(params['num_workers'])]

    def get_agent(self):
        """Load the selected agent's state into the working attributes."""
        self.policy = deepcopy(self.agents[self.agent])
        self.embedding = self.embeddings[self.agent].copy()
        self.m = self.adam_params[self.agent][0]
        self.v = self.adam_params[self.agent][1]

    def update_agent(self):
        """Write the working attributes back into the selected agent's slots."""
        self.agents[self.agent] = deepcopy(self.policy)
        self.embeddings[self.agent] = self.embedding.copy()
        self.adam_params[self.agent] = [self.m, self.v]

    def update_embeddings(self, params, data=[]):
        """Recompute every agent's embedding on the current probe states.

        NOTE(review): the mutable default ``data=[]`` is only safe because
        it is never mutated here.
        """
        for j in range(params['num_agents']):
            if (params['embedding'] == 'a_s'):
                self.embeddings[j] = [embed(params, [], self.agents[j], self.selected)]
            else:
                self.embeddings[j] = [embed(params, s, self.agents[j], self.selected) for s in data[j][1]]

    def calc_pairwise_dists(self, params):
        """Compute pairwise embedding distances, their mean/min, and a
        normalised per-agent distance vector used for selection."""
        dists = np.zeros([params['num_agents'], params['num_agents']])
        min_dist = 999
        for i in range(params['num_agents']):
            for j in range(params['num_agents']):
                dists[i][j] = np.linalg.norm((self.embeddings[i][0] - self.embeddings[j][0]))
                # `&` on two boolean operands behaves like `and` here.
                if ((i != j) & (dists[i][j] < min_dist)):
                    min_dist = dists[i][j]
        self.dists = np.mean(dists)
        self.min_dist = min_dist
        # Mean distance per agent, normalised into a probability vector.
        self.dist_vec = np.mean(dists, axis=1)
        self.dist_vec /= np.sum(self.dist_vec)

    def select_agent(self):
        """Sample the next agent to train.

        Once every agent has at least one real reward, selection mixes the
        rank of recent rewards (best of the last 5) with the rank of
        behavioral diversity; before that it uses diversity alone.
        """
        if (min([x[(- 1)] for x in list(self.reward.values())]) > (- 9999)):
            reward_vec = rankdata([max(x[(- 5):]) for x in list(self.reward.values())])
            reward_vec /= np.sum(reward_vec)
            dist_vec = rankdata(self.dist_vec)
            dist_vec /= np.sum(dist_vec)
            vec = ((dist_vec + reward_vec) / 2)
            self.agent = np.argmax(np.random.multinomial(1, vec))
        else:
            self.agent = np.argmax(np.random.multinomial(1, self.dist_vec))
def get_policy(params, seed=None):
    """Construct a FullyConnected policy from *params*.

    If *seed* is given it overwrites params['seed'] before construction.
    """
    # BUG FIX: `if seed:` silently ignored an explicit seed of 0; testing
    # against None honours every caller-supplied seed value.
    if seed is not None:
        params['seed'] = seed
    return FullyConnected(params, params['seed'])
class FullyConnected(object):
    """Fully-connected tanh policy backed by numpy weight dicts.

    All weights (hidden matrices, then biases) are mirrored in the flat
    vector ``self.params`` so evolutionary updates can treat the policy as
    a single parameter vector.
    """

    def __init__(self, params, seed=0):
        np.random.seed(seed)
        self.layers = params['layers']
        self.hidden = {}
        self.bias = {}
        # Observation filter normalises inputs (e.g. running mean/std).
        self.observation_filter = get_filter(params['ob_filter'], shape=(params['ob_dim'],))
        self.update_filter = True
        # Input layer: ob_dim -> h_dim, scaled-down Gaussian init.
        self.hidden['h1'] = (np.random.randn(params['h_dim'], params['ob_dim']) / np.sqrt((params['h_dim'] * params['ob_dim'])))
        self.bias['b1'] = (np.random.randn(params['h_dim']) / np.sqrt(params['h_dim']))
        if (params['layers'] > 1):
            for i in range(2, (params['layers'] + 1)):
                self.hidden[('h%s' % str(i))] = (np.random.randn(params['h_dim'], params['h_dim']) / np.sqrt((params['h_dim'] * params['h_dim'])))
                self.bias[('b%s' % str(i))] = (np.random.randn(params['h_dim']) / np.sqrt(params['h_dim']))
        # Output layer keyed 'h999' so it stays distinct from the hidden stack.
        self.hidden['h999'] = (np.random.randn(params['ac_dim'], params['h_dim']) / np.sqrt((params['ac_dim'] * params['h_dim'])))
        self.w_hidden = np.concatenate([self.hidden[x].reshape(self.hidden[x].size) for x in self.hidden.keys()])
        self.w_bias = np.concatenate([self.bias[x].reshape(self.bias[x].size) for x in self.bias.keys()])
        self.params = np.concatenate((self.w_hidden, self.w_bias))
        self.used = 1
        self.N = self.params.size

    def get_observation_filter(self):
        return self.observation_filter

    def get_weights_plus_stats(self):
        """Return [flat weights, filter mean, filter std] for checkpointing."""
        (mu, std) = self.observation_filter.get_stats()
        # BUG FIX: the original referenced the undefined attribute
        # ``self.weights`` (AttributeError); the flat parameter vector is
        # ``self.params``.  dtype=object because the three entries have
        # different shapes (ragged).
        aux = np.asarray([self.params, mu, std], dtype=object)
        return aux

    def forward(self, x, eval=True):
        """Map observation *x* to an action in [-1, 1]^ac_dim."""
        x = self.observation_filter(x, update=self.update_filter)
        self.used = 0
        a = x.copy()
        for i in range(1, (self.layers + 1)):
            a = np.tanh((np.dot(self.hidden[('h%s' % str(i))], a) + self.bias[('b%s' % str(i))]))
        action = np.tanh(np.dot(self.hidden['h999'], a))
        return action

    def update(self, w):
        """Overwrite all weights from the flat vector *w* (layout: hidden
        matrices in insertion order, then biases) and refresh the caches."""
        w_hidden = w[:self.w_hidden.size]
        w = w[self.w_hidden.size:]
        w_bias = w
        # hidden holds h1..h<layers> plus h999, so len-1 covers h1..h<layers>.
        for i in range(1, len(self.hidden.keys())):
            update = w_hidden[:self.hidden[('h%s' % i)].size]
            w_hidden = w_hidden[self.hidden[('h%s' % i)].size:]
            self.hidden[('h%s' % i)] = update.reshape(self.hidden[('h%s' % i)].shape)
        self.hidden['h999'] = w_hidden.reshape(self.hidden['h999'].shape)
        for i in range(1, (len(self.bias.keys()) + 1)):
            update = w_bias[:self.bias[('b%s' % i)].size]
            w_bias = w_bias[self.bias[('b%s' % i)].size:]
            self.bias[('b%s' % i)] = update.reshape(self.bias[('b%s' % i)].shape)
        self.w_hidden = np.concatenate([self.hidden[x].reshape(self.hidden[x].size) for x in self.hidden.keys()])
        self.w_bias = np.concatenate([self.bias[x].reshape(self.bias[x].size) for x in self.bias.keys()])
        self.params = np.concatenate((self.w_hidden, self.w_bias))

    def rollout(self, env, steps, incl_data=False, seed=0, train=True):
        """Run one episode of at most *steps* steps.

        Returns the total reward, plus the [state, reward, action] trace
        when incl_data=True.
        """
        # Envs exposing `tasks` manage their own seeding; skip env.seed then.
        if (not hasattr(env, 'tasks')):
            env.seed(seed)
        state = env.reset()
        env._max_episode_steps = steps
        total_reward = 0
        done = False
        data = []
        while (not done):
            action = self.forward(state)
            action = np.clip(action, env.action_space.low[0], env.action_space.high[0])
            action = action.reshape(len(action))
            (state, reward, done, _) = env.step(action)
            total_reward += reward
            data.append([state, reward, action])
        self.observation_filter.stats_increment()
        if incl_data:
            return (total_reward, data)
        else:
            return total_reward
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo point-mass environment whose reward is the negative Euclidean
    distance from the agent's (x, y) position to a fixed goal at (25, 0)."""

    def __init__(self):
        # frame_skip=2: each env step advances the simulation by two frames.
        mujoco_env.MujocoEnv.__init__(self, 'point.xml', 2)
        utils.EzPickle.__init__(self)

    def step(self, action):
        # Actions are clipped to the unit box before simulation.
        action = np.clip(action, (- 1.0), 1.0)
        self.do_simulation(action, self.frame_skip)
        next_obs = self._get_obs()
        # The first two observation entries are the (x, y) position (qpos).
        qpos = next_obs[:2]
        goal = [25.0, 0.0]
        reward = (- np.linalg.norm((goal - qpos)))
        # The episode never terminates from inside the env (done is always False).
        return (next_obs, reward, False, {})

    def _get_obs(self):
        # Observation = generalized positions followed by velocities.
        return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])

    def reset_model(self):
        # Randomise the start state slightly around the model's initial pose.
        qpos = (self.init_qpos + self.np_random.uniform(low=(- 0.1), high=0.1, size=self.model.nq))
        qvel = (self.init_qvel + (self.np_random.randn(self.model.nv) * 0.1))
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.distance = (self.model.stat.extent * 0.5)
def select_states(master, params, states):
    """Return at most K probe states, where K is parsed from
    params['states'] (format "<mode>-<K>").

    When more than K states are available, a uniform random subset of size
    K is drawn; otherwise the original list is returned unchanged.
    """
    limit = int(params['states'].split('-')[1])
    if len(states) <= limit:
        return states
    return sample(states, limit)
def reset_ray(master, params):
    """Tear down and re-initialise the Ray runtime.

    Called periodically (every params['flush'] iterations) to flush Ray's
    GCS / plasma store; the caller must re-create its workers afterwards.
    """
    ray.disconnect()
    ray.shutdown()
    # Give the old Ray processes time to exit before re-initialising.
    time.sleep(5)
    del os.environ['RAY_USE_NEW_GCS']
    ray.init(plasma_directory='/tmp')
    os.environ['RAY_USE_NEW_GCS'] = 'True'
    # Flush the GCS aggressively so its memory footprint stays bounded.
    flush_policy = ray.experimental.SimpleGcsFlushPolicy(flush_period_secs=0.1)
    ray.experimental.set_flushing_policy(flush_policy)
def train(params):
    """Main ES training loop over a population of agents.

    Rolls out and embeds every agent, picks one via reward/diversity ranks,
    applies an Adam-smoothed ES gradient step, and logs per-iteration
    metrics to data/<dir>/results/Seed<seed>.csv.
    """
    env = gym.make(params['env_name'])
    params['ob_dim'] = env.observation_space.shape[0]
    params['ac_dim'] = env.action_space.shape[0]
    master = Learner(params)
    n_eps = 0
    n_iter = 0
    ts_cumulative = 0
    (ts, rollouts, rewards, max_rwds, dists, min_dists, agents, lambdas) = ([0], [0], [], [], [], [], [], [])
    params['num_sensings'] = params['sensings']
    master.agent = 0
    # Initial rollout of every agent to seed the probe-state pool and embeddings.
    population = [master.agents[x].rollout(env, params['steps'], incl_data=True) for x in master.agents.keys()]
    all_states = [s[0] for x in population for s in x[1]]
    master.selected = select_states(master, params, all_states)
    master.update_embeddings(params, population)
    master.calc_pairwise_dists(params)
    master.select_agent()
    master.get_agent()
    reward = master.policy.rollout(env, params['steps'], incl_data=False)
    rewards.append(reward)
    agents.append(master.agent)
    dists.append(master.dists)
    max_reward = reward
    max_rwds.append(max_reward)
    min_dists.append(master.min_dist)
    # w_nov < 0 requests an adaptive novelty weight chosen by a bandit.
    if (params['w_nov'] < 0):
        bb = BayesianBandits()
        params['w_nov'] = 0
    lambdas.append(params['w_nov'])
    while (n_iter < params['max_iter']):
        print(('Iter: %s, Eps: %s, Mean: %s, Max: %s, Best: %s, MeanD: %s, MinD: %s, Lam: %s' % (n_iter, n_eps, np.round(reward, 4), np.round(max_reward, 4), master.agent, np.round(master.dists, 4), np.round(master.min_dist, 4), params['w_nov'])))
        # Re-select the training agent each iteration after the first.
        if ((n_iter > 0) & (params['num_agents'] > 1)):
            master.calc_pairwise_dists(params)
            master.select_agent()
            master.get_agent()
        params['n_iter'] = n_iter
        if (params['num_agents'] > 1):
            (gradient, timesteps) = population_update(master, params)
            # Antithetic sampling: two rollouts per sensing, per agent.
            n_eps += ((2 * params['num_sensings']) * params['num_agents'])
        else:
            (gradient, timesteps) = individual_update(master, params)
            n_eps += (2 * params['num_sensings'])
        ts_cumulative += timesteps
        all_states += master.states
        # Keep the probe-state pool bounded by subsampling.
        if (params['num_sensings'] < len(all_states)):
            all_states = sample(all_states, params['num_sensings'])
        # Normalise the gradient's scale relative to the parameter count.
        gradient /= ((np.linalg.norm(gradient) / master.policy.N) + 1e-08)
        n_iter += 1
        update = Adam(gradient, master, params['learning_rate'], n_iter)
        (rwds, trajectories) = ([], [])
        if (params['num_evals'] > 0):
            seeds = [int((np.random.uniform() * 10000)) for _ in range(params['num_evals'])]
        for i in range(params['num_agents']):
            master.agent = i
            master.get_agent()
            # Agent i owns slice i of the concatenated update vector.
            master.policy.update((master.policy.params + update[(i * master.policy.N):((i + 1) * master.policy.N)]))
            if (params['num_evals'] > 0):
                reward = 0
                for j in range(params['num_evals']):
                    (r, traj) = master.policy.rollout(env, params['steps'], incl_data=True, seed=seeds[j])
                    reward += r
                reward /= params['num_evals']
            else:
                (reward, traj) = master.policy.rollout(env, params['steps'], incl_data=True)
            rwds.append(reward)
            trajectories.append(traj)
            if (reward > master.best[i]):
                master.best[i] = reward
                np.save(('data/%s/weights/Seed%s_Agent%s' % (params['dir'], params['seed'], i)), master.policy.params)
            master.reward[i].append(reward)
            master.update_agent()
        reward = np.mean(rwds)
        max_reward = max(rwds)
        # The best-performing agent of this iteration becomes the "current" one.
        traj = trajectories[np.argmax(rwds)]
        master.agent = np.argmax(rwds)
        master.selected = select_states(master, params, all_states)
        master.update_embeddings(params)
        master.embedding = embed(params, traj, master.policy, master.selected)
        rewards.append(reward)
        max_rwds.append(max_reward)
        master.reward[master.agent].append(reward)
        if (reward > master.best[master.agent]):
            master.best[master.agent] = reward
            np.save(('data/%s/weights/Seed%s_Agent%s' % (params['dir'], params['seed'], master.agent)), master.policy.params)
        # bb only exists when the novelty weight is adaptive (see above);
        # NameError is the deliberate opt-out when it is fixed.
        try:
            bb.update_dists(reward)
            params['w_nov'] = bb.sample()
        except NameError:
            pass
        lambdas.append(params['w_nov'])
        rollouts.append(n_eps)
        agents.append(master.agent)
        dists.append(master.dists)
        min_dists.append(master.min_dist)
        ts.append(ts_cumulative)
        master.update_agent()
        # Periodically restart Ray to flush its object store.
        if ((n_iter % params['flush']) == 0):
            reset_ray(master, params)
            master.init_workers(params)
        # Results are (re)written every iteration so partial runs survive.
        # NOTE(review): the flattened source is ambiguous about whether this
        # write sits inside the while loop — confirm against the original.
        out = pd.DataFrame({'Rollouts': rollouts, 'Reward': rewards, 'Max': max_rwds, 'Timesteps': ts, 'Dists': dists, 'Min_Dist': min_dists, 'Agent': agents, 'Lambda': lambdas})
        out.to_csv(('data/%s/results/Seed%s.csv' % (params['dir'], params['seed'])), index=False)
def main():
    """CLI entry point: parse arguments, apply per-env presets, set up Ray
    and the output directory tree, then launch training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='point-v0')
    parser.add_argument('--num_agents', '-na', type=int, default=5)
    parser.add_argument('--seed', '-sd', type=int, default=0)
    parser.add_argument('--max_iter', '-it', type=int, default=2000)
    parser.add_argument('--policy', '-po', type=str, default='FC')
    parser.add_argument('--embedding', '-em', type=str, default='a_s')
    parser.add_argument('--num_workers', '-nw', type=int, default=4)
    parser.add_argument('--filename', '-f', type=str, default='')
    parser.add_argument('--num_evals', '-ne', type=int, default=0)
    parser.add_argument('--flush', '-fl', type=int, default=1000)
    parser.add_argument('--ob_filter', '-ob', type=str, default='MeanStdFilter')
    # w_nov < 0 means "adapt the novelty weight automatically".
    parser.add_argument('--w_nov', '-wn', type=float, default=(- 1))
    parser.add_argument('--dpp_kernel', '-ke', type=str, default='rbf')
    parser.add_argument('--states', '-ss', type=str, default='random-20')
    parser.add_argument('--update_states', '-us', type=int, default=20)
    args = parser.parse_args()
    params = vars(args)
    params = get_experiment(params)
    ray.init()
    os.environ['RAY_USE_NEW_GCS'] = 'True'
    # Include the state-selection mode in the run name only when novelty is on.
    state_word = [(str(params['states'].split('-')[0]) if (params['w_nov'] > 0) else '')][0]
    # The run directory name encodes the full experiment configuration.
    params['dir'] = ((((((((((((((((params['env_name'] + '_Net') + str(params['layers'])) + 'x') + str(params['h_dim'])) + '_Agents') + str(params['num_agents'])) + '_Novelty') + str(params['w_nov'])) + state_word) + 'kernel_') + params['dpp_kernel']) + '_lr') + str(params['learning_rate'])) + '_') + params['filename']) + params['ob_filter'])
    if (not os.path.exists(('data/' + params['dir']))):
        os.makedirs(('data/' + params['dir']))
        os.makedirs((('data/' + params['dir']) + '/weights'))
        os.makedirs((('data/' + params['dir']) + '/results'))
    train(params)
def batched_weighted_sum(weights, vecs, batch_size):
    """Compute sum_i weights[i] * vecs[i] in batches of *batch_size*.

    Returns (total, count) where count is the number of items summed.
    """
    total = 0
    count = 0
    batches = zip(itergroups(weights, batch_size), itergroups(vecs, batch_size))
    for w_batch, v_batch in batches:
        assert len(w_batch) == len(v_batch) <= batch_size
        # np.dot of (k,) weights with (k, d) vectors accumulates the batch.
        total += np.dot(np.asarray(w_batch, dtype=np.float64),
                        np.asarray(v_batch, dtype=np.float64))
        count += len(w_batch)
    return (total, count)
def itergroups(items, group_size):
    """Yield successive tuples of up to *group_size* elements from *items*.

    The final tuple may be shorter when len(items) is not a multiple of
    group_size.
    """
    assert group_size >= 1
    pending = []
    for item in items:
        pending.append(item)
        if len(pending) == group_size:
            yield tuple(pending)
            pending.clear()
    if pending:
        yield tuple(pending)
def evaluate(env, params, p):
    """Run one full data-collecting rollout of policy *p* in *env*."""
    episode_length = params['steps']
    return p.rollout(env, episode_length, incl_data=True)
def Adam(dx, learner, learning_rate, t, eps=1e-08, beta1=0.9, beta2=0.999):
    """One Adam step for gradient *dx* at (1-based) timestep *t*.

    The first/second moment estimates live on *learner* (.m, .v) and are
    updated in place; the bias-corrected parameter update is returned.
    """
    learner.m = beta1 * learner.m + (1 - beta1) * dx
    learner.v = beta2 * learner.v + (1 - beta2) * (dx ** 2)
    # Bias correction counteracts the zero-initialised moments.
    m_hat = learner.m / (1 - beta1 ** t)
    v_hat = learner.v / (1 - beta2 ** t)
    return (learning_rate * m_hat) / (np.sqrt(v_hat) + eps)
@ray.remote
def create_shared_noise():
    """Create one large array of Gaussian noise shared by all workers.

    Workers index into this single table instead of communicating their
    random perturbations explicitly; the seed is fixed so every process
    sees identical noise.
    """
    rng = np.random.RandomState(12345)
    return rng.randn(2500000).astype(np.float64)
class SharedNoiseTable(object):
    """Read-only view over a pre-generated float64 noise array.

    Perturbations are contiguous slices identified solely by a start
    index, so no noise data ever needs to travel between processes.
    """

    def __init__(self, noise, seed=11):
        self.rg = np.random.RandomState(seed)
        self.noise = noise
        assert self.noise.dtype == np.float64

    def get(self, i, dim):
        """Return the slice of length *dim* starting at index *i*."""
        return self.noise[i:i + dim]

    def sample_index(self, dim):
        """Draw a uniformly random valid start index for a length-*dim* slice."""
        return self.rg.randint(0, len(self.noise) - dim + 1)

    def get_delta(self, dim):
        """Sample a random perturbation; returns (start_index, noise_slice)."""
        start = self.sample_index(dim)
        return (start, self.get(start, dim))
def rbf_kernel(x, y, sigma):
    """Gaussian (RBF) kernel: exp(-||x - y||^2 / (2 * sigma^2))."""
    sq_dist = np.linalg.norm(x - y) ** 2
    return np.exp(-sq_dist / (2 * sigma ** 2))
def rbf_kernel_grad(x, y, sigma):
    """Kernel-gradient term (x - y) / sigma^2 * rbf_kernel(x, y, sigma).

    Sign convention follows the caller's usage (this is the derivative of
    the RBF kernel up to sign).
    """
    scale = rbf_kernel(x, y, sigma) / (sigma ** 2)
    return (x - y) * scale
class Filter(object):
    """Abstract interface for (possibly stateful) observation processors."""

    def update(self, other, *args, **kwargs):
        """Merge "new state" from *other* into this filter."""
        raise NotImplementedError

    def copy(self):
        """Return a new filter carrying the same state as this one.

        Returns:
            copy (Filter): Copy of self
        """
        raise NotImplementedError

    def sync(self, other):
        """Overwrite all of this filter's state with *other*'s state."""
        raise NotImplementedError
class NoFilter(Filter):
    """Identity filter: passes observations through unchanged."""

    def __init__(self, *args):
        pass

    def __call__(self, x, update=True):
        # No normalisation — just coerce the input to a float64 array.
        return np.asarray(x, dtype=np.float64)

    def update(self, other, *args, **kwargs):
        pass

    def copy(self):
        # Stateless, so sharing the same instance is safe.
        return self

    def sync(self, other):
        pass

    def stats_increment(self):
        pass

    def clear_buffer(self):
        pass

    def get_stats(self):
        # Mean 0, std 1: the identity normalisation.
        return (0, 1)

    @property
    def mean(self):
        return 0

    @property
    def var(self):
        return 1

    @property
    def std(self):
        return 1
class RunningStat(object):
    """Numerically stable running mean/variance (Welford's algorithm).

    Supports pushing single samples and merging two accumulators with the
    parallel-variance formula (see :meth:`update`).
    """

    def __init__(self, shape=None):
        self._n = 0                                  # number of samples seen
        self._M = np.zeros(shape, dtype=np.float64)  # running mean
        self._S = np.zeros(shape, dtype=np.float64)  # sum of squared deviations
        # DEAD CODE REMOVED: the original also allocated `_M2` and computed
        # `deltaM2` in push(), but neither was ever read (nor copied).

    def copy(self):
        """Return an independent copy of this accumulator."""
        other = RunningStat()
        other._n = self._n
        other._M = np.copy(self._M)
        other._S = np.copy(self._S)
        return other

    def push(self, x):
        """Fold one sample *x* (same shape as the accumulator) into the stats."""
        x = np.asarray(x)
        assert (x.shape == self._M.shape), 'x.shape = {}, self.shape = {}'.format(x.shape, self._M.shape)
        n1 = self._n
        self._n += 1
        if (self._n == 1):
            self._M[...] = x
        else:
            delta = (x - self._M)
            self._M[...] += (delta / self._n)
            self._S[...] += (((delta * delta) * n1) / self._n)

    def update(self, other):
        """Merge another RunningStat into this one (parallel merge)."""
        n1 = self._n
        n2 = other._n
        n = (n1 + n2)
        delta = (self._M - other._M)
        delta2 = (delta * delta)
        M = (((n1 * self._M) + (n2 * other._M)) / n)
        S = ((self._S + other._S) + (((delta2 * n1) * n2) / n))
        self._n = n
        self._M = M
        self._S = S

    def __repr__(self):
        return '(n={}, mean_mean={}, mean_std={})'.format(self.n, np.mean(self.mean), np.mean(self.std))

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # With fewer than two samples the sample variance is undefined; the
        # original convention (return mean**2) is preserved.
        return ((self._S / (self._n - 1)) if (self._n > 1) else np.square(self._M))

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class MeanStdFilter(Filter):
    """Online observation normaliser: subtracts a running mean and divides
    by a running std, both tracked with :class:`RunningStat`.

    Two accumulators are kept: ``rs`` (authoritative statistics) and
    ``buffer`` (deltas since the last sync), so workers can ship only their
    recent statistics to a central filter.
    """

    def __init__(self, shape, demean=True, destd=True):
        self.shape = shape
        self.demean = demean
        self.destd = destd
        self.rs = RunningStat(shape)
        self.buffer = RunningStat(shape)
        # Snapshot of the statistics actually applied in __call__;
        # refreshed explicitly by stats_increment().
        self.mean = np.zeros(shape, dtype=np.float64)
        self.std = np.ones(shape, dtype=np.float64)

    def clear_buffer(self):
        self.buffer = RunningStat(self.shape)
        return

    def update(self, other, copy_buffer=False):
        """Takes another filter and only applies the information from the
        buffer.
        Using notation `F(state, buffer)`
        Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
        `update` modifies `Filter1` to `Filter1(x1 + yt, y1)`
        If `copy_buffer`, then `Filter1` is modified to
        `Filter1(x1 + yt, yt)`.
        """
        self.rs.update(other.buffer)
        if copy_buffer:
            self.buffer = other.buffer.copy()
        return

    def copy(self):
        """Returns a copy of Filter."""
        other = MeanStdFilter(self.shape)
        other.demean = self.demean
        other.destd = self.destd
        other.rs = self.rs.copy()
        other.buffer = self.buffer.copy()
        return other

    def sync(self, other):
        """Syncs all fields together from other filter.
        Using notation `F(state, buffer)`
        Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
        `sync` modifies `Filter1` to `Filter1(x2, yt)`
        """
        assert (other.shape == self.shape), "Shapes don't match!"
        self.demean = other.demean
        self.destd = other.destd
        self.rs = other.rs.copy()
        self.buffer = other.buffer.copy()
        return

    def __call__(self, x, update=True):
        x = np.asarray(x, dtype=np.float64)
        if update:
            if (len(x.shape) == (len(self.rs.shape) + 1)):
                # Batched input: push each row separately.
                for i in range(x.shape[0]):
                    self.rs.push(x[i])
                    self.buffer.push(x[i])
            else:
                self.rs.push(x)
                self.buffer.push(x)
        if self.demean:
            x = (x - self.mean)
        if self.destd:
            x = (x / (self.std + 1e-08))
        return x

    def stats_increment(self):
        """Refresh the applied mean/std snapshot from the accumulator."""
        self.mean = self.rs.mean
        self.std = self.rs.std
        # Near-zero std would blow up the normalisation; inf instead makes
        # that feature effectively vanish.
        self.std[(self.std < 1e-07)] = float('inf')
        return

    def get_stats(self):
        return (self.rs.mean, (self.rs.std + 1e-08))

    def __repr__(self):
        # BUG FIX: the original template had six '{}' placeholders for four
        # arguments, so calling repr() raised IndexError.
        return 'MeanStdFilter({}, {}, {}, {})'.format(self.shape, self.demean, self.rs, self.buffer)
def get_filter(filter_config, shape=None):
    """Factory for observation filters by name.

    'MeanStdFilter' -> running-normalisation filter of the given *shape*;
    'NoFilter' -> identity filter.  Anything else raises Exception.
    """
    if filter_config == 'NoFilter':
        return NoFilter()
    if filter_config == 'MeanStdFilter':
        return MeanStdFilter(shape)
    raise Exception('Unknown observation_filter: ' + str(filter_config))
@ray.remote
class Worker(object):
    """Ray actor that evaluates antithetic ES perturbations of a policy."""
    # Importing here registers the custom envs with gym inside the actor
    # process as well.
    import simpleenvs

    def __init__(self, env_seed, env_name='', shift=0, policy='FC', h_dim=64, layers=2, deltas=None, rollout_length=1000, delta_std=0.02, num_evals=0, ob_filter='NoFilter'):
        self.params = {}
        self.env_name = env_name
        self.params['env_name'] = env_name
        self.env = gym.make(env_name)
        self.params['ob_dim'] = self.env.observation_space.shape[0]
        self.params['ac_dim'] = self.env.action_space.shape[0]
        self.env.seed(0)
        self.params['h_dim'] = h_dim
        self.steps = rollout_length
        self.params['zeros'] = True
        self.params['seed'] = 0
        self.params['layers'] = layers
        # Constant subtracted from every per-step reward (ARS-style shift).
        self.shift = shift
        self.sigma = 1
        self.num_evals = num_evals
        self.params['ob_filter'] = ob_filter
        self.policy = get_policy(self.params)
        # `deltas` is the shared noise array fetched from the object store.
        self.deltas = SharedNoiseTable(deltas, (env_seed + 7))
        self.delta_std = delta_std

    def do_rollouts(self, policy, num_rollouts, selected_states, use_states=0, indices=None, seed=0, train=True):
        """Evaluate *num_rollouts* antithetic (+delta / -delta) perturbations
        of the flat parameter vector *policy*.

        When *indices* is given the noise offsets are replayed instead of
        sampled; when *use_states* is truthy, behavioral embeddings are
        computed from the policy's actions on *selected_states*.
        """
        (rollout_rewards, deltas_idx, sparsities, data, embeddings) = ([], [], [], [], [])
        steps = 0
        for i in range(num_rollouts):
            if (indices is None):
                (idx, delta) = self.deltas.get_delta(policy.size)
            else:
                idx = indices[i]
                delta = self.deltas.get(idx, policy.size)
            delta = (self.delta_std * delta).reshape(policy.shape)
            deltas_idx.append(idx)
            # Positive perturbation.
            self.policy.update((policy + delta))
            (pos_reward, pos_steps, pos_sparse, pos_data) = self.rollouts(seed, train)
            if use_states:
                pos_embedding = np.concatenate([self.policy.forward(x, eval=False) for x in selected_states], axis=0)
            else:
                pos_embedding = []
            # Negative (antithetic) perturbation.
            self.policy.update((policy - delta))
            (neg_reward, neg_steps, neg_sparse, neg_data) = self.rollouts(seed, train)
            if use_states:
                neg_embedding = np.concatenate([self.policy.forward(x, eval=False) for x in selected_states], axis=0)
            else:
                neg_embedding = []
            rollout_rewards.append([pos_reward, neg_reward])
            sparsities.append([pos_sparse, neg_sparse])
            data.append([pos_data, neg_data])
            steps += (pos_steps + neg_steps)
            embeddings.append([pos_embedding, neg_embedding])
        return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards, 'sparsities': sparsities, 'steps': steps, 'data': data, 'embedding': embeddings}

    def rollouts(self, seed=0, train=True):
        """Run one episode (or num_evals episodes, summed) with the current
        policy; returns (total_reward, timesteps, sparsity, data)."""
        self.env._max_episode_steps = self.steps
        if (self.num_evals > 0):
            total_reward = 0
            timesteps = 0
            sparsity = self.policy.used
            data = []
            for _ in range(self.num_evals):
                # seed(None) re-randomises the env for each evaluation.
                self.env.seed(None)
                state = self.env.reset()
                (reward, ts, sp, d) = self.rollout(state)
                sparsity += sp
                total_reward += reward
                timesteps += ts
                data += d
        else:
            # Envs exposing `tasks` manage their own seeding.
            if (not hasattr(self.env, 'tasks')):
                self.env.seed(seed)
            state = self.env.reset()
            (total_reward, timesteps, sparsity, data) = self.rollout(state)
        return (total_reward, timesteps, sparsity, data)

    def rollout(self, state):
        """Roll a single episode from *state* until the env reports done."""
        total_reward = 0
        done = False
        timesteps = 0
        sparsity = self.policy.used
        data = []
        while (not done):
            action = self.policy.forward(state)
            # Clip/reshape the action to the env's action space; dm-control
            # and bsuite envs have different interfaces.
            if hasattr(self.env, 'envtype'):
                if (self.env.envtype == 'dm'):
                    action = np.clip(action, self.env.env.action_spec().minimum, self.env.env.action_spec().maximum)
                else:
                    action = np.clip(action, self.env.env.action_space.low[0], self.env.env.action_space.high[0])
                    action = action.reshape(len(action))
            elif (self.env_name.split(':')[0] != 'bsuite'):
                action = np.clip(action, self.env.action_space.low[0], self.env.action_space.high[0])
                action = action.reshape(len(action))
            (state, reward, done, _) = self.env.step(action)
            total_reward += (reward - self.shift)
            timesteps += 1
            data.append([state, reward, np.array(action)])
        return (total_reward, timesteps, sparsity, data)

    def stats_increment(self):
        self.policy.observation_filter.stats_increment()
        return

    def get_filter(self):
        return self.policy.observation_filter

    def sync_filter(self, other):
        self.policy.observation_filter.sync(other)
        return
def explore(config):
    """PBT explore hook: repair a mutated config so it stays valid
    (batch-size floor, at least one SGD iter, integer target delay).

    NOTE(review): a second function named ``explore`` appears later in this
    file and shadows this one at import time — confirm which is intended.
    """
    batch_floor = config['sgd_minibatch_size'] * 2
    if config['train_batch_size'] < batch_floor:
        config['train_batch_size'] = batch_floor
    config['num_sgd_iter'] = max(config['num_sgd_iter'], 1)
    config['target_delay'] = int(config['target_delay'])
    return config
def explore(config):
    """PBT explore hook (PPO variant): enforce the batch-size floor, clamp
    GAE lambda to at most 1, and keep the batch size integral.

    NOTE(review): this definition shadows the earlier ``explore`` in this
    file — confirm that is intentional.
    """
    batch_floor = config['sgd_minibatch_size'] * 2
    if config['train_batch_size'] < batch_floor:
        config['train_batch_size'] = batch_floor
    config['lambda'] = min(config['lambda'], 1)
    config['train_batch_size'] = int(config['train_batch_size'])
    return config
def get_benckmark_arg_parser():
    """Build the CLI parser for the Deformable-DETR inference benchmark.

    (The 'benckmark' typo in the name is preserved: callers use it.)
    """
    parser = argparse.ArgumentParser('Benchmark inference speed of Deformable DETR.')
    int_options = [
        ('--num_iters', 300, 'total iters to benchmark speed'),
        ('--warm_iters', 5, 'ignore first several iters that are very slow'),
        ('--batch_size', 1, 'batch size in inference'),
    ]
    for flag, default, desc in int_options:
        parser.add_argument(flag, type=int, default=default, help=desc)
    parser.add_argument('--resume', type=str, help='load the pre-trained checkpoint')
    return parser
@torch.no_grad()
def measure_average_inference_time(model, inputs, num_iters=100, warm_iters=5):
    """Return the mean per-call wall-clock time of ``model(inputs)``.

    The first *warm_iters* calls are timed but discarded (CUDA warm-up).
    Requires a CUDA device: the code synchronises before and after each call
    so the measured interval covers the full GPU execution.

    Raises:
        ValueError: if num_iters <= warm_iters (nothing would be timed).
    """
    # Guard against the original's silent ZeroDivisionError.
    if num_iters <= warm_iters:
        raise ValueError('num_iters must exceed warm_iters to time anything')
    timings = []
    for i in range(num_iters):
        torch.cuda.synchronize()
        start = time.perf_counter()
        model(inputs)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start
        if i >= warm_iters:
            timings.append(elapsed)
    # The original printed the raw timing list here — debug leftover, removed.
    return sum(timings) / len(timings)
def benchmark():
    """Measure Deformable-DETR inference throughput; returns images/second."""
    (args, _) = get_benckmark_arg_parser().parse_known_args()
    # Remaining (unknown) CLI args are forwarded to the main model parser.
    main_args = get_main_args_parser().parse_args(_)
    assert ((args.warm_iters < args.num_iters) and (args.num_iters > 0) and (args.warm_iters >= 0))
    assert (args.batch_size > 0)
    assert ((args.resume is None) or os.path.exists(args.resume))
    dataset = build_dataset('val', main_args)
    (model, _, _) = build_model(main_args)
    model.cuda()
    model.eval()
    if (args.resume is not None):
        # Load weights onto CPU first; the model already lives on the GPU.
        ckpt = torch.load(args.resume, map_location=(lambda storage, loc: storage))
        model.load_state_dict(ckpt['model'])
    # Replicate one validation image batch_size times as the timing input.
    inputs = nested_tensor_from_tensor_list([dataset.__getitem__(0)[0].cuda() for _ in range(args.batch_size)])
    t = measure_average_inference_time(model, inputs, args.num_iters, args.warm_iters)
    return ((1.0 / t) * args.batch_size)
def get_coco_api_from_dataset(dataset):
    """Peel off up to 10 layers of ``torch.utils.data.Subset`` wrapping and
    return the wrapped CocoDetection's COCO api object.

    Implicitly returns None when no CocoDetection is found within 10 layers.
    """
    depth = 0
    while depth < 10:
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        if isinstance(dataset, CocoDetection):
            return dataset.coco
        depth += 1
def build_dataset(image_set, args):
    """Construct the dataset named by ``args.dataset_file`` for *image_set*.

    Raises:
        ValueError: for unknown dataset names.
    """
    name = args.dataset_file
    if name == 'coco':
        return build_coco(image_set, args)
    if name == 'coco_panoptic':
        # Imported lazily: panoptic dependencies are only needed here.
        from .coco_panoptic import build as build_coco_panoptic
        return build_coco_panoptic(image_set, args)
    raise ValueError(f'dataset {name} not supported')
def to_cuda(samples, targets, device):
    """Move one batch (samples + list of per-image target dicts) onto
    *device* using non-blocking transfers; returns (samples, targets)."""
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = []
    for t in targets:
        moved_targets.append({k: v.to(device, non_blocking=True) for k, v in t.items()})
    return (moved_samples, moved_targets)
class data_prefetcher():
    """Overlaps host-to-GPU copies of the next batch with compute on the
    current one, using a dedicated CUDA stream (when prefetch=True)."""

    def __init__(self, loader, device, prefetch=True):
        self.loader = iter(loader)
        self.prefetch = prefetch
        self.device = device
        if prefetch:
            # Side stream on which the asynchronous copies are issued.
            self.stream = torch.cuda.Stream()
            self.preload()

    def preload(self):
        """Fetch the next batch and start copying it to the device."""
        try:
            (self.next_samples, self.next_targets) = next(self.loader)
        except StopIteration:
            # Loader exhausted: signal the end with None sentinels.
            self.next_samples = None
            self.next_targets = None
            return
        with torch.cuda.stream(self.stream):
            (self.next_samples, self.next_targets) = to_cuda(self.next_samples, self.next_targets, self.device)

    def next(self):
        """Return the next (samples, targets) pair, or (None, None) when done."""
        if self.prefetch:
            # Wait for the async copy started in preload() to finish.
            torch.cuda.current_stream().wait_stream(self.stream)
            samples = self.next_samples
            targets = self.next_targets
            if (samples is not None):
                # record_stream keeps the side-stream allocation alive until
                # the main stream is done with it.
                samples.record_stream(torch.cuda.current_stream())
            if (targets is not None):
                for t in targets:
                    for (k, v) in t.items():
                        v.record_stream(torch.cuda.current_stream())
            self.preload()
        else:
            # Synchronous fallback path.
            try:
                (samples, targets) = next(self.loader)
                (samples, targets) = to_cuda(samples, targets, self.device)
            except StopIteration:
                samples = None
                targets = None
        return (samples, targets)
class PanopticEvaluator(object):
    """Accumulates panoptic predictions across processes and computes PQ.

    PNG segmentation masks are written to *output_dir*; the JSON metadata
    is gathered from all ranks and scored on the main process only.
    """

    def __init__(self, ann_file, ann_folder, output_dir='panoptic_eval'):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        # Only the main process creates the shared output directory.
        if utils.is_main_process():
            if (not os.path.exists(output_dir)):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.predictions = []

    def update(self, predictions):
        for p in predictions:
            # The PNG payload is written to disk and popped off the record so
            # only light-weight metadata is gathered across processes.
            with open(os.path.join(self.output_dir, p['file_name']), 'wb') as f:
                f.write(p.pop('png_string'))
        self.predictions += predictions

    def synchronize_between_processes(self):
        """Gather every rank's prediction metadata onto every rank."""
        all_predictions = utils.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions

    def summarize(self):
        """Run PQ evaluation on the main process; other ranks return None."""
        if utils.is_main_process():
            json_data = {'annotations': self.predictions}
            predictions_json = os.path.join(self.output_dir, 'predictions.json')
            with open(predictions_json, 'w') as f:
                f.write(json.dumps(json_data))
            return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
        return None
class DistributedSampler(Sampler):
    """Restricts dataset indices to one process's exclusive shard.

    Intended for use with :class:`torch.nn.parallel.DistributedDataParallel`:
    each process passes an instance as its DataLoader sampler and sees a
    disjoint, padded subset of the dataset.

    .. note::
        The dataset size is assumed to be constant.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Pad so every replica draws exactly the same number of samples.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Epoch-seeded shuffle keeps every process's permutation identical.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()
        # Pad by recycling the head of the list, then take this rank's chunk.
        order += order[:self.total_size - len(order)]
        assert len(order) == self.total_size
        start = self.num_samples * self.rank
        shard = order[start:start + self.num_samples]
        assert len(shard) == self.num_samples
        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
class NodeDistributedSampler(Sampler):
    """Restrict data loading to a per-process shard, partitioned node-first.

    Like DistributedSampler, but indices are first assigned to a node-local
    slot (``i % local_size == local_rank``) and then strided across nodes,
    which matches the image-caching mode where each local rank caches only
    its own subset of the dataset.

    Note:
        The dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): number of processes participating in
            distributed training (defaults to the world size).
        rank (optional): rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None,
                 local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        # Fall back to the launcher-provided environment for node-local info.
        if local_rank is None:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        if local_size is None:
            local_size = int(os.environ.get('LOCAL_SIZE', 1))
        self.dataset = dataset
        self.shuffle = shuffle
        self.num_replicas = num_replicas
        self.num_parts = local_size
        self.rank = rank
        self.local_rank = local_rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Size of the index pool owned by this node-local slot.
        self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts

    def __iter__(self):
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()
        # Keep only this local rank's indices, pad to an even split, then
        # stride across the nodes sharing this slot.
        order = [i for i in order if i % self.num_parts == self.local_rank]
        order += order[:self.total_size_parts - len(order)]
        assert len(order) == self.total_size_parts
        shard = order[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]
        assert len(shard) == self.num_samples
        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
class CocoDetection(VisionDataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
        cache_mode (bool): if True, eagerly read raw image bytes into memory,
            sharded across the node's local ranks, and decode from that cache.
        local_rank (int): this process's rank on the node; selects its cache shard.
        local_size (int): number of processes on the node.
    """

    def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None, cache_mode=False, local_rank=0, local_size=1):
        super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
        # Imported lazily so the module can be imported without pycocotools.
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.cache_mode = cache_mode
        self.local_rank = local_rank
        self.local_size = local_size
        if cache_mode:
            self.cache = {}
            self.cache_images()

    def cache_images(self):
        """Read raw image bytes for this rank's shard of the dataset into memory."""
        self.cache = {}
        for (index, img_id) in zip(tqdm.trange(len(self.ids)), self.ids):
            # Each local rank caches only every local_size-th image.
            if ((index % self.local_size) != self.local_rank):
                continue
            path = self.coco.loadImgs(img_id)[0]['file_name']
            with open(os.path.join(self.root, path), 'rb') as f:
                self.cache[path] = f.read()

    def get_image(self, path):
        """Return the RGB PIL image at *path*, served from the byte cache when enabled."""
        if self.cache_mode:
            if (path not in self.cache.keys()):
                # Cache miss (image belongs to another rank's shard): load and keep it.
                with open(os.path.join(self.root, path), 'rb') as f:
                    self.cache[path] = f.read()
            return Image.open(BytesIO(self.cache[path])).convert('RGB')
        return Image.open(os.path.join(self.root, path)).convert('RGB')

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = self.get_image(path)
        if (self.transforms is not None):
            (img, target) = self.transforms(img, target)
        return (img, target)

    def __len__(self):
        return len(self.ids)
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float=0):
    """Run one training epoch and return the averaged metrics.

    Args:
        model: the detector being trained.
        criterion: loss module; must expose a ``weight_dict`` mapping loss
            names to scalar weights.
        data_loader: iterable of (samples, targets) batches.
        optimizer: optimizer stepped once per batch.
        device: device the prefetcher moves batches onto.
        epoch: current epoch index (used only in the log header).
        max_norm: gradient-clipping threshold; <= 0 disables clipping.

    Returns:
        dict mapping each logged meter name to its global average.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    # The prefetcher overlaps host->device transfer with compute: the first
    # batch is fetched before the loop, the next one at the end of each step.
    prefetcher = data_prefetcher(data_loader, device, prefetch=True)
    (samples, targets) = prefetcher.next()
    for _ in metric_logger.log_every(range(len(data_loader)), print_freq, header):
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total loss: weighted sum over the losses that have a weight entry.
        losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # Reduce losses across processes for logging only; backward() below
        # uses the local (unreduced) losses.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort the whole run on NaN/inf loss rather than keep training on garbage.
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if (max_norm > 0):
            grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        else:
            # No clipping: still compute the gradient norm for logging.
            grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(grad_norm=grad_total_norm)
        (samples, targets) = prefetcher.next()
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Evaluate the model on a validation loader.

    Runs COCO bbox/segm evaluation — plus panoptic evaluation when a
    'panoptic' postprocessor is present — and returns ``(stats, coco_evaluator)``
    where ``stats`` maps meter/metric names to their averaged values.
    """
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    # Only evaluate the IoU types we actually have postprocessors for.
    iou_types = tuple((k for k in ('segm', 'bbox') if (k in postprocessors.keys())))
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    panoptic_evaluator = None
    if ('panoptic' in postprocessors.keys()):
        panoptic_evaluator = PanopticEvaluator(data_loader.dataset.ann_file, data_loader.dataset.ann_folder, output_dir=os.path.join(output_dir, 'panoptic_eval'))
    for (samples, targets) in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for (k, v) in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Losses are logged (reduced across processes) but not backpropagated.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        # Convert raw outputs to per-image results in original-image coordinates.
        orig_target_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if ('segm' in postprocessors.keys()):
            # target_sizes is also consumed by the panoptic branch below
            # (panoptic evaluation implies the segm postprocessor is present).
            target_sizes = torch.stack([t['size'] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for (target, output) in zip(targets, results)}
        if (coco_evaluator is not None):
            coco_evaluator.update(res)
        if (panoptic_evaluator is not None):
            res_pano = postprocessors['panoptic'](outputs, target_sizes, orig_target_sizes)
            # Attach the COCO image id and the expected PNG file name.
            for (i, target) in enumerate(targets):
                image_id = target['image_id'].item()
                file_name = f'{image_id:012d}.png'
                res_pano[i]['image_id'] = image_id
                res_pano[i]['file_name'] = file_name
            panoptic_evaluator.update(res_pano)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    if (coco_evaluator is not None):
        coco_evaluator.synchronize_between_processes()
    if (panoptic_evaluator is not None):
        panoptic_evaluator.synchronize_between_processes()
    if (coco_evaluator is not None):
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if (panoptic_evaluator is not None):
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    if (coco_evaluator is not None):
        if ('bbox' in postprocessors.keys()):
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if ('segm' in postprocessors.keys()):
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if (panoptic_res is not None):
        stats['PQ_all'] = panoptic_res['All']
        stats['PQ_th'] = panoptic_res['Things']
        stats['PQ_st'] = panoptic_res['Stuff']
    return (stats, coco_evaluator)
def get_args_parser():
    """Build the argparse parser for Deformable DETR training/evaluation.

    Returns:
        argparse.ArgumentParser with add_help=False, intended to be used as
        a parent parser by the launching script.
    """
    parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
    # Optimization schedule.
    parser.add_argument('--lr', default=0.0002, type=float)
    parser.add_argument('--lr_backbone_names', default=['backbone.0'], type=str, nargs='+')
    parser.add_argument('--lr_backbone', default=2e-05, type=float)
    parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
    parser.add_argument('--batch_size', default=2, type=int)
    parser.add_argument('--weight_decay', default=0.0001, type=float)
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--lr_drop', default=40, type=int)
    parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
    parser.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    parser.add_argument('--sgd', action='store_true')
    # Deformable DETR variants.
    parser.add_argument('--with_box_refine', default=False, action='store_true')
    parser.add_argument('--two_stage', default=False, action='store_true')
    parser.add_argument('--frozen_weights', type=str, default=None, help='Path to the pretrained model. If set, only the mask head will be trained')
    # Backbone / positional encoding.
    parser.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    parser.add_argument('--dilation', action='store_true', help='If true, we replace stride with dilation in the last convolutional block (DC5)')
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    parser.add_argument('--position_embedding_scale', default=(2 * np.pi), type=float, help='position / size * scale')
    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
    # Transformer architecture.
    parser.add_argument('--enc_layers', default=6, type=int, help='Number of encoding layers in the transformer')
    parser.add_argument('--dec_layers', default=6, type=int, help='Number of decoding layers in the transformer')
    parser.add_argument('--dim_feedforward', default=1024, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    parser.add_argument('--hidden_dim', default=256, type=int, help='Size of the embeddings (dimension of the transformer)')
    parser.add_argument('--dropout', default=0.1, type=float, help='Dropout applied in the transformer')
    parser.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=300, type=int, help='Number of query slots')
    parser.add_argument('--dec_n_points', default=4, type=int)
    parser.add_argument('--enc_n_points', default=4, type=int)
    # Segmentation / auxiliary losses / stage assignment.
    parser.add_argument('--masks', action='store_true', help='Train segmentation head if the flag is provided')
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
    parser.add_argument('--assign_first_stage', action='store_true')
    parser.add_argument('--assign_second_stage', action='store_true')
    # Hungarian matcher cost coefficients.
    parser.add_argument('--set_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_cost_bbox', default=5, type=float, help='L1 box coefficient in the matching cost')
    parser.add_argument('--set_cost_giou', default=2, type=float, help='giou box coefficient in the matching cost')
    # Loss weights.
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--cls_loss_coef', default=2, type=float)
    parser.add_argument('--bbox_loss_coef', default=5, type=float)
    parser.add_argument('--giou_loss_coef', default=2, type=float)
    parser.add_argument('--focal_alpha', default=0.25, type=float)
    # Dataset.
    parser.add_argument('--dataset_file', default='coco')
    parser.add_argument('--coco_path', default='./data/coco', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')
    parser.add_argument('--bigger', action='store_true')
    # Run control / checkpointing.
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=2, type=int)
    parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
    return parser
def main(args):
    """Entry point: build the detector, datasets and optimizer from *args*,
    optionally resume/finetune from a checkpoint, then train and/or evaluate.
    """
    utils.init_distributed_mode(args)
    print('git:\n {}\n'.format(utils.get_sha()))
    if (args.frozen_weights is not None):
        assert args.masks, 'Frozen training is meant for segmentation only'
    print(args)
    device = torch.device(args.device)
    # Seed every RNG, offset by rank so workers draw different random streams.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    (model, criterion, postprocessors) = build_model(args)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print('number of params:', n_parameters)
    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)
    if args.distributed:
        # NodeDistributedSampler shards node-first so cache_mode only caches a
        # node-local subset of the images on each process.
        if args.cache_mode:
            sampler_train = samplers.NodeDistributedSampler(dataset_train)
            sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)
        else:
            sampler_train = samplers.DistributedSampler(dataset_train)
            sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers, pin_memory=True)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers, pin_memory=True)

    def match_name_keywords(n, name_keywords):
        # True when any keyword is a substring of the parameter name.
        out = False
        for b in name_keywords:
            if (b in n):
                out = True
                break
        return out
    for (n, p) in model_without_ddp.named_parameters():
        print(n)
    # Three parameter groups: main weights at args.lr, backbone weights at
    # args.lr_backbone, and the linear-projection weights at a scaled lr.
    param_dicts = [{'params': [p for (n, p) in model_without_ddp.named_parameters() if ((not match_name_keywords(n, args.lr_backbone_names)) and (not match_name_keywords(n, args.lr_linear_proj_names)) and p.requires_grad)], 'lr': args.lr}, {'params': [p for (n, p) in model_without_ddp.named_parameters() if (match_name_keywords(n, args.lr_backbone_names) and p.requires_grad)], 'lr': args.lr_backbone}, {'params': [p for (n, p) in model_without_ddp.named_parameters() if (match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad)], 'lr': (args.lr * args.lr_linear_proj_mult)}]
    if args.sgd:
        optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if (args.dataset_file == 'coco_panoptic'):
        # COCO box/mask evaluation runs on the detection view of the panoptic set.
        coco_val = datasets.coco.build('val', args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)
    if (args.frozen_weights is not None):
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    output_dir = Path(args.output_dir)
    if args.finetune:
        # Load pretrained weights but drop the classification heads so a new
        # class count can be trained.
        checkpoint = torch.load(args.finetune, map_location='cpu')
        state_dict = checkpoint['model']
        for k in list(state_dict.keys()):
            if ('class_embed' in k):
                print('removing', k)
                del state_dict[k]
        (missing_keys, unexpected_keys) = model_without_ddp.load_state_dict(state_dict, strict=False)
        # thop profiling buffers are harmless checkpoint leftovers; don't report them.
        unexpected_keys = [k for k in unexpected_keys if (not (k.endswith('total_params') or k.endswith('total_ops')))]
        if (len(missing_keys) > 0):
            print('Missing Keys: {}'.format(missing_keys))
        if (len(unexpected_keys) > 0):
            print('Unexpected Keys: {}'.format(unexpected_keys))
        print('finetuning from epoch', checkpoint['epoch'])
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        (missing_keys, unexpected_keys) = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
        unexpected_keys = [k for k in unexpected_keys if (not (k.endswith('total_params') or k.endswith('total_ops')))]
        if (len(missing_keys) > 0):
            print('Missing Keys: {}'.format(missing_keys))
        if (len(unexpected_keys) > 0):
            print('Unexpected Keys: {}'.format(unexpected_keys))
        if ((not args.eval) and ('optimizer' in checkpoint) and ('lr_scheduler' in checkpoint) and ('epoch' in checkpoint)):
            import copy
            # Restore optimizer state but keep the current run's learning rates.
            p_groups = copy.deepcopy(optimizer.param_groups)
            optimizer.load_state_dict(checkpoint['optimizer'])
            for (pg, pg_old) in zip(optimizer.param_groups, p_groups):
                pg['lr'] = pg_old['lr']
                pg['initial_lr'] = pg_old['initial_lr']
            print(optimizer.param_groups)
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            # Deliberate hack (always on): force the resumed scheduler to use
            # this run's lr_drop instead of the checkpointed one.
            args.override_resumed_lr_drop = True
            if args.override_resumed_lr_drop:
                print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')
                lr_scheduler.step_size = args.lr_drop
                lr_scheduler.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
            lr_scheduler.step(lr_scheduler.last_epoch)
            args.start_epoch = (checkpoint['epoch'] + 1)
        if (not args.eval):
            # Sanity-check evaluation of the resumed weights before training.
            (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
    if args.eval:
        (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval['bbox'].eval, (output_dir / 'eval.pth'))
        return
    print('Start training')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch gets a fresh shuffle.
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [(output_dir / 'checkpoint.pth')]
            # Keep an extra snapshot before each lr drop and every 5 epochs.
            if ((((epoch + 1) % args.lr_drop) == 0) or (((epoch + 1) % 5) == 0)):
                checkpoint_paths.append((output_dir / f'checkpoint{epoch:04}.pth'))
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args}, checkpoint_path)
        (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        if (args.output_dir and utils.is_main_process()):
            with (output_dir / 'log.txt').open('a') as f:
                f.write((json.dumps(log_stats) + '\n'))
            if (coco_evaluator is not None):
                (output_dir / 'eval').mkdir(exist_ok=True)
                if ('bbox' in coco_evaluator.coco_eval):
                    filenames = ['latest.pth']
                    if ((epoch % 50) == 0):
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval['bbox'].eval, ((output_dir / 'eval') / name))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def build_model(args):
    """Thin alias over :func:`build`; returns whatever build(args) returns
    (here: the (model, criterion, postprocessors) triple consumed by main).
    """
    built = build(args)
    return built
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n, eps=1e-05):
        super(FrozenBatchNorm2d, self).__init__()
        # Affine parameters and running statistics are plain buffers: they are
        # saved/loaded with the state dict but never receive gradients.
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))
        self.eps = eps

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Drop the num_batches_tracked entry that regular BatchNorm checkpoints
        # carry; this frozen variant has no such buffer.
        tracked_key = prefix + 'num_batches_tracked'
        if tracked_key in state_dict:
            del state_dict[tracked_key]
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Fold the frozen statistics into one per-channel scale and shift,
        # broadcast over (N, C, H, W).
        shape = (1, -1, 1, 1)
        scale = self.weight.reshape(shape) * (self.running_var.reshape(shape) + self.eps).rsqrt()
        shift = self.bias.reshape(shape) - self.running_mean.reshape(shape) * scale
        return x * scale + shift
class BackboneBase(nn.Module):
    """Wrap a torchvision ResNet and expose selected stages as NestedTensors."""

    def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool):
        super().__init__()
        # Freeze everything below layer2, and the whole backbone when it is
        # not being trained at all.
        for name, parameter in backbone.named_parameters():
            trainable = train_backbone and (
                'layer2' in name or 'layer3' in name or 'layer4' in name)
            if not trainable:
                parameter.requires_grad_(False)
        if return_interm_layers:
            return_layers = {'layer2': '0', 'layer3': '1', 'layer4': '2'}
            self.strides = [8, 16, 32]
            self.num_channels = [512, 1024, 2048]
        else:
            return_layers = {'layer4': '0'}
            self.strides = [32]
            self.num_channels = [2048]
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)

    def forward(self, tensor_list: NestedTensor):
        features = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, feat in features.items():
            m = tensor_list.mask
            assert m is not None
            # Downsample the padding mask to this feature map's resolution.
            resized = F.interpolate(m[None].float(), size=feat.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(feat, resized)
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool):
        # Only the main process downloads pretrained weights; the others wait.
        resnet = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(),
            norm_layer=FrozenBatchNorm2d)
        assert name not in ('resnet18', 'resnet34'), 'number of channels are hard coded'
        super().__init__(resnet, train_backbone, return_interm_layers)
        if dilation:
            # DC5: the dilated last stage keeps resolution, halving its stride.
            self.strides[-1] = self.strides[-1] // 2
class SwinBackbone(nn.Module):
    """Swin-L backbone exposing the res3/res4/res5 feature maps as NestedTensors."""

    def __init__(self):
        super().__init__()
        self.body = get_swinl()
        self.features = ['res3', 'res4', 'res5']
        self.strides = [8, 16, 32]
        self.num_channels = [384, 768, 1536]

    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        # Fix: check the mask exists *before* indexing it. The original did
        # `m = tensor_list.mask[None]` first, which raises TypeError on a None
        # mask and made the subsequent assert unreachable.
        assert tensor_list.mask is not None
        m = tensor_list.mask[None]
        out: Dict[str, NestedTensor] = {}
        for name in self.features:
            # Downsample the padding mask to each feature map's resolution.
            mask = F.interpolate(m.float(), size=xs[name].shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(xs[name], mask)
        return out
class Joiner(nn.Sequential):
    """Pair a backbone with a position-embedding module.

    forward returns the backbone's NestedTensor features (sorted by level
    name) together with one positional encoding per feature level.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)
        self.strides = backbone.strides
        self.num_channels = backbone.num_channels

    def forward(self, tensor_list: NestedTensor):
        features = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for _, feat in sorted(features.items()):
            out.append(feat)
            # Positional encodings are cast to the feature dtype (e.g. fp16).
            pos.append(self[1](feat).to(feat.tensors.dtype))
        return (out, pos)
def build_backbone(args):
    """Assemble the Joiner (backbone + positional encoding) from CLI args."""
    position_embedding = build_position_encoding(args)
    train_backbone = args.lr_backbone > 0
    # Intermediate layers are needed for segmentation or multi-scale attention.
    return_interm_layers = args.masks or (args.num_feature_levels > 1)
    if 'swin' in args.backbone:
        backbone = SwinBackbone()
    else:
        backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    return Joiner(backbone, position_embedding)
def get_extensions():
    """Collect the C++/CUDA sources for the MultiScaleDeformableAttention op.

    Returns:
        A single-element list holding the CppExtension/CUDAExtension to pass
        to setuptools.

    Raises:
        NotImplementedError: when CUDA is unavailable — the op has no
            CPU-only build here.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'src')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    sources = main_file + source_cpu
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    if torch.cuda.is_available() and (CUDA_HOME is not None):
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        # Disable half-precision operators that break older nvcc builds.
        extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
    else:
        # Fix: the original message misspelled 'available' ('availabel').
        raise NotImplementedError('Cuda is not available')
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [extension('MultiScaleDeformableAttention', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args)]
    return ext_modules
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
    """Compare the CUDA MSDeformAttn forward with the pure-PyTorch reference
    in float64 and print the max absolute/relative error.

    NOTE(review): relies on module-level globals (N, S, M, D, Lq, L, P,
    shapes, level_start_index) defined elsewhere in this file, and requires
    a CUDA device — confirm those are set before calling.
    """
    value = (torch.rand(N, S, M, D).cuda() * 0.01)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
    # Normalize so the weights sum to 1 over the (level, point) axes.
    attention_weights /= attention_weights.sum((-1), keepdim=True).sum((-2), keepdim=True)
    im2col_step = 2
    output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()
    output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()
    # Exact-tolerance allclose: float64 results should agree very tightly.
    fwdok = torch.allclose(output_cuda, output_pytorch)
    max_abs_err = (output_cuda - output_pytorch).abs().max()
    max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
    """Compare the CUDA MSDeformAttn forward with the pure-PyTorch reference
    in float32 (loose tolerances) and print the max abs/relative error.

    NOTE(review): relies on module-level globals (N, S, M, D, Lq, L, P,
    shapes, level_start_index) defined elsewhere in this file, and requires
    a CUDA device — confirm those are set before calling.
    """
    value = (torch.rand(N, S, M, D).cuda() * 0.01)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
    # Normalize so the weights sum to 1 over the (level, point) axes.
    attention_weights /= attention_weights.sum((-1), keepdim=True).sum((-2), keepdim=True)
    im2col_step = 2
    output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
    output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
    # Loose tolerances account for float32 rounding in the CUDA kernel.
    fwdok = torch.allclose(output_cuda, output_pytorch, rtol=0.01, atol=0.001)
    max_abs_err = (output_cuda - output_pytorch).abs().max()
    max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Numerically gradcheck the MSDeformAttn CUDA op in float64.

    Args:
        channels: head dimension to test.
        grad_value / grad_sampling_loc / grad_attn_weight: which inputs
            require gradients during the check.

    NOTE(review): relies on module-level globals (N, S, M, Lq, L, P, shapes,
    level_start_index) defined elsewhere in this file, and requires a CUDA
    device — confirm those are set before calling.
    """
    value = (torch.rand(N, S, M, channels).cuda() * 0.01)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
    # Normalize so the weights sum to 1 over the (level, point) axes.
    attention_weights /= attention_weights.sum((-1), keepdim=True).sum((-2), keepdim=True)
    im2col_step = 2
    func = MSDeformAttnFunction.apply
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight
    # gradcheck compares analytical vs. numerical gradients in double precision.
    gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))
    print(f'* {gradok} check_gradient_numerical(D={channels})')
def parse_args():
    """Parse command-line options for the multi-process launch helper.

    Returns:
        argparse.Namespace: parsed options, including the training script
        path and its pass-through arguments.
    """
    parser = ArgumentParser(description='PyTorch distributed training launch helper utilty that will spawn up multiple distributed processes')
    # Job topology.
    parser.add_argument('--nnodes', type=int, default=1, help='The number of nodes to use for distributed training')
    parser.add_argument('--node_rank', type=int, default=0, help='The rank of the node for multi-node distributed training')
    parser.add_argument('--nproc_per_node', type=int, default=1, help='The number of processes to launch on each node, for GPU training, this is recommended to be set to the number of GPUs in your system so that each process can be bound to a single GPU.')
    # Rendezvous endpoint shared by all workers.
    parser.add_argument('--master_addr', default='127.0.0.1', type=str, help="Master node (rank 0)'s address, should be either the IP address or the hostname of node 0, for single node multi-proc training, the --master_addr can simply be 127.0.0.1")
    parser.add_argument('--master_port', default=29500, type=int, help="Master node (rank 0)'s free port that needs to be used for communciation during distributed training")
    # The script to run, plus everything after it forwarded verbatim.
    parser.add_argument('training_script', type=str, help='The full path to the single GPU training program/script to be launched in parallel, followed by all the arguments for the training script')
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn one training process per local rank and wait for all of them."""
    args = parse_args()
    world_size = args.nproc_per_node * args.nnodes
    # Children read the distributed rendezvous info from their environment.
    env = os.environ.copy()
    env['MASTER_ADDR'] = args.master_addr
    env['MASTER_PORT'] = str(args.master_port)
    env['WORLD_SIZE'] = str(world_size)
    procs = []
    for local_rank in range(args.nproc_per_node):
        # Global rank = node offset + local rank.
        env['RANK'] = str(args.nproc_per_node * args.node_rank + local_rank)
        env['LOCAL_RANK'] = str(local_rank)
        command = [args.training_script] + args.training_script_args
        procs.append(subprocess.Popen(command, env=env))
    for proc in procs:
        proc.wait()
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=proc.args)
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        # image/label become float32 tensors; text and textlen pass through.
        image = torch.from_numpy(sample['image'].astype(np.float32))
        label = torch.from_numpy(sample['label'].astype(np.float32))
        return {'image': image,
                'text': sample['text'],
                'label': label,
                'textlen': sample['textlen']}
class Normalize(object):
    """Channel-wise normalization of the image entry of a sample dict."""

    def __init__(self, mean_vector, std_devs):
        self.mean_vector = mean_vector
        self.std_devs = std_devs

    def __call__(self, sample):
        normalized = self._normalize(sample['image'], self.mean_vector, self.std_devs)
        return {'image': normalized,
                'text': sample['text'],
                'label': sample['label'],
                'textlen': sample['textlen']}

    def _normalize(self, tensor, mean, std):
        """Normalize a (C, H, W) tensor image in place with mean and std.

        Args:
            tensor (Tensor): image of size (C, H, W).
            mean (sequence): per-channel means.
            std (sequence): per-channel standard deviations.
        Returns:
            Tensor: the same tensor, normalized in place.
        Raises:
            TypeError: if ``tensor`` is not a 3-D torch tensor.
        """
        if not self._is_tensor_image(tensor):
            print(tensor.size())
            raise TypeError('tensor is not a torch image. Its size is {}.'.format(tensor.size()))
        # Iterating a 3-D tensor yields one (H, W) channel slice at a time.
        for channel, m, s in zip(tensor, mean, std):
            channel.sub_(m).div_(s)
        return tensor

    def _is_tensor_image(self, img):
        return torch.is_tensor(img) and img.ndimension() == 3
class RandomModalityMuting(object):
    """Randomly turn one modality (image or text) off.

    With probability ``p_muting`` the sample is muted; a second fair coin
    then decides whether the image or the text is zeroed out.
    """

    def __init__(self, p_muting=0.1):
        # Probability that one of the two modalities is muted for a sample.
        self.p_muting = p_muting

    # BUG FIX: this method was named ``__call_`` (single trailing
    # underscore), so instances were never callable and the transform
    # silently did nothing when used in a transforms pipeline.
    def __call__(self, sample):
        rval = random.random()
        im = sample['image']
        au = sample['text']
        if rval <= self.p_muting:
            vval = random.random()
            if vval <= 0.5:
                im = sample['image'] * 0
            else:
                au = sample['text'] * 0
        return {'image': im, 'text': au,
                'label': sample['label'], 'textlen': sample['textlen']}
class MM_IMDB(Dataset):
    """Multimodal IMDB dataset backed by precomputed .npy files per sample."""

    def __init__(self, root_dir='', transform=None, stage='train',
                 feat_dim=100, average_text=False):
        """
        Args:
            root_dir (string): Directory where data is.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # Hard-coded split sizes; unknown stages leave len_data unset,
        # matching the original behavior.
        if stage == 'train':
            self.len_data = 15552
        elif stage == 'test':
            self.len_data = 7799
        elif stage == 'dev':
            self.len_data = 2608
        self.transform = transform
        self.root_dir = root_dir
        self.stage = stage
        self.average_text = average_text
        # Share the text feature dimension with collate_imdb via a module
        # global, as in the original code.
        global fdim
        fdim = feat_dim

    def __len__(self):
        return self.len_data

    def __getitem__(self, idx):
        split_dir = os.path.join(self.root_dir, self.stage)
        image = np.load(os.path.join(split_dir, 'image_{:06}.npy'.format(idx)))
        label = np.load(os.path.join(split_dir, 'label_{:06}.npy'.format(idx)))
        text = np.load(os.path.join(split_dir, 'text_{:06}.npy'.format(idx)))
        if self.average_text:
            # Collapse the token axis to a single mean feature vector.
            text = text.mean(0)
        sample = {'image': image, 'text': text,
                  'label': label, 'textlen': text.shape[0]}
        if self.transform:
            sample = self.transform(sample)
        return sample
def collate_imdb(list_samples):
    """Collate IMDB samples into a batch, padding texts to the longest one.

    Padding rows are filled with -10.0 across all ``fdim`` features. Images
    are stacked and their dims 1 and 3 swapped (HWC batch -> CWH batch).
    """
    global fdim
    max_text_len = max((len(s['text']) for s in list_samples), default=0)

    batch_images, batch_text, batch_labels, batch_textlen = [], [], [], []
    for sample in list_samples:
        text_arr = sample['text'].astype(np.float32)
        pad_rows = max_text_len - len(sample['text'])
        padding = np.full((pad_rows, fdim), -10.0, np.float32)
        if padding.shape[0] > 0:
            batch_text.append(torch.from_numpy(np.concatenate((text_arr, padding), 0)))
        else:
            batch_text.append(torch.from_numpy(text_arr))
        batch_images.append(sample['image'])
        batch_labels.append(sample['label'])
        batch_textlen.append(sample['textlen'])

    images = torch.transpose(torch.stack(batch_images), 1, 3)
    return {'image': images,
            'text': torch.stack(batch_text),
            'label': torch.stack(batch_labels),
            'textlen': batch_textlen}
def parse_args():
    """Parse command-line options for NTU multimodal training.

    Returns:
        argparse.Namespace: training configuration.
    """
    cli = argparse.ArgumentParser(description='Modality optimization.')
    # Paths and checkpoints.
    cli.add_argument('--checkpointdir', type=str, help='output base dir',
                     default='/home/juanma/Documents/Checkpoints/NTU/')
    cli.add_argument('--datadir', type=str, help='data directory',
                     default='/home/juanma/Documents/Data/ROSE_Action/')
    cli.add_argument('--ske_cp', type=str,
                     help='Skeleton net checkpoint (assuming is contained in checkpointdir)',
                     default='skeleton_32frames_85.24.checkpoint')
    cli.add_argument('--rgb_cp', type=str,
                     help='RGB net checkpoint (assuming is contained in checkpointdir)',
                     default='rgb_8frames_83.91.checkpoint')
    cli.add_argument('--test_cp', type=str,
                     help='Full net checkpoint (assuming is contained in checkpointdir)',
                     default='')
    # Model / optimization hyper-parameters.
    cli.add_argument('--num_outputs', type=int, help='output dimension', default=60)
    cli.add_argument('--batchsize', type=int, help='batch size', default=20)
    cli.add_argument('--inner_representation_size', type=int,
                     help='output size of mixing linear layers', default=256)
    cli.add_argument('--epochs', type=int, help='training epochs', default=70)
    cli.add_argument('--eta_max', type=float, help='eta max', default=0.001)
    cli.add_argument('--eta_min', type=float, help='eta min', default=1e-06)
    cli.add_argument('--Ti', type=int, help='epochs Ti', default=5)
    cli.add_argument('--Tm', type=int, help='epochs multiplier Tm', default=2)
    # Execution flags.
    cli.add_argument('--use_dataparallel', help='Use several GPUs',
                     action='store_true', dest='use_dataparallel', default=False)
    cli.add_argument('--j', dest='num_workers', type=int,
                     help='Dataloader CPUS', default=16)
    cli.add_argument('--modality', type=str, help='', default='both')
    cli.add_argument('--no-verbose', help='verbose', action='store_false',
                     dest='verbose', default=True)
    cli.add_argument('--weightsharing', help='Weight sharing',
                     action='store_true', default=False)
    cli.add_argument('--no-multitask', dest='multitask', help='Multitask loss',
                     action='store_false', default=True)
    cli.add_argument('--alphas', help='Use alphas', action='store_true', default=False)
    cli.add_argument('--batchnorm', help='Use batch norm', action='store_true',
                     dest='batchnorm', default=False)
    # Video options.
    cli.add_argument('--vid_dim', action='store', default=256, dest='vid_dim',
                     help='frame side dimension (square image assumed) ')
    cli.add_argument('--vid_fr', action='store', default=30, dest='vi_fr',
                     help='video frame rate')
    cli.add_argument('--vid_len', action='store', default=(8, 32), dest='vid_len',
                     type=int, nargs='+',
                     help='length of video, as a tuple of two lengths, (rgb len, skel len)')
    cli.add_argument('--drpt', action='store', default=0.4, dest='drpt',
                     type=float, help='dropout')
    cli.add_argument('--no_bad_skel', action='store_true',
                     help='Remove the 300 bad samples, espec. useful to evaluate',
                     default=False)
    cli.add_argument('--no_norm', action='store_true', default=False,
                     dest='no_norm', help='Not normalizing the skeleton')
    cli.add_argument('--conf', type=int, help='conf to train', default=1)
    return cli.parse_args()
def get_dataloaders(args):
    """Build train/dev/test NTU dataloaders with the appropriate transforms."""
    import torchvision.transforms as transforms
    from datasets import ntu as d
    from torch.utils.data import DataLoader

    # Validation/test: only length normalization; training adds crop augmentation.
    eval_transform = transforms.Compose([d.NormalizeLen(args.vid_len), d.ToTensor()])
    train_transform = transforms.Compose([d.AugCrop(), d.NormalizeLen(args.vid_len), d.ToTensor()])

    datasets = {
        'train': d.NTU(args.datadir, transform=train_transform, stage='train', args=args),
        'dev': d.NTU(args.datadir, transform=eval_transform, stage='dev', args=args),
        'test': d.NTU(args.datadir, transform=eval_transform, stage='test', args=args),
    }
    # NOTE(review): shuffle=True is applied to all splits (including dev/test),
    # matching the original configuration.
    return {split: DataLoader(datasets[split], batch_size=args.batchsize,
                              shuffle=True, num_workers=args.num_workers,
                              drop_last=False, pin_memory=True)
            for split in ['train', 'dev', 'test']}
def train_model(rmode, configuration, dataloaders, args, device):
    """Train or evaluate the fusion model ``rmode`` on NTU.

    If ``args.test_cp`` is empty: loads the pretrained skeleton/RGB branch
    checkpoints, pretrains the central (fusion) weights for 1 epoch, then
    trains all parameters for ``args.epochs`` epochs with cosine-annealed LR.
    Otherwise: loads a full checkpoint and only evaluates on the test set.
    """
    dataset_sizes = {x: len(dataloaders[x].dataset) for x in ['train', 'test', 'dev']}
    if (args.test_cp == ''):
        num_batches_per_epoch = (dataset_sizes['train'] / args.batchsize)
        # One CE loss per head (multitask: two unimodal heads + fusion head).
        criteria = [torch.nn.CrossEntropyLoss(), torch.nn.CrossEntropyLoss(), torch.nn.CrossEntropyLoss()]
        skemodel_filename = os.path.join(args.checkpointdir, args.ske_cp)
        rgbmodel_filename = os.path.join(args.checkpointdir, args.rgb_cp)
        # Initialize the unimodal branches from their pretrained checkpoints.
        rmode.skenet.load_state_dict(torch.load(skemodel_filename))
        rmode.rgbnet.load_state_dict(torch.load(rgbmodel_filename))
        # Phase 1: optimize only the central (fusion) weights at eta_max/10.
        params = rmode.central_params()
        optimizer = op.Adam(params, lr=(args.eta_max / 10), weight_decay=0.0001)
        scheduler = sc.LRCosineAnnealingScheduler(args.eta_max, args.eta_min, args.Ti, args.Tm, num_batches_per_epoch)
        if ((torch.cuda.device_count() > 1) and args.use_dataparallel):
            rmode = torch.nn.DataParallel(rmode)
        rmode.to(device)
        if args.verbose:
            print('Pretraining central weights: ')
            print(configuration)
        interm_model_acc = tr.train_ntu_track_acc(rmode, criteria, optimizer, scheduler, dataloaders, dataset_sizes, device=device, num_epochs=1, verbose=args.verbose, multitask=args.multitask)
        if args.verbose:
            print(('Intermediate val accuracy: ' + str(interm_model_acc)))
        # Phase 2: optimize all parameters. After a DataParallel wrap the
        # underlying module's parameters live under ``.module``.
        if ((torch.cuda.device_count() > 1) and args.use_dataparallel):
            params = rmode.module.parameters()
        else:
            params = rmode.parameters()
        optimizer = op.Adam(params, lr=args.eta_max, weight_decay=0.0001)
        scheduler = sc.LRCosineAnnealingScheduler(args.eta_max, args.eta_min, args.Ti, args.Tm, num_batches_per_epoch)
        best_model_acc = tr.train_ntu_track_acc(rmode, criteria, optimizer, scheduler, dataloaders, dataset_sizes, device=device, num_epochs=args.epochs, verbose=args.verbose, multitask=args.multitask)
        if args.verbose:
            print(('Final val accuracy: ' + str(best_model_acc)))
    else:
        # Evaluation-only path: load a full checkpoint and score the test set.
        fullmodel_filename = os.path.join(args.checkpointdir, args.test_cp)
        rmode.load_state_dict(torch.load(fullmodel_filename))
        if ((torch.cuda.device_count() > 1) and args.use_dataparallel):
            rmode = torch.nn.DataParallel(rmode)
        rmode.to(device)
        test_model_acc = tr.test_ntu_track_acc(rmode, dataloaders, dataset_sizes, device=device, multitask=args.multitask)
        if args.verbose:
            print(('Final test accuracy: ' + str(test_model_acc)))
    # NOTE(review): ``test_model_acc`` is only assigned in the evaluation
    # branch; in the training branch (test_cp == '') this raises
    # NameError. Presumably training runs are expected to return
    # ``best_model_acc`` instead — TODO confirm against the caller.
    return test_model_acc
def parse_args():
    """Parse command-line options for the EPNAS fusion-architecture search.

    Returns:
        argparse.Namespace: search and training configuration.
    """
    cli = argparse.ArgumentParser(description='Modality optimization.')
    # Paths and checkpoints.
    cli.add_argument('--checkpointdir', type=str, help='output base dir',
                     default='/home/juanma/Documents/Checkpoints/NTU/')
    cli.add_argument('--datadir', type=str, help='data directory',
                     default='/home/juanma/Documents/Data/ROSE_Action/')
    cli.add_argument('--ske_cp', type=str,
                     help='Skeleton net checkpoint (assuming is contained in checkpointdir)',
                     default='skeleton_32frames_83.42')
    cli.add_argument('--rgb_cp', type=str,
                     help='RGB net checkpoint (assuming is contained in checkpointdir)',
                     default='rgb_8frames_82.14')
    # Model / optimization hyper-parameters.
    cli.add_argument('--num_outputs', type=int, help='output dimension', default=60)
    cli.add_argument('--batchsize', type=int, help='batch size', default=20)
    cli.add_argument('--inner_representation_size', type=int,
                     help='output size of mixing linear layers', default=16)
    cli.add_argument('--epochs', type=int, help='training epochs', default=3)
    cli.add_argument('--lr_surrogate', type=float,
                     help='learning rate surrogate', default=0.001)
    cli.add_argument('--epochs_surrogate', type=int,
                     help='num of epochs for surrogate', default=50)
    cli.add_argument('--eta_max', type=float, help='eta max', default=0.001)
    cli.add_argument('--eta_min', type=float, help='eta min', default=1e-06)
    cli.add_argument('--Ti', type=int, help='epochs Ti', default=1)
    cli.add_argument('--Tm', type=int, help='epochs multiplier Tm', default=2)
    # Execution flags.
    cli.add_argument('--use_dataparallel', help='Use several GPUs',
                     action='store_true', default=False)
    cli.add_argument('--num_workers', type=int, help='Dataloader CPUS', default=16)
    cli.add_argument('--modality', type=str, help='', default='both')
    # Search-space controls.
    cli.add_argument('--max_fusions', type=int, dest='max_progression_levels',
                     help='max fusions', default=4)
    cli.add_argument('--search_iterations', type=int,
                     help='epnas iterations', default=3)
    cli.add_argument('--num_samples', type=int,
                     help='number of samples to train at each explo step (K)',
                     default=15)
    cli.add_argument('--initial_temperature', type=float,
                     help='initial sampling temperature', default=10.0)
    cli.add_argument('--final_temperature', type=float,
                     help='final sampling temperature', default=0.2)
    cli.add_argument('--temperature_decay', type=float,
                     help='temperature decay (sigma)', default=4.0)
    cli.add_argument('--no-verbose', help='verbose', dest='verbose',
                     action='store_false', default=True)
    cli.add_argument('--weightsharing', help='Weight sharing',
                     action='store_true', default=False)
    cli.add_argument('--alphas', help='Use alphas', action='store_true', default=False)
    cli.add_argument('--batchnorm', help='Use batch norm',
                     action='store_true', default=False)
    cli.add_argument('--multitask', help='Multitask loss',
                     action='store_true', default=False)
    # Video options.
    cli.add_argument('--vid_dim', action='store', default=256, dest='vid_dim',
                     help='frame side dimension (square image assumed) ')
    cli.add_argument('--vid_fr', action='store', default=30, dest='vi_fr',
                     help='video frame rate')
    cli.add_argument('--vid_len', action='store', default=(8, 32), dest='vid_len',
                     type=int, nargs='+',
                     help='length of video, as a tuple of two lengths, (rgb len, skel len)')
    cli.add_argument('--drpt', action='store', default=0.5, dest='drpt',
                     type=float, help='dropout')
    cli.add_argument('--no_bad_skel', action='store_true',
                     help='Remove the 300 bad samples, espec. useful to evaluate',
                     default=False)
    cli.add_argument('--no_norm', action='store_true', default=False,
                     dest='no_norm', help='Not normalizing the skeleton')
    return cli.parse_args()
def inflated_resnet(**kwargs):
    """Build a ResNet-50 with 3D (inflated) bottleneck blocks.

    The 2D ImageNet-pretrained weights are loaded and inflated along the
    temporal axis using 'center' initialization.
    """
    stage_blocks = [Bottleneck3D] * 4
    stage_depths = [3, 4, 6, 3]  # the standard ResNet-50 layout
    model = ResNet(stage_blocks, stage_depths, **kwargs)
    load_pretrained_2D_weights('resnet50', model, inflation='center')
    return model
class Bottleneck3D(nn.Module):
    """3D ResNet bottleneck: 1x1x1 reduce -> 3x3x3 -> 1x1x1 expand convs."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck3D, self).__init__()
        out_planes = planes * 4
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # Dilation applies spatially only; the temporal dilation stays 1.
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False,
                               dilation=(1, dilation, dilation))
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.input_dim = 5  # expects 5-D (B, C, T, H, W) input
        self.dilation = dilation

    def forward(self, x):
        # Shortcut branch: projected if a downsample module was provided.
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone mixing a 2D stem with 3D residual stages.

    The stem (conv1/maxpool) is 2D and operates on (B*T, C, H, W); the four
    residual stages use 3D blocks over (B, C, T, H, W). ``transform_input``
    reshapes between the two layouts as needed.
    """

    def __init__(self, list_block, layers, **kwargs):
        # list_block: one block class per stage; layers: blocks per stage.
        self.inplanes = 64
        self.input_dim = 4  # the 2D stem consumes 4-D (B*T, C, H, W) input
        super(ResNet, self).__init__()
        self._first_conv()
        self.relu = nn.ReLU(inplace=True)
        self.list_channels = [64, 128, 256, 512]
        self.layer1 = self._make_layer(list_block[0], self.list_channels[0], layers[0])
        self.layer2 = self._make_layer(list_block[1], self.list_channels[1], layers[1], stride=2)
        self.layer3 = self._make_layer(list_block[2], self.list_channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(list_block[3], self.list_channels[3], layers[3], stride=2)
        self.out_dim = 5  # final feature map is returned in 5-D layout
        # He-style initialization for convs; identity-affine for batchnorms.
        for m in self.modules():
            if (isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d)):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif (isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _first_conv(self):
        # 2D stem applied frame-wise (time folded into the batch axis).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(64)
        self.input_dim = 4

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage; only the first block may downsample."""
        downsample = None
        # Spatial-only stride; the temporal stride is kept at 1.
        stride = (1, stride, stride)
        # NOTE(review): after the line above ``stride`` is a tuple, so
        # ``stride != 1`` is always True and a projection shortcut is built
        # for every first block. Benign here since the first block of each
        # stage always changes the channel count — TODO confirm intent.
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            (conv, batchnorm) = (nn.Conv3d, nn.BatchNorm3d)
            downsample = nn.Sequential(conv(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False, dilation=dilation), batchnorm((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def get_feature_maps(self, x):
        """Return the four stage outputs (fm1..fm3, final) for 5-D input x.

        Args:
            x: (B, C, T, W, H) video tensor.
        """
        (B, C, T, W, H) = x.size()
        # Fold time into the batch axis for the 2D stem.
        x = transform_input(x, self.input_dim, T=T)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Each stage declares the dimensionality it expects via input_dim.
        x = transform_input(x, self.layer1[0].input_dim, T=T)
        x = self.layer1(x)
        fm1 = x
        x = transform_input(x, self.layer2[0].input_dim, T=T)
        x = self.layer2(x)
        fm2 = x
        x = transform_input(x, self.layer3[0].input_dim, T=T)
        x = self.layer3(x)
        fm3 = x
        x = transform_input(x, self.layer4[0].input_dim, T=T)
        x = self.layer4(x)
        final_fm = transform_input(x, self.out_dim, T=T)
        return (fm1, fm2, fm3, final_fm)
def transform_input(x, dim, T=12):
    """Reshape between 5-D (B, C, T, W, H) and 4-D (B*T, C, W, H) layouts.

    Args:
        x: a 4-D or 5-D tensor.
        dim: target number of dimensions (4 or 5).
        T: temporal length, used when expanding 4-D back to 5-D.
    Returns:
        The reshaped tensor; returned unchanged if already at ``dim`` dims.
    """
    current = len(x.size())
    if current > dim:
        # Fold time into the batch axis: (B, C, T, W, H) -> (B*T, C, W, H).
        B, C, T, W, H = x.size()
        x = x.transpose(1, 2).contiguous().view(-1, C, W, H)
    elif current < dim:
        # Unfold using the given T: (B*T, C, W, H) -> (B, C, T, W, H).
        _, C, W, H = x.size()
        x = x.view(-1, T, C, W, H).transpose(1, 2)
    return x
class LRCosineAnnealingScheduler():
    """Cosine-annealed learning rate with warm restarts (SGDR-style).

    The rate decays from eta_max to eta_min over Ti epochs; upon reaching
    the minimum the cycle restarts with Ti multiplied by Tm.
    """

    def __init__(self, eta_max, eta_min, Ti, Tmultiplier, num_batches_per_epoch):
        self.eta_min = eta_min
        self.eta_max = eta_max
        self.Ti = Ti
        self.Tcur = 0.0
        self.nbpe = num_batches_per_epoch
        self.iteration_counter = 0.0
        self.eta = eta_max
        self.Tm = Tmultiplier

    def _compute_rule(self):
        # eta_min + 0.5*(eta_max - eta_min)*(1 + cos(pi * Tcur / Ti))
        cos_term = 1 + np.cos(np.pi * self.Tcur / self.Ti)
        self.eta = self.eta_min + 0.5 * (self.eta_max - self.eta_min) * cos_term
        return self.eta

    def step(self):
        """Advance one batch and return the learning rate for it."""
        self.Tcur = self.iteration_counter / self.nbpe
        self.iteration_counter += 1.0
        eta = self._compute_rule()
        if eta <= self.eta_min + 1e-10:
            # Warm restart: reset progress and stretch the next cycle.
            self.Tcur = 0
            self.Ti *= self.Tm
            self.iteration_counter = 0
        return eta

    def update_optimizer(self, optimizer):
        """Write the current eta into every param group of ``optimizer``."""
        state_dict = optimizer.state_dict()
        for group in state_dict['param_groups']:
            group['lr'] = self.eta
        optimizer.load_state_dict(state_dict)
class FixedScheduler():
    """Scheduler that always reports the same constant learning rate."""

    def __init__(self, lr):
        self.lr = lr

    def step(self):
        """Return the constant rate; there is no internal state to advance."""
        return self.lr

    def update_optimizer(self, optimizer):
        """Force every param group of ``optimizer`` to the fixed rate."""
        sd = optimizer.state_dict()
        for group in sd['param_groups']:
            group['lr'] = self.lr
        optimizer.load_state_dict(sd)
class activ(nn.Module):
    """Configurable activation module selected by ``args.activation``.

    Supports LeakyReLU, ELU, ReLU, Tanh, Sigmoid, and Swish
    (x * sigmoid(beta * x) with a learnable beta).
    """

    def __init__(self, args):
        super(activ, self).__init__()
        self.activation = args.activation
        simple_acts = {
            'LeakyReLU': torch.nn.LeakyReLU,
            'ELU': torch.nn.ELU,
            'ReLU': torch.nn.ReLU,
            'Tanh': torch.nn.Tanh,
            'Sigmoid': torch.nn.Sigmoid,
        }
        if self.activation == 'Swish':
            # Swish gates x with a sigmoid scaled by a learnable beta.
            self.beta = nn.Parameter(torch.tensor(0.5))
            self.act = torch.nn.Sigmoid()
        elif self.activation in simple_acts:
            self.act = simple_acts[self.activation]()
        else:
            print('WARNING: REQUIRED ACTIVATION IS NOT DEFINED')

    def forward(self, x):
        if self.activation == 'Swish':
            return self.act(self.beta * x) * x
        return self.act(x)
class SimpleRecurrentSurrogate(nn.Module):
    """LSTM surrogate scoring an operation sequence with a value in (0, 1)."""

    def __init__(self, num_hidden=100, number_input_feats=3, size_ebedding=100):
        super(SimpleRecurrentSurrogate, self).__init__()
        self.num_hidden = num_hidden
        self.embedding = nn.Sequential(nn.Linear(number_input_feats, size_ebedding),
                                       nn.Sigmoid())
        self.lstm = nn.LSTM(size_ebedding, num_hidden)
        self.hid2val = nn.Linear(num_hidden, 1)
        self.nonlinearity = nn.Sigmoid()
        # Small uniform weights with a large positive bias, as in the
        # original initialization.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                module.weight.data.uniform_(-0.1, 0.1)
                module.bias.data.fill_(1.8)

    def forward(self, sequence_of_operations):
        """Map a (seq_len, batch, feats) tensor to a (batch, 1) score."""
        embedded = torch.stack([self.embedding(step)
                                for step in sequence_of_operations], dim=0)
        lstm_out, _ = self.lstm(embedded)
        # Score from the last timestep's hidden state, squashed to (0, 1).
        return self.nonlinearity(self.hid2val(lstm_out[-1]))

    def eval_model(self, sequence_of_operations_np, device):
        """Score a single numpy (seq_len, feats) sequence; returns a scalar."""
        batched = np.expand_dims(sequence_of_operations_np, 1)
        seq = torch.from_numpy(batched).float().to(device)
        return self.forward(seq).cpu().data.numpy()[0, 0]
class SurrogateDataloader():
    """Stores (configuration, accuracy) pairs grouped by sequence length.

    Duplicate configurations (same raw bytes) keep only their best
    observed accuracy.
    """

    def __init__(self):
        self._dict_data = {}

    def add_datum(self, datum_conf, datum_acc):
        """Insert or merge one configuration, deduplicated by its bytes."""
        bucket = self._dict_data.setdefault(len(datum_conf), {})
        key = datum_conf.data.tobytes()
        if key in bucket:
            bucket[key] = (datum_conf, max(datum_acc, bucket[key][1]))
        else:
            bucket[key] = (datum_conf, datum_acc)

    def get_data(self, to_torch=False):
        """Return per-length (seq, batch, feat) conf arrays and (batch, 1) accs."""
        dataset_conf, dataset_acc = [], []
        for bucket in self._dict_data.values():
            confs = [pair[0] for pair in bucket.values()]
            accs = [pair[1] for pair in bucket.values()]
            # (batch, seq, feat) -> (seq, batch, feat) for the LSTM surrogate.
            stacked = np.transpose(np.asarray(confs, np.float32), (1, 0, 2))
            dataset_conf.append(np.array(stacked, np.float32))
            dataset_acc.append(np.expand_dims(np.array(accs, np.float32), 1))
        if to_torch:
            dataset_conf = [torch.from_numpy(c) for c in dataset_conf]
            dataset_acc = [torch.from_numpy(a) for a in dataset_acc]
        return (dataset_conf, dataset_acc)

    def get_k_best(self, k):
        """Return the k highest-accuracy entries as (confs, accs, indices)."""
        all_confs, all_accs = [], []
        for bucket in self._dict_data.values():
            for conf, acc in bucket.values():
                all_confs.append(conf)
                all_accs.append(acc)
        acc_arr = np.array(all_accs)
        top_k_idx = np.argpartition(acc_arr, -k)[-k:]
        return ([all_confs[i] for i in top_k_idx],
                [acc_arr[i] for i in top_k_idx],
                top_k_idx)
def train_simple_surrogate(model, criterion, optimizer, data_tensors, num_epochs, device):
    """Train ``model`` on pre-batched tensors for ``num_epochs`` epochs.

    Args:
        model: surrogate network to optimize.
        criterion: loss module (e.g. MSELoss).
        optimizer: optimizer over ``model``'s parameters.
        data_tensors: pair (list of input batches, list of target batches).
        num_epochs: number of passes over the batches.
        device: torch device the batches are moved to.

    Returns:
        float | None: the last processed batch's loss value, or None if no
        batch was processed (empty data or num_epochs == 0).
    """
    inputs_list, targets_list = data_tensors
    last_loss = None
    for _ in range(num_epochs):
        model.train(True)
        # zip pairs each input batch with its target batch (the original
        # indexed targets by the inputs' range, which could raise
        # IndexError on mismatched lists).
        for inputs, targets in zip(inputs_list, targets_list):
            inputs = inputs.to(device)
            targets = targets.to(device)
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                preds = model(inputs)
                loss = criterion(preds, targets)
                loss.backward()
                optimizer.step()
            last_loss = loss.item()
    model.train(False)
    # BUG FIX: the original referenced ``loss`` after the loops, raising
    # UnboundLocalError whenever there were no batches or num_epochs == 0.
    return last_loss