code
stringlengths
17
6.64M
class DatasetFolder(VisionDataset): 'A generic data loader.\n\n This default directory structure can be customized by overriding the\n :meth:`find_classes` method.\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n exten...
def pil_loader(path: str) -> Image.Image:
    """
    Load the image at *path* with PIL and return it converted to RGB.

    The file is opened explicitly so the handle is closed promptly when the
    `with` block exits (avoids ResourceWarning with PIL's lazy loading).
    """
    with open(path, 'rb') as fh:
        image = Image.open(fh)
        return image.convert('RGB')
def accimage_loader(path: str) -> Any:
    """
    Load an image with the accimage backend, falling back to PIL on failure.
    """
    import accimage

    try:
        return accimage.Image(path)
    except IOError:
        # accimage could not decode this file; defer to the PIL loader.
        return pil_loader(path)
def default_loader(path: str) -> Any:
    """
    Dispatch to the image loader that matches torchvision's configured backend.
    """
    from torchvision import get_image_backend

    loader = accimage_loader if get_image_backend() == 'accimage' else pil_loader
    return loader(path)
class ImageFolder(DatasetFolder): 'A generic data loader where the images are arranged in this way by default: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/[...]/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/[...]/asd932_.png\n\n This class inhe...
class GaussianBlur(object): '\n Apply Gaussian Blur to the PIL image.\n ' def __init__(self, p=0.5, radius_min=0.1, radius_max=2.0): self.prob = p self.radius_min = radius_min self.radius_max = radius_max def __call__(self, img): do_it = (random.random() <= self.pro...
class Solarization(object):
    """
    Randomly apply PIL solarization to an image.

    Args:
        p: probability of applying the transform.
    """

    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        # Draw once; pass the image through when the draw exceeds p.
        if random.random() >= self.p:
            return img
        return ImageOps.solarize(img)
def strong_transforms(img_size=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment='rand-m9-mstd0.5-inc1', interpolation='random', use_prefetcher=True, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, re_prob=0.25, re_mode='pixel', re_count=1, re_num_spl...
class DataAugmentation(object): '\n implement multi-crop data augmentation.\n --global_crops_scale: scale range of the 224-sized cropped image before resizing\n --local_crops_scale: scale range of the 96-sized cropped image before resizing\n --local_crops_number: Number of small local views to generat...
def get_dataset(args): '\n build a multi-crop data augmentation and a dataset/dataloader\n ' transform = DataAugmentation(global_crops_scale=args.global_crops_scale, local_crops_scale=args.local_crops_scale, local_crops_number=args.local_crops_number, vanilla_weak_augmentation=args.vanilla_weak_augmenta...
class data_prefetcher(): '\n implement data prefetcher. we perform some augmentation on GPUs intead of CPUs\n --loader: a data loader\n --fp16: whether we use fp16, if yes, we need to tranform the data to be fp16\n ' def __init__(self, loader, fp16=True): self.loader = iter(loader) ...
def clip_gradients(model, clip): '\n clip gradient if gradient norm > clip\n ' norms = [] for (name, p) in model.named_parameters(): if (p.grad is not None): param_norm = p.grad.data.norm(2) norms.append(param_norm.item()) clip_coef = (clip / (param_norm +...
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
    """
    Cancel (zero out) the last layer's gradients while epoch < freeze_last_layer.

    The original docstring claimed the opposite condition; the code freezes the
    last layer during the FIRST `freeze_last_layer` epochs and trains it
    normally afterwards.

    Args:
        epoch (int): current training epoch.
        model (nn.Module): model whose parameters named with 'last_layer'
            may have their gradients cancelled.
        freeze_last_layer (int): number of initial epochs during which the
            last layer stays frozen.
    """
    # Past the freeze window: leave all gradients untouched.
    if epoch >= freeze_last_layer:
        return
    for n, p in model.named_parameters():
        if 'last_layer' in n:
            # Setting .grad to None makes the optimizer skip this parameter.
            p.grad = None
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0): '\n start_warmup_value to base_value in the first warmup_epochs epochs;\n then cosine scheduling base_value to final_value in the remaining epochs-warmup_epochs\n ' warmup_schedule = np.array([...
def get_params_groups(model): '\n divide the parameters into several groups, see below\n ' regularized = [] not_regularized = [] patch_embed = [] patch_embed_not_regularized = [] for (name, param) in model.named_parameters(): if (not param.requires_grad): continue ...
class LARS(torch.optim.Optimizer): '\n Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py\n ' def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001, weight_decay_filter=None, lars_adaptation_filter=None): defaults = dict(lr=lr, weight_dec...
def get_optimizer(student, len_dataloader, args): '\n build an optimizer for training\n ' params_groups = get_params_groups(student) if (args.optimizer == 'adamw'): optimizer = torch.optim.AdamW(params_groups) elif (args.optimizer == 'sgd'): optimizer = torch.optim.SGD(params_gro...
def get_logger(file_path_name): '\n build a logger which both write on the desk and also on the terminal\n ' logger = logging.getLogger() logger.setLevel('INFO') BASIC_FORMAT = '%(levelname)s:%(message)s' DATE_FORMAT = '' formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT) chlr ...
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs): '\n Re-start from checkpoint\n ' if (not os.path.isfile(ckp_path)): return print('Found checkpoint at {}'.format(ckp_path)) checkpoint = torch.load(ckp_path, map_location='cpu') for (key, value) in kwargs.items(): ...
def bool_flag(s): '\n Parse boolean arguments from the command line.\n ' FALSY_STRINGS = {'off', 'false', '0'} TRUTHY_STRINGS = {'on', 'true', '1'} if (s.lower() in FALSY_STRINGS): return False elif (s.lower() in TRUTHY_STRINGS): return True else: raise argparse.A...
def fix_random_seeds(seed=31):
    """
    Seed torch (CPU and all CUDA devices) and numpy for reproducibility.

    Args:
        seed (int): the seed value applied to every RNG.
    """
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
def has_batchnorms(model):
    """
    Return True if the model contains any batch-normalization layer.

    Args:
        model (nn.Module): model to inspect.

    Returns:
        bool: True when any submodule is a BatchNorm1d/2d/3d or SyncBatchNorm.
    """
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
    # modules() suffices: the original iterated named_modules() but never
    # used the names; any() short-circuits on the first match.
    return any(isinstance(module, bn_types) for module in model.modules())
class SmoothedValue(object): 'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n ' def __init__(self, window_size=20, fmt=None): if (fmt is None): fmt = '{median:.6f} ({global_avg:.6f})' self.deque = deque(maxlen=wi...
class MetricLogger(object): '\n build a Metric Logger\n ' def __init__(self, delimiter='\t'): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for (k, v) in kwargs.items(): if isinstance(v, torch.Tensor): ...
def get_sha(): cwd = os.path.dirname(os.path.abspath(__file__)) def _run(command): return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() sha = 'N/A' diff = 'clean' branch = 'N/A' try: sha = _run(['git', 'rev-parse', 'HEAD']) subprocess.check_output([...
def is_dist_avail_and_initialized():
    """
    Return True only when torch.distributed is both available and initialized.
    """
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """
    Return the number of processes in the default group (1 when not distributed).
    """
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """
    Return this process's rank in the default group (0 when not distributed).
    """
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """
    Return True on the rank-0 (master) process.
    """
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """
    torch.save(...) that runs only on the master (rank-0) process.

    Non-master ranks return without touching the filesystem.
    """
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def setup_for_distributed(is_master): '\n This function disables printing when not in master process\n ' import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if (is_master or force): builtin_p...
def init_distributed_ddpjob(args=None): '\n initialize the ddp job\n ' if (dist.is_available() and dist.is_initialized()): return (dist.get_world_size(), dist.get_rank()) try: os.environ['MASTER_PORT'] = '40101' torch.distributed.init_process_group(backend='nccl') except ...
def init_distributed_mode(args): '\n initialize the normal job\n ' if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)): args.rank = int(os.environ['RANK']) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ.get('LOCAL_RANK', 0)) print('args...
def accuracy(output, target, topk=(1,)): '\n Computes the accuracy over the k top predictions for the specified values of k\n ' maxk = max(topk) batch_size = target.size(0) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, (- 1)).expand_as(p...
def multi_scale(samples, model): '\n build a multi-scale features\n ' v = None for s in [1, (1 / (2 ** (1 / 2))), (1 / 2)]: if (s == 1): inp = samples.clone() else: inp = nn.functional.interpolate(samples, scale_factor=s, mode='bilinear', align_corners=False) ...
class AllGather(torch.autograd.Function): '\n gather the variable on different nodes toghther\n ' @staticmethod def forward(ctx, x): if (dist.is_available() and dist.is_initialized() and (dist.get_world_size() > 1)): outputs = [torch.zeros_like(x) for _ in range(dist.get_world_s...
class AllReduce(torch.autograd.Function): '\n reduce the variable on different nodes toghther\n ' @staticmethod def forward(ctx, x): if (dist.is_available() and dist.is_initialized() and (dist.get_world_size() > 1)): x = (x.contiguous() / dist.get_world_size()) dist....
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size): if os.path.isfile(pretrained_weights): state_dict = torch.load(pretrained_weights, map_location='cpu') if ((checkpoint_key is not None) and (checkpoint_key in state_dict)): print(f'Take key ...
@torch.no_grad() def concat_all_gather(tensor): '\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n ' tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gathe...
def getAllAttacks(): '\n Reads the input file to obtain attacks for all attackers\n And then make a list of unique attacks\n ' f = open(sys.argv[1], 'r') f_lines = f.read().split('\n') attacks = list() for line in f_lines: if ('|' in line): a = line.split('|') ...
def solveBSG(invalidAttacks): m = Model('MIQP') m.setParam('OutputFlag', False) f = open(sys.argv[1], 'r') '\n ------ Input file ------\n No. of defender strategies (X)\n No. of attackers (L)\n | Probability for an attacker (p_l)\n | No. of attack strategies for an attacker (Q_l)\n |...
def read_data(filename): "\n ------ Input file ------\n | No. of targets (n)\n | Defender's resources (rd)\n | R(c)_1 R(u)_1\n | ...\n | R(c)_n R(u)_n\n | C(c)_1 C(u)_1\n | ...\n | C(c)_n C(u)_n\n ------ Example (see BSSG_input.txt)------\n | 4\n | 2\n | 0 -15\n | 0 -10\n...
def attack_target(t_attacked): print(NUM_TARGETS, NUM_RESOURCES, C, R) m = Model('MILP') p = [] for i in range(NUM_TARGETS): name = ('p-' + str(i)) p.append(m.addVar(lb=0, ub=1, vtype=GRB.CONTINUOUS, name=name)) m.update() p_rt = [] for r in range(NUM_RESOURCES): p_...
def main(filename): read_data(filename) obj_vals = [] mp = [] for t in range(NUM_TARGETS): (val, marg_prob) = attack_target(t) obj_vals.append(val) mp.append(marg_prob) best_def_reward = max(obj_vals) best_mp = mp[obj_vals.index(best_def_reward)] f = open('best_marg...
def feasibility_test(X, constraint_structure): S = {index for (index, x) in np.ndenumerate(X)} if any((((X[i] < 0) or (X[i] > 1)) for i in S)): print('matrix entries must be between zero and one') for (key, value) in constraint_structure.items(): if ((sum([X[i] for i in key]) < value[0]) o...
def bihierarchy_test(constraint_structure): constraint_sets = [] for (key, value) in constraint_structure.items(): constraint_sets.append(set(key)) permutations = itertools.permutations(constraint_sets) for constraint_set_ordering in permutations: (listofA, listofB) = ([], []) ...
def graph_constructor(X, bihierarchy, constraint_structure): S = {index for (index, x) in np.ndenumerate(X)} (A, B) = bihierarchy (A.append(S), B.append(S)) for x in S: (A.append({x}), B.append({x})) for x in S: constraint_structure.update({frozenset({x}): (0, 1)}) R1 = nx.DiGr...
def constrained_birkhoff_von_neumann_iterator(H, X): (G, p) = H.pop(0) eligible_edges = [(from_node, to_node, edge_attributes) for (from_node, to_node, edge_attributes) in G.edges(data=True) if all((((i < edge_attributes['weight']) or (edge_attributes['weight'] < i)) for i in range(0, int((math.floor(sum(sum(...
def iterate_constrained_birkhoff_von_neumann_iterator(X, G): S = {index for (index, x) in np.ndenumerate(X)} H = [(G, 1)] solution = [] while (len(H) > 0): if any(((tolerance < x < (1 - tolerance)) for x in [d['weight'] for (u, v, d) in H[0][0].edges(data=True) if (u in [frozenset({x}) for x i...
def solution_cleaner(X, solution): S = {index for (index, x) in np.ndenumerate(X)} solution_columns_and_probs = [] for y in solution: solution_columns_and_probs.append([[(u, d['weight']) for (u, v, d) in y[0].edges(data=True) if (u in [frozenset({x}) for x in S])], y[1]]) solution_zeroed = [] ...
def constrained_birkhoff_von_neumann_decomposition(X, constraint_structure): S = {index for (index, x) in np.ndenumerate(X)} feasibility_test(X, constraint_structure) return solution_cleaner(X, iterate_constrained_birkhoff_von_neumann_iterator(X, graph_constructor(X, bihierarchy_test(constraint_structure)...
def get_marg_probs(filename='BSSG_input.txt'):
    """
    Run the Gurobi MILP script on *filename* in a child process.

    The child script is expected to write the marginal probabilities to disk
    (see the companion pickle consumed by obtain_mixed_strategy).
    """
    cmd = ['/opt/gurobi701/linux64/bin/gurobi.sh', 'BSG_multi_milp.py', filename]
    subprocess.call(cmd)
def obtain_mixed_strategy(): f = open('best_marg_prob.pkl', 'rb') best_mp = pickle.load(f) NUM_RESOURCES = len(best_mp) NUM_TARGETS = len(best_mp[0]) ' add constrains for the decomposition ' constraints = {} for r in range(NUM_RESOURCES): row = [] for t in range(NUM_TARGETS...
def homog_probs(result):
    """
    Collapse each mixed strategy to per-target coverage by summing over the
    resource axis, then print the strategies and their probabilities.

    Args:
        result: pair (probs, strategies); each strategy is a 2-D array whose
            rows are per-resource coverage vectors.
    """
    probs, strategies = result[0], result[1]
    print(strategies)
    homog_strategies = [np.sum(s, axis=0) for s in strategies]
    print(len(probs))
    print(homog_strategies)
    print('{} = {}'.format(probs, sum(probs)))
def main(filename='BSSG_input.txt'):
    """
    End-to-end driver: compute marginal probabilities with Gurobi, decompose
    them into a mixed strategy, and print the homogenized result.
    """
    get_marg_probs(filename)
    mixed = obtain_mixed_strategy()
    homog_probs(mixed)
def printSeperator():
    """Print a fixed horizontal separator line to stdout."""
    print('---------------')
def run_ensemble(eval_list, score_list): os.makedirs('VA-Track', exist_ok=True) video_names = open(eval_list.name, 'r').read().splitlines() video_scores = {k: {'valence': None, 'arousal': None} for k in video_names} score_names = open(score_list.name, 'r').read().splitlines() nb_scores = len(score...
def parse_arguments(): 'Parses command-line flags.\n ' parser = argparse.ArgumentParser() parser.add_argument('-l', '--eval_list', help='Text file containing names of videos to be evaluated on.', type=argparse.FileType('r'), required=True) parser.add_argument('-s', '--score_list', help='Text file c...
def main():
    """
    CLI entry point: parse arguments, optionally enable debug logging, run
    the ensemble, and log the elapsed wall-clock time.
    """
    start = time.time()
    args = parse_arguments()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # verbose is consumed here; run_ensemble doesn't accept it.
    del args.verbose
    run_ensemble(**vars(args))
    elapsed = time.time() - start
    logging.info('Computed in %s seconds', elapsed)
def main(hparams): model = AffWild2VA(hparams) checkpoint = torch.load(hparams.checkpoint, map_location=(lambda storage, loc: storage)) model.load_state_dict(checkpoint['state_dict']) print('Loaded pretrained weights') trainer = Trainer(gpus=hparams.gpus, nb_gpu_nodes=hparams.nodes, distributed_ba...
class AttFusion(nn.Module): def __init__(self, input_dim=[512, 512], hidden_dim=128): super(AttFusion, self).__init__() self.use_proj = (input_dim[1] != input_dim[0]) if self.use_proj: self.proj_v = nn.Linear(input_dim[1], input_dim[0]) self.scorer_a = GRU(input_dim[0]...
class AudioSet(pl.LightningModule): def __init__(self, hparams): super(AudioSet, self).__init__() self.hparams = hparams self.audio = GRU(200, self.hparams.num_hidden, 2, 527, self.hparams.num_fc_layers, dropout=True) self.history = {'lr': [], 'loss': []} def forward(self, x)...
class VA_VGGFace(nn.Module): def __init__(self, inputDim=4096, hiddenDim=512, nLayers=2, nClasses=2, frameLen=16, backend='gru', nFCs=1): super(VA_VGGFace, self).__init__() self.inputDim = inputDim self.hiddenDim = hiddenDim self.nClasses = nClasses self.frameLen = frameLe...
class VA_3DVGGM(nn.Module): def __init__(self, inputDim=512, hiddenDim=512, nLayers=2, nClasses=2, frameLen=16, backend='gru', norm_layer='bn', nFCs=1): super(VA_3DVGGM, self).__init__() self.inputDim = inputDim self.hiddenDim = hiddenDim self.nClasses = nClasses self.fram...
class VA_3DVGGM_Split(nn.Module): def __init__(self, inputDim=512, hiddenDim=512, nLayers=2, frameLen=16, nClasses=2, backend='gru', norm_layer='bn', split_layer=5, nFCs=1, use_mtl=False): super(VA_3DVGGM_Split, self).__init__() self.inputDim = inputDim self.hiddenDim = hiddenDim ...
class VA_3DResNet(nn.Module): def __init__(self, inputDim=512, hiddenDim=512, nLayers=2, nClasses=2, frameLen=16, backend='gru', use_cbam=False, resnet_ver='v2', resnet_depth=18, frontend_agg_mode='ap', nFCs=1): super(VA_3DResNet, self).__init__() self.inputDim = inputDim self.hiddenDim =...
class VA_3DDenseNet(nn.Module): def __init__(self, inputDim=392, hiddenDim=512, nLayers=2, nClasses=2, frameLen=16, backend='gru', frontend_agg_mode='ap', nFCs=1): super(VA_3DDenseNet, self).__init__() self.inputDim = inputDim self.hiddenDim = hiddenDim self.nClasses = nClasses ...
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class BasicConv(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): super(BasicConv, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_s...
class ChannelGate(nn.Module): '\n Given a feature map of shape (B, C, H, W):\n - Compute MaxPool and AvgPool of shape (B, C, 1)\n - MLP (B, C/ratio, 1) - (B, C, 1)\n - Sum activations (B, C, 1)\n - Broadcast to original size\n ' def __init__(self, gate_channels, reduction_ratio=16): ...
class ChannelPool(nn.Module): '\n Given a channel refined feature:\n - Compute MaxPool and AvgPool feature maps across channels\n - Stack into a 2-channel feature map\n ' def forward(self, x): channel_max = torch.max(x, 1)[0].unsqueeze(1) channel_mean = torch.mean(x, 1).unsqueeze(...
class SpatialGate(nn.Module): '\n Given a channel refined feature:\n - Compute channel-pooled feature map\n - Apply (in=2, out=1, kernel=5) convolution\n - Apply sigmoid to obtain spatial attention\n ' def __init__(self): super(SpatialGate, self).__init__() kernel_size = 5 ...
class CBAM(nn.Module): '\n Given an input feature map of shape (B, C, H, W):\n - Apply channel attention \n - Apply spatial attention \n ' def __init__(self, gate_channels, reduction_ratio=16): super(CBAM, self).__init__() self.ChannelGate = ChannelGate(gate_channels, reduction_ra...
class _DenseLayer_3D(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): super(_DenseLayer_3D, self).__init__() (self.add_module('norm1', nn.BatchNorm3d(num_input_features)),) (self.add_module('relu1', nn.ReLU(inplace=True)),) (self.add_module(...
class _DenseBlock_3D(nn.Sequential): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): super(_DenseBlock_3D, self).__init__() for i in range(num_layers): layer = _DenseLayer_3D((num_input_features + (i * growth_rate)), growth_rate, bn_size, drop_rate...
class _Transition_3D(nn.Sequential): def __init__(self, num_input_features, num_output_features, pooling=True): super(_Transition_3D, self).__init__() self.add_module('norm', nn.BatchNorm3d(num_input_features)) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv',...
class DenseNet52_3D(nn.Module): def __init__(self, num_classes=256, num_init_features=64, bn_size=4, block_config=(4, 6, 8, 6), growth_rate=32, dp=0.0, agg_mode='ap', fmap_out_size=3): super(DenseNet52_3D, self).__init__() num_features = num_init_features self.agg_mode = agg_mode ...
class BatchExponentialLR(_LRScheduler): 'Exponentially increases the learning rate between two boundaries over a number of\n iterations.\n Arguments:\n optimizer (torch.optim.Optimizer): wrapped optimizer.\n end_lr (float): the final learning rate.\n num_iter (int): the number of iterat...
def plot_lr(history, skip_start=10, skip_end=5, log_lr=True, show_lr=None): 'Plots the learning rate range test.\n Arguments:\n skip_start (int, optional): number of batches to trim from the start.\n Default: 10.\n skip_end (int, optional): number of batches to trim from the start.\n ...
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (spatial size kept at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) ...
class ResNet(nn.Module): def __init__(self, block, layers, num_classes=256, zero_init_residual=True, agg_mode='ap', fmap_out_size=3, use_cbam=False): super(ResNet, self).__init__() self.inplanes = 64 self.agg_mode = agg_mode self.layer1 = self._make_layer(block, 64, layers[0], use...
class BasicBlockV2(nn.Module): 'BasicBlock V2 from\n `"Identity Mappings in Deep Residual Networks"<https://arxiv.org/abs/1603.05027>`_ paper.\n This is used for ResNet V2 for 18, 34 layers.\n Args:\n inplanes (int): number of input channels.\n planes (int): number of output channels.\n ...
class ResNetV2(nn.Module): 'ResNet V2 model from\n `"Identity Mappings in Deep Residual Networks"<https://arxiv.org/abs/1603.05027>`_ paper.\n Args:\n block (Module) : class for the residual block. Options are BasicBlockV1, BottleneckV1.\n layers (list of int) : numbers of layers in each block...
def concordance_cc2(r1, r2, reduction='mean'): '\n Computes batch sequence-wise CCC.\n ' r1_mean = r1.mean(dim=(- 1), keepdim=True) r2_mean = r2.mean(dim=(- 1), keepdim=True) mean_cent_prod = ((r1 - r1_mean) * (r2 - r2_mean)).mean(dim=(- 1), keepdim=True) ccc = ((2 * mean_cent_prod) / ((r1.v...
def concordance_cc2_np(r1, r2):
    """
    Lin's concordance correlation coefficient between two 1-D numpy arrays.
    """
    m1, m2 = r1.mean(), r2.mean()
    covariance = ((r1 - m1) * (r2 - m2)).mean()
    return (2 * covariance) / (r1.var() + r2.var() + (m1 - m2) ** 2)
def mse(preds, labels):
    """
    Mean squared error between predictions and labels.

    Args:
        preds: predicted values (array-like supporting elementwise ops).
        labels: ground-truth values of the same length.
    """
    squared_errors = (preds - labels) ** 2
    return sum(squared_errors) / len(labels)
def smooth_predictions(preds, window=13, mode='wiener'):
    """
    Smooth predictions column-wise with a median or Wiener filter.

    Args:
        preds: 2-D array of predictions (frames x dimensions).
        window: filter window length.
        mode: 'median' or 'wiener'.

    Returns:
        Smoothed array; None for an unrecognized mode (matching the
        original's implicit-None behavior).
    """
    filters = {
        'median': lambda col: medfilt(col, window),
        'wiener': lambda col: wiener(col, window),
    }
    smoother = filters.get(mode)
    if smoother is None:
        return None
    return np.apply_along_axis(smoother, 0, preds)
def plot_results(base_path, y1, y2, index):
    """
    Plot actual vs. predicted traces for one affect dimension.

    Args:
        base_path: accepted for interface compatibility but unused here.
        y1: ground-truth sequence.
        y2: predicted sequence of the same length.
        index: dimension name used in labels (e.g. 'Valence').
    """
    frames = np.arange(len(y1))
    plt.plot(frames, y1, label=('Actual ' + index))
    plt.plot(frames, y2, label=('Predicted ' + index))
    plt.xlabel('Frames')
    plt.ylabel(index)
    plt.title('Aff-Wild2 predictions')
    plt.legend()
    plt.show()
class VGGFace(nn.Module): def __init__(self): 'VGGFace model, assuming 112x112 input.\n ' super().__init__() self.conv1 = _ConvBlock(3, 64, 64) self.conv2 = _ConvBlock(64, 128, 128) self.conv3 = _ConvBlock(128, 256, 256, 256) self.conv4 = _ConvBlock(256, 512...
class _ConvBlock(nn.Module): 'A Convolutional block.' def __init__(self, *units): 'Create a block with len(units) - 1 convolutions.\n convolution number i transforms the number of channels from \n units[i - 1] to units[i] channels.\n ' super().__init__() self.conv...
class VoxCeleb2_1k(pl.LightningModule): def __init__(self, hparams): super(VoxCeleb2_1k, self).__init__() self.hparams = hparams if (self.hparams.backbone == 'resnet'): self.visual = VA_3DResNet(frameLen=self.hparams.window, backend=self.hparams.backend, resnet_ver='v1', nClas...
def main(hparams): torch.backends.cudnn.deterministic = True random.seed(hparams.seed) torch.manual_seed(hparams.seed) torch.cuda.manual_seed(hparams.seed) np.random.seed(hparams.seed) model = AudioSet(hparams) if hparams.checkpoint: model = model.load_from_checkpoint(hparams.check...
def main(hparams): torch.backends.cudnn.deterministic = True random.seed(hparams.seed) torch.manual_seed(hparams.seed) torch.cuda.manual_seed(hparams.seed) np.random.seed(hparams.seed) model = VoxCeleb2_1k(hparams) if hparams.checkpoint: model = model.load_from_checkpoint(hparams.c...
def extract_melspec(task): (fps, src_wav, dst_npy) = task src_wav = src_wav.replace('_left', '').replace('_right', '') if os.path.exists(dst_npy): return 1 try: (y, sr) = librosa.load(src_wav, sr=16000) hop_length = int(((((1 / 3) * 1) / fps) * 16000)) power = librosa.f...
def main(hparams): torch.backends.cudnn.deterministic = True random.seed(hparams.seed) torch.manual_seed(hparams.seed) torch.cuda.manual_seed(hparams.seed) np.random.seed(hparams.seed) model = AffWild2VA(hparams) if hparams.fusion_checkpoint: checkpoint = torch.load(hparams.fusion_...
def compute_melspec(filename, outdir): wav = librosa.load(filename, sr=44100)[0] melspec = librosa.feature.melspectrogram(wav, sr=44100, n_fft=(128 * 20), hop_length=(347 * 2), n_mels=128, fmin=20, fmax=(44100 // 2)) logmel = librosa.core.power_to_db(melspec) np.save(((outdir + os.path.basename(filena...
def make_extract_vggish_embedding(frame_duration, hop_duration, input_op_name='vggish/input_features', output_op_name='vggish/embedding', embedding_size=128, resources_dir=None): '\n Creates a coroutine generator for extracting and saving VGGish embeddings\n\n Parameters\n ----------\n frame_duration\...
def extract_embeddings_vggish(annotation_path, dataset_dir, output_dir, vggish_resource_dir, frame_duration=0.96, hop_duration=0.96, progress=True, vggish_embedding_size=128): '\n Extract embeddings for files annotated in the SONYC annotation file and save them to disk.\n\n Parameters\n ----------\n a...