code
stringlengths
17
6.64M
class ProgressMeter(object):
    """Pretty-printer for a set of metric meters during training.

    Prints lines like ``prefix[ 12/500]<tab>meter1<tab>meter2`` where the
    batch index is right-padded to the width of the total batch count.
    """

    def __init__(self, num_batches, meters, prefix=''):
        # Pre-compute the '[{:Nd}/total]' template once.
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the current batch index followed by every meter."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Fix: the original computed len(str(num_batches // 1)); the
        # ``// 1`` is a no-op (num_batches is already an int) — drop it.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        max_k = max(topk)
        n_samples = target.size(0)
        # (batch, max_k) indices of the top predictions, transposed to
        # (max_k, batch) so row k-1 slicing gives "within top-k".
        _, top_pred = output.topk(max_k, 1, True, True)
        top_pred = top_pred.t()
        hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
        results = []
        for k in topk:
            hits_k = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(hits_k.mul_(100.0 / n_samples))
        return results
class Trainer():
    """Drives the train/validation loop and logs metrics to TensorBoard.

    Saves the best-accuracy model and the latest model each epoch under
    ``output_dir`` and records optional layer-alignment / weight-ratio
    diagnostics for the feedback-alignment family of modes.
    """

    def __init__(self, model, mode, loss_function, optimizer, lr_scheduler,
                 train_dataloader, val_dataloader, device, epochs, output_dir,
                 metrics_config, multi_gpu=False):
        self.model = model
        self.mode = mode
        self.output_dir = output_dir
        self.logs_dir = os.path.join(output_dir, 'logs')
        self.loss_function = loss_function
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.device = device
        self.epochs = epochs
        self.multi_gpu = multi_gpu
        self.display_iterations = metrics_config['display_iterations']
        self.record_layer_alignment = metrics_config['layer_alignment']
        self.record_weight_ratio = metrics_config['weight_ratio']
        self.top_k = metrics_config['top_k']
        self.writer = SummaryWriter(self.logs_dir)
        # Modes for which layer alignment diagnostics are implemented.
        self.layer_alignment_modes = ['fa', 'usf', 'frsf', 'brsf']

    def write_layer_alignment(self, epoch):
        """Log per-layer alignment angles if enabled and supported."""
        if not self.record_layer_alignment:
            return
        if self.mode in self.layer_alignment_modes:
            try:
                layers_alignment = compute_angles_module(self.model)
                self.writer.add_scalars('layer_alignment/train', layers_alignment, epoch)
            except BaseException:
                # Best-effort diagnostic; never abort training over it.
                pass
        else:
            print('Layer alignment is not implemented for {}'.format(self.mode))

    def write_weight_ratio(self, epoch):
        """Log per-layer weight ratio diagnostics if enabled."""
        if not self.record_weight_ratio:
            return
        try:
            weight_difference = compute_weight_ratio_module(self.model, self.mode)
            self.writer.add_scalars('weight_difference/train', weight_difference, epoch)
        except BaseException:
            # Best-effort diagnostic; never abort training over it.
            pass

    def run(self):
        """Run the full training loop for ``self.epochs`` epochs."""
        self.best_acc = 0.0
        for epoch in range(self.epochs):
            self.write_layer_alignment(epoch)
            self.write_weight_ratio(epoch)
            t = time.time()
            acc, loss = train(model=self.model, mode=self.mode,
                              loss_function=self.loss_function,
                              optimizer=self.optimizer,
                              train_dataloader=self.train_dataloader,
                              device=self.device, multi_gpu=self.multi_gpu,
                              epoch=epoch, top_k=self.top_k,
                              display_iterations=self.display_iterations)
            self.writer.add_scalar('accuracy/train', acc, epoch)
            self.writer.add_scalar('loss/train', loss, epoch)
            acc, loss = test(model=self.model, loss_function=self.loss_function,
                             test_dataloader=self.val_dataloader,
                             device=self.device, top_k=self.top_k)
            self.writer.add_scalar('accuracy/test', acc, epoch)
            self.writer.add_scalar('loss/test', loss, epoch)
            if acc > self.best_acc:
                self.best_acc = max(acc, self.best_acc)
                print('New best accuracy reached: {} \nSaving best accuracy model...'.format(self.best_acc))
                if self.multi_gpu:
                    # Unwrap DataParallel so the checkpoint loads without it.
                    torch.save(self.model.module, os.path.join(self.output_dir, 'model_best_acc.pth'))
                else:
                    torch.save(self.model, os.path.join(self.output_dir, 'model_best_acc.pth'))
            torch.save(self.model, os.path.join(self.output_dir, 'latest_model.pth'))
            total_time = time.time() - t
            self.lr_scheduler.step()
            self.writer.add_scalar('time/train', total_time, epoch)
            with open(os.path.join(self.output_dir, 'best_acc.txt'), 'w') as f:
                f.write(str(self.best_acc))
        # Record diagnostics once more after the final epoch.
        self.write_layer_alignment(epoch)
        self.write_weight_ratio(epoch)
def read_yaml(yaml_path):
    """Parse a YAML file and return the resulting object.

    NOTE(review): ``yaml.Loader`` can construct arbitrary Python objects —
    only use on trusted configuration files.
    """
    with open(yaml_path, 'r') as stream:
        return yaml.load(stream, Loader=yaml.Loader)
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    Fix: uses ``exist_ok=True`` instead of the original
    ``if not os.path.exists(path)`` guard, which was racy (the directory
    could be created between the check and the ``makedirs`` call).
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create one directory or every directory in a list of paths."""
    if isinstance(paths, list) and not isinstance(paths, str):
        for single_path in paths:
            mkdir(single_path)
    else:
        mkdir(paths)
def path_exists(path):
    """Return ``True`` if *path* exists; raise ``ValueError`` otherwise."""
    if not os.path.exists(path):
        raise ValueError('Path provided does not exist.')
    return True
def read_schema(schema_name):
    """Load ``<schema_name>.json`` from the package-level ``schemas`` dir."""
    schema_path = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', 'schemas',
                     schema_name + '.json'))
    with open(schema_path) as schema_file:
        return json.load(schema_file)
def validate_config(instance, schema_name, defaults=True):
    """Validate *instance* against the named JSON schema.

    With ``defaults=True`` a default-filling Draft-7 validator is used so
    missing properties are populated in-place from schema defaults.

    NOTE(review): jsonschema raises ``ValidationError``; whether the
    ``except ValueError`` handlers below ever fire depends on the installed
    jsonschema version — confirm.
    """
    schema_path = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', 'schemas',
                     schema_name + '.json'))
    with open(schema_path) as schema_file:
        schema = json.load(schema_file)
    if defaults:
        default_validator = extend_schema_with_default(Draft7Validator)
        try:
            default_validator(schema).validate(instance)
        except ValueError:
            raise ValueError('Error when validating the default schema.')
    else:
        try:
            jsonschema.validate(instance, schema)
        except ValueError:
            raise ValueError('Error when validating the schema.')
def extend_schema_with_default(validator_class):
    """Return *validator_class* extended so that validation also fills in
    schema ``default`` values on dict instances (standard jsonschema recipe)."""
    validate_properties = validator_class.VALIDATORS['properties']

    def set_defaults(validator, properties, instance, schema):
        for prop_name, subschema in properties.items():
            if 'default' in subschema and not isinstance(instance, list):
                instance.setdefault(prop_name, subschema['default'])
        yield from validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {'properties': set_defaults})
class Model(nn.Module):
    """Three-block CNN (conv/relu[/pool]) with a global-average-pooled
    128-dim feature fed into a 10-way linear classifier."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
        self.relu3 = nn.ReLU()
        self.fc = nn.Linear(in_features=128, out_features=10)

    def forward(self, x):
        features = self.pool1(self.relu1(self.conv1(x)))
        features = self.pool2(self.relu2(self.conv2(features)))
        features = self.relu3(self.conv3(features))
        # Global average pooling over the remaining spatial extent.
        features = F.avg_pool2d(features, features.size()[3])
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
def tflog2pandas(path: str) -> pd.DataFrame:
    """Convert a single TensorFlow event-log file to a pandas DataFrame.

    Parameters
    ----------
    path : str
        Path to the TensorFlow log file.

    Returns
    -------
    pd.DataFrame
        Long-form frame with columns ``metric``, ``value``, ``step``
        (empty if the event file cannot be read).
    """
    # Keep every scalar event (0 == no limit) but only one of everything else.
    DEFAULT_SIZE_GUIDANCE = {'compressedHistograms': 1, 'images': 1,
                             'scalars': 0, 'histograms': 1}
    runlog_data = pd.DataFrame({'metric': [], 'value': [], 'step': []})
    try:
        event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)
        event_acc.Reload()
        for tag in event_acc.Tags()['scalars']:
            events = event_acc.Scalars(tag)
            frame = pd.DataFrame({
                'metric': [tag] * len(events),
                'value': [e.value for e in events],
                'step': [e.step for e in events],
            })
            runlog_data = pd.concat([runlog_data, frame])
    except Exception:
        print('Event file possibly corrupt: {}'.format(path))
        traceback.print_exc()
    return runlog_data
def sorting_function(x1, x2):
    """Comparator for names like ``conv_3`` / ``layer_10_x``: order by the
    integer in the second ``_``-separated field, then by the leading name."""
    left = x1.split('_')
    right = x2.split('_')
    left_num, right_num = int(left[1]), int(right[1])
    if left_num != right_num:
        return -1 if left_num < right_num else 1
    return -1 if left[0] <= right[0] else 1
def get_layer_alignment(dir_logs, net='resnet'):
    """Collect per-layer alignment curves from TensorBoard event dirs.

    Returns an ordered dict mapping layer name -> list of scalar values,
    sorted by depth with fully-connected layers moved to the end.
    """
    layers_alignment = {}
    for folder in os.listdir(dir_logs):
        if 'layer_alignment' not in folder:
            continue
        event_files = glob.glob(os.path.join(dir_logs, folder, 'event*'))
        if len(event_files) > 0:
            frame = tflog2pandas(event_files[0])
            key = folder.replace('layer_alignment_train_', '')
            layers_alignment[key] = frame['value'].tolist()
    keys = sorted(layers_alignment.keys(), key=cmp_to_key(sorting_function))
    # FC layers belong at the end of the depth ordering.
    if net == 'resnet':
        keys.remove('fc_0')
        keys.append('fc_0')
    elif net == 'lenet':
        keys.remove('fc1_0')
        keys.remove('fc2_0')
        keys.append('fc1_0')
        keys.append('fc2_0')
    return {key: layers_alignment[key] for key in keys}
def get_layer_weights(dir_logs, net='resnet', normalization=None):
    """Collect per-layer weight-difference curves from TensorBoard logs.

    Like ``get_layer_alignment`` but reads ``weight_difference`` runs and,
    when *normalization* is given, scales the i-th layer's values by
    ``normalization[i]`` (in depth order).
    """
    layer_values = {}
    for folder in os.listdir(dir_logs):
        if 'weight_difference' not in folder:
            continue
        event_files = glob.glob(os.path.join(dir_logs, folder, 'event*'))
        if len(event_files) > 0:
            frame = tflog2pandas(event_files[0])
            key = folder.replace('weight_difference_train_', '')
            layer_values[key] = frame['value'].tolist()
    keys = sorted(layer_values.keys(), key=cmp_to_key(sorting_function))
    # FC layers belong at the end of the depth ordering.
    if net == 'resnet':
        keys.remove('fc_0')
        keys.append('fc_0')
    elif net == 'lenet':
        keys.remove('fc1_0')
        keys.remove('fc2_0')
        keys.append('fc1_0')
        keys.append('fc2_0')
    layer_weights = {key: layer_values[key] for key in keys}
    if normalization:
        for idx, (key, value) in enumerate(layer_weights.items()):
            layer_weights[key] = np.array(value) * normalization[idx]
    return layer_weights
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    Fix: uses ``exist_ok=True`` instead of the original
    ``if not os.path.exists(path)`` guard, which was racy (the directory
    could be created between the check and the ``makedirs`` call).
    """
    os.makedirs(path, exist_ok=True)
def plot_multiple_lists(ydata, xdata, x_axis_name, y_axis_name, title,
                        save_dir, figname, cmap='winter'):
    """Plot every series in *ydata* against *xdata*, colour-graded by layer
    depth, with a depth colorbar; save as ``save_dir/figname.pdf``."""
    n = len(ydata)
    cmap_ = plt.cm.get_cmap(cmap)
    colors = iter(cmap_(np.linspace(0, 1, n)))
    colors_cmap = cmap_(np.arange(cmap_.N))
    # Dummy filled contour: only used as a mappable for the colorbar.
    Z = [[0, 0], [0, 0]]
    levels = range(0, n, 1)
    CS3 = plt.contourf(Z, levels, cmap=cmap_)
    plt.clf()
    mkdir(save_dir)
    with plt.style.context('ggplot'):
        for series_name, series in ydata.items():
            plt.plot(xdata, series, color=next(colors))
        plt.title(title, pad=10)
        plt.xlabel(x_axis_name)
        plt.ylabel(y_axis_name)
        cbar = plt.colorbar(CS3)
        cbar.set_label('Layer Depth', labelpad=10)
        plt.tight_layout()
        plt.savefig('{}/{}.pdf'.format(save_dir, figname), dpi=200)
        plt.show()
class FGSM(Attack):
    """FGSM from 'Explaining and Harnessing Adversarial Examples'
    [https://arxiv.org/abs/1412.6572]

    Distance measure: Linf.

    Arguments:
        model (nn.Module): model to attack.
        eps (float): maximum perturbation. (Default: 0.35)
        mode (str): 'DFA' routes the loss gradient to every layer before
            backward; any other value uses plain autograd.

    Shape:
        - images: (N, C, H, W), values in [0, 1].
        - labels: (N,) with 0 <= y_i < number of labels.
        - output: (N, C, H, W).

    Examples::
        >>> attack = torchattacks.FGSM(model, eps=0.007)
        >>> adv_images = attack(images, labels)
    """

    def __init__(self, model, eps=0.35, mode='bp'):
        super().__init__('FGSM', model)
        self.eps = eps
        self._supported_mode = ['default', 'targeted']
        self.mode = mode

    def forward(self, images, labels):
        """Overridden."""
        images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)
        loss_function = nn.CrossEntropyLoss()
        images.requires_grad = True
        outputs = self.model(images)
        cost = loss_function(outputs, labels)
        if self.mode == 'DFA':
            # Hand every layer the loss gradient so the DFA feedback path
            # is exercised by backward(), then take the input gradient.
            self.model.zero_grad()
            loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
            for layer in self.model[1].module.modules():
                layer.loss_gradient = loss_gradient
            cost.backward()
            grad = images.grad
        else:
            grad = torch.autograd.grad(cost, images, retain_graph=False,
                                       create_graph=False)[0]
        self.grad = grad
        adv_images = images + self.eps * grad.sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        return adv_images
class PGD(Attack):
    """PGD from 'Towards Deep Learning Models Resistant to Adversarial Attacks'
    [https://arxiv.org/abs/1706.06083]

    Distance measure: Linf.

    Arguments:
        model (nn.Module): model to attack.
        eps (float): maximum perturbation. (Default: 0.35)
        mode (str): 'DFA' routes the loss gradient to every layer before
            backward; any other value uses plain autograd.
        alpha (float): step size. (Default: 2/255)
        steps (int): number of steps. (Default: 40)
        random_start (bool): use random initialization of delta. (Default: True)

    Shape:
        - images: (N, C, H, W), values in [0, 1].
        - labels: (N,) with 0 <= y_i < number of labels.
        - output: (N, C, H, W).

    Examples::
        >>> attack = torchattacks.PGD(model, eps=8/255, alpha=1/255, steps=40, random_start=True)
        >>> adv_images = attack(images, labels)
    """

    def __init__(self, model, eps=0.35, mode='bp', alpha=(2 / 255), steps=40,
                 random_start=True):
        super().__init__('PGD', model)
        self.eps = eps
        self.alpha = alpha
        self.steps = steps
        self.random_start = random_start
        self._supported_mode = ['default', 'targeted']
        self.mode = mode

    def forward(self, images, labels):
        """Overridden."""
        images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)
        loss = nn.CrossEntropyLoss()
        adv_images = images.clone().detach()
        if self.random_start:
            # Start from a random point inside the eps-ball.
            adv_images = adv_images + torch.empty_like(adv_images).uniform_(-self.eps, self.eps)
            adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        for _ in range(self.steps):
            adv_images.requires_grad = True
            outputs = self.model(adv_images)
            cost = loss(outputs, labels)
            if self.mode == 'DFA':
                # Hand every layer the loss gradient so the DFA feedback
                # path is exercised by backward().
                self.model.zero_grad()
                loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
                for layer in self.model[1].module.modules():
                    layer.loss_gradient = loss_gradient
                cost.backward()
                grad = adv_images.grad
            else:
                grad = torch.autograd.grad(cost, adv_images, retain_graph=False,
                                           create_graph=False)[0]
            self.grad = grad
            adv_images = adv_images.detach() + self.alpha * grad.sign()
            # Project back onto the eps-ball and the valid pixel range.
            delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)
            adv_images = torch.clamp(images + delta, min=0, max=1).detach()
        return adv_images
class TPGD(Attack):
    """PGD on a KL-divergence loss, from 'Theoretically Principled Trade-off
    between Robustness and Accuracy' [https://arxiv.org/abs/1901.08573]

    Distance measure: Linf.

    Arguments:
        model (nn.Module): model to attack.
        mode (str): 'DFA' routes the loss gradient to every layer before
            backward; any other value uses plain autograd.
        eps (float): strength of the attack or maximum perturbation. (Default: 8/255)
        alpha (float): step size. (Default: 2/255)
        steps (int): number of steps. (Default: 7)

    Shape:
        - images: (N, C, H, W), values in [0, 1].
        - output: (N, C, H, W).

    Examples::
        >>> attack = torchattacks.TPGD(model, eps=8/255, alpha=2/255, steps=7)
        >>> adv_images = attack(images)
    """

    def __init__(self, model, mode='bp', eps=(8 / 255), alpha=(2 / 255), steps=7):
        super().__init__('TPGD', model)
        self.eps = eps
        self.alpha = alpha
        self.steps = steps
        self._supported_mode = ['default']
        self.mode = mode

    def forward(self, images, labels=None):
        """Overridden."""
        images = images.clone().detach().to(self.device)
        # The soft prediction on the clean input is the KL target.
        logit_ori = self.model(images).detach()
        labels = F.softmax(logit_ori, dim=1)
        adv_images = images + 0.001 * torch.randn_like(images)
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        loss = nn.KLDivLoss(reduction='sum')
        for _ in range(self.steps):
            adv_images.requires_grad = True
            logit_adv = self.model(adv_images)
            outputs = F.log_softmax(logit_adv, dim=1)
            cost = loss(outputs, labels)
            if self.mode == 'DFA':
                # Hand every layer the loss gradient so the DFA feedback
                # path is exercised by backward().
                self.model.zero_grad()
                loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
                for layer in self.model[1].module.modules():
                    layer.loss_gradient = loss_gradient
                cost.backward()
                grad = adv_images.grad
            else:
                grad = torch.autograd.grad(cost, adv_images, retain_graph=False,
                                           create_graph=False)[0]
            adv_images = adv_images.detach() + self.alpha * grad.sign()
            # Project back onto the eps-ball and the valid pixel range.
            delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)
            adv_images = torch.clamp(images + delta, min=0, max=1).detach()
        return adv_images
@pytest.fixture(scope='session')
def config_bp_path():
    """Absolute path to the backpropagation test configuration file."""
    return os.path.abspath(
        os.path.join('tests', 'fixtures', 'config_files', 'config_bp.yaml'))
@pytest.fixture(scope='session')
def config_usf_reproducible_path():
    """Absolute path to the reproducible-USF test configuration file."""
    return os.path.abspath(
        os.path.join('tests', 'fixtures', 'config_files',
                     'config_usf_reproducible.yaml'))
def test_benchmark(config_bp_path):
    """An end-to-end benchmark run must produce every expected artefact."""
    benchmark = Benchmark(config_bp_path)
    benchmark.run()
    current_files = os.listdir('tests/tmp/mnist/le_net/backpropagation_test/')
    expected_files = ['best_acc.txt', 'config.yaml', 'latest_model.pth',
                      'results.csv', 'results.json', 'model_best_acc.pth',
                      'logs']
    for expected in expected_files:
        assert expected in current_files
def test_benchmark_command_line_reproducibility_cpu(config_usf_reproducible_path):
    """Two identical CLI benchmark runs must yield identical result frames."""
    cmd = ['python', 'benchmark.py', '--config', config_usf_reproducible_path]
    subprocess.run(cmd)
    results_1 = pd.read_json('tests/tmp/mnist/le_net/usf_test/results.json')
    subprocess.run(cmd)
    results_2 = pd.read_json('tests/tmp/mnist/le_net/usf_test/results.json')
    pd.testing.assert_frame_equal(results_1, results_2)
@pytest.fixture(scope='session')
def mode_types():
    """All training modes exercised by the conversion tests."""
    return ['backpropagation', 'fa', 'dfa', 'usf', 'brsf', 'frsf']
class Model(nn.Module):
    """Tiny conv net used as a fixture for the module-conversion tests."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 3)
        self.relu = nn.ReLU()
        self.fc = nn.Linear(20, 10)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        # Global average pooling collapses the spatial dims to 1x1.
        out = F.avg_pool2d(out, out.size()[3])
        # Bug fix: flatten (N, 20, 1, 1) -> (N, 20) before the linear layer;
        # the original passed the 4-D tensor straight to nn.Linear(20, 10),
        # whose last dimension (1) does not match in_features=20 and raises
        # a RuntimeError on any forward pass.
        out = out.view(out.size(0), -1)
        return self.fc(out)
@pytest.fixture(scope='function')
def dummy_net():
    """A fresh dummy model instance per test."""
    return Model()
@pytest.fixture(scope='function')
def dummy_net_constructor():
    """The dummy model class itself, for tests that need several instances."""
    return Model
@pytest.fixture(scope='session')
def datasets_available():
    """Every dataset name the selector is expected to support."""
    return ['mnist', 'cifar10', 'cifar10_benchmark', 'cifar100',
            'fashion_mnist', 'imagenet']
def test_datasets_implemented(datasets_available):
    """Every advertised dataset name must resolve to a dataset object."""
    for dataset_name in datasets_available:
        assert DatasetSelector(dataset_name).get_dataset()
@pytest.fixture(scope='session')
def model_architectures():
    """(architecture name, dummy input size) pairs for smoke tests."""
    return [('le_net_mnist', (1, 1, 32, 32)),
            ('le_net_cifar', (1, 3, 32, 32)),
            ('resnet18', (1, 3, 128, 128)),
            ('resnet20', (1, 3, 128, 128)),
            ('resnet56', (1, 3, 128, 128))]
def check_model(model, input_size):
    """Instantiate *model* and run one forward pass with a random input.

    DFA models need the targets and loss function at forward time so the
    direct feedback path can be built; everything else is called plainly.
    """
    instance = model()
    if 'mode' in instance.__dict__ and instance.mode == 'dfa':
        instance.forward(torch.rand(input_size),
                         targets=torch.LongTensor([1]),
                         loss_function=torch.nn.CrossEntropyLoss())
    else:
        instance(torch.rand(input_size))
def test_backpropagation_models(model_architectures):
    """Forward-pass smoke test for every backpropagation architecture."""
    for arch, input_size in model_architectures:
        check_model(models.backpropagation.__dict__[arch], input_size)
def test_fa_models(model_architectures):
    """Forward-pass smoke test for every feedback-alignment architecture."""
    for arch, input_size in model_architectures:
        check_model(models.fa.__dict__[arch], input_size)
def test_dfa_models(model_architectures):
    """Forward-pass smoke test for every direct-feedback-alignment architecture."""
    for arch, input_size in model_architectures:
        check_model(models.dfa.__dict__[arch], input_size)
def test_usf_models(model_architectures):
    """Forward-pass smoke test for every USF architecture."""
    for arch, input_size in model_architectures:
        check_model(models.usf.__dict__[arch], input_size)
def test_brsf_models(model_architectures):
    """Forward-pass smoke test for every BRSF architecture."""
    for arch, input_size in model_architectures:
        check_model(models.brsf.__dict__[arch], input_size)
def test_frsf_models(model_architectures):
    """Forward-pass smoke test for every FRSF architecture."""
    for arch, input_size in model_architectures:
        check_model(models.frsf.__dict__[arch], input_size)
def test_biomodule_convert(dummy_net_constructor, mode_types):
    """BioModule must wrap every mode; dfa additionally requires output_dim."""
    for mode in mode_types:
        dummy_net = dummy_net_constructor()
        if mode == 'dfa':
            with pytest.raises(ValueError, match='Model `output_dim` is required for Direct Feedback Alignment \\(dfa\\) mode'):
                BioModule(dummy_net, mode)
            _ = BioModule(dummy_net, mode, output_dim=10)
        else:
            _ = BioModule(dummy_net, mode)
def test_module_converter_convert_dummy_net(dummy_net_constructor, mode_types):
    """Conversion must replace the expected layers and re-initialise weights."""
    for mode in mode_types:
        dummy_net = dummy_net_constructor()
        layers_to_convert = {str(type(dummy_net.conv1)): 1,
                             str(type(dummy_net.fc)): 1}
        # Clone so the snapshots cannot alias tensors reused by the converter.
        w1 = dummy_net.conv1.weight.data.clone()
        w2 = dummy_net.fc.weight.data.clone()
        output_dim = None
        converter = ModuleConverter(mode=mode)
        if mode == 'dfa':
            output_dim = 10
        converted = converter.convert(dummy_net, output_dim=output_dim)
        for layer, count in converter.replaced_layers_counts.items():
            assert layers_to_convert[layer] == count
        # Bug fix: np.testing.assert_array_almost_equal returns None on a
        # match and *raises* on a mismatch, so the original
        # ``assert not np.testing.assert_array_almost_equal(...)`` passed
        # only when the weights were equal (and errored when they differed)
        # — the opposite of the intent. Assert non-equality directly.
        assert not np.allclose(w1, converted.conv1.weight.data)
        assert not np.allclose(w2, converted.fc.weight.data)
def test_module_converter_convert_dummy_net_copy_weights(dummy_net_constructor, mode_types):
    """With copy_weights=True the converted layers keep the original weights."""
    for mode in mode_types:
        dummy_net = dummy_net_constructor()
        layers_to_convert = {str(type(dummy_net.conv1)): 1,
                             str(type(dummy_net.fc)): 1}
        conv_weights = dummy_net.conv1.weight.data
        fc_weights = dummy_net.fc.weight.data
        converter = ModuleConverter(mode=mode)
        output_dim = 10 if mode == 'dfa' else None
        converted = converter.convert(dummy_net, copy_weights=True,
                                      output_dim=output_dim)
        for layer, count in converter.replaced_layers_counts.items():
            assert layers_to_convert[layer] == count
        np.testing.assert_array_almost_equal(conv_weights, converted.conv1.weight.data)
        np.testing.assert_array_almost_equal(fc_weights, converted.fc.weight.data)
def test_module_converter_convert_dummy_net_layer_config(dummy_net_constructor, mode_types):
    """layer_config options must be propagated to every converted layer."""
    for mode in mode_types:
        dummy_net = dummy_net_constructor()
        layers_to_convert = {str(type(dummy_net.conv1)): 1,
                             str(type(dummy_net.fc)): 1}
        conv_weights = dummy_net.conv1.weight.data
        fc_weights = dummy_net.fc.weight.data
        converter = ModuleConverter(mode=mode)
        layer_config = {'options': {'init': 'kaiming'}}
        output_dim = 10 if mode == 'dfa' else None
        converted = converter.convert(dummy_net, copy_weights=True,
                                      output_dim=output_dim,
                                      layer_config=layer_config)
        for layer, count in converter.replaced_layers_counts.items():
            assert layers_to_convert[layer] == count
        np.testing.assert_array_almost_equal(conv_weights, converted.conv1.weight.data)
        np.testing.assert_array_almost_equal(fc_weights, converted.fc.weight.data)
        assert converted.conv1.init == 'kaiming'
        assert converted.fc.init == 'kaiming'
def main():
    """Dispatch on argv[1]: 'wikt' compares wiktionary data against the
    triangles on stdin; 'feat' writes labels and featurizes triangles."""
    mode = argv[1]
    evaluator = Evaluator()
    if mode == 'wikt':
        evaluator.read_all_wiktionary()
        evaluator.compare_with_triangles_stdin()
    elif mode == 'feat':
        evaluator.write_labels(argv[2])
        evaluator.featurize_and_uniq_triangles_stdin()
def scan_stdin(args):
    """Filter tab-separated word-pair lines read from stdin.

    Prints lines that look anomalous either by punctuation-count mismatch
    ('punct' mode) or by low character-unigram log-probability ('unigram'
    mode); tallies counts in a local stats dict.

    NOTE(review): ``l.decode('utf8')`` implies a Python-2 bytes stdin —
    preserved as-is.
    """
    stats = {'punct': 0, 'punct ok': 0, 'sum': 0, 'invalid': 0}
    for l in stdin:
        stats['sum'] += 1
        try:
            wc1, w1, wc2, w2 = l.decode('utf8').strip().split('\t')[0:4]
            if args['punct']:
                if abs(len(punct_re.findall(w1)) - len(punct_re.findall(w2))) >= int(args['--num']):
                    print(l.strip())
                    stats['punct'] += 1
                else:
                    stats['punct ok'] += 1
            if args['unigram']:
                if wc1 not in sum_ or wc2 not in sum_:
                    stderr.write('INVALID, unknown language: {0}'.format(l))
                    continue
                if wc1 in args['--whitelist'] or wc2 in args['--whitelist']:
                    continue
                # Length-normalised unigram log-likelihood of each word.
                prob1 = 0.0
                prob2 = 0.0
                for c in w1:
                    prob1 += math.log(float(unigrams[wc1][c]) / sum_[wc1])
                prob1 /= len(w1)
                for c in w2:
                    prob2 += math.log(float(unigrams[wc2][c]) / sum_[wc2])
                prob2 /= len(w2)
                if prob1 < int(args['<prob_threshold>']) or prob2 < int(args['<prob_threshold>']):
                    print(l.strip())
        except ValueError:
            stats['invalid'] += 1
            stderr.write('INVALID: {0}'.format(l))
def read_unigrams(fn):
    """Load per-wikicode character counts into the module-level ``unigrams``
    and ``sum_`` tables.

    NOTE(review): ``l.decode('utf8')`` implies a Python-2 bytes file —
    preserved as-is.
    """
    with open(fn) as f:
        for l in f:
            wc, c, cnt = l.decode('utf8').split('\t')
            unigrams[wc][c] = int(cnt)
            sum_[wc] += int(cnt)
def main():
    """Parse CLI args with docopt, load unigrams if requested, and scan stdin."""
    args = docopt(__doc__, version='Wikt2Dict - Find anomalies 1.0')
    if args['unigram']:
        read_unigrams(args['<unigram_file>'])
    scan_stdin(args)
def read_pairs(wc_filter=None, input_files=None, use_stdin=False):
    """Build the (wikicode, word) -> set of translations adjacency map.

    Reads tab-separated pair lines either from stdin or from the given
    input files; *wc_filter* optionally restricts accepted wikicodes.
    """
    tri = defaultdict(set)
    if use_stdin:
        for line in stdin:
            add_pair(line, tri, wc_filter)
    elif input_files:
        for fn in input_files:
            with open(fn) as f:
                for line in f:
                    add_pair(line, tri, wc_filter)
    return tri
def add_pair(l, tri, wc_filter):
    """Insert one tab-separated pair line into the *tri* adjacency map
    (both directions); skip pairs failing *wc_filter* or malformed lines.

    NOTE(review): ``l.decode('utf8')`` implies Python-2 bytes input —
    preserved as-is.
    """
    try:
        wc1, w1, wc2, w2 = l.decode('utf8').strip().split('\t')[0:4]
        if wc_filter and (wc1 not in wc_filter or wc2 not in wc_filter):
            return
        tri[(wc1, w1)].add((wc2, w2))
        tri[(wc2, w2)].add((wc1, w1))
    except ValueError:
        stderr.write('Invalid line: {0}'.format(l))
def find_k_long_polygons(pairs, k):
    """Yield every length-*k* walk through the *pairs* adjacency map.

    A walk may return to its starting word (closing a polygon) but never
    revisits any later word.
    """
    if k == 1:
        for word in pairs.keys():
            yield [word]
        return
    for polygon in find_k_long_polygons(pairs, k - 1):
        tail = polygon[-1]
        for word in pairs[tail]:
            # The start word may repeat (to close the polygon); others may not.
            if word not in polygon[1:]:
                yield polygon + [word]
def find_and_print_polygons(pairs, found=None, k=4, mode='polygons'):
    """Find closed walks of length k+1 (first word == last word) and emit
    each via ``output``."""
    for polygon in find_k_long_polygons(pairs, k + 1):
        if polygon[0] == polygon[-1]:
            output(pairs, found=polygon, mode=mode)
def find_k_clicks(pairs, k):
    """Recursively grow word cliques ("clicks") of size *k* from *pairs*.

    NOTE(review): preserved quirks — the connectivity check skips the last
    two members (``click[:-2]``), and the same list object is mutated and
    re-yielded across iterations, so consumers see shared state.
    """
    if k == 1:
        for word in pairs.keys():
            yield [word]
        return
    for click in find_k_clicks(pairs, k - 1):
        if len(click) > (k - 1):
            continue
        for word in pairs[click[-1]]:
            if word in click:
                continue
            bad = False
            for member in click[:-2]:
                if word not in pairs[member]:
                    bad = True
            if not bad:
                click.append(word)
                yield click
def find_and_print_clicks(pairs, k=4):
    """Emit every size-*k* clique via ``output`` in sorted member order."""
    for click in find_k_clicks(pairs, k):
        output(pairs, found=sorted(click), mode='clicks')
def output(pairs, found, mode):
    """Print a found clique or polygon (Python-2 byte output preserved).

    Cliques are printed only when fully connected (edge density 1.0);
    polygons print each missing ("new") pair with the cycle's density,
    optionally in the human-readable ``--illustrate`` form.
    """
    edge_density, new_pairs = edge_density_and_new_pairs(pairs, found)
    if mode == 'clicks' and edge_density == 1.0:
        if arguments['--illustrate']:
            print(' --> '.join((', '.join([i, j]) for (i, j) in found)).encode('utf8'))
        else:
            print('\t'.join(('\t'.join([i, j]) for (i, j) in found)).encode('utf8'))
    elif mode == 'polygons':
        for pair in new_pairs:
            if arguments['--illustrate']:
                print('\t'.join(pair[0]).encode('utf8') + '\t'
                      + '\t'.join(pair[1]).encode('utf8') + '\t'
                      + str(edge_density) + '\t'
                      + ' --> '.join((', '.join(i) for i in found)).encode('utf8'))
            else:
                print('\t'.join(pair[0]).encode('utf8') + '\t'
                      + '\t'.join(pair[1]).encode('utf8') + '\t'
                      + str(edge_density) + '\t'
                      + '\t'.join(('\t'.join(i) for i in found)).encode('utf8'))
def edge_density_and_new_pairs(pairs, cycle):
    """Return the edge density of *cycle* in *pairs* plus its missing pairs.

    A pair is "new" when neither direction is present in the adjacency map.
    Density is 1 - |new| / C(n, 2) where n is the number of pairs examined.
    """
    new_pairs = []
    all_pairs = []
    for i, first in enumerate(cycle):
        for second in cycle[i + 1:-1]:
            all_pairs.append(sorted([first, second]))
            if second not in pairs[first] and first not in pairs[second]:
                new_pairs.append(sorted([first, second]))
    n = len(all_pairs)
    density = 1 - float(len(new_pairs)) / (n * (n - 1) / 2)
    return density, new_pairs
def main():
    """Read pair lines (files or stdin), then search for polygons or cliques
    of size ``--k`` per the docopt ``arguments``."""
    if arguments['--wc-filter']:
        with open(arguments['--wc-filter']) as f:
            wc_filter = set([wc.strip() for wc in f])
    else:
        wc_filter = None
    k = int(arguments['--k'])
    if arguments['<input>']:
        pairs = read_pairs(wc_filter, input_files=arguments['<input>'])
    else:
        pairs = read_pairs(wc_filter, use_stdin=True)
    stderr.write('Pairs read\n')
    # Each undirected pair is stored twice, hence the halving.
    stderr.write('Number of pairs {0}\n'.format(sum(len(v) for v in pairs.values()) / 2))
    if arguments['polygons']:
        find_and_print_polygons(pairs, k=k, mode='polygons')
    else:
        find_and_print_clicks(pairs, k=k)
def read_table(fn):
    """Map (lang, word) -> set of concept ids from a 4-language table file.

    Columns are id, en, hu, la, pl; '#' marks an empty cell; '/' separates
    word variants; underscores encode spaces.

    NOTE(review): ``l.decode('utf8')`` implies a Python-2 bytes file —
    preserved as-is.
    """
    mapping = defaultdict(set)
    with open(fn) as f:
        for l in f:
            fields = l.decode('utf8').strip().split('\t')
            id_ = int(fields[0])
            for i, lang in enumerate(['en', 'hu', 'la', 'pl']):
                if fields[i + 1] == '#':
                    continue
                for word in fields[i + 1].split('/'):
                    mapping[(lang, word.replace('_', ' '))].add(id_)
    return mapping
def read_words(fn):
    """Collect (wikicode, word) tuples from the first and second pair of
    columns of a tab-separated file.

    NOTE(review): ``l.decode('utf8')`` implies a Python-2 bytes file —
    preserved as-is.
    """
    words = set()
    with open(fn) as f:
        for l in f:
            fields = l.decode('utf8').strip().split('\t')
            if len(fields) >= 2:
                words.add((fields[0], fields[1]))
            if len(fields) >= 4:
                words.add((fields[2], fields[3]))
    return words
def find_translations(words):
    """Stream pair lines from stdin and print those whose left or right
    (wikicode, word) tuple appears in *words*, with progress on stderr."""
    iter_no = 0
    for l in stdin:
        iter_no += 1
        if iter_no % 1000000 == 0:
            stderr.write('{}\n'.format(iter_no))
        try:
            fields = l.decode('utf8').strip().split('\t')
            pair1 = (fields[0], fields[1])
            pair2 = (fields[2], fields[3])
            if pair1 in words:
                print('\t'.join(fields[0:4] + list(pair1)).encode('utf8'))
            if pair2 in words:
                print('\t'.join(fields[0:4] + list(pair2)).encode('utf8'))
        except ValueError:
            stderr.write('Error in line {}'.format(l))
def add_orig_bindings(mapping, translations):
    """Seed *translations* with the words already present in *mapping*
    so every concept id starts with its original bindings.

    NOTE(review): ``iteritems`` is Python-2-only — preserved as-is.
    """
    for (wc, word), ids in mapping.iteritems():
        for id_ in ids:
            translations[id_][wc].add(word)
def find_translations_to_table(mapping):
    """Stream pair lines from stdin, accumulate translations per concept id,
    and dump each id with its sorted translations as JSON.

    NOTE(review): Python-2 idioms (``l.decode``, ``iteritems``, byte
    ``print``) preserved as-is.
    """
    iter_no = 0
    translations = defaultdict(lambda: defaultdict(set))
    add_orig_bindings(mapping, translations)
    for l in stdin:
        iter_no += 1
        if iter_no % 1000000 == 0:
            stderr.write('{}\n'.format(iter_no))
        try:
            fd = l.decode('utf8').strip().split('\t')
            wc1, w1, wc2, w2 = fd[0:4]
            if wc1 == 'roa_rup' or wc2 == 'roa_rup':
                continue
            # Normalise Mandarin code to generic Chinese.
            wc1 = 'zh' if wc1 == 'cmn' else wc1
            wc2 = 'zh' if wc2 == 'cmn' else wc2
            pair1 = (wc1, w1)
            pair2 = (wc2, w2)
            for id_ in mapping[pair1]:
                translations[id_][wc2].add(w2)
            for id_ in mapping[pair2]:
                translations[id_][wc1].add(w1)
        except ValueError:
            stderr.write('Error in line {}'.format(l))
    for id_, trans in translations.iteritems():
        trans_to_dump = dict()
        for wc, words in trans.iteritems():
            trans_to_dump[wc] = sorted(words)
        print('{0}\t{1}'.format(id_, json.dumps(trans_to_dump)))
def main():
    """Dispatch on argv[2]: 'direct' prints matching pair lines, 'collect'
    aggregates translations into the concept table."""
    mode = argv[2] if len(argv) > 2 else 'direct'
    if mode == 'direct':
        words = read_words(argv[1])
        find_translations(words)
    elif mode == 'collect':
        table = read_table(argv[1])
        find_translations_to_table(table)
def main():
    """Enumerate all 3-wikicode triangles (optionally filtered) and run the
    triangulator on each, with progress on stderr."""
    if len(argv) > 2 and not argv[2] == 'all':
        filter_wc = set([wc.strip() for wc in argv[2:]])
    else:
        filter_wc = None
    cfg_fn = argv[1]
    logger = logging.getLogger('wikt2dict')
    cfg = ConfigHandler('general', cfg_fn)
    # NOTE(review): rebinding shadows the stdlib logger created above.
    logger = LogHandler(cfg)
    with open(cfg['wikicodes']) as wc_f:
        wikicodes = set([w.strip() for w in wc_f])
    n = len(wikicodes)
    if filter_wc:
        m = n - len(filter_wc)
    else:
        m = 0
    # Triangles touching at least one filtered code: C(n,3) - C(m,3).
    num_of_tr = ((n * (n - 1)) * (n - 2)) / 6 - ((m * (m - 1)) * (m - 2)) / 6
    i = 1
    for triangle_wc in combinations(wikicodes, 3):
        if filter_wc and len(set(triangle_wc) & filter_wc) == 0:
            continue
        stderr.write(str(i) + '/' + str(num_of_tr) + repr(triangle_wc) + '\n')
        i += 1
        logger.info(' '.join(triangle_wc) + ' triangle')
        triangulator = Triangulator(triangle_wc, cfg_fn)
        triangulator.collect_triangles()
        triangulator.write_triangles()
def main():
    """Count character unigrams per wikicode from stdin pair lines and print
    them sorted by descending count.

    NOTE(review): Python-2 idioms (``l.decode``, ``iteritems``, byte
    ``print``) preserved as-is.
    """
    unigrams = defaultdict(lambda: defaultdict(int))
    for l in stdin:
        try:
            wc1, w1, wc2, w2 = l.decode('utf8').strip().split('\t')[0:4]
            for c in w1:
                unigrams[wc1][c] += 1
            for c in w2:
                unigrams[wc2][c] += 1
        except ValueError:
            stderr.write('Invalid line: {0}'.format(l))
    for wc, chars in unigrams.iteritems():
        for c, cnt in sorted(((k, v) for k, v in chars.iteritems()), key=lambda x: -x[1]):
            print(u'{0}\t{1}\t{2}'.format(wc, c, cnt).encode('utf8'))
class SectionAndArticleParser(ArticleParser):
    """Parse Wiktionaries whose foreign-word articles also carry translation
    tables, so section-level language detection is required.

    e.g. dewiktionary has a translation section in the article about the
    English word "dog": the language of the title word is recognised from
    the section header, then the translation table is parsed.
    """

    def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
        ArticleParser.__init__(self, wikt_cfg, parser_cfg, filter_langs)
        self.read_section_langmap()

    def read_section_langmap(self):
        """Build the section-header -> wikicode map.

        The language of a section is determined from its header. If a
        language-name map file is configured, each header is looked up in
        it (as-is and title-cased); otherwise wikicodes are used directly.
        """
        self.section_langmap = dict()
        if self.cfg.section_langmap:
            f = open(self.cfg.section_langmap)
            for l in f:
                fields = l.strip().decode('utf8').split('\t')
                for langname in fields[1:]:
                    self.section_langmap[langname] = fields[0]
                    self.section_langmap[langname.title()] = fields[0]
            f.close()
        else:
            self.section_langmap = dict([(wc, wc) for wc in self.wikt_cfg.wikicodes])

    def extract_translations(self, title, text):
        """Return the set of (section_lang, title, wc, word) tuples found in
        every recognised section of *text*."""
        translations = list()
        for section_lang, section in self.get_sections(text):
            for parser in self.wikt_cfg.section_parsers:
                pairs = parser.extract_translations(title, section)
                for p in pairs:
                    # Bug fix: the original extended the *entire* pair list
                    # once per surviving pair (the inner comprehension
                    # re-iterated and shadowed ``p``), which both duplicated
                    # work and defeated the allow_synonyms filter — skipped
                    # same-language pairs were still added via the inner
                    # list. Append each surviving pair exactly once.
                    if self.wikt_cfg.allow_synonyms is False and p[0] == section_lang:
                        continue
                    translations.append((section_lang, title, p[0], p[1]))
        return set(translations)

    def get_sections(self, text):
        """Yield (language, section text) for each section whose header
        language is known to ``section_langmap``."""
        section_titles_i = list()
        lines = text.split('\n')
        for i, line in enumerate(lines):
            m = self.cfg.section_re.search(line)
            if m:
                lang = m.group(self.cfg.section_langfield)
                section_titles_i.append((i, lang))
        if not section_titles_i:
            return
        # Each section runs from its header to the next header.
        for i, (ind, lang) in enumerate(section_titles_i[:-1]):
            if lang in self.section_langmap:
                yield (self.section_langmap[lang],
                       '\n'.join(lines[ind:section_titles_i[i + 1][0]]))
        # Last section runs to the end of the article.
        last_lang = section_titles_i[-1][1]
        if last_lang in self.section_langmap:
            yield (self.section_langmap[last_lang],
                   '\n'.join(lines[section_titles_i[-1][0]:]))
class LangnamesArticleParser(ArticleParser):
    """
    Class for parsing Wiktionaries that use simple lists for translations
    instead of templates
    """

    def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
        ArticleParser.__init__(self, wikt_cfg, parser_cfg, filter_langs)
        self.read_langname_mapping()

    def read_langname_mapping(self):
        # Build language-name -> wikicode lookup.  Each mapping-file line is
        # "<wikicode>\t<name1>\t<name2>..."; title- and lower-cased variants
        # are registered too so header capitalisation does not matter.
        # (Python 2 style module: file lines are byte strings, hence decode.)
        self.mapping = dict()
        if self.cfg.langnames:
            f = open(self.cfg.langnames)
            for l in f:
                fields = l.strip().decode('utf8').split('\t')
                for langname in fields[1:]:
                    self.mapping[langname] = fields[0]
                    self.mapping[langname.title()] = fields[0]
                    self.mapping[langname.lower()] = fields[0]
            f.close()
        else:
            # No mapping file: assume translation lines already use wikicodes.
            self.mapping = dict([(wc, wc) for wc in self.wikt_cfg.wikicodes])

    def extract_translations(self, title, text):
        # Scan the article for translation list lines and return a
        # de-duplicated set of (wikicode, translated_word) pairs.
        translations = list()
        for tr in self.cfg.translation_line_re.finditer(text):
            # skip_translation_line: presumably defined on ArticleParser -- TODO confirm
            if self.skip_translation_line(tr.group(0)):
                continue
            langname = tr.group(self.cfg.language_name_field).lower()
            if not (langname in self.mapping):
                continue
            wc = self.mapping[langname]
            entities = self.get_entities(tr.group(self.cfg.translation_field))
            for entity in entities:
                entity_clear = self.trim_translation(entity)
                if entity_clear:
                    translations.append((wc, entity_clear))
        return set(translations)

    def trim_translation(self, word):
        # Flatten embedded newlines and trim surrounding whitespace.
        return word.replace('\n', ' ').strip()

    def get_entities(self, trans_field):
        # Drop bracketed annotations, split the field on the configured
        # delimiter, then extract the actual words via translation_re.
        trimmed = self.cfg.bracket_re.sub('', trans_field)
        entities = list()
        for e in self.cfg.delimiter_re.split(trimmed):
            for m in self.cfg.translation_re.finditer(e):
                word = m.group(1)
                if self.skip_entity(word):
                    continue
                entities.append(word)
        return set(entities)

    def skip_entity(self, entity):
        # True when the entity matches the configured skip or junk patterns.
        if self.cfg.skip_translation_re.search(entity):
            return True
        if self.cfg.junk_re and self.cfg.junk_re.search(entity):
            return True
        return False
class DefaultArticleParser(ArticleParser):
    """Template-based translation extractor: pulls (wikicode, word) pairs out
    of translation templates matched by cfg.trad_re."""

    def extract_translations(self, title, text):
        # Walk every template match and keep only well-formed pairs.
        found = list()
        for match in self.cfg.trad_re.finditer(text):
            wc = match.group(self.cfg.wc_field)
            if (not wc) or (not wc.strip()) or (wc not in self.wikt_cfg.wikicodes):
                continue
            word = match.group(self.cfg.word_field)
            if (not word) or (not word.strip()):
                continue
            word = word.strip()
            if self.skip_word(word):
                continue
            found.append((wc, word))
        return set(found)

    def skip_word(self, word):
        # Reject words matching the configured skip pattern or spanning lines.
        if '\n' in word:
            return True
        if self.cfg.skip_translation_re and self.cfg.skip_translation_re.search(word):
            return True
        return False
def err(msg):
    """Print a message to stderr, terminating it with a newline."""
    line = msg + '\n'
    sys.stderr.write(line)
class Article():
    """Plain data holder for the contents of one Wikipedia article."""

    def __init__(self, title, markup, is_redirect):
        # Tuple-unpack straight into the attributes; no other behaviour.
        (self.title, self.markup, self.is_redirect) = (title, markup, is_redirect)
class WikiParser():
    """Parses the Wikipedia XML and extracts the relevant data,
    such as sentences and vocabulary"""

    def __init__(self, callback, ignore_redirects=True):
        self.callback = callback                    # invoked with each accepted Article
        self.ignore_redirects = ignore_redirects
        self.buffer_size = ((10 * 1024) * 1024)     # read stdin in 10 MiB chunks
        # Title prefixes marking non-article namespaces to drop.
        self.ignoredArticleTypes = ['wikipedia', 'category', 'template']
        # Expat SAX parser with handlers bound to this instance.
        self.xml_parser = sax.ParserCreate()
        self.xml_parser.StartElementHandler = (lambda name, attrs: self.xml_start_element(name, attrs))
        self.xml_parser.EndElementHandler = (lambda name: self.xml_end_element(name))
        self.xml_parser.CharacterDataHandler = (lambda data: self.xml_char_data(data))
        self.article = None       # Article currently being assembled
        self.section = None
        self.word = None
        self.enclosing_tags = []  # stack of currently open tags, innermost first
        self.text = []            # character data buffered for the current element
        self.article = None

    def process(self):
        # Feed stdin to the parser chunk by chunk until EOF.
        while True:
            buf = sys.stdin.read(self.buffer_size)
            if (buf == ''):
                break
            self.xml_parser.Parse(buf)

    def xml_char_data(self, data):
        # Expat may deliver element text in several pieces; buffer them all.
        self.text.append(data)
        pass

    def xml_start_element(self, name, attrs):
        name = name.lower()
        self.enclosing_tags = ([name] + self.enclosing_tags)
        self.text = []
        if (name == 'page'):
            # New page: start assembling a fresh Article.
            self.article = Article(None, None, False)

    def xml_end_element(self, name):
        name = name.lower()
        contents = ''.join(self.text)
        if (name == 'title'):
            self.article.title = contents
        elif (name == 'redirect'):
            self.article.is_redirect = True
        elif (name == 'text'):
            self.article.markup = contents
        elif (name == 'page'):
            # Page complete: hand it to the callback unless it is a redirect
            # and redirects are being ignored.
            if (self.ignore_redirects and self.article.is_redirect):
                pass
            else:
                self.new_article(self.article)
            self.article = None
        # Pop the tag stack, complaining about mismatched closers.
        if ((len(self.enclosing_tags) > 0) and (name == self.enclosing_tags[0])):
            self.enclosing_tags = self.enclosing_tags[1:]
        else:
            err(('Mismatched closing tag: ' + name))
        self.text = []

    def new_article(self, article):
        # Filter out pages in ignored namespaces before invoking the callback.
        if (':' in article.title):
            articleType = article.title.split(':')[0].lower()
            if (articleType in self.ignoredArticleTypes):
                return
        self.callback(article)

    def get_enclosing_tag(self):
        # Innermost currently-open tag, or None outside any element.
        return (None if (len(self.enclosing_tags) == 0) else self.enclosing_tags[0])

    def close(self):
        """Releases all resources associated with this class"""
        pass
class Triangulator(object):
    """Builds translation "triangles" (w1-w2-w3 across three wiktionaries)
    from previously extracted pairwise translation files.

    Python 2 code: relies on dict.iteritems and manual utf8 decode/encode.
    """

    def __init__(self, triangle_wc):
        self.wikicodes = set(triangle_wc)   # the three wikicodes to triangulate
        self.cfg = config.WiktionaryConfig()
        # pairs[wc1][w1][wc2][w2] -> list of (source_wikicode, source_article)
        self.pairs = defaultdict((lambda : defaultdict((lambda : defaultdict((lambda : defaultdict(list)))))))
        # triangles[(wcA, wA, wcB, wB)] -> list of 6-tuples describing the
        # pivot word and the two sources that link the outer pair through it.
        self.triangles = defaultdict(list)
        self.read_pairs_in_three_langs()

    def read_pairs_in_three_langs(self):
        # 'de' and 'lt' outputs are always read in addition to the requested
        # wikicodes -- presumably as the richest pivot sources; TODO confirm.
        for wc in (self.wikicodes | set(['de', 'lt'])):
            try:
                cfg = config.get_config_by_wc(wc)
                self.read_pairs_in_lang(wc, cfg.output_path)
            except IndexError:
                # No configuration for this wikicode; skip it.
                continue

    def read_pairs_in_lang(self, wc, fn):
        # Load one wiktionary's extracted pair file into self.pairs (both
        # directions), normalising 'cmn' to 'zh' and keeping only lines that
        # touch one of the requested wikicodes.
        if (not path.exists(fn)):
            return
        with open(fn) as f:
            for l in f:
                fd = l.decode('utf8').strip().split('\t')
                if (len(fd) < 6):
                    continue
                (wc1, w1, wc2, w2, src_wc, src_art) = fd[0:6]
                if (wc1 == 'cmn'):
                    wc1 = 'zh'
                if (wc2 == 'cmn'):
                    wc2 = 'zh'
                if ((not (wc1 in self.wikicodes)) and (not (wc2 in self.wikicodes))):
                    continue
                self.pairs[wc1][w1][wc2][w2].append((src_wc, src_art))
                self.pairs[wc2][w2][wc1][w1].append((src_wc, src_art))

    def collect_triangles(self):
        # For every pivot language wc2, connect its words' translations into
        # the other two languages; key the triangle on the alphabetically
        # ordered outer pair so both orientations collapse together.
        for wc2 in self.wikicodes:
            (wc1, wc3) = sorted([w for w in self.wikicodes if (not (w == wc2))])
            for (w2, tr) in self.pairs[wc2].iteritems():
                for (w1, src1_l) in tr[wc1].iteritems():
                    for (w3, src3_l) in tr[wc3].iteritems():
                        for pair in product(src1_l, src3_l):
                            if (wc1 < wc3):
                                self.triangles[(wc1, w1, wc3, w3)].append((pair[0][0], pair[0][1], wc2, w2, pair[1][0], pair[1][1]))
                            else:
                                self.triangles[(wc3, w3, wc1, w1)].append((pair[0][0], pair[0][1], wc2, w2, pair[1][0], pair[1][1]))

    def write_triangles(self):
        # Dump triangles with at least triangle_threshold supporting sources,
        # one file per pivot language.
        dir_ = self.get_dir()
        if (not path.exists(dir_)):
            makedirs(dir_)
        for wc2 in self.wikicodes:
            out_str = ''
            (wc1, wc3) = sorted([w for w in self.wikicodes if (not (w == wc2))])
            min_cnt = int(self.cfg.triangle_threshold)
            for (tri, sources) in self.triangles.iteritems():
                if ((not (tri[0] == wc1)) or (not (tri[2] == wc3))):
                    continue
                if (len(sources) >= min_cnt):
                    for s in set(sources):
                        out_str += ((('\t'.join(tri).encode('utf8') + '\t') + '\t'.join(s).encode('utf8')) + '\n')
            if out_str:
                with open(((dir_ + '/') + '_'.join([wc1, wc2, wc3])), 'w') as f:
                    f.write(out_str)

    def get_dir(self):
        # Find the first numbered output directory holding fewer than ~1000
        # files (or create a new one).
        # NOTE(review): self.cfg['triangle_dir'] uses item access while the
        # rest of the class uses attribute access (self.cfg.triangle_threshold)
        # -- verify WiktionaryConfig supports __getitem__.
        i = 0
        file_cnt = 1000
        while (file_cnt >= 998):
            dir_ = ((self.cfg['triangle_dir'] + '/') + str(i))
            i += 1
            if (not path.exists(dir_)):
                break
            file_cnt = len([name for name in listdir(dir_)])
        return dir_
class Wiktionary(object):
    """Driver that runs every configured parser over a Wiktionary dump and
    writes the extracted translation pairs to cfg.output_path.

    Python 2 style module: article text is manually decoded from utf8 on
    input and encoded back on output.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.init_parsers()
        self.pairs = list()

    def init_parsers(self):
        # Instantiate each (parser class, parser config) pair from the config.
        self.parsers = list()
        for (parser_cl, parser_cfg) in self.cfg.parsers:
            self.parsers.append(parser_cl(self.cfg, parser_cfg))

    def parse_articles(self, write_immediately=False):
        """Stream articles from the dump; either write each article's pairs
        as soon as they are found, or collect them all and write sorted."""
        with open(self.cfg.output_path, 'w') as self.outf:
            for (title, text) in self.read_dump():
                pairs = self.extract_translations(title, text)
                if pairs:
                    if write_immediately:
                        self.write_one_article_translations(pairs)
                    else:
                        self.store_translations(pairs)
            if write_immediately is False:
                self.write_all_pairs()

    def extract_translations(self, title, text):
        """Run all parsers on one article.

        Returns a set of ((wc1, w1, wc2, w2), features) tuples, or None when
        the article is skipped.
        """
        if self.skip_article(title, text):
            return
        pairs = list()
        for parser in self.parsers:
            for p in parser.extract_translations(title, text):
                if len(p) == 2:
                    # Default parsers yield (target_wc, target_word): the
                    # source side is this wiktionary's language + the title.
                    pair = ((self.cfg.wc, title, p[0], p[1]), tuple(parser.cfg.features))
                elif len(p) == 4:
                    # Section parsers already yield a full 4-tuple.
                    pair = (p, tuple(parser.cfg.features))
                else:
                    raise Exception('Invalid pair {0}'.format(p))
                pairs.append(pair)
        return set(pairs)

    def skip_article(self, title, text):
        # Skip empty articles and non-main-namespace pages (':' in title).
        if (not title.strip()) or (not text.strip()):
            return True
        if ':' in title:
            return True
        return False

    def write_one_article_translations(self, pairs):
        for pair in pairs:
            if self.cfg.verbose_output is True:
                self.outf.write('\t'.join(pair[0]).encode('utf8') + '\n')
            else:
                # BUGFIX: pair is ((wc1, w1, wc2, w2), features); the old code
                # sliced the outer 2-tuple (pair[0:4]), making join() fail on
                # a tuple of tuples.  Join the first four pair fields instead.
                self.outf.write('\t'.join(pair[0][0:4]).encode('utf8') + '\n')

    def store_translations(self, pairs):
        # Normalise each pair so wc1 < wc2 and remember the source wiktionary
        # (self.cfg.wc) plus the source article title (w1).
        for (pair, feat) in pairs:
            (wc1, w1, wc2, w2) = pair[0:4]
            if wc1 < wc2:
                self.pairs.append([wc1, w1, wc2, w2, self.cfg.wc, w1] + list(feat))
            else:
                # BUGFIX: the source wikicode is always this wiktionary's own
                # code, not wc1 (they only coincide for default-parser pairs;
                # section-parser pairs carry the section language in wc1).
                self.pairs.append([wc2, w2, wc1, w1, self.cfg.wc, w1] + list(feat))

    def write_all_pairs(self):
        for pair in sorted(self.pairs):
            if self.cfg.verbose_output is True:
                self.outf.write('\t'.join(pair).encode('utf8') + '\n')
            else:
                self.outf.write('\t'.join(pair[0:4]).encode('utf8') + '\n')

    def read_dump(self):
        """Yield (title, article_text) for every page in the %%#PAGE dump."""
        with open(self.cfg.dump_path) as f:
            title = u''
            article = u''
            page_sep = '%%#PAGE'
            for l_ in f:
                l = l_.decode('utf8')
                if l.startswith(page_sep):
                    if title and article:
                        yield (title, article)
                    title = l.split(page_sep)[(-1)].strip()
                    article = u''
                else:
                    article += l
            yield (title, article)
def load_clip_cpu(backbone_name):
    # Load a CLIP model onto the CPU from a local checkpoint.
    # NOTE(review): `backbone_name` is currently unused -- the path is
    # hard-coded to a ViT-B/16 file; confirm whether other backbones should
    # be selectable here.
    model_path = 'path_to_CLIP_ViT-B-16_pre-trained_parameters'
    try:
        # Preferred path: the checkpoint is a TorchScript archive.
        model = torch.jit.load(model_path, map_location='cpu').eval()
        state_dict = None
    except RuntimeError:
        # Fallback: a plain state-dict checkpoint.
        state_dict = torch.load(model_path, map_location='cpu')
    # `state_dict or model.state_dict()` short-circuits, so `model` is only
    # referenced when the jit.load branch succeeded (state_dict is None).
    model = clip.build_model((state_dict or model.state_dict()))
    return model
def transform_center():
    """Build the CLIP-style evaluation transform: bicubic resize to 224,
    centre crop to 224x224, tensor conversion, CLIP normalisation."""
    normalize = Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                          std=[0.26862954, 0.26130258, 0.27577711])
    return Compose([
        Resize(224, interpolation=Image.BICUBIC),
        CenterCrop((224, 224)),
        ToTensor(),
        normalize,
    ])
def get_videos(vidname, read_path):
    """Decode every frame of the video read_path+vidname, run each frame
    through the module-level `centrans` transform, and stack the results.

    Returns an empty array when the video cannot be opened or has no frames.
    """
    allframes = []
    videoins = (read_path + vidname)   # plain concatenation, not os.path.join
    vvv = cv2.VideoCapture(videoins)
    if (not vvv.isOpened()):
        print('Video is not opened! {}'.format(videoins))
    else:
        fps = vvv.get(cv2.CAP_PROP_FPS)
        totalFrameNumber = vvv.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vvv.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vvv.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        # NOTE(review): divides by fps -- a stream reporting fps == 0 would
        # raise here; `size` and `second` are computed but never used.
        second = (totalFrameNumber // fps)
        if (totalFrameNumber != 0):
            for _ in range(int(totalFrameNumber)):
                (rval, frame) = vvv.read()
                if (frame is not None):
                    # ndarray -> PIL -> transform -> numpy.
                    # NOTE(review): cv2 frames are BGR and convert('RGB') does
                    # not swap channels -- confirm colour order is handled
                    # (or irrelevant) downstream.
                    img = Image.fromarray(frame.astype('uint8')).convert('RGB')
                    imgtrans = centrans(img).numpy()
                    allframes.append(imgtrans)
    return np.array(allframes)
@lru_cache()
def default_bpe():
    """Return the path of the BPE vocabulary shipped next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """Bijective map from every byte value (0-255) to a printable unicode
    character.

    Printable latin-1 bytes map to themselves; the remaining bytes are
    assigned code points starting at 256, so BPE never has to deal with
    whitespace/control characters and the mapping stays reversible.
    """
    printable = (list(range(ord('!'), ord('~') + 1))
                 + list(range(ord('¡'), ord('¬') + 1))
                 + list(range(ord('®'), ord('ÿ') + 1)))
    byte_vals = printable[:]
    char_vals = printable[:]
    offset = 0
    for b in range(2 ** 8):
        if b not in byte_vals:
            byte_vals.append(b)
            char_vals.append((2 ** 8) + offset)
            offset += 1
    return dict(zip(byte_vals, (chr(c) for c in char_vals)))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (variable-length strings).
    """
    _ = word[0]  # preserve original behaviour: empty input raises IndexError
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Fix mojibake with ftfy, resolve doubly-escaped HTML entities, and
    strip surrounding whitespace."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub('\\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer (49152-entry vocabulary including the
    two special tokens)."""

    def __init__(self, bpe_path: str=default_bpe()):
        # Byte <-> printable-unicode tables so every raw byte has a visible
        # character representation for BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # Merge list from the gzipped vocab: drop the header line and
        # truncate to the merge budget (49152 total - 256 byte tokens - 2
        # special tokens, halved because each base token has a '</w>' twin).
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocabulary order: 256 byte chars, their '</w>' variants, one entry
        # per merge, then the two specials.
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Specials bypass BPE entirely via the cache.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): \p{L}/\p{N} need the third-party `regex` module bound
        # to the name `re`; the stdlib re module rejects this pattern --
        # confirm the module's import.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)

    def bpe(self, token):
        # Repeatedly merge the lowest-ranked adjacent pair until no known
        # merge applies; memoised per token.
        if (token in self.cache):
            return self.cache[token]
        # End-of-word marker on the last symbol.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Return the list of BPE token ids for *text*."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Re-express the token's utf-8 bytes as printable chars first.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Inverse of encode(); '</w>' markers become spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
def setup_path(args):
    """Create (or locate) the log and model directories for this run, record
    the launch command in running_command.txt, and return [logPath, modelPath].

    Mutates the module-level globals dt_string / logPath / modelPath.
    """
    prefix = args.prefix
    postfix = args.postfix
    openset = args.openset
    temporal = args.temporal
    tfmlayers = args.tfm_layers
    batchsize = args.batchsize
    numFrames = args.numFrames
    iters = args.num_iterations
    verbose = (args.verbose if args.verbose else 'none')
    dataset = args.dataset.split('-')[0]
    global dt_string, logPath, modelPath, resultsPath
    now = datetime.now()
    dt_string = now.strftime('%Y_%m_%d_%H_%M')
    if (args.test or args.resume):
        # Testing/resuming: reuse the checkpoint's existing directories
        # (second-to-last path component is the run directory name).
        if args.test:
            basename = args.test.split('/')[(- 2)]
        elif args.resume:
            basename = args.resume.split('/')[(- 2)]
        logPath = os.path.join('../logs/', basename)
        modelPath = os.path.join('../models/', basename)
        try:
            with open('{}/running_command.txt'.format(modelPath), 'a') as f:
                json.dump({'command_time_stamp': dt_string, **args.__dict__}, f, indent=2)
        except:
            # Directory may be missing/read-only; fall back to stdout.
            print({'command_time_stamp': dt_string, **args.__dict__})
    else:
        # Fresh run: build timestamped directory names that encode the config.
        os.makedirs(f"../logs{(('_' + args.dir_postfix) if (args.dir_postfix != '') else '')}/", exist_ok=True)
        os.makedirs(f"../models{(('_' + args.dir_postfix) if (args.dir_postfix != '') else '')}/", exist_ok=True)
        logPath = os.path.join(f"../logs{(('_' + args.dir_postfix) if (args.dir_postfix != '') else '')}/", f'{dt_string}-dataset_{dataset}-openset_{openset}-iter_{iters:.0e}-bs_{batchsize}-numFrames_{numFrames}-temporal_{temporal}-tfmL_{tfmlayers}-prompt_{prefix}+X+{postfix}-{verbose}')
        modelPath = os.path.join(f"../models{(('_' + args.dir_postfix) if (args.dir_postfix != '') else '')}/", f'{dt_string}-dataset_{dataset}-openset_{openset}-iter_{iters:.0e}-bs_{batchsize}-numFrames_{numFrames}-temporal_{temporal}-tfmL_{tfmlayers}-prompt_{prefix}+X+{postfix}-{verbose}')
        os.makedirs(logPath, exist_ok=True)
        os.makedirs(modelPath, exist_ok=True)
        with open('{}/running_command.txt'.format(modelPath), 'w') as f:
            json.dump({'command_time_stamp': dt_string, **args.__dict__}, f, indent=2)
    return [logPath, modelPath]
def setup_dataloader(args):
    """Build the train/val datasets for the configured dataset name.

    Returns [train_dataset, val_dataset, trainactions, valactions]; raises
    ValueError for any dataset other than 'HMDB51-feature-30fps-center'.
    """
    if args.dataset != 'HMDB51-feature-30fps-center':
        raise ValueError('Unknown dataset.')
    feature_root = '../feat/HMDB'
    trainactions, valactions = [], []
    fps_ratios = [1, (1 / 2), (1 / 3), (1 / 3), (1 / 3), (1 / 4)]
    trn_dataset = readFeatureHMDB51(root=feature_root, frames=args.numFrames,
                                    fpsR=list(fps_ratios), ensemble=1, mode='train')
    val_dataset = readFeatureHMDB51(root=feature_root, frames=args.numFrames,
                                    fpsR=list(fps_ratios), ensemble=args.valEnsemble, mode='val')
    return [trn_dataset, val_dataset, trainactions, valactions]
def main(args):
    """Entry point: seed RNGs, build data loaders / model / optimiser,
    optionally test or resume from a checkpoint, then launch training."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    [logPath, modelPath] = cg.setup_path(args)
    args.model_path = modelPath
    logger = SummaryWriter(logdir=logPath)
    args.return_intermediate_text_feature = 0
    [trn_dataset, val_datasete, trainactions, valactions] = cg.setup_dataloader(args)
    # FastDataLoader keeps its worker processes alive across epochs.
    trnloader = FastDataLoader(trn_dataset, batch_size=args.batchsize, num_workers=args.workers, shuffle=True, pin_memory=False, drop_last=True)
    valloader = FastDataLoader(val_datasete, batch_size=args.batchsize, num_workers=args.workers, shuffle=False, pin_memory=False, drop_last=False)
    print('==> reading meta data for {}'.format(args.dataset))
    # Frozen CLIP text embeddings/tokens for every action name.
    (actionlist, actiondict, actiontoken) = text_prompt(dataset=args.dataset, clipbackbone=args.backbone, device=device)
    print('==> initialising action recognition model')
    model = CLIPrompt(args, actionlist, actiondict, actiontoken, device)
    model.float()
    model.to(device)
    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=0.01)
    # Cosine annealing with warm restarts every `decay_steps` iterations.
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=int(args.decay_steps), eta_min=(args.lr * 0.01), last_epoch=(- 1))
    args.start_iter = 0
    if args.test:
        # Evaluation-only mode: load weights (or keep random initialisation
        # when 'random/random' is given), validate once, then exit.
        print('loading checkpoint {}'.format(args.test))
        if (args.test == 'random/random'):
            iteration = 0
            print('loading random weights')
        else:
            checkpoint = torch.load(args.test, map_location=torch.device('cpu'))
            iteration = checkpoint['iteration']
            state_dict = checkpoint['state_dict']
            model.load_state_dict(state_dict)
            print('loading successful')
        val.val_CLIPrompt(args, valloader, [actionlist, actiondict, actiontoken, trainactions, valactions], model, None, device, iteration)
        print('test finish, exiting')
        sys.exit()
    if args.resume:
        # Resume training from the checkpoint's iteration counter.
        print('loading checkpoint {}'.format(args.resume))
        checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
        iteration = checkpoint['iteration']
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        args.start_iter = iteration
        print('loading successful')
    print('======> start training {}, {}, use {}.'.format(args.dataset, args.verbose, device))
    train.train_CLIPrompt(args, [trnloader, valloader], [actionlist, actiondict, actiontoken, trainactions, valactions], model, optimizer, lr_scheduler, logger, device)
def convert_to_token(xh):
    """Tokenize text with CLIP and return the token ids as a numpy array."""
    tokens = clip.tokenize(xh)
    return tokens.cpu().data.numpy()
def text_prompt(dataset='HMDB51', clipbackbone='ViT-B/16', device='cpu'):
    """Load CLIP, read the action-name list for *dataset*, and pre-compute
    frozen text embeddings and token ids for every action.

    Returns (actionlist, actiondict, actiontoken): actiondict maps action
    name -> text embedding (numpy), actiontoken maps name -> token ids.
    """
    (actionlist, actionprompt, actiontoken) = ([], {}, [])
    numC = {'HMDB51-feature-30fps-center': 51}  # classes per dataset
    (clipmodel, _) = clipmodel_load = clip.load(clipbackbone, device=device, jit=False)
    for paramclip in clipmodel.parameters():
        paramclip.requires_grad = False  # CLIP stays frozen
    if (dataset == 'HMDB51-feature-30fps-center'):
        meta = open('../data/HMDB51/HMDB51_action.list', 'rb')
        actionlist = meta.readlines()
        meta.close()
        actionlist = np.array([a.decode('utf-8').split('\n')[0] for a in actionlist])
        actiontoken = np.array([convert_to_token(a) for a in actionlist])
        with torch.no_grad():
            # encode_text_light: custom hook on this CLIP build -- presumably
            # the token-embedding stage only; TODO confirm.
            actionembed = clipmodel.encode_text_light(torch.tensor(actiontoken).to(device))
        actiondict = OrderedDict(((actionlist[i], actionembed[i].cpu().data.numpy()) for i in range(numC[dataset])))
        actiontoken = OrderedDict(((actionlist[i], actiontoken[i]) for i in range(numC[dataset])))
    return (actionlist, actiondict, actiontoken)
def set_learning_rate(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def readtxt(metapath, datapath):
    """Parse a metadata list file into video paths and labels.

    Each line of *metapath* is comma-separated: ``name,int,str,int``.
    Returns (vidDir, vidLabel): vidDir[i] is datapath joined with the video
    name; vidLabel[i] is [int(field1), field2, int(field3)].
    """
    (vidDir, vidLabel) = ([], [])
    # Binary read + explicit utf-8 decode, matching the original behaviour
    # (the metadata files may contain non-ascii names); `with` guarantees
    # the file handle is closed even on a parse error.
    with open(metapath, 'rb') as f:
        for p in f:
            psplit = p.decode('utf-8').strip('\n').split(',')
            vidDir.append(os.path.join(datapath, psplit[0]))
            vidLabel.append([int(psplit[1]), psplit[2], int(psplit[3])])
    return (vidDir, vidLabel)
def save_checkpoint(state, is_best=0, gap=1, filename='checkpoint.pth.tar', keep_all=False):
    """Persist *state* to *filename* and manage rolling checkpoint files.

    Unless keep_all, the checkpoint from `gap` iterations earlier is removed.
    If is_best, the state is additionally saved as model_best_iter<N>.pth.tar
    and only the five most recent best files are kept.
    """
    torch.save(state, filename)
    ckpt_dir = os.path.dirname(filename)
    last_epoch_path = os.path.join(ckpt_dir, ('checkpoint_iter%s.pth.tar' % str((state['iteration'] - gap))))
    if not keep_all:
        try:
            os.remove(last_epoch_path)
        except OSError:
            # Previous checkpoint may not exist; best-effort cleanup only.
            # (Narrowed from a bare `except:` so real errors are not hidden.)
            pass
    if is_best:
        past_best = glob(os.path.join(ckpt_dir, 'model_best_*.pth.tar'))
        # Sort by the digits embedded in the filename (iteration number).
        past_best = sorted(past_best, key=(lambda x: int(''.join(filter(str.isdigit, x)))))
        if len(past_best) >= 5:
            try:
                os.remove(past_best[0])  # drop the oldest best checkpoint
            except OSError:
                pass
        torch.save(state, os.path.join(ckpt_dir, ('model_best_iter%s.pth.tar' % str(state['iteration']))))
class _RepeatSampler(object): ' Sampler that repeats forever.\n Args:\n sampler (Sampler)\n ' def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: (yield from iter(self.sampler))
class FastDataLoader(torch.utils.data.dataloader.DataLoader):
    """for reusing cpu workers, to save time"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the batch sampler with a forever-repeating wrapper.
        # object.__setattr__ bypasses DataLoader's __setattr__ guard, which
        # forbids reassigning 'batch_sampler' after initialisation.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # Create the worker iterator once and reuse it across epochs, so the
        # worker processes are not torn down and respawned each epoch.
        self.iterator = super().__iter__()

    def __len__(self):
        # self.batch_sampler is the _RepeatSampler; .sampler is the original
        # (finite) batch sampler, so this is one epoch's number of batches.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the shared iterator.
        for i in range(len(self)):
            yield next(self.iterator)
def save(file: Path, **kwargs) -> None:
    """Write the given keyword arrays to *file* as a compressed npz archive."""
    print(f"-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
def export_ddad(mode, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for DDAD.

    :param mode: Split mode to load.
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f'-> Exporting ground truth depths for DDAD "{mode}"...')
    ds = DdadDataset(mode, datum='image depth K', shape=None, as_torch=False)
    save_file = (ds.split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")
    # Dense pre-allocation: every DDAD item shares DdadDataset.SHAPE.
    depths = np.zeros((len(ds), *DdadDataset.SHAPE), dtype=np.float32)
    Ks = np.zeros((len(ds), 4, 4), dtype=np.float32)
    for (i, batch) in enumerate(tqdm(ds)):
        y = batch[1]  # dataset items are tuples; index 1 holds the targets
        depths[i] = y['depth'].squeeze()
        Ks[i] = y['K']
        del batch, y  # drop per-item references early; arrays are large
    save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None:
    """Compress the given keyword arrays into a single npz file at *file*."""
    print(f"-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
def export_diode(mode: str, scene: str, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth depth images for DIODE.

    :param mode: (str) Split mode to use. {'val'}
    :param scene: (str) Scene type to use. {'outdoor', 'indoor'}
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for DIODE '{mode}'...")
    ds = DiodeDataset(mode, scene, shape=None, as_torch=False)
    save_file = (ds.split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")
    # Apply the validity mask so invalid pixels are zeroed in the export.
    depths = np.array([(y['depth'].squeeze() * y['mask']) for (_, y, _) in tqdm(ds)])
    save(save_file, depth=depths)
def save(file: Path, **kwargs) -> None:
    """Save the given keyword arrays as a compressed npz file."""
    print(f''' -> Saving to "{file}"...''')
    np.savez_compressed(file, **kwargs)
def export_kitti(depth_split: str, mode: str, use_velo_depth: bool=False, save_stem: Optional[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for a given Kitti test split.

    :param depth_split: (str) Kitti depth split to load.
    :param mode: (str) Split mode to use. {'train', 'val', 'test'}
    :param use_velo_depth: (bool) If `True`, load the raw velodyne depth. Only used for legacy reasons!
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f''' -> Exporting ground truth depths for KITTI "{depth_split}/{mode}"...''')
    # Split-file lines are "<sequence> <frame stem> <l|r>"; cam 2 is left,
    # cam 3 is right.
    split_file = kr.get_split_file(depth_split, mode='test')
    lines = [line.split() for line in kr.load_split(split_file)]
    items = [{'seq': l[0], 'cam': (2 if (l[2] == 'l') else 3), 'stem': int(l[1])} for l in lines]
    save_file = (split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite')
    (depths, Ks) = ([], [])
    for d in tqdm(items):
        (cam2cam, _, velo2cam) = kr.load_calib(d['seq'].split('/')[0])
        if use_velo_depth:
            # Legacy path: project the raw velodyne point cloud ourselves.
            file = kr.get_velodyne_file(d['seq'], d['stem'])
            depth = kr.load_depth_velodyne(file, velo2cam, cam2cam, cam=d['cam'], use_velo_depth=use_velo_depth)
        else:
            # Standard path: use the pre-rendered depth maps.
            file = kr.get_depth_file(d['seq'], f"image_0{d['cam']}", d['stem'])
            depth = kr.load_depth(file)
        depths.append(depth)
        Ks.append(cam2cam[f"K_0{d['cam']}"])
    # Frames differ in resolution, hence the object-dtype array.
    depths = np.array(depths, dtype=object)
    save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None:
    """Store every keyword array in a compressed npz archive at *file*."""
    print(f'-> Saving to "{file}"...')
    np.savez_compressed(file, **kwargs)
def export_mannequin(mode: str, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth depth images for MannequinChallenge.

    :param mode: (str) Split mode to use.
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for Mannequin '{mode}'...")
    ds = MannequinDataset(mode, datum='image depth K', shape=None, as_torch=False)
    save_file = (ds.split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")
    (depths, Ks) = ([], [])
    for (_, y, m) in tqdm(ds):
        depths.append(y['depth'].squeeze())
        # Rescale the intrinsics from the depth's native resolution to the
        # dataset's canonical SHAPE.
        Ks.append(geo.resize_K(y['K'], y['depth'].shape[(- 2):], shape=MannequinDataset.SHAPE))
    # Frames may differ in resolution, hence the object-dtype depth array.
    save(save_file, depth=np.array(depths, dtype=object), K=np.array(Ks))
def save(file: Path, **kwargs) -> None:
    """Save the given keyword arrays as a compressed npz file."""
    print(f''' -> Saving to "{file}"...''')
    np.savez_compressed(file, **kwargs)
def export_nyud(mode: str, save_stem: str, overwrite: bool=False) -> None:
    """Export the ground truth depth images for NYUD.

    :param mode: (str) Split mode to use. {'test'}
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    ds = NyudDataset(mode=mode, as_torch=False)
    save_file = (ds.split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite')
    # batch[1] holds the target dict; all NYUD depths share one resolution,
    # so a dense array is fine here.
    depths = np.array([batch[1]['depth'].squeeze() for batch in tqdm(ds)])
    save(save_file, depth=depths)