# Consolidated imports for the PyTorch/Lightning portion of this file.
# METADATA_GDRIVE_URLS, DATASET_GDRIVE_URLS, MODEL_GDRIVE_URLS and
# FCOSMultiHead are project-local names assumed to be defined elsewhere;
# MetricBuilder and RevGrad are assumed to come from the
# mean_average_precision and pytorch_revgrad packages respectively.
import glob
import json
import os
import random
import shutil
import subprocess
from collections import Counter, defaultdict
from random import choices

import gdown
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from mean_average_precision import MetricBuilder
from PIL import Image
from pytorch_revgrad import RevGrad
from sklearn.metrics import f1_score
from torchvision import models, transforms
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.ops import StochasticDepth
from tqdm import tqdm


def download_metadata_gdown(metadata_key, metadata_path='../metadata'):
    # Download a metadata folder from Google Drive into metadata_path.
    if not os.path.exists(metadata_path):
        os.makedirs(metadata_path)
    gdown.download_folder(METADATA_GDRIVE_URLS[metadata_key],
                          output=os.path.join(metadata_path, metadata_key),
                          use_cookies=False)
def download_dataset_gdown(dataset_key, tmp_path='tmp', dataset_path='ds'):
    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    if not os.path.exists(os.path.join(tmp_path, dataset_key)):
        gdown.download_folder(DATASET_GDRIVE_URLS[dataset_key],
                              output=os.path.join(tmp_path, dataset_key),
                              use_cookies=False)
    extract_file = glob.glob(os.path.join(tmp_path, dataset_key) + '/*.zip.001')[0]
    split_json_file = glob.glob(os.path.join(tmp_path, dataset_key) + '/*.json')[0]
    if not os.path.exists(os.path.basename(split_json_file)):
        shutil.move(split_json_file, '.')
    extract_path = os.path.join(tmp_path, 'extract')
    if not os.path.exists(extract_path):
        os.makedirs(extract_path)
    cmd = ['7z', 'x', extract_file, '-o' + str(extract_path)]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    if not os.path.exists(dataset_path):
        os.makedirs(dataset_path)
    dataset_ids = glob.glob(extract_path + '/*/*')
    for folder in dataset_ids:
        if not os.path.exists(os.path.join(dataset_path, os.path.basename(folder))):
            os.rename(folder, os.path.join(dataset_path, os.path.basename(folder)))
    shutil.rmtree(tmp_path)
def download_model_gdown(model_name, model_key, model_path='checkpoints'):
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    gdown.download(MODEL_GDRIVE_URLS[model_name][model_key],
                   output=os.path.join(model_path, model_key),
                   fuzzy=True, use_cookies=False)
def makeOneHotVec(idx, num_classes):
    vec = [1 if i == idx else 0 for i in range(num_classes)]
    return vec
def collate_fn_silver(batch):
    # Collate dicts into per-key lists; integer labels become a long tensor.
    res = defaultdict(list)
    for d in batch:
        for k, v in d.items():
            res[k].append(v)
    res['label'] = torch.tensor(res['label'], dtype=torch.long)
    return res


def collate_fn_silver_multi(batch):
    # Collate dicts into per-key lists; multi-label target tensors are stacked.
    res = defaultdict(list)
    for d in batch:
        for k, v in d.items():
            res[k].append(v)
    res['label'] = torch.stack(res['label'], dim=0)
    return res


def collate_fn(batch):
    res = defaultdict(list)
    for d in batch:
        for k, v in d.items():
            res[k].append(v)
    res['label'] = torch.stack(res['label'])
    return res


def collate_fn_enrico(batch):
    res = defaultdict(list)
    for d in batch:
        for k, v in d.items():
            res[k].append(v)
    res['label'] = torch.tensor(res['label'], dtype=torch.long)
    return res
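# Usage sketch (not part of the original code): wiring collate_fn_enrico into
# a DataLoader. The id list path is an illustrative assumption.
def example_enrico_dataloader():
    dataset = EnricoImageDataset(
        id_list_path='../../metadata/screenclassification/filtered_val_ids.json')
    return torch.utils.data.DataLoader(dataset, batch_size=4,
                                       collate_fn=collate_fn_enrico)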
class EnricoImageDataset(torch.utils.data.Dataset):
    def __init__(self, id_list_path,
                 csv='../../metadata/screenclassification/design_topics.csv',
                 class_map_file='../../metadata/screenclassification/class_map_enrico.json',
                 img_folder=(os.environ['SM_CHANNEL_TRAINING']
                             if 'SM_CHANNEL_TRAINING' in os.environ
                             else '../../downloads/enrico/screenshots'),
                 img_size=128, ra_num_ops=-1, ra_magnitude=-1, one_hot_labels=False):
        super(EnricoImageDataset, self).__init__()
        self.csv = pd.read_csv(csv)
        self.img_folder = img_folder
        self.one_hot_labels = one_hot_labels
        img_transforms = [transforms.Resize(img_size),
                          transforms.ToTensor(),
                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        # Optionally prepend RandAugment when both hyperparameters are set.
        if ra_num_ops > 0 and ra_magnitude > 0:
            img_transforms = [transforms.RandAugment(ra_num_ops, ra_magnitude)] + img_transforms
        self.img_transforms = transforms.Compose(img_transforms)
        self.image_names = list(self.csv['screen_id'])
        self.labels = list(self.csv['topic'])
        self.class_counter = Counter(self.labels)
        # Keep only the screens listed in the split file.
        with open(id_list_path, 'r') as f:
            split_ids = set(json.load(f))
        keep_inds = [i for i in range(len(self.image_names))
                     if str(self.image_names[i]) in split_ids]
        self.image_names = [self.image_names[i] for i in keep_inds]
        self.labels = [self.labels[i] for i in keep_inds]
        with open(class_map_file, 'r') as f:
            map_dict = json.load(f)
        self.label2Idx = map_dict['label2Idx']
        self.idx2Label = map_dict['idx2Label']

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, index):
        img_path = os.path.join(self.img_folder, str(self.image_names[index]) + '.jpg')
        image = Image.open(img_path).convert('RGB')
        image = self.img_transforms(image)
        targets = self.label2Idx[self.labels[index]]
        if self.one_hot_labels:
            targets = torch.tensor(makeOneHotVec(targets, len(self.idx2Label.keys())),
                                   dtype=torch.long)
        return {'image': image, 'label': targets}
class CombinedImageDataset(torch.utils.data.IterableDataset):
    # Infinite iterable that samples from several datasets with given probabilities.
    def __init__(self, ds_list, prob_list):
        super(CombinedImageDataset, self).__init__()
        self.ds_list = ds_list
        self.prob_list = prob_list

    def __iter__(self):
        while True:
            dsi = choices(list(range(len(self.ds_list))), self.prob_list)[0]
            ds = self.ds_list[dsi]
            dse = int(random.random() * len(ds))
            yield ds[dse]
class SilverMultilabelImageDataset(torch.utils.data.Dataset):
    def __init__(self, id_list_path=None, silver_id_list_path_ignores=None, K=150, P=1,
                 csv='../../metadata/screenclassification/silver_webui-multi_topic.csv',
                 img_folder=(os.environ['SM_CHANNEL_TRAINING']
                             if 'SM_CHANNEL_TRAINING' in os.environ
                             else '../../downloads/ds'),
                 img_size=128, one_hot_labels=False, ra_num_ops=-1, ra_magnitude=-1):
        super(SilverMultilabelImageDataset, self).__init__()
        # Infer the number of classes from the header-less CSV's column count.
        with open(csv, 'r') as file:
            first_line = file.readline()
        num_classes = len(first_line.split(',')) - 1
        self.num_classes = num_classes
        self.one_hot_labels = one_hot_labels
        self.K = K
        self.P = P
        self.csv = pd.read_csv(csv, names=['screenshot_path'] +
                               ['class_' + str(i) for i in range(num_classes)])
        for i in range(num_classes):
            self.csv['class_' + str(i)] = self.csv['class_' + str(i)].astype(dtype='float')
        self.img_folder = img_folder
        img_transforms = [transforms.Resize(img_size),
                          transforms.ToTensor(),
                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        if ra_num_ops > 0 and ra_magnitude > 0:
            img_transforms = [transforms.RandAugment(ra_num_ops, ra_magnitude)] + img_transforms
        self.img_transforms = transforms.Compose(img_transforms)
        print('total csv rows', len(self.csv.index))
        if id_list_path is not None:
            # Keep rows whose top-level folder is in the split id list.
            with open(id_list_path, 'r') as f:
                split_ids = set(json.load(f))
            self.csv['split_id'] = self.csv['screenshot_path'].str.replace('\\', '/')
            self.csv = self.csv[self.csv['split_id'].str.contains('/')]
            self.csv['split_id'] = self.csv['split_id'].str.split('/').str[0]
            self.csv = self.csv[self.csv['split_id'].isin(split_ids)]
            self.csv = self.csv.reset_index(drop=True)
            print('filtered csv rows', len(self.csv.index))
        if silver_id_list_path_ignores is not None:
            # Drop rows whose id appears in any of the ignore lists.
            all_ignores = set()
            for ignore_path in silver_id_list_path_ignores:
                with open(ignore_path, 'r') as f:
                    all_ignores |= set(json.load(f))
            self.csv = self.csv[self.csv['screenshot_path'].str.contains('.')]
            self.csv['split_id'] = self.csv['screenshot_path'].str.split('.').str[0]
            self.csv = self.csv[~self.csv['split_id'].isin(all_ignores)]
            self.csv = self.csv.reset_index(drop=True)
            print('filtered csv rows2', len(self.csv.index))
        # Keep the K highest-scoring rows per class.
        keep_inds = []
        for i in range(num_classes):
            keep_inds.extend(list(self.csv.nlargest(K, 'class_' + str(i)).index.values))
        keep_inds = set(keep_inds)
        df_mat = self.csv[['class_' + str(i) for i in range(num_classes)]]
        image_names = []
        image_labels = []
        for i in keep_inds:
            if one_hot_labels:
                image_names.append(self.csv.iloc[i]['screenshot_path'])
                image_labels.append(torch.tensor(df_mat.iloc[i].to_numpy()))
            else:
                # Take the P highest-scoring classes as (repeated) hard labels.
                idxs = np.argsort(df_mat.iloc[i].to_numpy(), axis=-1)[-P:]
                image_name = self.csv.iloc[i]['screenshot_path']
                for idx in idxs:
                    image_names.append(image_name)
                    image_labels.append(idx)
        self.image_names = image_names
        self.labels = image_labels
        self.class_counter = Counter(self.labels)

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, index):
        index = index % len(self.image_names)

        def tryAnother():
            # Fall back to the next example if this one fails to load.
            return self.__getitem__(index + 1)

        try:
            img_path = os.path.join(self.img_folder,
                                    str(self.image_names[index])).replace('\\', '/')
            image = Image.open(img_path).convert('RGB')
            image = self.img_transforms(image)
            targets = self.labels[index]
            return {'image': image, 'label': targets}
        except Exception:
            return tryAnother()
class SilverDataModule(pl.LightningDataModule):
    def __init__(self, batch_size=16, num_workers=0, silver_id_list_path=None,
                 silver_id_list_path_ignores=None, ra_num_ops=2, ra_magnitude=9, P=1, K=150,
                 silver_csv='../../metadata/screenclassification/silver_webui-multi_topic.csv',
                 img_folder='../../downloads/ds'):
        super(SilverDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        ds1 = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_train_ids.json',
            one_hot_labels=True, ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude)
        ds2 = SilverMultilabelImageDataset(
            csv=silver_csv, img_folder=img_folder, id_list_path=silver_id_list_path,
            silver_id_list_path_ignores=silver_id_list_path_ignores, P=P, K=K,
            one_hot_labels=True, ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude)
        # Mix gold (Enrico) and silver (WebUI) data at a 1:14 ratio.
        combined_ds = CombinedImageDataset([ds1, ds2], [1 / 15, 14 / 15])
        self.train_dataset = combined_ds
        self.val_dataset = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_val_ids.json')
        self.test_dataset = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_test_ids.json')

    def train_dataloader(self):
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size,
                                           collate_fn=collate_fn_silver_multi)

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size,
                                           collate_fn=collate_fn_silver)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size,
                                           collate_fn=collate_fn_silver)
class EnricoDataModule(pl.LightningDataModule):
    def __init__(self, batch_size=16, num_workers=4, img_size=128,
                 ra_num_ops=-1, ra_magnitude=-1):
        super(EnricoDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.train_dataset = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_train_ids.json',
            ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude, img_size=img_size)
        self.val_dataset = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_val_ids.json',
            img_size=img_size)
        self.test_dataset = EnricoImageDataset(
            id_list_path='../../metadata/screenclassification/filtered_test_ids.json',
            img_size=img_size)

    def train_dataloader(self):
        # Weight samples inversely to class frequency to balance training batches.
        samples_weight = torch.tensor([1 / self.train_dataset.class_counter[t]
                                       for t in self.train_dataset.labels])
        sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight,
                                                                 len(samples_weight))
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size, sampler=sampler,
                                           collate_fn=collate_fn_enrico)

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size,
                                           collate_fn=collate_fn_enrico)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size,
                                           collate_fn=collate_fn_enrico)
class UIScreenClassifier(pl.LightningModule):
    def __init__(self, num_classes=20, dropout_block=0.0, dropout=0.2, lr=5e-05,
                 soft_labels=True, stochastic_depth_p=0.2, arch='resnet50'):
        super(UIScreenClassifier, self).__init__()
        self.save_hyperparameters()
        if arch == 'resnet50' or arch == 'resnet50_conv':
            model = models.resnet50(pretrained=False)
            replace_default_bn_with_custom(model, dropout=dropout_block)
            replace_res_blocks_with_stochastic(model, stochastic_depth_p=stochastic_depth_p)
            model.fc = nn.Sequential(nn.Dropout(dropout),
                                     nn.Linear(model.fc.in_features, num_classes))
            self.model = model
            # Convolutional classification head used by the 'resnet50_conv' variant.
            self.conv_cls = nn.Sequential(nn.InstanceNorm2d(2048),
                                          nn.Dropout2d(dropout),
                                          nn.Conv2d(2048, num_classes, 3, stride=1, padding=1))
        elif arch == 'vgg16':
            model = models.vgg16_bn(pretrained=False, dropout=dropout)
            replace_default_bn_with_custom(model, dropout=dropout_block)
            model.classifier[-1] = nn.Linear(4096, num_classes)
            self.model = model

    def forward(self, image):
        if self.hparams.arch == 'resnet50' or self.hparams.arch == 'vgg16':
            return self.model(image)
        elif self.hparams.arch == 'resnet50_conv':
            # Run the ResNet trunk manually, then average the conv-head
            # logits over spatial positions.
            x = self.model.conv1(image)
            x = self.model.bn1(x)
            x = self.model.relu(x)
            x = self.model.maxpool(x)
            x = self.model.layer1(x)
            x = self.model.layer2(x)
            x = self.model.layer3(x)
            x = self.model.layer4(x)
            x = self.conv_cls(x)
            batch_size = x.shape[0]
            res = x.view(batch_size, self.hparams.num_classes, -1).mean(dim=-1)
            return res

    def training_step(self, batch, batch_idx):
        image = batch['image']
        labels = batch['label']
        # Images may have different sizes, so forward them one at a time.
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if len(labels.shape) == 2:
            if self.hparams.soft_labels:
                loss = F.cross_entropy(out, labels.float())
            else:
                loss = F.binary_cross_entropy_with_logits(out, labels)
        else:
            loss = F.cross_entropy(out, labels)
        return loss

    def validation_step(self, batch, batch_idx):
        image = batch['image']
        labels = batch['label']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if len(labels.shape) == 2:
            return (out, labels)
        else:
            _, inds = out.max(dim=-1)
            return (inds, labels)

    def validation_epoch_end(self, outputs):
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        if len(all_labels.shape) == 2:
            bce_score = F.binary_cross_entropy_with_logits(all_outs, all_labels)
            score_dict = {'bce': bce_score}
            print(score_dict)
            self.log_dict(score_dict)
        else:
            all_outs = all_outs.detach().cpu().long().numpy()
            all_labels = all_labels.detach().cpu().long().numpy()
            macro_score = f1_score(all_labels, all_outs, average='macro')
            micro_score = f1_score(all_labels, all_outs, average='micro')
            weighted_score = f1_score(all_labels, all_outs, average='weighted')
            score_dict = {'f1_macro': macro_score, 'f1_micro': micro_score,
                          'f1_weighted': weighted_score}
            print(score_dict)
            self.log_dict(score_dict)

    def test_step(self, batch, batch_idx):
        image = batch['image']
        labels = batch['label']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if len(labels.shape) == 2:
            return (out, labels)
        else:
            _, inds = out.max(dim=-1)
            return (inds, labels)

    def test_epoch_end(self, outputs):
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        if len(all_labels.shape) == 2:
            bce_score = F.binary_cross_entropy_with_logits(all_outs, all_labels)
            score_dict = {'bce': bce_score}
            print(score_dict)
            self.log_dict(score_dict)
        else:
            all_outs = all_outs.detach().cpu().long().numpy()
            all_labels = all_labels.detach().cpu().long().numpy()
            macro_score = f1_score(all_labels, all_outs, average='macro')
            micro_score = f1_score(all_labels, all_outs, average='micro')
            weighted_score = f1_score(all_labels, all_outs, average='weighted')
            score_dict = {'f1_macro': macro_score, 'f1_micro': micro_score,
                          'f1_weighted': weighted_score}
            print(score_dict)
            return score_dict

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW([p for p in self.parameters() if p.requires_grad],
                                      lr=self.hparams.lr)
        return optimizer
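# Training sketch (not part of the original code): how EnricoDataModule and
# UIScreenClassifier might be combined with a Lightning Trainer. The trainer
# arguments here are illustrative assumptions.
def example_train_screen_classifier():
    dm = EnricoDataModule(batch_size=16, num_workers=4)
    model = UIScreenClassifier(num_classes=20, arch='resnet50')
    trainer = pl.Trainer(max_epochs=10)
    trainer.fit(model, datamodule=dm)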
class UIScreenSegmenter(pl.LightningModule):
    def __init__(self, num_classes=20):
        super(UIScreenSegmenter, self).__init__()
        self.save_hyperparameters()
        model = models.resnet50(pretrained=False, norm_layer=nn.InstanceNorm2d)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        self.model = model
        # Upsampling decoder: 2048 -> num_classes channels across five 2x upsamples.
        self.decoder = nn.Sequential(
            nn.InstanceNorm2d(2048), nn.Upsample(scale_factor=2),
            nn.Conv2d(2048, 1024, 3, stride=1, padding=1),
            nn.InstanceNorm2d(1024), nn.ReLU(), nn.Upsample(scale_factor=2),
            nn.Conv2d(1024, 512, 3, stride=1, padding=1),
            nn.InstanceNorm2d(512), nn.ReLU(), nn.Upsample(scale_factor=2),
            nn.Conv2d(512, 256, 3, stride=1, padding=1),
            nn.InstanceNorm2d(256), nn.ReLU(), nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, 3, stride=1, padding=1),
            nn.InstanceNorm2d(128), nn.ReLU(), nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.InstanceNorm2d(64), nn.ReLU(),
            nn.Conv2d(64, num_classes, 3, stride=1, padding=1))

    def encode(self, img):
        x = self.model.conv1(img)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        return x

    def decode(self, x, out_size):
        x = self.decoder(x)
        return F.interpolate(x, size=out_size, mode='bilinear', align_corners=False)

    def forward(self, x):
        return self.decode(self.encode(x), (x.shape[-2], x.shape[-1]))

    def training_step(self, batch, batch_idx):
        image = batch['image']
        segmentation = batch['segmentation']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        losses = [F.binary_cross_entropy_with_logits(outs[i], segmentation[i].unsqueeze(0))
                  for i in range(len(outs))]
        loss = torch.stack(losses).mean()
        sch = self.lr_schedulers()
        if sch is not None:
            sch.step()
        return loss

    def validation_step(self, batch, batch_idx):
        image = batch['image']
        segmentation = batch['segmentation']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        losses = [F.binary_cross_entropy_with_logits(outs[i], segmentation[i].unsqueeze(0))
                  for i in range(len(outs))]
        loss = torch.stack(losses).mean()
        return loss

    def validation_epoch_end(self, outputs):
        bce_score = torch.stack(outputs).mean()
        score_dict = {'bce': bce_score}
        print(score_dict)
        self.log_dict(score_dict)

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.parameters(), lr=0.001, momentum=0.9,
                                    nesterov=True, weight_decay=0.0001)
        lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
        return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
class StochasticBasicBlock(nn.Module):
    # Wraps a torchvision BasicBlock, applying stochastic depth to its residual branch.
    def __init__(self, m, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
        super(StochasticBasicBlock, self).__init__()
        self.m = m
        self.sd = StochasticDepth(stochastic_depth_p, mode=stochastic_depth_mode)

    def forward(self, x):
        identity = x
        out = self.m.conv1(x)
        out = self.m.bn1(out)
        out = self.m.relu(out)
        out = self.m.conv2(out)
        out = self.m.bn2(out)
        out = self.sd(out)
        if self.m.downsample is not None:
            identity = self.m.downsample(x)
        out += identity
        out = self.m.relu(out)
        return out
class StochasticBottleneck(nn.Module):
    def __init__(self, m, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
        super(StochasticBottleneck, self).__init__()
        self.m = m
        self.sd = StochasticDepth(stochastic_depth_p, mode=stochastic_depth_mode)

    def forward(self, x):
        identity = x
        out = self.m.conv1(x)
        out = self.m.bn1(out)
        out = self.m.relu(out)
        out = self.m.conv2(out)
        out = self.m.bn2(out)
        out = self.m.relu(out)
        out = self.m.conv3(out)
        out = self.m.bn3(out)
        out = self.sd(out)
        if self.m.downsample is not None:
            identity = self.m.downsample(x)
        out += identity
        out = self.m.relu(out)
        return out
class CustomNormAndDropout(nn.Module):
    def __init__(self, num_features, dropout):
        super(CustomNormAndDropout, self).__init__()
        self.norm = nn.InstanceNorm2d(num_features)
        self.dropout = nn.Dropout2d(dropout)

    def forward(self, x):
        x = self.norm(x)
        x = self.dropout(x)
        return x
def replace_default_bn_with_custom(model, dropout=0.0):
    # Recursively swap every BatchNorm2d for CustomNormAndDropout.
    for child_name, child in model.named_children():
        if isinstance(child, nn.BatchNorm2d):
            setattr(model, child_name, CustomNormAndDropout(child.num_features, dropout))
        else:
            replace_default_bn_with_custom(child, dropout)
def replace_default_bn_with_in(model):
    for child_name, child in model.named_children():
        if isinstance(child, nn.BatchNorm2d):
            setattr(model, child_name, nn.InstanceNorm2d(child.num_features))
        else:
            replace_default_bn_with_in(child)
def replace_res_blocks_with_stochastic(model, stochastic_depth_p=0.2,
                                       stochastic_depth_mode='row'):
    # Collect (parent, name, block, wrapper class) so each residual block can
    # be replaced in place on its actual parent module. (The original version
    # stored only the child name and set attributes on the top-level model,
    # and passed the drop probability where the wrapped block was expected.)
    all_blocks = []

    def get_blocks(module, blocks):
        for child_name, child in module.named_children():
            if isinstance(child, BasicBlock):
                blocks.append((module, child_name, child, StochasticBasicBlock))
            elif isinstance(child, Bottleneck):
                blocks.append((module, child_name, child, StochasticBottleneck))
            else:
                get_blocks(child, blocks)

    get_blocks(model, all_blocks)
    # Scale the drop probability linearly with depth.
    p_alphas = torch.linspace(0, 1, len(all_blocks)) * stochastic_depth_p
    for bi, (parent, name, block, wrapper) in enumerate(all_blocks):
        setattr(parent, name, wrapper(block, float(p_alphas[bi]), stochastic_depth_mode))
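# Sketch (not part of the original code): applying the regularization helpers
# to a plain torchvision ResNet, mirroring what UIScreenClassifier.__init__ does.
def example_regularized_resnet():
    net = models.resnet50(pretrained=False)
    replace_default_bn_with_custom(net, dropout=0.1)
    replace_res_blocks_with_stochastic(net, stochastic_depth_p=0.2)
    return net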
class UIElementDetector(pl.LightningModule):
    def __init__(self, num_classes=25, min_size=320, max_size=640, use_multi_head=True,
                 lr=0.0001, val_weights=None, test_weights=None, arch='fcos'):
        super(UIElementDetector, self).__init__()
        self.save_hyperparameters()
        if arch == 'fcos':
            model = torchvision.models.detection.fcos_resnet50_fpn(
                min_size=min_size, max_size=max_size, num_classes=num_classes,
                trainable_backbone_layers=5)
            if use_multi_head:
                # FCOSMultiHead is a project-local head module.
                multi_head = FCOSMultiHead(
                    model.backbone.out_channels,
                    model.anchor_generator.num_anchors_per_location()[0],
                    num_classes)
                model.head = multi_head
        elif arch == 'ssd':
            model = torchvision.models.detection.ssd300_vgg16(
                num_classes=num_classes, trainable_backbone_layers=5)
        self.model = model

    def training_step(self, batch, batch_idx):
        images, targets = batch
        images = list(image for image in images)
        targets = [{k: v for k, v in t.items()} for t in targets]
        loss_dict = self.model(images, targets)
        loss = sum(loss for loss in loss_dict.values())
        self.log_dict({'loss': float(loss)})
        return loss

    def _format_preds_and_gts(self, images, targets):
        # Shared by validation_step and test_step (which were identical):
        # convert torchvision detection outputs and targets into the arrays
        # expected by the mean_average_precision MetricBuilder.
        outputs = self.model(images)
        preds = []
        gts = []
        for batch_i in range(len(outputs)):
            pred_box = outputs[batch_i]['boxes']
            pred_score = outputs[batch_i]['scores']
            pred_label = outputs[batch_i]['labels']
            preds.append(torch.cat((pred_box, pred_label.unsqueeze(-1),
                                    pred_score.unsqueeze(-1)), dim=-1))
            gtsi = []
            target_len = targets[batch_i]['boxes'].shape[0]
            for i in range(target_len):
                target_box = targets[batch_i]['boxes'][i]
                target_label = targets[batch_i]['labels'][i]
                if len(target_label.shape) == 1:
                    # Multi-label targets: one ground-truth row per positive class.
                    for ci in range(target_label.shape[0]):
                        if target_label[ci] > 0:
                            gtsi.append(torch.cat(
                                (target_box,
                                 torch.tensor([ci, 0, 0], device=target_box.device)),
                                dim=-1))
                else:
                    gtsi.append(torch.cat(
                        (target_box,
                         torch.tensor([target_label, 0, 0], device=target_box.device)),
                        dim=-1))
            gts.append(torch.stack(gtsi) if len(gtsi) > 0
                       else torch.zeros(0, 7, device=self.device))
        return (preds, gts)

    def _map_from_outputs(self, outputs, weights):
        # Shared by validation_epoch_end and test_epoch_end: compute mAP@0.5,
        # optionally as a weighted sum of per-class APs.
        metric_fn = MetricBuilder.build_evaluation_metric(
            'map_2d', async_mode=True, num_classes=self.hparams.num_classes)
        for batch_output in outputs:
            for i in range(len(batch_output[0])):
                metric_fn.add(batch_output[0][i].detach().cpu().numpy(),
                              batch_output[1][i].detach().cpu().numpy())
        metrics = metric_fn.value(iou_thresholds=0.5)
        print(np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]]))
        if weights is None:
            mapscore = metrics['mAP']
        else:
            aps = np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]])
            mapscore = (aps * np.array(weights)).sum()
        self.log_dict({'mAP': mapscore})

    def validation_step(self, batch, batch_idx):
        images, targets = batch
        images = list(image for image in images)
        targets = [{k: v for k, v in t.items()} for t in targets]
        return self._format_preds_and_gts(images, targets)

    def validation_epoch_end(self, outputs):
        self._map_from_outputs(outputs, self.hparams.val_weights)

    def test_step(self, batch, batch_idx):
        images, targets = batch
        images = list(image for image in images)
        targets = [{k: v for k, v in t.items()} for t in targets]
        return self._format_preds_and_gts(images, targets)

    def test_epoch_end(self, outputs):
        self._map_from_outputs(outputs, self.hparams.test_weights)

    def configure_optimizers(self):
        return torch.optim.SGD(filter(lambda p: p.requires_grad, self.parameters()),
                               lr=self.hparams.lr)
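# Inference sketch (not part of the original code): running the detector on a
# batch of image tensors. use_multi_head=False avoids the project-local
# FCOSMultiHead dependency; the input shape is an illustrative assumption.
def example_detect_elements():
    detector = UIElementDetector(num_classes=25, arch='fcos', use_multi_head=False)
    detector.model.eval()
    images = [torch.rand(3, 640, 320)]  # list of CHW tensors in [0, 1]
    with torch.no_grad():
        outputs = detector.model(images)
    return outputs  # per-image dicts with 'boxes', 'scores', 'labels'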
def random_viewport_from_full(height, w, h):
    # Sample a random full-width viewport of the given height from a page of size (w, h).
    h1 = int(random.random() * (h - height))
    h2 = h1 + height
    viewport = (0, h1, w, h2)
    return viewport
def random_viewport_pair_from_full(img_full, height_ratio):
    # Crop two vertically-overlapping viewports from the same full-page screenshot.
    img_pil = Image.open(img_full).convert('RGB')
    w, h = img_pil.size
    height = int(w * height_ratio)
    viewport1 = random_viewport_from_full(height, w, h)
    vh1 = viewport1[1]
    # Offset the second viewport by at most one viewport height in either direction.
    delta = int(random.random() * (2 * height)) - height
    vh2 = vh1 + delta
    vh2 = min(max(0, vh2), h - height)
    viewport2 = (0, vh2, w, vh2 + height)
    view1 = img_pil.crop(viewport1)
    view2 = img_pil.copy().crop(viewport2)
    return (view1, view2)
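# Sketch (not part of the original code): cropping a positive "same scroll"
# pair from a full-page screenshot. The file path is an illustrative assumption.
def example_viewport_pair():
    view1, view2 = random_viewport_pair_from_full(
        'full_page_screenshot.webp', height_ratio=2.164)
    return view1.size, view2.size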
class WebUISimilarityDataset(torch.utils.data.IterableDataset):
    def __init__(self, split_file='../../downloads/train_split_web350k.json',
                 root_dir='../../downloads/ds',
                 domain_map_file='../../metadata/screensim/domain_map.json',
                 duplicate_map_file='../../metadata/screensim/duplicate_map.json',
                 device_name='iPhone-13 Pro', scroll_height_ratio=2.164,
                 img_size=(256, 128), uda_dir='../../downloads/rico/combined',
                 uda_ignore_id_files=[
                     '../../metadata/screenclassification/filtered_train_ids.json',
                     '../../metadata/screenclassification/filtered_val_ids.json',
                     '../../metadata/screenclassification/filtered_test_ids.json']):
        super(WebUISimilarityDataset, self).__init__()
        self.root_dir = root_dir
        self.device_name = device_name
        self.scroll_height_ratio = scroll_height_ratio
        with open(split_file, 'r') as f:
            split_list = json.load(f)
        split_set = set([str(s) for s in split_list])
        with open(domain_map_file, 'r') as f:
            self.domain_map = json.load(f)
        # Keep domains whose crawls are all in this split and that have at
        # least two distinct URLs.
        self.domain_list = []
        for dn in tqdm(self.domain_map):
            if (all([url[1] in split_set for url in self.domain_map[dn]])
                    and len(set([u[0] for u in self.domain_map[dn]])) > 1):
                self.domain_list.append(dn)
        with open(duplicate_map_file, 'r') as f:
            self.duplicate_map = json.load(f)
        self.duplicate_list = []
        for dn in tqdm(self.duplicate_map):
            if all([url in split_set for url in self.duplicate_map[dn]]):
                self.duplicate_list.append(dn)
        self.img_transforms = transforms.Compose([
            transforms.Resize(img_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        # Rico screenshots serve as the unlabeled target domain for DANN-style UDA.
        ignore_ids = set()
        for ignore_file in uda_ignore_id_files:
            with open(ignore_file, 'r') as f:
                ignore_ids |= set(json.load(f))
        self.uda_dir = uda_dir
        self.uda_files = [f for f in os.listdir(uda_dir)
                          if f.endswith('.jpg') and f.replace('.jpg', '') not in ignore_ids]

    def sample_same_scroll(self):
        # Positive pair: two viewports scrolled from the same full-page screenshot.
        try:
            random_domain = random.choice(self.domain_list)
            domain_urls = self.domain_map[random_domain]
            crawl_id = random.choice(domain_urls)[1]
            screenshot_full_path = os.path.join(
                self.root_dir, crawl_id, self.device_name + '-screenshot-full.webp')
            pil_img1, pil_img2 = random_viewport_pair_from_full(
                screenshot_full_path, self.scroll_height_ratio)
            return (pil_img1, pil_img2)
        except Exception:
            return self.sample_same_scroll()

    def sample_same_screen(self):
        # Positive pair: two crawls of the same (duplicate) screen.
        try:
            random_duplicate = random.choice(self.duplicate_list)
            sampled_screens = random.sample(self.duplicate_map[random_duplicate], 2)
            img1_path = os.path.join(self.root_dir, sampled_screens[0],
                                     self.device_name + '-screenshot.webp')
            img2_path = os.path.join(self.root_dir, sampled_screens[1],
                                     self.device_name + '-screenshot.webp')
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except Exception:
            return self.sample_same_screen()

    def sample_same_domain(self):
        # Negative pair: two different URLs from the same domain.
        try:
            random_domain = random.choice(self.domain_list)
            url1 = random.choice(self.domain_map[random_domain])
            candidates = [u for u in self.domain_map[random_domain] if u[0] != url1[0]]
            url2 = random.choice(candidates)
            img1_path = os.path.join(self.root_dir, url1[1],
                                     self.device_name + '-screenshot.webp')
            img2_path = os.path.join(self.root_dir, url2[1],
                                     self.device_name + '-screenshot.webp')
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except Exception:
            return self.sample_same_domain()

    def sample_different_domain(self):
        # Negative pair: one URL from each of two different domains.
        try:
            sampled_domains = random.sample(self.domain_list, 2)
            domain1 = sampled_domains[0]
            domain2 = sampled_domains[1]
            url1 = random.choice(self.domain_map[domain1])
            url2 = random.choice(self.domain_map[domain2])
            img1_path = os.path.join(self.root_dir, url1[1],
                                     self.device_name + '-screenshot.webp')
            img2_path = os.path.join(self.root_dir, url2[1],
                                     self.device_name + '-screenshot.webp')
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except Exception:
            return self.sample_different_domain()

    def sample_uda_img(self):
        try:
            img_path = os.path.join(self.uda_dir, random.choice(self.uda_files))
            return Image.open(img_path).convert('RGB')
        except Exception:
            return self.sample_uda_img()

    def __iter__(self):
        while True:
            probs = [0.25, 0.25, 0.25, 0.25]
            funcs = [self.sample_same_scroll, self.sample_same_screen,
                     self.sample_same_domain, self.sample_different_domain]
            si = choices(list(range(len(funcs))), probs)[0]
            res = funcs[si]()
            # The first two samplers produce positive (similar) pairs.
            label = si < 2
            yield {'label': label,
                   'image1': self.img_transforms(res[0]),
                   'image2': self.img_transforms(res[1]),
                   'imageuda1': self.img_transforms(self.sample_uda_img()),
                   'imageuda2': self.img_transforms(self.sample_uda_img())}
class WebUISimilarityDataModule(pl.LightningDataModule):
    def __init__(self, batch_size=16, num_workers=4,
                 split_file='../../downloads/train_split_web350k.json',
                 root_dir='../../downloads/ds',
                 domain_map_file='../../metadata/screensim/domain_map.json',
                 duplicate_map_file='../../metadata/screensim/duplicate_map.json',
                 device_name='iPhone-13 Pro', scroll_height_ratio=2.164, img_size=128):
        super(WebUISimilarityDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.split_file = split_file
        self.train_dataset = WebUISimilarityDataset(split_file=split_file)
        self.val_dataset = WebUISimilarityDataset(
            split_file='../../downloads/val_split_webui.json')
        self.test_dataset = WebUISimilarityDataset(
            split_file='../../downloads/test_split_webui.json')

    def train_dataloader(self):
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size)

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers,
                                           batch_size=self.batch_size)
class UIScreenEmbedder(pl.LightningModule):
    def __init__(self, hidden_size=256, lr=5e-05, margin_pos=0.2, margin_neg=0.5,
                 lambda_dann=1):
        super(UIScreenEmbedder, self).__init__()
        self.save_hyperparameters()
        model = models.resnet18(pretrained=False)
        replace_default_bn_with_in(model)
        model.fc = nn.Linear(model.fc.in_features, hidden_size)
        self.model = model
        # Domain classifier behind a gradient-reversal layer (DANN).
        self.classifier = nn.Sequential(RevGrad(),
                                        nn.Linear(model.fc.in_features, hidden_size),
                                        nn.ReLU(),
                                        nn.Linear(hidden_size, 1))

    def forward_uda(self, x):
        # Run the trunk up to global pooling, then the domain classifier.
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        image1 = batch['image1']
        image2 = batch['image2']
        imageuda1 = batch['imageuda1']
        imageuda2 = batch['imageuda2']
        labels = batch['label']
        outs1 = self.model(image1)
        outs2 = self.model(image2)
        batch_size = image1.shape[0]
        # Contrastive loss: pull positives within margin_pos, push negatives
        # past margin_neg.
        delta = outs1 - outs2
        dist = torch.linalg.norm(delta, dim=-1)
        losses = torch.zeros(batch_size, device=self.device)
        losses[labels] = (dist[labels] - self.hparams.margin_pos).clamp(min=0)
        losses[~labels] = (self.hparams.margin_neg - dist[~labels]).clamp(min=0)
        loss_sim = losses.mean()
        if self.hparams.lambda_dann == 0:
            loss = loss_sim
            self.log_dict({'loss': loss})
            return loss
        cls_pred_outs1 = self.forward_uda(image1)
        cls_pred_outs2 = self.forward_uda(image2)
        cls_pred_outsuda1 = self.forward_uda(imageuda1)
        cls_pred_outsuda2 = self.forward_uda(imageuda2)
        cls_pred = torch.cat((cls_pred_outs1, cls_pred_outs2,
                              cls_pred_outsuda1, cls_pred_outsuda2), dim=0).squeeze(-1)
        # Source (web) images are labeled 1, target (Rico) images 0.
        cls_label = torch.cat((torch.ones(batch_size * 2, device=self.device),
                               torch.zeros(batch_size * 2, device=self.device)), dim=0)
        loss_cls = F.binary_cross_entropy_with_logits(cls_pred, cls_label)
        loss = loss_sim + self.hparams.lambda_dann * loss_cls
        self.log_dict({'loss': loss, 'loss_sim': loss_sim, 'loss_cls': loss_cls})
        return loss

    def validation_step(self, batch, batch_idx):
        image1 = batch['image1']
        image2 = batch['image2']
        imageuda1 = batch['imageuda1']
        imageuda2 = batch['imageuda2']
        labels = batch['label']
        outs1 = self.model(image1)
        outs2 = self.model(image2)
        batch_size = image1.shape[0]
        delta = outs1 - outs2
        dist = torch.linalg.norm(delta, dim=-1)
        # A pair counts as similar when its distance is below the margin midpoint.
        thresh = 0.5 * (self.hparams.margin_pos + self.hparams.margin_neg)
        preds = dist < thresh
        if self.hparams.lambda_dann == 0:
            return (preds, labels)
        cls_pred_outs1 = self.forward_uda(image1)
        cls_pred_outs2 = self.forward_uda(image2)
        cls_pred_outsuda1 = self.forward_uda(imageuda1)
        cls_pred_outsuda2 = self.forward_uda(imageuda2)
        cls_pred = torch.cat((cls_pred_outs1, cls_pred_outs2,
                              cls_pred_outsuda1, cls_pred_outsuda2),
                             dim=0).squeeze(-1) > 0
        cls_label = torch.cat((torch.ones(batch_size * 2, device=self.device),
                               torch.zeros(batch_size * 2, device=self.device)), dim=0)
        return (preds, labels, cls_pred, cls_label)

    def validation_epoch_end(self, outputs):
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        score = f1_score(all_labels.detach().cpu().numpy(),
                         all_outs.detach().cpu().numpy())
        if self.hparams.lambda_dann == 0:
            self.log_dict({'f1': score})
        else:
            all_outs_uda = torch.cat([o[2] for o in outputs], dim=0)
            all_labels_uda = torch.cat([o[3] for o in outputs], dim=0)
            score_uda = f1_score(all_labels_uda.detach().cpu().numpy(),
                                 all_outs_uda.detach().cpu().numpy())
            self.log_dict({'f1': score, 'f1_uda': score_uda})

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW([p for p in self.parameters() if p.requires_grad],
                                      lr=self.hparams.lr)
        return optimizer
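# Sketch (not part of the original code): embedding two screenshots and
# thresholding their distance the same way validation_step does.
def example_screen_similarity(model, img1, img2):
    # img1, img2: normalized image tensors of shape (3, H, W).
    with torch.no_grad():
        e1 = model(img1.unsqueeze(0))
        e2 = model(img2.unsqueeze(0))
    dist = torch.linalg.norm(e1 - e2, dim=-1)
    thresh = 0.5 * (model.hparams.margin_pos + model.hparams.margin_neg)
    return bool(dist < thresh)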
# ---------------------------------------------------------------------------
# Audio-tagging utilities (TensorFlow 1.x-style APIs via tf.compat.v1).
# config, config_file, DEBUG, IDS_ERROR, models_baselines, frontend, midend
# and backend are project-local names assumed to be defined elsewhere.
# ---------------------------------------------------------------------------
import pickle
from pathlib import Path

import librosa
import tensorflow as tf
from joblib import Parallel, delayed


def load_id2any(index_file, format=None):
    # Read a two-column TSV of (id, value); optionally eval the value into floats.
    ids = []
    id2any = dict()
    with open(index_file) as fspec:
        for line in fspec.readlines():
            item_id, value = line.strip().split('\t')
            ids.append(item_id)
            if format == 'toFloat':
                id2any[item_id] = [float(i) for i in eval(value)]
            else:
                id2any[item_id] = value
    return (ids, id2any)
def split_magna(ids, id2path):
    # Split by the hex digit of the parent folder name: 0-b train, c val, d-f test.
    train_set = []
    val_set = []
    test_set = []
    for id in ids:
        path = id2path[id]
        folder = int(path[path.rfind('/') - 1:path.rfind('/')], 16)
        if folder < 12:
            train_set.append(id)
        elif folder < 13:
            val_set.append(id)
        else:
            test_set.append(id)
    return (train_set, val_set, test_set)
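# Worked example (not part of the original code): for a path like
# '.../f/clip_123.mp3' the parent folder digit is 'f' (15), so the clip falls
# in the test split; '5' would fall in train, and 'c' (12) in validation.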
def write_gt_file(ids, id2gt, file_name):
    with open(file_name, 'w') as fw:
        for id in ids:
            if id in IDS_ERROR:
                continue
            fw.write('%s\t%s\n' % (id, id2gt[id]))
def evaluation(batch_dispatcher, tf_vars, array_cost, pred_array, id_array):
    [sess, normalized_y, cost, x, y_, is_train] = tf_vars
    for batch in tqdm(batch_dispatcher):
        pred, cost_pred = sess.run([normalized_y, cost],
                                   feed_dict={x: batch['X'], y_: batch['Y'],
                                              is_train: False})
        # On the first batch, initialize the accumulators; afterwards, append.
        if not array_cost:
            pred_array = pred
            id_array = batch['ID']
        else:
            pred_array = np.concatenate((pred_array, pred), axis=0)
            id_array = np.append(id_array, batch['ID'])
        array_cost.append(cost_pred)
    print('predictions', pred_array.shape)
    print('cost', np.mean(array_cost))
    return (array_cost, pred_array, id_array)
def model_number(x, is_training, config):
    # Dispatch to one of the baseline or musically-motivated architectures.
    if config['model_number'] == 0:
        print('\nMODEL: Dieleman | BN input')
        return models_baselines.dieleman(x, is_training, config)
    elif config['model_number'] == 1:
        print('\nMODEL: VGG 32 | BN input')
        return models_baselines.vgg(x, is_training, config, 32)
    elif config['model_number'] == 2:
        print('\nMODEL: VGG 128 | BN input')
        return models_baselines.vgg(x, is_training, config, 128)
    elif config['model_number'] == 3:
        print('\nMODEL: Timbre | BN input')
        return models_baselines.timbre(x, is_training, config, num_filt=1)
    elif config['model_number'] == 10:
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > RESIDUAL > GLOBAL POOLING')
        frontend_features_list = frontend.musically_motivated_cnns(
            x, is_training, config['audio_rep']['n_mels'], num_filt=1.6,
            type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = midend_features_list[3]
        return backend.temporal_pooling(midend_features, is_training, 50, 200,
                                        type='globalpool')
    elif config['model_number'] == 11:
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > DENSE > GLOBAL POOLING')
        frontend_features_list = frontend.musically_motivated_cnns(
            x, is_training, config['audio_rep']['n_mels'], num_filt=1.6,
            type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200,
                                        type='globalpool')
    elif config['model_number'] == 12:
        print('\nMODEL: BN input > [7, 40%] > DENSE > ATTENTION + POSITIONAL ENCODING')
        frontend_features_list = frontend.musically_motivated_cnns(
            x, is_training, config['audio_rep']['n_mels'], num_filt=4.5, type='74timbral')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200,
                                        type='attention_positional')
    elif config['model_number'] == 13:
        print('\nMODEL: BN input > [7, 40%] > DENSE > AUTOPOOL')
        frontend_features_list = frontend.musically_motivated_cnns(
            x, is_training, config['audio_rep']['n_mels'], num_filt=4.5, type='74timbral')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200,
                                        type='autopool')
    elif config['model_number'] == 14:
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > RESIDUAL > RNN')
        frontend_features_list = frontend.musically_motivated_cnns(
            x, is_training, config['audio_rep']['n_mels'], num_filt=1.6,
            type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = midend_features_list[3]
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='rnn')
    raise RuntimeError("ERROR: Model {} can't be found!".format(config['model_number']))
def dieleman(x, is_training, config):
    print('Input: ' + str(x.get_shape()))
    input_layer = tf.expand_dims(x, 3)
    bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)
    conv1 = tf.compat.v1.layers.conv2d(
        inputs=bn_input, filters=32, kernel_size=[8, config['yInput']], padding='valid',
        activation=tf.nn.relu, name='1cnnOut',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[4, 1],
                                              strides=[4, 1], name='1-pool')
    pool1_rs = tf.reshape(pool1, [-1, int(pool1.shape[1]), int(pool1.shape[3]), 1])
    print('\t\t' + str(pool1_rs.get_shape()))
    conv2 = tf.compat.v1.layers.conv2d(
        inputs=pool1_rs, filters=32, kernel_size=[8, pool1_rs.shape[2]], padding='valid',
        activation=tf.nn.relu, name='2cnnOut',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[4, 1],
                                              strides=[4, 1], name='2-pool')
    flat_pool2 = tf.reshape(pool2, [-1, int(pool2.shape[1] * pool2.shape[2] * pool2.shape[3])])
    print('\t\t' + str(flat_pool2.shape))
    dense = tf.compat.v1.layers.dense(
        inputs=flat_pool2, activation=tf.nn.relu, units=100,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    output = tf.compat.v1.layers.dense(
        inputs=dense, activation=None, units=config['num_classes_dataset'],
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print('output: ' + str(output.get_shape()))
    return output
def vgg(x, is_training, config, num_filters=32):
    # Uses tf.compat.v1.layers throughout (the original mixed tf.layers and
    # tf.compat.v1.layers for the same calls).
    print('Input: ' + str(x.get_shape()))
    input_layer = tf.expand_dims(x, 3)
    bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)
    conv1 = tf.compat.v1.layers.conv2d(
        inputs=bn_input, filters=num_filters, kernel_size=[3, 3], padding='same',
        activation=tf.nn.relu, name='1CNN',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv1, pool_size=[4, 1],
                                              strides=[2, 2])
    print('pool1: ' + str(pool1.get_shape()))
    do_pool1 = tf.compat.v1.layers.dropout(pool1, rate=0.25, training=is_training)
    conv2 = tf.compat.v1.layers.conv2d(
        inputs=do_pool1, filters=num_filters, kernel_size=[3, 3], padding='same',
        activation=tf.nn.relu, name='2CNN',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv2, pool_size=[2, 2],
                                              strides=[2, 2])
    print('pool2: ' + str(pool2.get_shape()))
    do_pool2 = tf.compat.v1.layers.dropout(pool2, rate=0.25, training=is_training)
    conv3 = tf.compat.v1.layers.conv2d(
        inputs=do_pool2, filters=num_filters, kernel_size=[3, 3], padding='same',
        activation=tf.nn.relu, name='3CNN',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    pool3 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv3, pool_size=[2, 2],
                                              strides=[2, 2])
    print('pool3: ' + str(pool3.get_shape()))
    do_pool3 = tf.compat.v1.layers.dropout(pool3, rate=0.25, training=is_training)
    conv4 = tf.compat.v1.layers.conv2d(
        inputs=do_pool3, filters=num_filters, kernel_size=[3, 3], padding='same',
        activation=tf.nn.relu, name='4CNN',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv4 = tf.compat.v1.layers.batch_normalization(conv4, training=is_training)
    pool4 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv4, pool_size=[2, 2],
                                              strides=[2, 2])
    print('pool4: ' + str(pool4.get_shape()))
    do_pool4 = tf.compat.v1.layers.dropout(pool4, rate=0.25, training=is_training)
    conv5 = tf.compat.v1.layers.conv2d(
        inputs=do_pool4, filters=num_filters, kernel_size=[3, 3], padding='same',
        activation=tf.nn.relu, name='5CNN',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv5 = tf.compat.v1.layers.batch_normalization(conv5, training=is_training)
    pool5 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv5, pool_size=[4, 4],
                                              strides=[4, 4])
    print('pool5: ' + str(pool5.get_shape()))
    flat_pool5 = tf.contrib.layers.flatten(pool5)
    do_pool5 = tf.compat.v1.layers.dropout(flat_pool5, rate=0.5, training=is_training)
    output = tf.compat.v1.layers.dense(
        inputs=do_pool5, activation=None, units=config['num_classes_dataset'],
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print('output: ' + str(output.get_shape()))
    return output
def timbre(x, is_training, config, num_filt=1):
    print('Input: ' + str(x.get_shape()))
    expanded_layer = tf.expand_dims(x, 3)
    input_layer = tf.compat.v1.layers.batch_normalization(expanded_layer,
                                                          training=is_training)
    # Pad along time so that different kernel heights keep the time axis aligned.
    input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    input_pad_5 = tf.pad(input_layer, [[0, 0], [2, 2], [0, 0], [0, 0]], 'CONSTANT')
    input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')

    def conv_bn_pool(inputs, filters, kernel_size):
        # conv -> BN -> max-pool over the full frequency axis -> squeeze;
        # this local helper replaces twelve verbatim repetitions of the
        # same four lines in the original.
        conv = tf.compat.v1.layers.conv2d(
            inputs=inputs, filters=filters, kernel_size=kernel_size, padding='valid',
            activation=tf.nn.relu,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        bn_conv = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
        pool = tf.compat.v1.layers.max_pooling2d(
            inputs=bn_conv, pool_size=[1, bn_conv.shape[2]],
            strides=[1, bn_conv.shape[2]])
        return tf.squeeze(pool, [2])

    y = config['yInput']
    # Twelve parallel timbral branches: kernel heights 7/5/3/1 crossed with
    # frequency spans of 80%, 60% and 20% of the input bins.
    p1 = conv_bn_pool(input_pad_7, 3 * num_filt, [7, int(0.8 * y)])
    p2 = conv_bn_pool(input_pad_5, 3 * num_filt, [5, int(0.8 * y)])
    p3 = conv_bn_pool(input_pad_3, 6 * num_filt, [3, int(0.8 * y)])
    p4 = conv_bn_pool(input_layer, 10 * num_filt, [1, int(0.8 * y)])
    p5 = conv_bn_pool(input_pad_7, 5 * num_filt, [7, int(0.6 * y)])
    p6 = conv_bn_pool(input_pad_5, 5 * num_filt, [5, int(0.6 * y)])
    p7 = conv_bn_pool(input_pad_3, 10 * num_filt, [3, int(0.6 * y)])
    p8 = conv_bn_pool(input_layer, 15 * num_filt, [1, int(0.6 * y)])
    p9 = conv_bn_pool(input_pad_7, 5 * num_filt, [7, int(0.2 * y)])
    p10 = conv_bn_pool(input_pad_5, 5 * num_filt, [5, int(0.2 * y)])
    p11 = conv_bn_pool(input_pad_3, 10 * num_filt, [3, int(0.2 * y)])
    p12 = conv_bn_pool(input_layer, 15 * num_filt, [1, int(0.2 * y)])
    pool = tf.concat([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12], 2)
    out_front_end = tf.expand_dims(pool, 3)
    # Dieleman-style backend on top of the concatenated front-end features.
    conv2 = tf.compat.v1.layers.conv2d(
        inputs=out_front_end, filters=32, kernel_size=[8, out_front_end.shape[2]],
        padding='valid', activation=tf.nn.relu, name='2cnnOut',
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print(conv2.get_shape())
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[4, 1],
                                              strides=[4, 1], name='2-pool')
    print(pool2.get_shape())
    flat_pool2 = tf.reshape(pool2, [-1, int(pool2.shape[1] * pool2.shape[2] * pool2.shape[3])])
    print(flat_pool2.shape)
    dense = tf.compat.v1.layers.dense(
        inputs=flat_pool2, activation=tf.nn.relu, units=100,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    output = tf.compat.v1.layers.dense(
        inputs=dense, activation=None, units=config['num_classes_dataset'],
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    return output
def musically_motivated_cnns(x, is_training, yInput, num_filt, type):
    expanded_layer = tf.expand_dims(x, 3)
    input_layer = tf.compat.v1.layers.batch_normalization(expanded_layer,
                                                          training=is_training)
    # (The original computed this identical pad twice, once unconditionally
    # and once under the 'timbral' branch.)
    input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    if '74' in type:
        f74 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128),
                            kernel_size=[7, int(0.4 * yInput)], is_training=is_training)
    if '77' in type:
        f77 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128),
                            kernel_size=[7, int(0.7 * yInput)], is_training=is_training)
    if 'temporal' in type:
        s1 = tempo_block(inputs=input_layer, filters=int(num_filt * 32),
                         kernel_size=[128, 1], is_training=is_training)
        s2 = tempo_block(inputs=input_layer, filters=int(num_filt * 32),
                         kernel_size=[64, 1], is_training=is_training)
        s3 = tempo_block(inputs=input_layer, filters=int(num_filt * 32),
                         kernel_size=[32, 1], is_training=is_training)
    if type == '7774timbraltemporal':
        return [f74, f77, s1, s2, s3]
    elif type == '74timbral':
        return [f74]
def timbral_block(inputs, filters, kernel_size, is_training, padding='valid',
                  activation=tf.nn.relu):
    conv = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters,
                                      kernel_size=kernel_size, padding=padding,
                                      activation=activation)
    bn_conv = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
    pool = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv,
                                             pool_size=[1, bn_conv.shape[2]],
                                             strides=[1, bn_conv.shape[2]])
    return tf.squeeze(pool, [2])
def tempo_block(inputs, filters, kernel_size, is_training, padding='same',
                activation=tf.nn.relu):
    conv = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters,
                                      kernel_size=kernel_size, padding=padding,
                                      activation=activation)
    bn_conv = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
    pool = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv,
                                             pool_size=[1, bn_conv.shape[2]],
                                             strides=[1, bn_conv.shape[2]])
    return tf.squeeze(pool, [2])
def dense_cnns(front_end_output, is_training, num_filt):
    # Three 1-D conv layers with residual connections; returns all intermediate features.
    front_end_pad = tf.pad(front_end_output, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv1 = tf.compat.v1.layers.conv1d(
        inputs=front_end_pad, filters=num_filt, kernel_size=7, padding='valid',
        activation=tf.nn.relu,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    bn_conv1_pad = tf.pad(bn_conv1, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv2 = tf.compat.v1.layers.conv1d(
        inputs=bn_conv1_pad, filters=num_filt, kernel_size=7, padding='valid',
        activation=tf.nn.relu,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    res_conv2 = tf.add(conv2, bn_conv1)
    bn_conv2_pad = tf.pad(res_conv2, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv3 = tf.compat.v1.layers.conv1d(
        inputs=bn_conv2_pad, filters=num_filt, kernel_size=7, padding='valid',
        activation=tf.nn.relu,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    res_conv3 = tf.add(conv3, res_conv2)
    return [front_end_output, bn_conv1, res_conv2, res_conv3]
def compute_audio_repr(audio_file, audio_repr_file):
    # Load audio and store either the raw waveform or a mel spectrogram as float16.
    audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
    if config['type'] == 'waveform':
        audio_repr = audio
        audio_repr = np.expand_dims(audio_repr, axis=1)
    elif config['spectrogram_type'] == 'mel':
        audio_repr = librosa.feature.melspectrogram(
            y=audio, sr=sr, hop_length=config['hop'], n_fft=config['n_fft'],
            n_mels=config['n_mels']).T
    print(audio_repr.shape)
    length = audio_repr.shape[0]
    audio_repr = audio_repr.astype(np.float16)
    with open(audio_repr_file, 'wb') as f:
        pickle.dump(audio_repr, f)
    return length
def do_process(files, index):
    try:
        [id, audio_file, audio_repr_file] = files[index]
        # Make sure the output directory exists.
        if not os.path.exists(audio_repr_file[:audio_repr_file.rfind('/') + 1]):
            path = Path(audio_repr_file[:audio_repr_file.rfind('/') + 1])
            path.mkdir(parents=True, exist_ok=True)
        length = compute_audio_repr(audio_file, audio_repr_file)
        # Append this file to the per-machine index TSV.
        fw = open(config_file.DATA_FOLDER + config['audio_representation_folder'] +
                  'index_' + str(config['machine_i']) + '.tsv', 'a')
        fw.write('%s\t%s\t%s\n' % (id,
                                   audio_repr_file[len(config_file.DATA_FOLDER):],
                                   audio_file[len(config_file.DATA_FOLDER):]))
        fw.close()
        print(str(index) + '/' + str(len(files)) + (' Computed: %s' % audio_file))
    except Exception as e:
        # Log failures to a per-machine error file.
        ferrors = open(config_file.DATA_FOLDER + config['audio_representation_folder'] +
                       'errors' + str(config['machine_i']) + '.txt', 'a')
        ferrors.write(audio_file + '\n')
        ferrors.write(str(e))
        ferrors.close()
        print('Error computing audio representation: ', audio_file)
        print(str(e))
def process_files(files): if DEBUG: print('WARNING: Parallelization is not used!') for index in range(0, len(files)): do_process(files, index) else: Parallel(n_jobs=config['num_processing_units'])((delayed(do_process)(files, index) for index in range(0, len(files))))
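The two functions above depend on module-level `config`, `config_file.DATA_FOLDER`, and `DEBUG` globals; a hypothetical setup showing the expected shape of the inputs (all paths and values are illustrative):

DEBUG = True  # serial processing; set False to parallelize with joblib
config = {
    'type': 'spectrogram', 'spectrogram_type': 'mel',
    'resample_sr': 16000, 'hop': 256, 'n_fft': 512, 'n_mels': 96,
    'audio_representation_folder': 'audio_representation/',
    'machine_i': 0, 'num_processing_units': 4,
}
# Each entry is (track id, source audio path, target pickle path).
files = [
    ('track_0001', '/data/audio/track_0001.mp3', '/data/audio_representation/track_0001.pk'),
]
process_files(files)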
def maybe_download(filename, work_directory): "Download the data from Yann's website, unless it's already here." if (not os.path.exists(work_directory)): os.mkdir(work_directory) filepath = os.path.join(work_directory, filename) if (not os.path.exists(filepath)): (filepath, _) = urllib.request.urlretrieve((SOURCE_URL + filename), filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath
def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return int(numpy.frombuffer(bytestream.read(4), dtype=dt))
def extract_images(filename): 'Extract the images into a 4D uint8 numpy array [index, y, x, depth].' print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if (magic != 2051): raise ValueError(('Invalid magic number %d in MNIST image file: %s' % (magic, filename))) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(((rows * cols) * num_images)) data = numpy.frombuffer(buf, dtype=numpy.uint8) data = data.reshape(num_images, rows, cols, 1) return data
def dense_to_one_hot(labels_dense, num_classes=10): 'Convert class labels from scalars to one-hot vectors.' num_labels = labels_dense.shape[0] index_offset = (numpy.arange(num_labels) * num_classes) labels_one_hot = numpy.zeros((num_labels, num_classes)) labels_one_hot.flat[(index_offset + labels_dense.ravel())] = 1 return labels_one_hot
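A worked example of the flat-index trick in dense_to_one_hot: row i gets its single 1 at flat position i * num_classes + labels[i]:

import numpy
labels = numpy.array([2, 0, 1])
print(dense_to_one_hot(labels, num_classes=3))
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]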
def extract_labels(filename, one_hot=False): 'Extract the labels into a 1D uint8 numpy array [index].' print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if (magic != 2049): raise ValueError(('Invalid magic number %d in MNIST label file: %s' % (magic, filename))) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) if one_hot: return dense_to_one_hot(labels) return labels
class DataSet(object): def __init__(self, images, labels, fake_data=False): if fake_data: self._num_examples = 10000 else: assert (images.shape[0] == labels.shape[0]), ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape)) self._num_examples = images.shape[0] assert (images.shape[3] == 1) images = images.reshape(images.shape[0], (images.shape[1] * images.shape[2])) images = images.astype(numpy.float32) images = numpy.multiply(images, (1.0 / 255.0)) self._images = images self._labels = labels self._epochs_completed = 0 self._index_in_epoch = 0 @property def images(self): return self._images @property def labels(self): return self._labels @property def num_examples(self): return self._num_examples @property def epochs_completed(self): return self._epochs_completed def next_batch(self, batch_size, fake_data=False): 'Return the next `batch_size` examples from this data set.' if fake_data: fake_image = [1.0 for _ in range(784)] fake_label = 0 return ([fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]) start = self._index_in_epoch self._index_in_epoch += batch_size if (self._index_in_epoch > self._num_examples): self._epochs_completed += 1 perm = numpy.arange(self._num_examples) numpy.random.shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] start = 0 self._index_in_epoch = batch_size assert (batch_size <= self._num_examples) end = self._index_in_epoch return (self._images[start:end], self._labels[start:end])
def read_data_sets(train_dir, fake_data=False, one_hot=False): class DataSets(object): pass data_sets = DataSets() if fake_data: data_sets.train = DataSet([], [], fake_data=True) data_sets.validation = DataSet([], [], fake_data=True) data_sets.test = DataSet([], [], fake_data=True) return data_sets TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = maybe_download(TRAIN_IMAGES, train_dir) train_images = extract_images(local_file) local_file = maybe_download(TRAIN_LABELS, train_dir) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = maybe_download(TEST_IMAGES, train_dir) test_images = extract_images(local_file) local_file = maybe_download(TEST_LABELS, train_dir) test_labels = extract_labels(local_file, one_hot=one_hot) validation_images = train_images[:VALIDATION_SIZE] validation_labels = train_labels[:VALIDATION_SIZE] train_images = train_images[VALIDATION_SIZE:] train_labels = train_labels[VALIDATION_SIZE:] data_sets.train = DataSet(train_images, train_labels) data_sets.validation = DataSet(validation_images, validation_labels) data_sets.test = DataSet(test_images, test_labels) return data_sets
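Typical usage of the loader above (SOURCE_URL must be defined to point at a mirror of the MNIST files; 'MNIST_data' is just a local cache directory):

mnist = read_data_sets('MNIST_data', one_hot=True)
images, labels = mnist.train.next_batch(100)  # images: 100 x 784, labels: 100 x 10
print(mnist.train.num_examples, mnist.validation.num_examples, mnist.test.num_examples)
# 55000 5000 10000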
def make_chain(): chain = [1] while (chain[(- 1)] != states[(- 1)]): choices = transitions[chain[(- 1)]] j = np.random.randint(len(choices)) chain.append(choices[j]) return chain
def valid_chain(chain): if (len(chain) == 0): return False if (chain[0] != states[0]): return False for i in range(1, len(chain)): if (chain[i] not in transitions[chain[(i - 1)]]): return False return True
def convert_chain(chain): sequence = '' for value in chain: sequence += aliases[value] return sequence
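The three helpers above rely on module-level `states`, `transitions`, and `aliases`; a minimal hypothetical configuration that makes them runnable:

import numpy as np

states = [1, 2, 3]                    # make_chain stops at states[-1]
transitions = {1: [2, 3], 2: [2, 3], 3: []}
aliases = {1: 'A', 2: 'B', 3: 'C'}

chain = make_chain()         # e.g. [1, 2, 2, 3]
assert valid_chain(chain)
print(convert_chain(chain))  # e.g. 'ABBC'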
def load_data_table(table, image_dir, corrupt_images=None): 'Read data table, find corresponding images, filter out corrupt, missing and MCI images, and return the samples as a pandas dataframe.' print('Loading dataframe for', table) df = pd.read_csv(table) print('Found', len(df), 'images in table') df['filepath'] = df.apply((lambda row: get_image_filepath(row, image_dir)), axis=1) len_before = len(df) if (corrupt_images is not None): df = df[df.apply((lambda row: ('{}/{}'.format(row['PTID'], row['Visit']) not in corrupt_images)), axis=1)] print('Filtered out', (len_before - len(df)), 'of', len_before, 'images because of failed preprocessing') len_before = len(df) df = df[df['filepath'].map(os.path.exists)] print('Filtered out', (len_before - len(df)), 'of', len_before, 'images because of missing files') len_before = len(df) df = df[(df['DX'] != 'MCI')] print('Filtered out', (len_before - len(df)), 'of', len_before, 'images that were MCI') print('Final dataframe contains', len(df), 'images from', len(df['PTID'].unique()), 'patients') print() return df
def load_data_table_3T(): 'Load the data table for all 3 Tesla images.' return load_data_table(table_3T, image_dir_3T, corrupt_images_3T)
def load_data_table_15T(): 'Load the data table for all 1.5 Tesla images.' return load_data_table(table_15T, image_dir_15T, corrupt_images_15T)
def load_data_table_both(): 'Load the data tables for all 1.5 Tesla and 3 Tesla images and combine them.' df_15T = load_data_table(table_15T, image_dir_15T, corrupt_images_15T) df_3T = load_data_table(table_3T, image_dir_3T, corrupt_images_3T) df = pd.concat([df_15T, df_3T]) return df
def get_image_filepath(df_row, root_dir=''): 'Return the filepath of the image that is described in the row of the data table.' filedir = os.path.join(df_row['PTID'], df_row['Visit'].replace(' ', '')) filename = '{}_{}_{}_{}_{}_Warped.nii.gz'.format(df_row['PTID'], df_row['Scan.Date'].replace('/', '-'), df_row['Visit'].replace(' ', ''), df_row['Image.ID'], df_row['DX']) return os.path.join(root_dir, filedir, filename)
class ADNIDataset(Dataset): '\n    PyTorch dataset that consists of MRI images and labels.\n    \n    Args:\n        filenames (iterable of strings): The filenames of the MRI images.\n        labels (iterable): The labels for the images.\n        mask (array): If not None (default), images are masked by multiplying with this array.\n        transform: Any transformations to apply to the images.\n    ' def __init__(self, filenames, labels, mask=None, transform=None): self.filenames = filenames self.labels = torch.LongTensor(labels) self.mask = mask self.transform = transform self.num_inputs = 1 self.num_targets = 1 self.mean = 0 self.std = 1 def __len__(self): return len(self.filenames) def __getitem__(self, idx): 'Return the image as a numpy array and the label.' label = self.labels[idx] struct_arr = utils.load_nifti(self.filenames[idx], mask=self.mask) struct_arr = ((struct_arr - self.mean) / (self.std + 1e-10)) struct_arr = struct_arr[None] struct_arr = torch.FloatTensor(struct_arr) if (self.transform is not None): struct_arr = self.transform(struct_arr) return (struct_arr, label) def image_shape(self): 'The shape of the MRI images.' return utils.load_nifti(self.filenames[0], mask=self.mask).shape def fit_normalization(self, num_sample=None, show_progress=False): '\n        Calculate the voxel-wise mean and std across the dataset for normalization.\n        \n        Args:\n            num_sample (int or None): If None (default), calculate the values across the complete dataset, \n                                      otherwise sample a number of images.\n            show_progress (bool): Show a progress bar during the calculation.\n        ' if (num_sample is None): num_sample = len(self) image_shape = self.image_shape() all_struct_arr = np.zeros((num_sample, image_shape[0], image_shape[1], image_shape[2])) sampled_filenames = np.random.choice(self.filenames, num_sample, replace=False) if show_progress: sampled_filenames = tqdm_notebook(sampled_filenames) for (i, filename) in enumerate(sampled_filenames): struct_arr = utils.load_nifti(filename, mask=self.mask) all_struct_arr[i] = struct_arr self.mean = all_struct_arr.mean(0) self.std = all_struct_arr.std(0) def get_raw_image(self, idx): 'Return the raw image at index idx (i.e. not normalized, no color channel, no transform).' return utils.load_nifti(self.filenames[idx], mask=self.mask)
def print_df_stats(df, df_train, df_val): 'Print some statistics about the patients and images in a dataset.' headers = ['Images', '-> AD', '-> CN', 'Patients', '-> AD', '-> CN'] def get_stats(df): df_ad = df[(df['DX'] == 'Dementia')] df_cn = df[(df['DX'] == 'CN')] return [len(df), len(df_ad), len(df_cn), len(df['PTID'].unique()), len(df_ad['PTID'].unique()), len(df_cn['PTID'].unique())] stats = [] stats.append((['All'] + get_stats(df))) stats.append((['Train'] + get_stats(df_train))) stats.append((['Val'] + get_stats(df_val))) print(tabulate(stats, headers=headers)) print()
def build_datasets(df, patients_train, patients_val, print_stats=True, normalize=True): '\n    Build PyTorch datasets based on a data table and a patient-wise train-test split.\n    \n    Args:\n        df (pandas dataframe): The data table from ADNI.\n        patients_train (iterable of strings): The patients to include in the train set.\n        patients_val (iterable of strings): The patients to include in the val set.\n        print_stats (boolean): Whether to print some statistics about the datasets.\n        normalize (boolean): Whether to calculate mean and std across the dataset for later normalization.\n    \n    Returns:\n        The train and val dataset.\n    ' df_train = df[df.apply((lambda row: (row['PTID'] in patients_train)), axis=1)] df_val = df[df.apply((lambda row: (row['PTID'] in patients_val)), axis=1)] if print_stats: print_df_stats(df, df_train, df_val) train_filenames = np.array(df_train['filepath']) val_filenames = np.array(df_val['filepath']) train_labels = np.array((df_train['DX'] == 'Dementia'), dtype=int) val_labels = np.array((df_val['DX'] == 'Dementia'), dtype=int) train_dataset = ADNIDataset(train_filenames, train_labels, mask=mask) val_dataset = ADNIDataset(val_filenames, val_labels, mask=mask) if normalize: print('Calculating mean and std for normalization:') train_dataset.fit_normalization(200, show_progress=True) (val_dataset.mean, val_dataset.std) = (train_dataset.mean, train_dataset.std) else: print('Dataset is not normalized, this could dramatically decrease performance') return (train_dataset, val_dataset)
def build_loaders(train_dataset, val_dataset): 'Build PyTorch data loaders from the datasets.' train_loader = DataLoader(train_dataset, batch_size=5, shuffle=True, num_workers=multiprocessing.cpu_count(), pin_memory=torch.cuda.is_available()) val_loader = DataLoader(val_dataset, batch_size=5, shuffle=False, num_workers=multiprocessing.cpu_count(), pin_memory=torch.cuda.is_available()) return (train_loader, val_loader)
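End-to-end sketch of the ADNI pipeline above (assumes the module-level `mask`, `table_15T`, `image_dir_15T`, and `corrupt_images_15T` globals are configured):

df = load_data_table_15T()
patients = df['PTID'].unique()
split = int(0.8 * len(patients))  # patient-wise split avoids leakage across scans
train_dataset, val_dataset = build_datasets(df, patients[:split], patients[split:])
train_loader, val_loader = build_loaders(train_dataset, val_dataset)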
class ClassificationModel3D(nn.Module): 'The model we use in the paper.' def __init__(self, dropout=0, dropout2=0): nn.Module.__init__(self) self.Conv_1 = nn.Conv3d(1, 8, 3) self.Conv_1_bn = nn.BatchNorm3d(8) self.Conv_2 = nn.Conv3d(8, 16, 3) self.Conv_2_bn = nn.BatchNorm3d(16) self.Conv_3 = nn.Conv3d(16, 32, 3) self.Conv_3_bn = nn.BatchNorm3d(32) self.Conv_4 = nn.Conv3d(32, 64, 3) self.Conv_4_bn = nn.BatchNorm3d(64) self.dense_1 = nn.Linear(5120, 128) self.dense_2 = nn.Linear(128, 64) self.dense_3 = nn.Linear(64, 2) self.relu = nn.ReLU() self.dropout = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout2) def forward(self, x): x = self.relu(self.Conv_1_bn(self.Conv_1(x))) x = F.max_pool3d(x, 2) x = self.relu(self.Conv_2_bn(self.Conv_2(x))) x = F.max_pool3d(x, 3) x = self.relu(self.Conv_3_bn(self.Conv_3(x))) x = F.max_pool3d(x, 2) x = self.relu(self.Conv_4_bn(self.Conv_4(x))) x = F.max_pool3d(x, 3) x = x.view(x.size(0), (- 1)) x = self.dropout(x) x = self.relu(self.dense_1(x)) x = self.dropout2(x) x = self.relu(self.dense_2(x)) x = self.dense_3(x) return x
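Shape smoke test for the model above. The dense_1 input size of 5120 implies a fixed input volume; a 193 x 229 x 193 grid (the 1 mm MNI-152 template size, a plausible shape for the registered ADNI images, so verify against your own data) flattens to exactly 64 * 4 * 5 * 4 = 5120 after the four conv/pool stages:

import torch
net = ClassificationModel3D()
logits = net(torch.zeros(1, 1, 193, 229, 193))  # [batch, channel, D, H, W]
print(logits.shape)  # torch.Size([1, 2])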
class KorolevModel(nn.Module): 'The model used in Korolev et al. 2017 (https://arxiv.org/abs/1701.06643).' def __init__(self): nn.Module.__init__(self) self.relu = nn.ReLU() self.conv = nn.Sequential(nn.Conv3d(1, 8, 3), self.relu, nn.Conv3d(8, 8, 3), self.relu, nn.BatchNorm3d(8), nn.MaxPool3d(2), nn.Conv3d(8, 16, 3), self.relu, nn.Conv3d(16, 16, 3), self.relu, nn.BatchNorm3d(16), nn.MaxPool3d(2), nn.Conv3d(16, 32, 3), self.relu, nn.Conv3d(32, 32, 3), self.relu, nn.Conv3d(32, 32, 3), self.relu, nn.BatchNorm3d(32), nn.MaxPool3d(2), nn.Conv3d(32, 64, 3), self.relu, nn.Conv3d(64, 64, 3), self.relu, nn.Conv3d(64, 64, 3), self.relu, nn.Conv3d(64, 64, 3), self.relu, nn.BatchNorm3d(64), nn.MaxPool3d(3)) self.fc = nn.Sequential(nn.Linear(2880, 128), self.relu, nn.Dropout(0.7), nn.Linear(128, 64), self.relu, nn.Linear(64, 1)) def forward(self, x): x = self.conv(x) x = x.view(x.size(0), (- 1)) x = self.fc(x) return x
def build_model(): 'Build the model as used in the paper, wrap it in a torchsample trainer and move it to cuda.' net = ClassificationModel3D(dropout=0.8, dropout2=0) optimizer = torch.optim.Adam(net.parameters(), lr=0.0001) loss_function = nn.CrossEntropyLoss() callbacks = [] trainer = torchsample.modules.ModuleTrainer(net) trainer.compile(loss=loss_function, optimizer=optimizer, metrics=[CategoricalAccuracyWithLogits()], callbacks=callbacks) if torch.cuda.is_available(): net.cuda() cuda_device = torch.cuda.current_device() print('Moved network to GPU') else: cuda_device = (- 1) print('GPU not available') return (net, trainer, cuda_device)
def train_model(trainer, train_loader, val_loader, cuda_device, num_epoch=1): 'Train and evaluate the model via torchsample.' trainer.fit_loader(train_loader, val_loader=val_loader, num_epoch=num_epoch, verbose=1, cuda_device=cuda_device)
def calculate_roc_auc(trainer, val_loader, cuda_device): y_val_pred = F.softmax(trainer.predict_loader(val_loader, cuda_device=cuda_device), dim=1).data.cpu().numpy() y_val_true = torch.cat([y for (x, y) in val_loader]).numpy() y_val_true = y_val_true[:len(y_val_pred)] return roc_auc_score(y_val_true, y_val_pred[:, 1])
class BinaryAccuracyWithLogits(torchsample.metrics.BinaryAccuracy): 'Same as torchsample.metrics.BinaryAccuracy, but applies a sigmoid function to the network output before calculating the accuracy. This is intended to be used in combination with BCEWithLogitsLoss.' def __call__(self, y_pred, y_true): return super(BinaryAccuracyWithLogits, self).__call__(torch.sigmoid(y_pred), y_true)
class CategoricalAccuracyWithLogits(torchsample.metrics.CategoricalAccuracy): 'Same as torchsample.metrics.CategoricalAccuracy, but applies a softmax function to the network output before calculating the accuracy. This is intended to be used in combination with CrossEntropyLoss.' def __call__(self, y_pred, y_true): return super(CategoricalAccuracyWithLogits, self).__call__(F.softmax(y_pred, dim=1), y_true)
class DailyDialogParser(): def __init__(self, path, sos, eos, eou): self.path = path self.sos = sos self.eos = eos self.eou = eou def get_dialogs(self): train_dialogs = self.process_file((self.path + 'train.txt')) validation_dialogs = self.process_file((self.path + 'validation.txt')) test_dialogs = self.process_file((self.path + 'test.txt')) return (train_dialogs, validation_dialogs, test_dialogs) def process_file(self, path): with open(path, 'r') as f: data = f.readlines() print('Parsing', path) return [self.process_raw_dialog(line) for line in data] def process_raw_dialog(self, raw_dialog): raw_utterances = raw_dialog.split('__eou__') return [self.process_raw_utterance(raw_utterance) for raw_utterance in raw_utterances if (not raw_utterance.isspace())] def process_raw_utterance(self, raw_utterance): raw_sentences = nltk.sent_tokenize(raw_utterance) utterance = [] for raw_sentence in raw_sentences: utterance.extend(self.process_raw_sentence(raw_sentence)) return (utterance + [self.eou]) def process_raw_sentence(self, raw_sentence): raw_sentence = raw_sentence.lower() raw_sentence = raw_sentence.split() return (([self.sos] + raw_sentence) + [self.eos])
class DPCollator(): def __init__(self, pad_token, reply_length=None): self.pad_token = pad_token self.reply_length = reply_length def __call__(self, batch): (contexts, replies) = zip(*batch) padded_contexts = self.pad(contexts) padded_replies = self.pad(replies, self.reply_length) return (padded_contexts, padded_replies) def pad(self, data, length=None): max_length = length if (max_length is None): max_length = max([len(row) for row in data]) padded_data = [] for row in data: padding = ([self.pad_token] * (max_length - len(row))) padded_data.append((list(row) + padding)) return LongTensor(padded_data)
class DPCorpus(object): SOS = '<s>' EOS = '</s>' EOU = '</u>' PAD = '<pad>' UNK = '<unk>' def __init__(self, dialog_parser=None, vocabulary_limit=None): if (dialog_parser is None): path = (os.path.dirname(os.path.realpath(__file__)) + '/daily_dialog/') dialog_parser = DailyDialogParser(path, self.SOS, self.EOS, self.EOU) (self.train_dialogs, self.validation_dialogs, self.test_dialogs) = dialog_parser.get_dialogs() print('Building vocabulary') self.build_vocab(vocabulary_limit) if (vocabulary_limit is not None): print('Replacing out of vocabulary from train dialogs by unk token.') self.limit_dialogs_to_vocabulary(self.train_dialogs) print('Replacing out of vocabulary from validation dialogs by unk token.') self.limit_dialogs_to_vocabulary(self.validation_dialogs) print('Replacing out of vocabulary from test dialogs by unk token.') self.limit_dialogs_to_vocabulary(self.test_dialogs) def build_vocab(self, vocabulary_limit): special_tokens = [self.PAD, self.UNK] all_words = self.flatten_dialogs(self.train_dialogs) vocabulary_counter = Counter(all_words) if (vocabulary_limit is not None): vocabulary_counter = vocabulary_counter.most_common((vocabulary_limit - len(special_tokens))) else: vocabulary_counter = vocabulary_counter.most_common() self.vocabulary = (special_tokens + [token for (token, _) in vocabulary_counter]) self.token_ids = {token: index for (index, token) in enumerate(self.vocabulary)} def flatten_dialogs(self, dialogs): all_words = [] for dialog in dialogs: for utterance in dialog: all_words.extend(utterance) return all_words def limit_dialogs_to_vocabulary(self, dialogs): for (d_i, dialog) in enumerate(dialogs): for (u_i, utterance) in enumerate(dialog): for (t_i, token) in enumerate(utterance): if (token not in self.vocabulary): dialogs[d_i][u_i][t_i] = self.UNK def utterance_to_ids(self, utterance): utterance_ids = [] for token in utterance: utterance_ids.append(self.token_ids.get(token, self.token_ids[self.UNK])) return utterance_ids def dialogs_to_ids(self, data): data_ids = [] for dialog in data: dialog_ids = [] for utterance in dialog: dialog_ids.append(self.utterance_to_ids(utterance)) data_ids.append(dialog_ids) return data_ids def ids_to_tokens(self, ids): padding_id = self.token_ids[self.PAD] return [self.vocabulary[id] for id in ids if (id != padding_id)] def token_to_id(self, token): return self.token_ids[token] def get_train_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None): return self.get_dataset(self.train_dialogs, context_size, min_reply_length, max_reply_length) def get_validation_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None): return self.get_dataset(self.validation_dialogs, context_size, min_reply_length, max_reply_length) def get_test_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None): return self.get_dataset(self.test_dialogs, context_size, min_reply_length, max_reply_length) def get_dataset(self, dialogs, context_size, min_reply_length, max_reply_length): dialogs_ids = self.dialogs_to_ids(dialogs) return DPDataset(self, dialogs_ids, context_size, min_reply_length, max_reply_length) def get_collator(self, reply_length=None): return DPCollator(self.token_ids[self.PAD], reply_length=reply_length)
class DPDataLoader(DataLoader): def __init__(self, dataset, batch_size=64): if (dataset is None): corpus = DPCorpus(vocabulary_limit=5000) dataset = corpus.get_train_dataset(2, 5, 20) collator = dataset.corpus.get_collator(reply_length=20) super().__init__(dataset, batch_size=batch_size, collate_fn=collator, shuffle=True, drop_last=True)
class DPDataset(Dataset): def __init__(self, corpus, dialogs, context_size=2, min_reply_length=None, max_reply_length=None): self.corpus = corpus self.contexts = [] self.replies = [] for dialog in dialogs: max_start_i = (len(dialog) - context_size) for start_i in range(max_start_i): reply = dialog[(start_i + context_size)] context = [] for i in range(start_i, (start_i + context_size)): context.extend(dialog[i]) if (((min_reply_length is None) or (len(reply) >= min_reply_length)) and ((max_reply_length is None) or (len(reply) <= max_reply_length))): self.contexts.append(context) self.replies.append(reply) def __len__(self): return len(self.contexts) def __getitem__(self, item): context = self.contexts[item] replies = self.replies[item] return (LongTensor(context), LongTensor(replies))
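Sketch of the dialog pipeline above: parse DailyDialog, build a capped vocabulary, and iterate padded (context, reply) batches (assumes the DailyDialog text files sit in the daily_dialog/ directory next to this module; sizes are illustrative):

corpus = DPCorpus(vocabulary_limit=5000)
dataset = corpus.get_train_dataset(context_size=2, min_reply_length=5, max_reply_length=20)
loader = DPDataLoader(dataset, batch_size=64)
for contexts, replies in loader:
    print(contexts.shape, replies.shape)  # replies are padded to the collator's reply_length
    break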
class Discriminator(nn.Module): def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, dropout=0.2, device='cpu'): super(Discriminator, self).__init__() self.hidden_dim = hidden_dim self.embedding_dim = embedding_dim self.max_seq_len = max_seq_len self.device = device self.embeddings = nn.Embedding(vocab_size, embedding_dim) self.gru = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout) self.gru2hidden = nn.Linear(((2 * 2) * hidden_dim), hidden_dim) self.dropout_linear = nn.Dropout(p=dropout) self.embeddings2 = nn.Embedding(vocab_size, embedding_dim) self.gru2 = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout) self.gru2hidden2 = nn.Linear(((2 * 2) * hidden_dim), hidden_dim) self.dropout_linear2 = nn.Dropout(p=dropout) self.hidden2out = nn.Linear((2 * hidden_dim), 1) def init_hidden(self, batch_size): h = autograd.Variable(torch.zeros(((2 * 2) * 1), batch_size, self.hidden_dim)).to(self.device) return h def forward(self, reply, context, hidden, hidden2): emb = self.embeddings(reply) emb = emb.permute(1, 0, 2) (_, hidden) = self.gru(emb, hidden) hidden = hidden.permute(1, 0, 2).contiguous() out = self.gru2hidden(hidden.view((- 1), (4 * self.hidden_dim))) out = torch.tanh(out) out_reply = self.dropout_linear(out) emb = self.embeddings2(context) emb = emb.permute(1, 0, 2) (_, hidden) = self.gru2(emb, hidden2) hidden = hidden.permute(1, 0, 2).contiguous() out = self.gru2hidden2(hidden.view((- 1), (4 * self.hidden_dim))) out = torch.tanh(out) out_context = self.dropout_linear2(out) out = self.hidden2out(torch.cat((out_reply, out_context), 1)) out = torch.sigmoid(out) return out def batchClassify(self, reply, context): '\n        Classifies a batch of sequences.\n        Inputs: reply, context\n            - reply: batch_size x seq_len\n            - context: batch_size x seq_len\n        Returns: out\n            - out: batch_size ([0,1] score)\n        ' h = self.init_hidden(reply.size()[0]) h2 = self.init_hidden(context.size()[0]) out = self.forward(reply.long(), context.long(), h, h2) return out.view((- 1)) def batchBCELoss(self, reply, context, target): '\n        Returns Binary Cross Entropy Loss for discriminator.\n        Inputs: reply, context, target\n            - reply: batch_size x seq_len\n            - context: batch_size x seq_len\n            - target: batch_size (binary 1/0)\n        ' loss_fn = nn.BCELoss() out = self.batchClassify(reply, context) return loss_fn(out, target)
def greedy_match(fileone, filetwo, w2v): res1 = greedy_score(fileone, filetwo, w2v) res2 = greedy_score(filetwo, fileone, w2v) res_sum = ((res1 + res2) / 2.0) return (np.mean(res_sum), ((1.96 * np.std(res_sum)) / float(len(res_sum))), np.std(res_sum))
def greedy_score(fileone, filetwo, w2v): with open(fileone, 'r') as f1: r1 = f1.readlines() with open(filetwo, 'r') as f2: r2 = f2.readlines() dim = w2v.layer1_size scores = [] for i in range(len(r1)): tokens1 = r1[i].strip().split(' ') tokens2 = r2[i].strip().split(' ') y_count = 0 x_count = 0 o = 0.0 Y = np.zeros((dim, 1)) for tok in tokens2: Y = np.hstack((Y, w2v[tok].detach().cpu().numpy().reshape((dim, 1)))) y_count += 1 for tok in tokens1: tmp = w2v[tok].detach().cpu().numpy().reshape((1, dim)).dot(Y) o += np.max(tmp) x_count += 1 if ((x_count < 1) or (y_count < 1)): scores.append(0) continue o /= float(x_count) scores.append(o) return np.asarray(scores)
def extrema_score(fileone, filetwo, w2v): with open(fileone, 'r') as f1: r1 = f1.readlines() with open(filetwo, 'r') as f2: r2 = f2.readlines() scores = [] for i in range(len(r1)): tokens1 = r1[i].strip().split(' ') tokens2 = r2[i].strip().split(' ') X = [] for tok in tokens1: X.append(w2v[tok].detach().cpu().numpy()) Y = [] for tok in tokens2: Y.append(w2v[tok].detach().cpu().numpy()) if (np.linalg.norm(X) < 1e-11): continue if (np.linalg.norm(Y) < 1e-11): scores.append(0) continue xmax = np.max(X, 0) xmin = np.min(X, 0) xtrema = [] for j in range(len(xmax)): if (np.abs(xmin[j]) > xmax[j]): xtrema.append(xmin[j]) else: xtrema.append(xmax[j]) X = np.array(xtrema) ymax = np.max(Y, 0) ymin = np.min(Y, 0) ytrema = [] for j in range(len(ymax)): if (np.abs(ymin[j]) > ymax[j]): ytrema.append(ymin[j]) else: ytrema.append(ymax[j]) Y = np.array(ytrema) o = ((np.dot(X, Y.T) / np.linalg.norm(X)) / np.linalg.norm(Y)) scores.append(o) scores = np.asarray(scores) return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
def average(fileone, filetwo, w2v): with open(fileone, 'r') as f1: r1 = f1.readlines() with open(filetwo, 'r') as f2: r2 = f2.readlines() dim = w2v.layer1_size scores = [] for i in range(len(r1)): tokens1 = r1[i].strip().split(' ') tokens2 = r2[i].strip().split(' ') X = np.zeros((dim,)) for tok in tokens1: X += w2v[tok].detach().cpu().numpy() Y = np.zeros((dim,)) for tok in tokens2: Y += w2v[tok].detach().cpu().numpy() if (np.linalg.norm(X) < 1e-11): continue if (np.linalg.norm(Y) < 1e-11): scores.append(0) continue X = (X / np.linalg.norm(X)) Y = (Y / np.linalg.norm(Y)) o = np.dot(X, Y.T) scores.append(o) scores = np.asarray(scores) return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
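The three metric functions above expect `w2v` to act as a token-to-vector lookup whose vectors are torch tensors and that exposes a gensim-style `layer1_size` attribute; a minimal stand-in for experimentation (file names are hypothetical, one sentence per line in each file, and every token must be present in the lookup):

import torch

class EmbeddingLookup:
    def __init__(self, vocab, dim=300):
        self.layer1_size = dim
        self.vectors = {w: torch.randn(dim) for w in vocab}
    def __getitem__(self, token):
        return self.vectors[token]

w2v = EmbeddingLookup(['hello', 'world', 'hi', 'there'])
mean, conf95, std = average('generated.txt', 'reference.txt', w2v)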
def prepare_discriminator_data(pos_samples, neg_samples, gpu=False): '\n Takes positive (target) samples, negative (generator) samples and prepares inp and target data for discriminator.\n\n Inputs: pos_samples, neg_samples\n - pos_samples: pos_size x seq_len\n - neg_samples: neg_size x seq_len\n\n Returns: inp, target\n - inp: (pos_size + neg_size) x seq_len\n - target: pos_size + neg_size (boolean 1/0)\n ' inp = torch.cat((pos_samples, neg_samples), 0).type(torch.LongTensor) target = torch.ones((pos_samples.size()[0] + neg_samples.size()[0])) target[pos_samples.size()[0]:] = 0 perm = torch.randperm(target.size()[0]) target = target[perm] inp = inp[perm] inp = Variable(inp) target = Variable(target) if gpu: inp = inp.cuda() target = target.cuda() return (inp, target)
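Hypothetical usage: mix real replies with generator samples and shuffle them for one discriminator update (vocabulary size and sequence length are illustrative):

import torch
pos = torch.randint(0, 5000, (32, 20))  # real replies, batch_size x seq_len
neg = torch.randint(0, 5000, (32, 20))  # generated replies
inp, target = prepare_discriminator_data(pos, neg, gpu=torch.cuda.is_available())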
def load_data(path='dataset.pickle'): '\n Load data set\n ' if (not os.path.isfile(path)): corpus = DPCorpus(vocabulary_limit=VOCAB_SIZE) train_dataset = corpus.get_train_dataset(min_reply_length=MIN_SEQ_LEN, max_reply_length=MAX_SEQ_LEN) with open(path, 'wb') as handle: pickle.dump(train_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL) train_data_loader = DPDataLoader(train_dataset, batch_size=BATCH_SIZE) else: with open(path, 'rb') as handle: train_dataset = pickle.load(handle) train_data_loader = DPDataLoader(train_dataset, batch_size=BATCH_SIZE) return train_data_loader
class ReplayMemory(): def __init__(self, capacity): self.capacity = capacity self.memory = [] def push(self, transition): if (len(self.memory) == self.capacity): del self.memory[0] self.memory.append(transition) def push_batch(self, transitions): for transition in transitions: self.push(transition) def sample(self, batch_size): random_ints = np.random.randint(0, len(self.memory), size=batch_size) sample = [self.memory[random_int] for random_int in random_ints] return sample def __len__(self): return len(self.memory)
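Minimal usage of the replay buffer above:

memory = ReplayMemory(capacity=10000)
for t in range(100):
    memory.push((t, 'action', 0.0, t + 1))  # placeholder transition tuples
batch = memory.sample(batch_size=32)        # uniform sampling, with replacement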
class Attention(nn.Module): '\n Applies an attention mechanism on the output features from the decoder.\n\n .. math::\n \\begin{array}{ll}\n x = context*output \\\\\n attn = exp(x_i) / sum_j exp(x_j) \\\\\n output = \\tanh(w * (attn * context) + b * output)\n \\end{array}\n\n Args:\n dim(int): The number of expected features in the output\n\n Inputs: output, context\n - **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.\n - **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.\n\n Outputs: output, attn\n - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.\n - **attn** (batch, output_len, input_len): tensor containing attention weights.\n\n Attributes:\n linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.\n mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.\n\n Examples::\n\n >>> attention = seq2seq.models.Attention(256)\n >>> context = Variable(torch.randn(5, 3, 256))\n >>> output = Variable(torch.randn(5, 5, 256))\n >>> output, attn = attention(output, context)\n\n ' def __init__(self, dim): super(Attention, self).__init__() self.linear_out = nn.Linear((dim * 2), dim) self.mask = None def set_mask(self, mask): '\n Sets indices to be masked\n\n Args:\n mask (torch.Tensor): tensor containing indices to be masked\n ' self.mask = mask def forward(self, output, context): batch_size = output.size(0) hidden_size = output.size(2) input_size = context.size(1) attn = torch.bmm(output, context.transpose(1, 2)) if (self.mask is not None): attn.data.masked_fill_(self.mask, (- float('inf'))) attn = F.softmax(attn.view((- 1), input_size), dim=1).view(batch_size, (- 1), input_size) mix = torch.bmm(attn, context) combined = torch.cat((mix, output), dim=2) output = torch.tanh(self.linear_out(combined.view((- 1), (2 * hidden_size)))).view(batch_size, (- 1), hidden_size) return (output, attn)
class BaseRNN(nn.Module): "\n Applies a multi-layer RNN to an input sequence.\n Note:\n Do not use this class directly, use one of the sub classes.\n Args:\n vocab_size (int): size of the vocabulary\n max_len (int): maximum allowed length for the sequence to be processed\n hidden_size (int): number of features in the hidden state `h`\n input_dropout_p (float): dropout probability for the input sequence\n dropout_p (float): dropout probability for the output sequence\n n_layers (int): number of recurrent layers\n rnn_cell (str): type of RNN cell (Eg. 'LSTM' , 'GRU')\n\n Inputs: ``*args``, ``**kwargs``\n - ``*args``: variable length argument list.\n - ``**kwargs``: arbitrary keyword arguments.\n\n Attributes:\n SYM_MASK: masking symbol\n SYM_EOS: end-of-sequence symbol\n " SYM_MASK = 'MASK' SYM_EOS = 'EOS' def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell): super(BaseRNN, self).__init__() self.vocab_size = vocab_size self.max_len = max_len self.hidden_size = hidden_size self.n_layers = n_layers self.input_dropout_p = input_dropout_p self.input_dropout = nn.Dropout(p=input_dropout_p) if (rnn_cell.lower() == 'lstm'): self.rnn_cell = nn.LSTM elif (rnn_cell.lower() == 'gru'): self.rnn_cell = nn.GRU else: raise ValueError('Unsupported RNN Cell: {0}'.format(rnn_cell)) self.dropout_p = dropout_p def forward(self, *args, **kwargs): raise NotImplementedError()
class EncoderRNN(BaseRNN): '\n    Applies a multi-layer RNN to an input sequence.\n\n    Args:\n        vocab_size (int): size of the vocabulary\n        max_len (int): a maximum allowed length for the sequence to be processed\n        hidden_size (int): the number of features in the hidden state `h`\n        input_dropout_p (float, optional): dropout probability for the input sequence (default: 0)\n        dropout_p (float, optional): dropout probability for the output sequence (default: 0)\n        n_layers (int, optional): number of recurrent layers (default: 1)\n        bidirectional (bool, optional): if True, becomes a bidirectional encoder (default: False)\n        rnn_cell (str, optional): type of RNN cell (default: gru)\n        variable_lengths (bool, optional): whether to use variable-length RNN (default: False)\n        embedding (torch.Tensor, optional): Pre-trained embedding. The size of the tensor has to match\n            the size of the embedding parameter: (vocab_size, hidden_size). The embedding layer would be initialized\n            with the tensor if provided (default: None).\n        update_embedding (bool, optional): If the embedding should be updated during training (default: False).\n\n    Inputs: inputs, input_lengths\n        - **inputs**: list of sequences, whose length is the batch size and within which each sequence is a list of token IDs.\n        - **input_lengths** (list of int, optional): list that contains the lengths of sequences\n            in the mini-batch, it must be provided when using variable length RNN (default: `None`)\n\n    Outputs: output, hidden\n        - **output** (batch, seq_len, hidden_size): tensor containing the encoded features of the input sequence\n        - **hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the features in the hidden state `h`\n\n    Examples::\n\n         >>> encoder = EncoderRNN(input_vocab, max_seq_length, hidden_size)\n         >>> output, hidden = encoder(input)\n\n    ' def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1, bidirectional=False, rnn_cell='gru', variable_lengths=False, embedding=None, update_embedding=True): super(EncoderRNN, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell) self.variable_lengths = variable_lengths self.embedding = nn.Embedding(vocab_size, hidden_size) if (embedding is not None): self.embedding.weight = nn.Parameter(embedding) self.embedding.weight.requires_grad = update_embedding self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout=dropout_p) def forward(self, input_var, input_lengths=None): '\n        Applies a multi-layer RNN to an input sequence.\n\n        Args:\n            input_var (batch, seq_len): tensor containing the features of the input sequence.\n            input_lengths (list of int, optional): A list that contains the lengths of sequences\n              in the mini-batch\n\n        Returns: output, hidden\n            - **output** (batch, seq_len, hidden_size): variable containing the encoded features of the input sequence\n            - **hidden** (num_layers * num_directions, batch, hidden_size): variable containing the features in the hidden state h\n        ' embedded = self.embedding(input_var) embedded = self.input_dropout(embedded) if self.variable_lengths: embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True) (output, hidden) = self.rnn(embedded) if self.variable_lengths: (output, _) = nn.utils.rnn.pad_packed_sequence(output, batch_first=True) return (output, hidden)
class Seq2seq(nn.Module): ' Standard sequence-to-sequence architecture with configurable encoder\n and decoder.\n\n Args:\n encoder (EncoderRNN): object of EncoderRNN\n decoder (DecoderRNN): object of DecoderRNN\n decode_function (func, optional): function to generate symbols from output hidden states (default: F.log_softmax)\n\n Inputs: input_variable, input_lengths, target_variable, teacher_forcing_ratio\n - **input_variable** (list, option): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. This information is forwarded to the encoder.\n - **input_lengths** (list of int, optional): A list that contains the lengths of sequences\n in the mini-batch, it must be provided when using variable length RNN (default: `None`)\n - **target_variable** (list, optional): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. This information is forwarded to the decoder.\n - **teacher_forcing_ratio** (int, optional): The probability that teacher forcing will be used. A random number\n is drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,\n teacher forcing would be used (default is 0)\n\n Outputs: decoder_outputs, decoder_hidden, ret_dict\n - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the\n outputs of the decoder.\n - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden\n state of the decoder.\n - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers\n representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of\n predicted token IDs, *KEY_INPUT* : target outputs if provided for decoding, *KEY_ATTN_SCORE* : list of\n sequences, where each list is of attention weights }.\n\n ' def __init__(self, encoder, decoder, decode_function=F.log_softmax): super(Seq2seq, self).__init__() self.encoder = encoder self.decoder = decoder self.decode_function = decode_function def flatten_parameters(self): self.encoder.rnn.flatten_parameters() self.decoder.rnn.flatten_parameters() def forward(self, input_variable, input_lengths=None, target_variable=None, teacher_forcing_ratio=0, sample=False): (encoder_outputs, encoder_hidden) = self.encoder(input_variable, input_lengths) result = self.decoder(inputs=target_variable, encoder_hidden=encoder_hidden, encoder_outputs=encoder_outputs, function=self.decode_function, teacher_forcing_ratio=teacher_forcing_ratio, sample=sample) return result
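Wiring sketch for the Seq2seq wrapper above (a DecoderRNN with a matching interface is assumed but not defined in this file, so its constructor arguments here are hypothetical; sizes are illustrative):

import torch
encoder = EncoderRNN(vocab_size=5000, max_len=20, hidden_size=256, rnn_cell='gru')
decoder = DecoderRNN(5000, 20, 256, rnn_cell='gru')  # hypothetical signature
model = Seq2seq(encoder, decoder)
src = torch.randint(0, 5000, (8, 20))
tgt = torch.randint(0, 5000, (8, 20))
decoder_outputs, decoder_hidden, ret_dict = model(src, target_variable=tgt, teacher_forcing_ratio=1.0)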