# (removed dataset-export artifact: markdown table header "code stringlengths 17 6.64M | / |---|")
def collate_fn(batch):
    """Merge a list of sample dicts into a dict of lists, stacking 'label' tensors.

    All keys are collected into Python lists except 'label', which is stacked
    into a single tensor (images may vary in size, so they stay as a list).
    """
    merged = defaultdict(list)
    for sample in batch:
        for key in sample:
            merged[key].append(sample[key])
    merged['label'] = torch.stack(merged['label'])
    return merged
|
def collate_fn_enrico(batch):
    """Merge a list of sample dicts into a dict of lists; 'label' becomes a long tensor.

    Unlike collate_fn, labels here are plain ints (class indices), so they are
    converted with torch.tensor rather than stacked.
    """
    merged = defaultdict(list)
    for sample in batch:
        for key, value in sample.items():
            merged[key].append(value)
    merged['label'] = torch.tensor(merged['label'], dtype=torch.long)
    return merged
|
class EnricoImageDataset(torch.utils.data.Dataset):
    """Enrico screenshot topic-classification dataset.

    Reads screen ids and topic labels from a CSV, keeps only the ids listed in
    the split file `id_list_path`, and maps topic strings to class indices via
    `class_map_file`. Items are dicts: {'image': tensor, 'label': int or one-hot}.
    """

    def __init__(self, id_list_path, csv='../../metadata/screenclassification/design_topics.csv', class_map_file='../../metadata/screenclassification/class_map_enrico.json', img_folder=(os.environ['SM_CHANNEL_TRAINING'] if ('SM_CHANNEL_TRAINING' in os.environ) else '../../downloads/enrico/screenshots'), img_size=128, ra_num_ops=(- 1), ra_magnitude=(- 1), one_hot_labels=False):
        super(EnricoImageDataset, self).__init__()
        self.csv = pd.read_csv(csv)
        self.img_folder = img_folder
        self.one_hot_labels = one_hot_labels
        # base pipeline: resize -> tensor -> normalize each channel to roughly [-1, 1]
        img_transforms = [transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        # RandAugment is prepended only when both knobs are positive (training-time augmentation)
        if ((ra_num_ops > 0) and (ra_magnitude > 0)):
            img_transforms = ([transforms.RandAugment(ra_num_ops, ra_magnitude)] + img_transforms)
        self.img_transforms = transforms.Compose(img_transforms)
        self.image_names = list(self.csv['screen_id'])
        self.labels = list(self.csv['topic'])
        # NOTE(review): class_counter is computed over the FULL csv, before the split
        # filtering below — confirm global (not per-split) frequencies are intended,
        # since EnricoDataModule uses this for sampler weights
        self.class_counter = Counter(self.labels)
        # restrict to the ids listed in the requested split file
        with open(id_list_path, 'r') as f:
            split_ids = set(json.load(f))
        keep_inds = [i for i in range(len(self.image_names)) if (str(self.image_names[i]) in split_ids)]
        self.image_names = [self.image_names[i] for i in keep_inds]
        self.labels = [self.labels[i] for i in keep_inds]
        with open(class_map_file, 'r') as f:
            map_dict = json.load(f)
        self.label2Idx = map_dict['label2Idx']  # topic string -> class index
        self.idx2Label = map_dict['idx2Label']  # class index (as str) -> topic string

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, index):
        """Load one screenshot and its label (int index, or one-hot long tensor)."""
        img_path = os.path.join(self.img_folder, (str(self.image_names[index]) + '.jpg'))
        image = Image.open(img_path).convert('RGB')
        image = self.img_transforms(image)
        targets = self.label2Idx[self.labels[index]]
        if self.one_hot_labels:
            # makeOneHotVec is defined elsewhere in the project — presumably returns
            # a 0/1 list of length num_classes; verify against its definition
            targets = torch.tensor(makeOneHotVec(targets, len(self.idx2Label.keys())), dtype=torch.long)
        return {'image': image, 'label': targets}
|
class CombinedImageDataset(torch.utils.data.IterableDataset):
    """Infinite stream that mixes several map-style datasets with given weights.

    Each draw first picks a member dataset according to `prob_list`, then a
    uniformly random item from that dataset.
    """

    def __init__(self, ds_list, prob_list):
        super(CombinedImageDataset, self).__init__()
        self.ds_list = ds_list      # member datasets (need __len__ and __getitem__)
        self.prob_list = prob_list  # relative sampling weights, parallel to ds_list

    def __iter__(self):
        while True:
            picked = choices(list(range(len(self.ds_list))), self.prob_list)[0]
            dataset = self.ds_list[picked]
            item_index = int(random.random() * len(dataset))
            yield dataset.__getitem__(item_index)
|
class SilverMultilabelImageDataset(torch.utils.data.Dataset):
    """Silver-labeled (model-predicted) WebUI screenshot dataset.

    The header-less CSV has one row per screenshot: a path column followed by
    one score column per class. For each class the top-K scoring rows are kept.
    With one_hot_labels=True each kept row yields its full score vector as the
    label; otherwise each kept row is expanded into P samples, one per top-P
    class index.
    """

    def __init__(self, id_list_path=None, silver_id_list_path_ignores=None, K=150, P=1, csv='../../metadata/screenclassification/silver_webui-multi_topic.csv', img_folder=(os.environ['SM_CHANNEL_TRAINING'] if ('SM_CHANNEL_TRAINING' in os.environ) else '../../downloads/ds'), img_size=128, one_hot_labels=False, ra_num_ops=(- 1), ra_magnitude=(- 1)):
        super(SilverMultilabelImageDataset, self).__init__()
        # infer the class count from the first line: path column + N score columns
        with open(csv, 'r') as file:
            first_line = file.readline()
            num_classes = (len(first_line.split(',')) - 1)
        self.num_classes = num_classes
        self.one_hot_labels = one_hot_labels
        self.K = K  # rows kept per class (by score)
        self.P = P  # class indices emitted per kept row (hard-label mode)
        self.csv = pd.read_csv(csv, names=(['screenshot_path'] + [('class_' + str(i)) for i in range(num_classes)]))
        for i in range(num_classes):
            self.csv[('class_' + str(i))] = self.csv[('class_' + str(i))].astype(dtype='float')
        self.img_folder = img_folder
        # resize -> tensor -> normalize; optional RandAugment prepended for training
        img_transforms = [transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        if ((ra_num_ops > 0) and (ra_magnitude > 0)):
            img_transforms = ([transforms.RandAugment(ra_num_ops, ra_magnitude)] + img_transforms)
        self.img_transforms = transforms.Compose(img_transforms)
        print('total csv rows', len(self.csv.index))
        if (id_list_path is not None):
            # keep only rows whose top-level folder (before the first '/') is in the split
            with open(id_list_path, 'r') as f:
                split_ids = set(json.load(f))
            self.csv['split_id'] = self.csv['screenshot_path'].str.replace('\\', '/')
            self.csv = self.csv[self.csv['split_id'].str.contains('/')]
            self.csv['split_id'] = self.csv['split_id'].str.split('/').str[0]
            self.csv = self.csv[self.csv['split_id'].isin(split_ids)]
            self.csv = self.csv.reset_index(drop=True)
            print('filtered csv rows', len(self.csv.index))
        if (silver_id_list_path_ignores is not None):
            # drop rows whose path stem (before the first '.') appears in any ignore list
            all_ignores = set()
            for ignore_path in silver_id_list_path_ignores:
                with open(ignore_path, 'r') as f:
                    all_ignores |= set(json.load(f))
            # NOTE(review): str.contains('.') is treated as a regex and matches ANY
            # character, so this only drops empty/NaN paths — confirm whether a
            # literal dot check (regex=False) was intended
            self.csv = self.csv[self.csv['screenshot_path'].str.contains('.')]
            self.csv['split_id'] = self.csv['screenshot_path'].str.split('.').str[0]
            self.csv = self.csv[(~ self.csv['split_id'].isin(all_ignores))]
            self.csv = self.csv.reset_index(drop=True)
            print('filtered csv rows2', len(self.csv.index))
        # union of the top-K rows per class
        keep_inds = []
        for i in range(num_classes):
            keep_inds.extend(list(self.csv.nlargest(K, ('class_' + str(i))).index.values))
        keep_inds = set(keep_inds)
        df_mat = self.csv[[('class_' + str(i)) for i in range(num_classes)]]
        image_names = []
        image_labels = []
        for i in keep_inds:
            if one_hot_labels:
                # soft labels: the full per-class score vector for this row
                image_names.append(self.csv.iloc[i]['screenshot_path'])
                image_labels.append(torch.tensor(df_mat.iloc[i].to_numpy()))
            else:
                # hard labels: one (image, class-index) sample per top-P class
                idxs = np.argsort(df_mat.iloc[i].to_numpy(), axis=(- 1))[(- P):]
                image_name = self.csv.iloc[i]['screenshot_path']
                for idx in idxs:
                    image_names.append(image_name)
                    image_labels.append(idx)
        self.image_names = image_names
        self.labels = image_labels
        # NOTE(review): with one_hot_labels=True the labels are tensors, so Counter
        # keys on object identity and the counts are not meaningful — verify unused
        self.class_counter = Counter(self.labels)

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, index):
        """Load one sample; on a failed image read, fall through to the next index."""
        index = (index % len(self.image_names))

        def tryAnother():
            return self.__getitem__((index + 1))
        try:
            img_path = os.path.join(self.img_folder, str(self.image_names[index])).replace('\\', '/')
            image = Image.open(img_path).convert('RGB')
            image = self.img_transforms(image)
            targets = self.labels[index]
            return {'image': image, 'label': targets}
        except Exception:
            # bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit. NOTE(review): the retry recursion is unbounded if
            # every image in the dataset is unreadable.
            return tryAnother()
|
class SilverDataModule(pl.LightningDataModule):
    """Data module mixing gold Enrico labels with silver (model-predicted) WebUI labels.

    Training draws from an infinite CombinedImageDataset mixing Enrico and silver
    data at a fixed 1:14 ratio; validation and test use gold Enrico splits.
    """

    def __init__(self, batch_size=16, num_workers=0, silver_id_list_path=None, silver_id_list_path_ignores=None, ra_num_ops=2, ra_magnitude=9, P=1, K=150, silver_csv='../../metadata/screenclassification/silver_webui-multi_topic.csv', img_folder='../../downloads/ds'):
        super(SilverDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        ds1 = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_train_ids.json', one_hot_labels=True, ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude)
        ds2 = SilverMultilabelImageDataset(csv=silver_csv, img_folder=img_folder, id_list_path=silver_id_list_path, silver_id_list_path_ignores=silver_id_list_path_ignores, P=P, K=K, one_hot_labels=True, ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude)
        # 1 part gold Enrico to 14 parts silver WebUI
        combined_ds = CombinedImageDataset([ds1, ds2], [(1 / 15), (14 / 15)])
        self.train_dataset = combined_ds
        self.val_dataset = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_val_ids.json')
        self.test_dataset = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_test_ids.json')

    def train_dataloader(self):
        # NOTE(review): collate_fn_silver_multi is not defined in this file —
        # verify it is imported/defined elsewhere in the project
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=collate_fn_silver_multi)

    def val_dataloader(self):
        # NOTE(review): collate_fn_silver is not defined in this file — verify
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=collate_fn_silver)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=collate_fn_silver)
|
class EnricoDataModule(pl.LightningDataModule):
    """LightningDataModule for supervised Enrico topic classification.

    Training uses a class-balanced WeightedRandomSampler; val/test are plain loaders.
    """

    def __init__(self, batch_size=16, num_workers=4, img_size=128, ra_num_ops=(- 1), ra_magnitude=(- 1)):
        super(EnricoDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        # RandAugment knobs are only forwarded to the train split (val/test keep -1 defaults)
        self.train_dataset = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_train_ids.json', ra_num_ops=ra_num_ops, ra_magnitude=ra_magnitude, img_size=img_size)
        self.val_dataset = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_val_ids.json', img_size=img_size)
        self.test_dataset = EnricoImageDataset(id_list_path='../../metadata/screenclassification/filtered_test_ids.json', img_size=img_size)

    def train_dataloader(self):
        # inverse-frequency sample weights. NOTE(review): class_counter is computed
        # over the full CSV before split filtering (see EnricoImageDataset.__init__)
        # — confirm that global rather than per-split frequencies are intended
        samples_weight = torch.tensor([(1 / self.train_dataset.class_counter[t]) for t in self.train_dataset.labels])
        sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight, len(samples_weight))
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers, batch_size=self.batch_size, sampler=sampler, collate_fn=collate_fn_enrico)

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=collate_fn_enrico)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers, batch_size=self.batch_size, collate_fn=collate_fn_enrico)
|
class UIScreenClassifier(pl.LightningModule):
    """Screen-topic classifier over ResNet-50 / VGG-16 backbones.

    arch='resnet50'      : stock ResNet-50 trunk with a Dropout+Linear head.
    arch='resnet50_conv' : same trunk, but a conv head whose spatial logits are
                           mean-pooled (see forward).
    arch='vgg16'         : VGG-16-BN with its last classifier layer resized.

    Labels may be hard integer class indices, or 2-D soft / multi-hot matrices
    (detected by `len(labels.shape) == 2` in the step functions).
    """

    def __init__(self, num_classes=20, dropout_block=0.0, dropout=0.2, lr=5e-05, soft_labels=True, stochastic_depth_p=0.2, arch='resnet50'):
        super(UIScreenClassifier, self).__init__()
        self.save_hyperparameters()
        if ((arch == 'resnet50') or (arch == 'resnet50_conv')):
            model = models.resnet50(pretrained=False)
            # swap BatchNorm2d for InstanceNorm+Dropout2d and wrap res-blocks
            # with stochastic depth (helpers defined elsewhere in this file)
            replace_default_bn_with_custom(model, dropout=dropout_block)
            replace_res_blocks_with_stochastic(model, stochastic_depth_p=stochastic_depth_p)
            model.fc = nn.Sequential(nn.Dropout(dropout), nn.Linear(model.fc.in_features, num_classes))
            self.model = model
            # conv classification head, only used on the 'resnet50_conv' path
            self.conv_cls = nn.Sequential(nn.InstanceNorm2d(2048), nn.Dropout2d(dropout), nn.Conv2d(2048, num_classes, 3, stride=1, padding=1))
        elif (arch == 'vgg16'):
            model = models.vgg16_bn(pretrained=False, dropout=dropout)
            replace_default_bn_with_custom(model, dropout=dropout_block)
            model.classifier[(- 1)] = nn.Linear(4096, num_classes)
            self.model = model

    def forward(self, image):
        """Return (batch, num_classes) logits for a (batch, 3, H, W) image tensor."""
        if ((self.hparams.arch == 'resnet50') or (self.hparams.arch == 'vgg16')):
            return self.model(image)
        elif (self.hparams.arch == 'resnet50_conv'):
            # run the resnet trunk manually, then mean-pool the conv head's
            # per-location logits over all spatial positions
            x = self.model.conv1(image)
            x = self.model.bn1(x)
            x = self.model.relu(x)
            x = self.model.maxpool(x)
            x = self.model.layer1(x)
            x = self.model.layer2(x)
            x = self.model.layer3(x)
            x = self.model.layer4(x)
            x = self.conv_cls(x)
            batch_size = x.shape[0]
            res = x.view(batch_size, self.hparams.num_classes, (- 1)).mean(dim=(- 1))
            return res

    def _forward_batch(self, image):
        """Forward a list of images one at a time and concat the logits.

        The collate functions keep 'image' as a Python list (collate_fn only
        stacks 'label'), so sizes may differ and samples are forwarded individually.
        """
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        return torch.cat(outs, dim=0)

    def training_step(self, batch, batch_idx):
        """Cross-entropy (hard or soft targets) or BCE-with-logits for multi-hot labels."""
        image = batch['image']
        labels = batch['label']
        out = self._forward_batch(image)
        if (len(labels.shape) == 2):
            if self.hparams.soft_labels:
                # soft-target cross entropy (requires torch >= 1.10 semantics)
                loss = F.cross_entropy(out, labels.float())
            else:
                loss = F.binary_cross_entropy_with_logits(out, labels)
        else:
            loss = F.cross_entropy(out, labels)
        return loss

    def _eval_step(self, batch):
        """Shared val/test step: (logits, labels) for multilabel, (argmax, labels) otherwise."""
        image = batch['image']
        labels = batch['label']
        out = self._forward_batch(image)
        if (len(labels.shape) == 2):
            # multilabel: keep raw logits for epoch-level BCE
            return (out, labels)
        (_, inds) = out.max(dim=(- 1))
        return (inds, labels)

    def validation_step(self, batch, batch_idx):
        return self._eval_step(batch)

    def test_step(self, batch, batch_idx):
        return self._eval_step(batch)

    def _epoch_metrics(self, outputs):
        """Aggregate step outputs into a metric dict: BCE for multilabel, F1 scores otherwise."""
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        if (len(all_labels.shape) == 2):
            bce_score = F.binary_cross_entropy_with_logits(all_outs, all_labels)
            return {'bce': bce_score}
        all_outs = all_outs.detach().cpu().long().numpy()
        all_labels = all_labels.detach().cpu().long().numpy()
        macro_score = f1_score(all_labels, all_outs, average='macro')
        micro_score = f1_score(all_labels, all_outs, average='micro')
        weighted_score = f1_score(all_labels, all_outs, average='weighted')
        return {'f1_macro': macro_score, 'f1_micro': micro_score, 'f1_weighted': weighted_score}

    def validation_epoch_end(self, outputs):
        score_dict = self._epoch_metrics(outputs)
        print(score_dict)
        self.log_dict(score_dict)

    def test_epoch_end(self, outputs):
        score_dict = self._epoch_metrics(outputs)
        print(score_dict)
        # bug fix: the single-label branch previously printed/returned the metrics
        # but never logged them; log consistently with validation_epoch_end
        self.log_dict(score_dict)
        return score_dict

    def configure_optimizers(self):
        """AdamW over trainable parameters at the configured learning rate."""
        optimizer = torch.optim.AdamW([p for p in self.parameters() if p.requires_grad], lr=self.hparams.lr)
        return optimizer
|
class UIScreenSegmenter(pl.LightningModule):
    """ResNet-50 encoder + upsampling conv decoder for dense screen segmentation.

    The decoder upsamples 5x (32x total) back toward input resolution, and the
    output is bilinearly interpolated to exactly the input size. Trained with
    per-pixel binary cross-entropy (multi-label masks).
    """

    def __init__(self, num_classes=20):
        super(UIScreenSegmenter, self).__init__()
        self.save_hyperparameters()
        model = models.resnet50(pretrained=False, norm_layer=nn.InstanceNorm2d)
        # fc head is replaced but unused by the encode/decode path below
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        self.model = model
        # 2048 -> 1024 -> 512 -> 256 -> 128 -> 64 -> num_classes, doubling spatially each stage
        self.decoder = nn.Sequential(nn.InstanceNorm2d(2048), nn.Upsample(scale_factor=2), nn.Conv2d(2048, 1024, 3, stride=1, padding=1), nn.InstanceNorm2d(1024), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(1024, 512, 3, stride=1, padding=1), nn.InstanceNorm2d(512), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(512, 256, 3, stride=1, padding=1), nn.InstanceNorm2d(256), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(256, 128, 3, stride=1, padding=1), nn.InstanceNorm2d(128), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(128, 64, 3, stride=1, padding=1), nn.InstanceNorm2d(64), nn.ReLU(), nn.Conv2d(64, num_classes, 3, stride=1, padding=1))

    def encode(self, img):
        """Run the ResNet trunk (no avgpool/fc); returns the layer4 feature map."""
        x = self.model.conv1(img)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        return x

    def decode(self, x, out_size):
        """Decode features to per-class logit maps resized to `out_size` (H, W)."""
        x = self.decoder(x)
        return F.interpolate(x, size=out_size, mode='bilinear', align_corners=False)

    def forward(self, x):
        return self.decode(self.encode(x), (x.shape[(- 2)], x.shape[(- 1)]))

    def training_step(self, batch, batch_idx):
        image = batch['image']
        segmentation = batch['segmentation']
        # images are forwarded one at a time (batch is a list; sizes may differ)
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        losses = [F.binary_cross_entropy_with_logits(outs[i], segmentation[i].unsqueeze(0)) for i in range(len(outs))]
        loss = torch.stack(losses).mean()
        # NOTE(review): the scheduler is stepped manually here AND returned from
        # configure_optimizers — confirm Lightning isn't also stepping it
        sch = self.lr_schedulers()
        if (sch is not None):
            sch.step()
        return loss

    def validation_step(self, batch, batch_idx):
        image = batch['image']
        segmentation = batch['segmentation']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        losses = [F.binary_cross_entropy_with_logits(outs[i], segmentation[i].unsqueeze(0)) for i in range(len(outs))]
        loss = torch.stack(losses).mean()
        return loss

    def validation_epoch_end(self, outputs):
        bce_score = torch.stack(outputs).mean()
        score_dict = {'bce': bce_score}
        print(score_dict)
        self.log_dict(score_dict)

    def configure_optimizers(self):
        # NOTE(review): CyclicLR's base_lr=0.01 overrides the optimizer's lr=0.001
        # from the first scheduler step — confirm the intended learning-rate range
        optimizer = torch.optim.SGD(self.parameters(), lr=0.001, momentum=0.9, nesterov=True, weight_decay=0.0001)
        lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
        return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
|
class StochasticBasicBlock(nn.Module):
    """Wraps a torchvision BasicBlock, applying stochastic depth to the residual branch."""

    def __init__(self, m, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
        super(StochasticBasicBlock, self).__init__()
        self.m = m  # the wrapped BasicBlock (conv1/bn1/relu/conv2/bn2/downsample)
        self.sd = StochasticDepth(stochastic_depth_p, mode=stochastic_depth_mode)

    def forward(self, x):
        # residual branch: conv1 -> bn1 -> relu -> conv2 -> bn2, then stochastic depth
        branch = self.m.relu(self.m.bn1(self.m.conv1(x)))
        branch = self.sd(self.m.bn2(self.m.conv2(branch)))
        # shortcut: identity, or the block's downsample projection when present
        shortcut = x if self.m.downsample is None else self.m.downsample(x)
        return self.m.relu(branch + shortcut)
|
class StochasticBottleneck(nn.Module):
    """Wraps a torchvision Bottleneck, applying stochastic depth to the residual branch."""

    def __init__(self, m, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
        super(StochasticBottleneck, self).__init__()
        self.m = m  # the wrapped Bottleneck (conv1..3/bn1..3/relu/downsample)
        self.sd = StochasticDepth(stochastic_depth_p, mode=stochastic_depth_mode)

    def forward(self, x):
        # residual branch: 1x1 -> 3x3 -> 1x1 conv stack, stochastic depth after bn3
        branch = self.m.relu(self.m.bn1(self.m.conv1(x)))
        branch = self.m.relu(self.m.bn2(self.m.conv2(branch)))
        branch = self.sd(self.m.bn3(self.m.conv3(branch)))
        # shortcut: identity, or the block's downsample projection when present
        shortcut = x if self.m.downsample is None else self.m.downsample(x)
        return self.m.relu(branch + shortcut)
|
class CustomNormAndDropout(nn.Module):
    """InstanceNorm2d followed by channel dropout — a drop-in BatchNorm2d replacement."""

    def __init__(self, num_features, dropout):
        super(CustomNormAndDropout, self).__init__()
        self.norm = nn.InstanceNorm2d(num_features)
        self.dropout = nn.Dropout2d(dropout)

    def forward(self, x):
        return self.dropout(self.norm(x))
|
def replace_default_bn_with_custom(model, dropout=0.0):
    """Recursively swap every BatchNorm2d in `model` for CustomNormAndDropout."""
    for name, module in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            setattr(model, name, CustomNormAndDropout(module.num_features, dropout))
        else:
            replace_default_bn_with_custom(module, dropout)
|
def replace_default_bn_with_in(model):
    """Recursively swap every BatchNorm2d in `model` for a stateless InstanceNorm2d."""
    for name, module in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            setattr(model, name, nn.InstanceNorm2d(module.num_features))
        else:
            replace_default_bn_with_in(module)
|
def replace_res_blocks_with_stochastic(model, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
    """Recursively wrap every BasicBlock/Bottleneck in `model` with its stochastic-depth
    counterpart, with drop probability increasing linearly from 0 (first block) to
    `stochastic_depth_p` (last block).

    Bug fixes vs. the original:
    - the wrapper was constructed as Wrapper(p_alpha, mode), passing the drop
      probability as the wrapped module `m` and the mode string as the probability,
      instead of Wrapper(block, p, mode);
    - setattr targeted the top-level `model` with the child's local name, so blocks
      nested inside layer containers were never actually replaced. We now record
      each block's immediate parent and replace it there.
    """
    all_blocks = []

    def get_blocks(parent):
        # collect (parent_module, child_name, child_module, wrapper_class) in order
        for (child_name, child) in parent.named_children():
            if isinstance(child, BasicBlock):
                all_blocks.append((parent, child_name, child, StochasticBasicBlock))
            elif isinstance(child, Bottleneck):
                all_blocks.append((parent, child_name, child, StochasticBottleneck))
            else:
                get_blocks(child)
    get_blocks(model)
    # linear schedule: earlier blocks are dropped less often than later ones
    p_alphas = (torch.linspace(0, 1, len(all_blocks)) * stochastic_depth_p)
    for bi, (parent, child_name, child, wrapper_cls) in enumerate(all_blocks):
        setattr(parent, child_name, wrapper_cls(child, float(p_alphas[bi]), stochastic_depth_mode))
|
class UIElementDetector(pl.LightningModule):
    """UI element detector wrapping torchvision FCOS (or SSD300) models.

    Steps return (predictions, ground_truths) pairs and the epoch hooks compute
    mAP@0.5 via the `mean_average_precision` MetricBuilder. Targets may carry
    either a scalar class id or a multi-hot label vector per box.
    """

    def __init__(self, num_classes=25, min_size=320, max_size=640, use_multi_head=True, lr=0.0001, val_weights=None, test_weights=None, arch='fcos'):
        super(UIElementDetector, self).__init__()
        self.save_hyperparameters()
        if (arch == 'fcos'):
            model = torchvision.models.detection.fcos_resnet50_fpn(min_size=min_size, max_size=max_size, num_classes=num_classes, trainable_backbone_layers=5)
            if use_multi_head:
                # FCOSMultiHead is a project-local head (defined elsewhere) —
                # presumably adds multi-label box classification; verify
                multi_head = FCOSMultiHead(model.backbone.out_channels, model.anchor_generator.num_anchors_per_location()[0], num_classes)
                model.head = multi_head
        elif (arch == 'ssd'):
            model = torchvision.models.detection.ssd300_vgg16(num_classes=num_classes, trainable_backbone_layers=5)
        self.model = model

    def training_step(self, batch, batch_idx):
        """Sum the torchvision detection model's internal loss components."""
        (images, targets) = batch
        images = list((image for image in images))
        targets = [{k: v for (k, v) in t.items()} for t in targets]
        loss_dict = self.model(images, targets)
        loss = sum((loss for loss in loss_dict.values()))
        self.log_dict({'loss': float(loss)})
        return loss

    def validation_step(self, batch, batch_idx):
        """Run inference and convert outputs/targets to mean_average_precision rows.

        Prediction rows: [x1, y1, x2, y2, class_id, score] (6 cols).
        Ground-truth rows: [x1, y1, x2, y2, class_id, 0, 0] (7 cols; the trailing
        zeros are presumably 'difficult'/'crowd' flags — verify against the
        mean_average_precision package docs).
        """
        (images, targets) = batch
        images = list((image for image in images))
        targets = [{k: v for (k, v) in t.items()} for t in targets]
        outputs = self.model(images)
        preds = []
        gts = []
        for batch_i in range(len(outputs)):
            batch_len = outputs[batch_i]['boxes'].shape[0]
            pred_box = outputs[batch_i]['boxes']
            pred_score = outputs[batch_i]['scores']
            pred_label = outputs[batch_i]['labels']
            preds.append(torch.cat((pred_box, pred_label.unsqueeze((- 1)), pred_score.unsqueeze((- 1))), dim=(- 1)))
            gtsi = []
            target_len = targets[batch_i]['boxes'].shape[0]
            for i in range(target_len):
                target_box = targets[batch_i]['boxes'][i]
                target_label = targets[batch_i]['labels'][i]
                if (len(target_label.shape) == 1):
                    # multi-hot label vector: emit one GT row per active class
                    for ci in range(target_label.shape[0]):
                        if (target_label[ci] > 0):
                            gtsi.append(torch.cat((target_box, torch.tensor([ci, 0, 0], device=target_box.device)), dim=(- 1)))
                else:
                    # scalar class id
                    gtsi.append(torch.cat((target_box, torch.tensor([target_label, 0, 0], device=target_box.device)), dim=(- 1)))
            gts.append((torch.stack(gtsi) if (len(gtsi) > 0) else torch.zeros(0, 7, device=self.device)))
        return (preds, gts)

    def validation_epoch_end(self, outputs):
        """Accumulate all (pred, gt) pairs and log (optionally class-weighted) mAP@0.5."""
        metric_fn = MetricBuilder.build_evaluation_metric('map_2d', async_mode=True, num_classes=self.hparams.num_classes)
        for batch_output in outputs:
            for i in range(len(batch_output[0])):
                metric_fn.add(batch_output[0][i].detach().cpu().numpy(), batch_output[1][i].detach().cpu().numpy())
        metrics = metric_fn.value(iou_thresholds=0.5)
        print(np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]]))
        if (self.hparams.val_weights is None):
            mapscore = metrics['mAP']
        else:
            # weighted mean of per-class APs instead of the plain mAP
            weights = np.array(self.hparams.val_weights)
            aps = np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]])
            mapscore = (aps * weights).sum()
        self.log_dict({'mAP': mapscore})

    def test_step(self, batch, batch_idx):
        """Identical to validation_step (duplicated in the original) — see its docstring."""
        (images, targets) = batch
        images = list((image for image in images))
        targets = [{k: v for (k, v) in t.items()} for t in targets]
        outputs = self.model(images)
        preds = []
        gts = []
        for batch_i in range(len(outputs)):
            batch_len = outputs[batch_i]['boxes'].shape[0]
            pred_box = outputs[batch_i]['boxes']
            pred_score = outputs[batch_i]['scores']
            pred_label = outputs[batch_i]['labels']
            preds.append(torch.cat((pred_box, pred_label.unsqueeze((- 1)), pred_score.unsqueeze((- 1))), dim=(- 1)))
            gtsi = []
            target_len = targets[batch_i]['boxes'].shape[0]
            for i in range(target_len):
                target_box = targets[batch_i]['boxes'][i]
                target_label = targets[batch_i]['labels'][i]
                if (len(target_label.shape) == 1):
                    for ci in range(target_label.shape[0]):
                        if (target_label[ci] > 0):
                            gtsi.append(torch.cat((target_box, torch.tensor([ci, 0, 0], device=target_box.device)), dim=(- 1)))
                else:
                    gtsi.append(torch.cat((target_box, torch.tensor([target_label, 0, 0], device=target_box.device)), dim=(- 1)))
            gts.append((torch.stack(gtsi) if (len(gtsi) > 0) else torch.zeros(0, 7, device=self.device)))
        return (preds, gts)

    def test_epoch_end(self, outputs):
        """Same aggregation as validation_epoch_end, using test_weights."""
        metric_fn = MetricBuilder.build_evaluation_metric('map_2d', async_mode=True, num_classes=self.hparams.num_classes)
        for batch_output in outputs:
            for i in range(len(batch_output[0])):
                metric_fn.add(batch_output[0][i].detach().cpu().numpy(), batch_output[1][i].detach().cpu().numpy())
        metrics = metric_fn.value(iou_thresholds=0.5)
        print(np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]]))
        if (self.hparams.test_weights is None):
            mapscore = metrics['mAP']
        else:
            weights = np.array(self.hparams.test_weights)
            aps = np.array([metrics[0.5][c]['ap'] for c in metrics[0.5]])
            mapscore = (aps * weights).sum()
        self.log_dict({'mAP': mapscore})

    def configure_optimizers(self):
        return torch.optim.SGD(filter((lambda p: p.requires_grad), self.parameters()), lr=self.hparams.lr)
|
def random_viewport_from_full(height, w, h):
    """Pick a random full-width viewport of `height` pixels from a (w, h) page.

    Returns a PIL-style (left, upper, right, lower) crop box with left=0, right=w.
    """
    top = int(random.random() * (h - height))
    return (0, top, w, top + height)
|
def random_viewport_pair_from_full(img_full, height_ratio):
    """Crop two random, vertically-nearby viewports from a full-page screenshot.

    The viewport height is `width * height_ratio`. The second viewport's top is
    offset from the first's by a uniform amount in [-height, +height), clamped
    to the page bounds. Returns a (PIL.Image, PIL.Image) pair.
    """
    page = Image.open(img_full).convert('RGB')
    w, h = page.size
    height = int(w * height_ratio)
    box1 = random_viewport_from_full(height, w, h)
    offset = int(random.random() * (2 * height)) - height
    top2 = min(max(0, box1[1] + offset), h - height)
    box2 = (0, top2, w, top2 + height)
    return (page.crop(box1), page.copy().crop(box2))
|
class WebUISimilarityDataset(torch.utils.data.IterableDataset):
    """Infinite pair-sampling dataset for screen-similarity training with UDA images.

    Each item pairs two screenshots plus two unlabeled Rico images for domain
    adaptation. Pair types (equal probability): same-scroll and same-screen are
    positives; same-domain and different-domain are negatives (label = si < 2).
    """

    def __init__(self, split_file='../../downloads/train_split_web350k.json', root_dir='../../downloads/ds', domain_map_file='../../metadata/screensim/domain_map.json', duplicate_map_file='../../metadata/screensim/duplicate_map.json', device_name='iPhone-13 Pro', scroll_height_ratio=2.164, img_size=(256, 128), uda_dir='../../downloads/rico/combined', uda_ignore_id_files=['../../metadata/screenclassification/filtered_train_ids.json', '../../metadata/screenclassification/filtered_val_ids.json', '../../metadata/screenclassification/filtered_test_ids.json']):
        super(WebUISimilarityDataset, self).__init__()
        self.root_dir = root_dir
        self.device_name = device_name
        self.scroll_height_ratio = scroll_height_ratio  # viewport height as a multiple of width
        with open(split_file, 'r') as f:
            split_list = json.load(f)
        split_set = set([str(s) for s in split_list])
        with open(domain_map_file, 'r') as f:
            self.domain_map = json.load(f)
        # keep domains fully inside the split that have at least two distinct urls
        self.domain_list = []
        for dn in tqdm(self.domain_map):
            if (all([(url[1] in split_set) for url in self.domain_map[dn]]) and (len(set([u[0] for u in self.domain_map[dn]])) > 1)):
                self.domain_list.append(dn)
        with open(duplicate_map_file, 'r') as f:
            self.duplicate_map = json.load(f)
        # keep duplicate groups fully inside the split
        self.duplicate_list = []
        for dn in tqdm(self.duplicate_map):
            if all([(url in split_set) for url in self.duplicate_map[dn]]):
                self.duplicate_list.append(dn)
        self.img_transforms = transforms.Compose([transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        # UDA pool: Rico screenshots minus ids used by the classification splits
        ignore_ids = set()
        for ignore_file in uda_ignore_id_files:
            with open(ignore_file, 'r') as f:
                ignore_file_ids = set(json.load(f))
            ignore_ids |= ignore_file_ids
        self.uda_dir = uda_dir
        self.uda_files = [f for f in os.listdir(uda_dir) if (f.endswith('.jpg') and (f.replace('.jpg', '') not in ignore_ids))]

    def sample_same_scroll(self):
        """Positive pair: two random nearby viewports cropped from one full-page screenshot.

        NOTE(review): the bare `except:` retries recursively on ANY failure; this
        never terminates if sampling always fails — consider except Exception + a cap.
        """
        try:
            random_domain = random.choice(self.domain_list)
            domain_urls = self.domain_map[random_domain]
            crawl_id = random.choice(domain_urls)[1]
            screenshot_full_path = os.path.join(self.root_dir, crawl_id, (self.device_name + '-screenshot-full.webp'))
            (pil_img1, pil_img2) = random_viewport_pair_from_full(screenshot_full_path, self.scroll_height_ratio)
            return (pil_img1, pil_img2)
        except:
            return self.sample_same_scroll()

    def sample_same_screen(self):
        """Positive pair: screenshots of two crawls from the same duplicate group."""
        try:
            random_duplicate = random.choice(self.duplicate_list)
            sampled_screens = random.sample(self.duplicate_map[random_duplicate], 2)
            img1_path = os.path.join(self.root_dir, sampled_screens[0], (self.device_name + '-screenshot.webp'))
            img2_path = os.path.join(self.root_dir, sampled_screens[1], (self.device_name + '-screenshot.webp'))
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except:
            return self.sample_same_screen()

    def sample_same_domain(self):
        """Negative pair: two different urls from the same domain."""
        try:
            random_domain = random.choice(self.domain_list)
            url1 = random.choice(self.domain_map[random_domain])
            candidates = [u for u in self.domain_map[random_domain] if (u[0] != url1[0])]
            url2 = random.choice(candidates)
            img1_path = os.path.join(self.root_dir, url1[1], (self.device_name + '-screenshot.webp'))
            img2_path = os.path.join(self.root_dir, url2[1], (self.device_name + '-screenshot.webp'))
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except:
            return self.sample_same_domain()

    def sample_different_domain(self):
        """Negative pair: one url from each of two different domains."""
        try:
            sampled_domains = random.sample(self.domain_list, 2)
            domain1 = sampled_domains[0]
            domain2 = sampled_domains[1]
            url1 = random.choice(self.domain_map[domain1])
            url2 = random.choice(self.domain_map[domain2])
            img1_path = os.path.join(self.root_dir, url1[1], (self.device_name + '-screenshot.webp'))
            img2_path = os.path.join(self.root_dir, url2[1], (self.device_name + '-screenshot.webp'))
            pil_img1 = Image.open(img1_path).convert('RGB')
            pil_img2 = Image.open(img2_path).convert('RGB')
            return (pil_img1, pil_img2)
        except:
            return self.sample_different_domain()

    def sample_uda_img(self):
        """Random unlabeled Rico screenshot for domain adaptation."""
        try:
            img_path = os.path.join(self.uda_dir, random.choice(self.uda_files))
            return Image.open(img_path).convert('RGB')
        except:
            return self.sample_uda_img()

    def __iter__(self):
        while True:
            probs = [0.25, 0.25, 0.25, 0.25]
            funcs = [self.sample_same_scroll, self.sample_same_screen, self.sample_same_domain, self.sample_different_domain]
            si = choices(list(range(len(funcs))), probs)[0]
            func = funcs[si]
            res = func()
            # first two samplers produce positive pairs, last two negative
            label = (si < 2)
            (yield {'label': label, 'image1': self.img_transforms(res[0]), 'image2': self.img_transforms(res[1]), 'imageuda1': self.img_transforms(self.sample_uda_img()), 'imageuda2': self.img_transforms(self.sample_uda_img())})
|
class WebUISimilarityDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping WebUISimilarityDataset for train/val/test splits.

    NOTE(review): root_dir, domain_map_file, duplicate_map_file, device_name,
    scroll_height_ratio and img_size are accepted but never forwarded to the
    datasets (only split_file is) — confirm whether they should be passed through.
    """

    def __init__(self, batch_size=16, num_workers=4, split_file='../../downloads/train_split_web350k.json', root_dir='../../downloads/ds', domain_map_file='../../metadata/screensim/domain_map.json', duplicate_map_file='../../metadata/screensim/duplicate_map.json', device_name='iPhone-13 Pro', scroll_height_ratio=2.164, img_size=128):
        super(WebUISimilarityDataModule, self).__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.split_file = split_file
        self.train_dataset = WebUISimilarityDataset(split_file=split_file)
        self.val_dataset = WebUISimilarityDataset(split_file='../../downloads/val_split_webui.json')
        self.test_dataset = WebUISimilarityDataset(split_file='../../downloads/test_split_webui.json')

    def train_dataloader(self):
        # the dataset is an IterableDataset, so no sampler/shuffle is configured
        return torch.utils.data.DataLoader(self.train_dataset, num_workers=self.num_workers, batch_size=self.batch_size)

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_dataset, num_workers=self.num_workers, batch_size=self.batch_size)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_dataset, num_workers=self.num_workers, batch_size=self.batch_size)
|
class UIScreenEmbedder(pl.LightningModule):
def __init__(self, hidden_size=256, lr=5e-05, margin_pos=0.2, margin_neg=0.5, lambda_dann=1):
super(UIScreenEmbedder, self).__init__()
self.save_hyperparameters()
model = models.resnet18(pretrained=False)
replace_default_bn_with_in(model)
model.fc = nn.Linear(model.fc.in_features, hidden_size)
self.model = model
self.classifier = nn.Sequential(RevGrad(), nn.Linear(model.fc.in_features, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 1))
def forward_uda(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
image1 = batch['image1']
image2 = batch['image2']
imageuda1 = batch['imageuda1']
imageuda2 = batch['imageuda2']
labels = batch['label']
outs1 = self.model(image1)
outs2 = self.model(image2)
batch_size = image1.shape[0]
delta = (outs1 - outs2)
dist = torch.linalg.norm(delta, dim=(- 1))
losses = torch.zeros(batch_size, device=self.device)
losses[labels] = (dist[labels] - self.hparams.margin_pos).clamp(min=0)
losses[(~ labels)] = (self.hparams.margin_neg - dist[(~ labels)]).clamp(min=0)
loss_sim = losses.mean()
if (self.hparams.lambda_dann == 0):
loss = loss_sim
metrics = {'loss': loss}
self.log_dict(metrics)
return loss
else:
cls_pred_outs1 = self.forward_uda(image1)
cls_pred_outs2 = self.forward_uda(image2)
cls_pred_outsuda1 = self.forward_uda(imageuda1)
cls_pred_outsuda2 = self.forward_uda(imageuda2)
cls_pred = torch.cat((cls_pred_outs1, cls_pred_outs2, cls_pred_outsuda1, cls_pred_outsuda2), dim=0).squeeze((- 1))
cls_label = torch.cat((torch.ones((batch_size * 2), device=self.device), torch.zeros((batch_size * 2), device=self.device)), dim=0)
loss_cls = F.binary_cross_entropy_with_logits(cls_pred, cls_label)
loss = (loss_sim + (self.hparams.lambda_dann * loss_cls))
metrics = {'loss': loss, 'loss_sim': loss_sim, 'loss_cls': loss_cls}
self.log_dict(metrics)
return loss
def validation_step(self, batch, batch_idx):
    """Predict pair similarity by thresholding embedding distance; when DANN
    is enabled also return domain predictions for the adversarial head."""
    img_a, img_b = batch['image1'], batch['image2']
    uda_a, uda_b = batch['imageuda1'], batch['imageuda2']
    pair_labels = batch['label']
    emb_a = self.model(img_a)
    emb_b = self.model(img_b)
    n = img_a.shape[0]
    pair_dist = torch.linalg.norm(emb_a - emb_b, dim=-1)
    # decision threshold: halfway between the two contrastive margins
    thresh = 0.5 * (self.hparams.margin_pos + self.hparams.margin_neg)
    preds = pair_dist < thresh
    if self.hparams.lambda_dann == 0:
        return (preds, pair_labels)
    domain_preds = torch.cat(
        (self.forward_uda(img_a), self.forward_uda(img_b),
         self.forward_uda(uda_a), self.forward_uda(uda_b)), dim=0).squeeze(-1) > 0
    domain_labels = torch.cat(
        (torch.ones(n * 2, device=self.device),
         torch.zeros(n * 2, device=self.device)), dim=0)
    return (preds, pair_labels, domain_preds, domain_labels)
def validation_epoch_end(self, outputs):
    """Aggregate per-batch predictions and log F1 (plus the domain
    classifier's F1 when DANN is active)."""
    preds = torch.cat([o[0] for o in outputs], dim=0).detach().cpu().numpy()
    labels = torch.cat([o[1] for o in outputs], dim=0).detach().cpu().numpy()
    metrics = {'f1': f1_score(labels, preds)}
    if self.hparams.lambda_dann != 0:
        uda_preds = torch.cat([o[2] for o in outputs], dim=0).detach().cpu().numpy()
        uda_labels = torch.cat([o[3] for o in outputs], dim=0).detach().cpu().numpy()
        metrics['f1_uda'] = f1_score(uda_labels, uda_preds)
    self.log_dict(metrics)
def configure_optimizers(self):
    """AdamW over the trainable (requires_grad) parameters only."""
    trainable = filter(lambda p: p.requires_grad, self.parameters())
    return torch.optim.AdamW(list(trainable), lr=self.hparams.lr)
|
def replace_default_bn_with_in(model):
    """Recursively swap every nn.BatchNorm2d in `model` for an
    nn.InstanceNorm2d with the same channel count (in place).

    NOTE: InstanceNorm2d defaults to affine=False and no running stats, so
    any learned BN scale/shift is dropped by the swap.
    """
    for name, module in model.named_children():
        if not isinstance(module, nn.BatchNorm2d):
            replace_default_bn_with_in(module)
        else:
            setattr(model, name, nn.InstanceNorm2d(module.num_features))
|
class DailyDialogParser():
    """Parses the DailyDialog text files into token lists.

    Each dialog becomes a list of utterances; each utterance is a flat list
    of lowercase tokens with `sos`/`eos` wrapping every sentence and `eou`
    appended at the end of the utterance.
    """

    def __init__(self, path, sos, eos, eou):
        self.path = path
        self.sos = sos
        self.eos = eos
        self.eou = eou

    def get_dialogs(self):
        """Parse the train/validation/test splits and return them as a tuple."""
        splits = ('train.txt', 'validation.txt', 'test.txt')
        return tuple(self.process_file(self.path + name) for name in splits)

    def process_file(self, path):
        with open(path, 'r') as f:
            lines = f.readlines()
        print('Parsing', path)
        return [self.process_raw_dialog(line) for line in lines]

    def process_raw_dialog(self, raw_dialog):
        # '__eou__' separates utterances inside one dialog line
        return [self.process_raw_utterance(u)
                for u in raw_dialog.split('__eou__') if not u.isspace()]

    def process_raw_utterance(self, raw_utterance):
        tokens = []
        for sentence in nltk.sent_tokenize(raw_utterance):
            tokens += self.process_raw_sentence(sentence)
        tokens.append(self.eou)
        return tokens

    def process_raw_sentence(self, raw_sentence):
        return [self.sos] + raw_sentence.lower().split() + [self.eos]
|
class DPCollator():
    """Pads variable-length (context, reply) pairs into LongTensor batches."""

    def __init__(self, pad_token, reply_length=None):
        self.pad_token = pad_token
        # fixed reply length; when None, replies pad to the batch maximum
        self.reply_length = reply_length

    def __call__(self, batch):
        contexts, replies = zip(*batch)
        return (self.pad(contexts), self.pad(replies, self.reply_length))

    def pad(self, data, length=None):
        """Right-pad each row with `pad_token` to `length` (or batch max)."""
        target = length if length is not None else max(len(row) for row in data)
        rows = [list(row) + [self.pad_token] * (target - len(row)) for row in data]
        return LongTensor(rows)
|
class DPCorpus(object):
    """Vocabulary and id-mapping wrapper around parsed DailyDialog dialogs.

    Builds a frequency-ordered vocabulary from the training split (PAD and
    UNK reserved at indices 0 and 1) and converts dialogs between tokens
    and ids; also hands out datasets and a padding collator.
    """
    SOS = '<s>'
    EOS = '</s>'
    EOU = '</u>'
    PAD = '<pad>'
    UNK = '<unk>'

    def __init__(self, dialog_parser=None, vocabulary_limit=None):
        if dialog_parser is None:
            path = os.path.dirname(os.path.realpath(__file__)) + '/daily_dialog/'
            dialog_parser = DailyDialogParser(path, self.SOS, self.EOS, self.EOU)
        (self.train_dialogs, self.validation_dialogs, self.test_dialogs) = dialog_parser.get_dialogs()
        print('Building vocabulary')
        self.build_vocab(vocabulary_limit)
        if vocabulary_limit is not None:
            for name, dialogs in (('train', self.train_dialogs),
                                  ('validation', self.validation_dialogs),
                                  ('test', self.test_dialogs)):
                print('Replacing out of vocabulary from %s dialogs by unk token.' % name)
                self.limit_dialogs_to_vocabulary(dialogs)

    def build_vocab(self, vocabulary_limit):
        """Build the vocabulary from the training split, most frequent first."""
        special_tokens = [self.PAD, self.UNK]
        counts = Counter(self.flatten_dialogs(self.train_dialogs))
        if vocabulary_limit is None:
            most_common = counts.most_common()
        else:
            most_common = counts.most_common(vocabulary_limit - len(special_tokens))
        self.vocabulary = special_tokens + [tok for tok, _ in most_common]
        self.token_ids = {tok: i for i, tok in enumerate(self.vocabulary)}

    def flatten_dialogs(self, dialogs):
        return [tok for dialog in dialogs for utt in dialog for tok in utt]

    def limit_dialogs_to_vocabulary(self, dialogs):
        # in-place replacement of out-of-vocabulary tokens by UNK
        for dialog in dialogs:
            for utterance in dialog:
                for i, token in enumerate(utterance):
                    if token not in self.vocabulary:
                        utterance[i] = self.UNK

    def utterance_to_ids(self, utterance):
        unk_id = self.token_ids[self.UNK]
        return [self.token_ids.get(tok, unk_id) for tok in utterance]

    def dialogs_to_ids(self, data):
        return [[self.utterance_to_ids(utt) for utt in dialog] for dialog in data]

    def ids_to_tokens(self, ids):
        """Map ids back to tokens, dropping padding."""
        pad_id = self.token_ids[self.PAD]
        return [self.vocabulary[i] for i in ids if i != pad_id]

    def token_to_id(self, token):
        return self.token_ids[token]

    def get_train_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.train_dialogs, context_size, min_reply_length, max_reply_length)

    def get_validation_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.validation_dialogs, context_size, min_reply_length, max_reply_length)

    def get_test_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.test_dialogs, context_size, min_reply_length, max_reply_length)

    def get_dataset(self, dialogs, context_size, min_reply_length, max_reply_length):
        return DPDataset(self, self.dialogs_to_ids(dialogs), context_size, min_reply_length, max_reply_length)

    def get_collator(self, reply_length=None):
        return DPCollator(self.token_ids[self.PAD], reply_length=reply_length)
|
class DPDataLoader(DataLoader):
    """DataLoader for DailyDialog (context, reply) pairs.

    When `dataset` is None, builds a default train dataset (vocabulary 5000,
    context size 2, reply length 5-20). Batches are padded by the corpus
    collator, shuffled, and incomplete final batches are dropped.
    """

    def __init__(self, dataset, batch_size=64):
        # fixed: use identity comparison with None instead of `== None`
        if dataset is None:
            corpus = DPCorpus(vocabulary_limit=5000)
            dataset = corpus.get_train_dataset(2, 5, 20)
        collator = dataset.corpus.get_collator(reply_length=20)
        super().__init__(dataset, batch_size=batch_size, collate_fn=collator,
                         shuffle=True, drop_last=True)
|
class DPDataset(Dataset):
    """Sliding-window (context, reply) pairs extracted from dialogs.

    Each sample's context is the concatenation of `context_size` consecutive
    utterances and its reply is the utterance that follows them; pairs whose
    reply length falls outside [min_reply_length, max_reply_length] are
    skipped.
    """

    def __init__(self, corpus, dialogs, context_size=2, min_reply_length=None, max_reply_length=None):
        self.corpus = corpus
        self.contexts = []
        self.replies = []
        for dialog in dialogs:
            for start in range(len(dialog) - context_size):
                reply = dialog[start + context_size]
                if min_reply_length is not None and len(reply) < min_reply_length:
                    continue
                if max_reply_length is not None and len(reply) > max_reply_length:
                    continue
                context = []
                for utt in dialog[start:start + context_size]:
                    context.extend(utt)
                self.contexts.append(context)
                self.replies.append(reply)

    def __len__(self):
        return len(self.contexts)

    def __getitem__(self, item):
        return (LongTensor(self.contexts[item]), LongTensor(self.replies[item]))
|
class Discriminator(nn.Module):
    """Dual-encoder GRU discriminator scoring (reply, context) pairs.

    A reply and its context are encoded by two independent
    embedding + 2-layer bidirectional GRU stacks; the final hidden states
    (2 layers x 2 directions) of each are projected to `hidden_dim`,
    concatenated, and mapped to a single sigmoid probability.
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, dropout=0.2, device='cpu'):
        super(Discriminator, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.device = device
        # reply encoder
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)
        # 2 layers * 2 directions worth of final hidden states -> hidden_dim
        self.gru2hidden = nn.Linear(((2 * 2) * hidden_dim), hidden_dim)
        self.dropout_linear = nn.Dropout(p=dropout)
        # context encoder (separate weights from the reply encoder)
        self.embeddings2 = nn.Embedding(vocab_size, embedding_dim)
        self.gru2 = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)
        self.gru2hidden2 = nn.Linear(((2 * 2) * hidden_dim), hidden_dim)
        self.dropout_linear2 = nn.Dropout(p=dropout)
        self.hidden2out = nn.Linear((2 * hidden_dim), 1)

    def init_hidden(self, batch_size):
        # zero initial state: (num_layers * num_directions, batch, hidden_dim)
        h = autograd.Variable(torch.zeros(((2 * 2) * 1), batch_size, self.hidden_dim)).to(self.device)
        return h

    def forward(self, reply, context, hidden, hidden2):
        """Score a batch of pairs; returns sigmoid probabilities (batch, 1).

        `reply`/`context` are (batch, seq_len) token-id tensors; `hidden`/
        `hidden2` are initial GRU states from `init_hidden`.
        """
        emb = self.embeddings(reply)
        emb = emb.permute(1, 0, 2)  # -> (seq_len, batch, emb) for the GRU
        (_, hidden) = self.gru(emb, hidden)
        hidden = hidden.permute(1, 0, 2).contiguous()  # (batch, layers*dirs, hidden)
        out = self.gru2hidden(hidden.view((- 1), (4 * self.hidden_dim)))
        out = torch.tanh(out)
        out_reply = self.dropout_linear(out)
        emb = self.embeddings2(context)
        emb = emb.permute(1, 0, 2)
        (_, hidden) = self.gru2(emb, hidden2)
        hidden = hidden.permute(1, 0, 2).contiguous()
        out = self.gru2hidden2(hidden.view((- 1), (4 * self.hidden_dim)))
        out = torch.tanh(out)
        out_context = self.dropout_linear2(out)
        out = self.hidden2out(torch.cat((out_reply, out_context), 1))
        out = torch.sigmoid(out)
        return out

    def batchClassify(self, reply, context):
        """Classify a batch of (reply, context) pairs.

        Inputs:
            reply, context: (batch_size, seq_len) token-id tensors
        Returns:
            out: (batch_size,) tensor of [0, 1] scores
        """
        h = self.init_hidden(reply.size()[0])
        h2 = self.init_hidden(context.size()[0])
        out = self.forward(reply.long(), context.long(), h, h2)
        return out.view((- 1))

    def batchBCELoss(self, inp, target):
        """Binary cross-entropy loss for the discriminator.

        NOTE(review): this calls `self.forward(inp, h)` but `forward` takes
        (reply, context, hidden, hidden2) — this method looks stale and will
        raise a TypeError if called; confirm before use.

        Inputs:
            inp: (batch_size, seq_len)
            target: (batch_size,) binary 1/0
        """
        loss_fn = nn.BCELoss()
        h = self.init_hidden(inp.size()[0])
        out = self.forward(inp, h)
        return loss_fn(out, target)
|
def greedy_match(fileone, filetwo, w2v):
    """Symmetric greedy-matching embedding metric between two response files.

    Averages the per-line greedy scores over both matching directions and
    returns (mean, 95% confidence half-width, std).
    """
    forward = greedy_score(fileone, filetwo, w2v)
    backward = greedy_score(filetwo, fileone, w2v)
    scores = (forward + backward) / 2.0
    half_width = (1.96 * np.std(scores)) / float(len(scores))
    return (np.mean(scores), half_width, np.std(scores))
|
def greedy_score(fileone, filetwo, w2v):
    """One-directional greedy matching score between two response files.

    For each line pair, every token embedding from `fileone` is matched to
    its most similar (dot-product) token embedding from `filetwo`; the
    maxima are averaged per line. Lines where either side has no tokens
    score 0.

    Returns a numpy array with one score per line.
    """
    # Fix: context managers ensure the file handles are closed (the
    # original leaked both); also dropped an unused accumulator.
    with open(fileone, 'r') as f1:
        r1 = f1.readlines()
    with open(filetwo, 'r') as f2:
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        y_count = 0
        x_count = 0
        o = 0.0
        # columns of Y are the embeddings of tokens2 (first column is zeros)
        Y = np.zeros((dim, 1))
        for tok in tokens2:
            Y = np.hstack((Y, w2v[tok].detach().cpu().numpy().reshape((dim, 1))))
            y_count += 1
        for tok in tokens1:
            tmp = w2v[tok].detach().cpu().numpy().reshape((1, dim)).dot(Y)
            o += np.max(tmp)
            x_count += 1
        if (x_count < 1) or (y_count < 1):
            scores.append(0)
            continue
        o /= float(x_count)
        scores.append(o)
    return np.asarray(scores)
|
def extrema_score(fileone, filetwo, w2v):
    """Vector-extrema embedding metric between two response files.

    For each line, takes the per-dimension extreme value (largest magnitude,
    keeping sign) over the token embeddings on each side and scores their
    cosine similarity. Lines with a zero vector on the `fileone` side are
    skipped; a zero vector on the `filetwo` side scores 0.

    Returns (mean, 95% confidence half-width, std) over the line scores.
    """
    # Fixes: context managers close the files (both leaked before), and the
    # extrema loops no longer reuse the outer line index `i` — the per-dim
    # selection is now a single vectorized np.where with identical results.
    with open(fileone, 'r') as f1:
        r1 = f1.readlines()
    with open(filetwo, 'r') as f2:
        r2 = f2.readlines()
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        X = [w2v[tok].detach().cpu().numpy() for tok in tokens1]
        Y = [w2v[tok].detach().cpu().numpy() for tok in tokens2]
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        xmax = np.max(X, 0)
        xmin = np.min(X, 0)
        # per-dimension value of largest magnitude, sign preserved
        X = np.where(np.abs(xmin) > xmax, xmin, xmax)
        ymax = np.max(Y, 0)
        ymin = np.min(Y, 0)
        Y = np.where(np.abs(ymin) > ymax, ymin, ymax)
        o = ((np.dot(X, Y.T) / np.linalg.norm(X)) / np.linalg.norm(Y))
        scores.append(o)
    scores = np.asarray(scores)
    return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
|
def average(fileone, filetwo, w2v):
    """Embedding-average metric between two response files.

    Each line is represented by the sum of its token embeddings, normalized
    to unit length; the score is the cosine similarity of the two vectors.
    Lines with a zero vector on the `fileone` side are skipped; a zero
    vector on the `filetwo` side scores 0.

    Returns (mean, 95% confidence half-width, std) over the line scores.
    """
    # Fix: context managers ensure the file handles are closed.
    with open(fileone, 'r') as f1:
        r1 = f1.readlines()
    with open(filetwo, 'r') as f2:
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        X = np.zeros((dim,))
        for tok in tokens1:
            X += w2v[tok].detach().cpu().numpy()
        Y = np.zeros((dim,))
        for tok in tokens2:
            Y += w2v[tok].detach().cpu().numpy()
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        X = np.array(X) / np.linalg.norm(X)
        Y = np.array(Y) / np.linalg.norm(Y)
        # X and Y are already unit vectors here, so the extra norm divisions
        # below are no-ops kept for numerical parity with the original
        o = ((np.dot(X, Y.T) / np.linalg.norm(X)) / np.linalg.norm(Y))
        scores.append(o)
    scores = np.asarray(scores)
    return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
|
def prepare_discriminator_data(pos_samples, neg_samples, gpu=False):
    """Build a shuffled discriminator batch from real and generated samples.

    Inputs:
        pos_samples: (pos_size, seq_len) target sequences
        neg_samples: (neg_size, seq_len) generator sequences
    Returns:
        inp: ((pos_size + neg_size), seq_len) LongTensor
        target: (pos_size + neg_size,) labels — 1 for positive, 0 for negative
    """
    n_pos = pos_samples.size()[0]
    n_neg = neg_samples.size()[0]
    inp = torch.cat((pos_samples, neg_samples), 0).type(torch.LongTensor)
    target = torch.ones(n_pos + n_neg)
    target[n_pos:] = 0
    # shuffle samples and labels with the same permutation
    perm = torch.randperm(n_pos + n_neg)
    inp = Variable(inp[perm])
    target = Variable(target[perm])
    if gpu:
        inp = inp.cuda()
        target = target.cuda()
    return (inp, target)
|
def load_data(path='dataset.pickle'):
    """Load (or build and cache) the training DataLoader.

    If `path` does not exist, builds the train dataset via DPCorpus — using
    the module-level VOCAB_SIZE / MIN_SEQ_LEN / MAX_SEQ_LEN / BATCH_SIZE
    constants — and pickles it for reuse; otherwise unpickles the cache.

    NOTE(review): pickle.load can execute arbitrary code from the file —
    only load caches this process created itself.
    """
    if (not os.path.isfile(path)):
        corpus = DPCorpus(vocabulary_limit=VOCAB_SIZE)
        train_dataset = corpus.get_train_dataset(min_reply_length=MIN_SEQ_LEN, max_reply_length=MAX_SEQ_LEN)
        with open(path, 'wb') as handle:
            pickle.dump(train_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)
        train_data_loader = DPDataLoader(train_dataset, batch_size=BATCH_SIZE)
    else:
        with open(path, 'rb') as handle:
            train_dataset = pickle.load(handle)
        train_data_loader = DPDataLoader(train_dataset, batch_size=BATCH_SIZE)
    return train_data_loader
|
class ReplayMemory():
    """Fixed-capacity FIFO buffer of transitions with random sampling."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []

    def push(self, transition):
        """Append one transition, evicting the oldest entry when full."""
        if len(self.memory) == self.capacity:
            self.memory.pop(0)
        self.memory.append(transition)

    def push_batch(self, transition):
        # NOTE: currently identical to push(); kept for interface compatibility
        if len(self.memory) == self.capacity:
            self.memory.pop(0)
        self.memory.append(transition)

    def sample(self, batch_size):
        """Sample `batch_size` transitions uniformly WITH replacement."""
        picks = np.random.randint(0, len(self.memory), size=batch_size)
        return [self.memory[j] for j in picks]

    def __len__(self):
        return len(self.memory)
|
class Attention(nn.Module):
    """Dot-product attention over the encoder context.

    Given decoder output (batch, out_len, dim) and encoder context
    (batch, in_len, dim), computes attention weights by batched dot product,
    mixes the context, and fuses mix + output through tanh(linear).

    Returns (attended_output, attention_weights) with shapes
    (batch, out_len, dim) and (batch, out_len, in_len). `set_mask` forces
    the given score positions to -inf before the softmax.
    """

    def __init__(self, dim):
        super(Attention, self).__init__()
        self.linear_out = nn.Linear(dim * 2, dim)
        self.mask = None

    def set_mask(self, mask):
        """Set the boolean index tensor whose scores are masked to -inf."""
        self.mask = mask

    def forward(self, output, context):
        batch_size = output.size(0)
        hidden_size = output.size(2)
        input_size = context.size(1)
        # (batch, out_len, in_len) raw alignment scores
        attn = torch.bmm(output, context.transpose(1, 2))
        if self.mask is not None:
            attn.data.masked_fill_(self.mask, -float('inf'))
        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)
        mix = torch.bmm(attn, context)
        combined = torch.cat((mix, output), dim=2)
        fused = self.linear_out(combined.view(-1, 2 * hidden_size))
        output = torch.tanh(fused).view(batch_size, -1, hidden_size)
        return (output, attn)
|
class BaseRNN(nn.Module):
    """Base class for the encoder/decoder RNNs.

    Stores the shared hyperparameters and resolves the recurrent cell class
    from its name ('LSTM' or 'GRU', case-insensitive). Not meant to be used
    directly — subclasses must implement forward().
    """
    SYM_MASK = 'MASK'  # masking symbol
    SYM_EOS = 'EOS'    # end-of-sequence symbol

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell):
        super(BaseRNN, self).__init__()
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        cells = {'lstm': nn.LSTM, 'gru': nn.GRU}
        if rnn_cell.lower() not in cells:
            raise ValueError('Unsupported RNN Cell: {0}'.format(rnn_cell))
        self.rnn_cell = cells[rnn_cell.lower()]
        self.dropout_p = dropout_p

    def forward(self, *args, **kwargs):
        raise NotImplementedError()
|
class EncoderRNN(BaseRNN):
    """Embeds token-id batches and encodes them with a (bi)directional RNN.

    Args mirror BaseRNN plus: a `bidirectional` flag, the `rnn_cell` name,
    an optional pre-trained `embedding` tensor (with `update_embedding`
    controlling fine-tuning), and `variable_lengths` to pack padded input.

    forward(input_var, input_lengths) returns (output, hidden) where output
    is (batch, seq_len, hidden_size * num_directions).
    """

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1,
                 bidirectional=False, rnn_cell='gru', variable_lengths=False, embedding=None,
                 update_embedding=True):
        super(EncoderRNN, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p,
                                         dropout_p, n_layers, rnn_cell)
        self.variable_lengths = variable_lengths
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        if embedding is not None:
            # load (and optionally freeze) a pre-trained embedding matrix
            self.embedding.weight = nn.Parameter(embedding)
            self.embedding.weight.requires_grad = update_embedding
        self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True,
                                 bidirectional=bidirectional, dropout=dropout_p)

    def forward(self, input_var, input_lengths=None):
        """Encode a (batch, seq_len) tensor of token ids.

        When `variable_lengths` is set, `input_lengths` must list each
        sequence's true length and the batch is packed before the RNN.
        """
        embedded = self.input_dropout(self.embedding(input_var))
        if self.variable_lengths:
            embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
        output, hidden = self.rnn(embedded)
        if self.variable_lengths:
            output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        return (output, hidden)
|
class Seq2seq(nn.Module):
    """Encoder-decoder wrapper: encodes the input and delegates decoding.

    `decode_function` (default F.log_softmax) is forwarded to the decoder to
    turn hidden states into symbol distributions; `teacher_forcing_ratio`
    and `sample` are passed through unchanged.
    """

    def __init__(self, encoder, decoder, decode_function=F.log_softmax):
        super(Seq2seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.decode_function = decode_function

    def flatten_parameters(self):
        # compact the RNN weights (useful for cuDNN after (de)serialization)
        self.encoder.rnn.flatten_parameters()
        self.decoder.rnn.flatten_parameters()

    def forward(self, input_variable, input_lengths=None, target_variable=None,
                teacher_forcing_ratio=0, sample=False):
        encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)
        return self.decoder(inputs=target_variable,
                            encoder_hidden=encoder_hidden,
                            encoder_outputs=encoder_outputs,
                            function=self.decode_function,
                            teacher_forcing_ratio=teacher_forcing_ratio,
                            sample=sample)
|
class DailyDialogParser():
    """Parses the DailyDialog text files into token lists.

    Each dialog becomes a list of utterances; each utterance is a flat list
    of lowercase tokens with `sos`/`eos` wrapping every sentence and `eou`
    appended at the end of the utterance.
    """

    def __init__(self, path, sos, eos, eou):
        self.path = path
        self.sos = sos
        self.eos = eos
        self.eou = eou

    def get_dialogs(self):
        """Parse the train/validation/test splits and return them as a tuple."""
        splits = ('train.txt', 'validation.txt', 'test.txt')
        return tuple(self.process_file(self.path + name) for name in splits)

    def process_file(self, path):
        with open(path, 'r') as f:
            lines = f.readlines()
        print('Parsing', path)
        return [self.process_raw_dialog(line) for line in lines]

    def process_raw_dialog(self, raw_dialog):
        # '__eou__' separates utterances inside one dialog line
        return [self.process_raw_utterance(u)
                for u in raw_dialog.split('__eou__') if not u.isspace()]

    def process_raw_utterance(self, raw_utterance):
        tokens = []
        for sentence in nltk.sent_tokenize(raw_utterance):
            tokens += self.process_raw_sentence(sentence)
        tokens.append(self.eou)
        return tokens

    def process_raw_sentence(self, raw_sentence):
        return [self.sos] + raw_sentence.lower().split() + [self.eos]
|
class DPCollator():
    """Pads variable-length (context, reply) pairs into LongTensor batches."""

    def __init__(self, pad_token, reply_length=None):
        self.pad_token = pad_token
        # fixed reply length; when None, replies pad to the batch maximum
        self.reply_length = reply_length

    def __call__(self, batch):
        contexts, replies = zip(*batch)
        return (self.pad(contexts), self.pad(replies, self.reply_length))

    def pad(self, data, length=None):
        """Right-pad each row with `pad_token` to `length` (or batch max)."""
        target = length if length is not None else max(len(row) for row in data)
        rows = [list(row) + [self.pad_token] * (target - len(row)) for row in data]
        return LongTensor(rows)
|
class DPCorpus(object):
    """Vocabulary and id-mapping wrapper around parsed DailyDialog dialogs.

    Builds a frequency-ordered vocabulary from the training split (PAD and
    UNK reserved at indices 0 and 1) and converts dialogs between tokens
    and ids; also hands out datasets and a padding collator.
    """
    SOS = '<s>'
    EOS = '</s>'
    EOU = '</u>'
    PAD = '<pad>'
    UNK = '<unk>'

    def __init__(self, dialog_parser=None, vocabulary_limit=None):
        if dialog_parser is None:
            path = os.path.dirname(os.path.realpath(__file__)) + '/daily_dialog/'
            dialog_parser = DailyDialogParser(path, self.SOS, self.EOS, self.EOU)
        (self.train_dialogs, self.validation_dialogs, self.test_dialogs) = dialog_parser.get_dialogs()
        print('Building vocabulary')
        self.build_vocab(vocabulary_limit)
        if vocabulary_limit is not None:
            for name, dialogs in (('train', self.train_dialogs),
                                  ('validation', self.validation_dialogs),
                                  ('test', self.test_dialogs)):
                print('Replacing out of vocabulary from %s dialogs by unk token.' % name)
                self.limit_dialogs_to_vocabulary(dialogs)

    def build_vocab(self, vocabulary_limit):
        """Build the vocabulary from the training split, most frequent first."""
        special_tokens = [self.PAD, self.UNK]
        counts = Counter(self.flatten_dialogs(self.train_dialogs))
        if vocabulary_limit is None:
            most_common = counts.most_common()
        else:
            most_common = counts.most_common(vocabulary_limit - len(special_tokens))
        self.vocabulary = special_tokens + [tok for tok, _ in most_common]
        self.token_ids = {tok: i for i, tok in enumerate(self.vocabulary)}

    def flatten_dialogs(self, dialogs):
        return [tok for dialog in dialogs for utt in dialog for tok in utt]

    def limit_dialogs_to_vocabulary(self, dialogs):
        # in-place replacement of out-of-vocabulary tokens by UNK
        for dialog in dialogs:
            for utterance in dialog:
                for i, token in enumerate(utterance):
                    if token not in self.vocabulary:
                        utterance[i] = self.UNK

    def utterance_to_ids(self, utterance):
        unk_id = self.token_ids[self.UNK]
        return [self.token_ids.get(tok, unk_id) for tok in utterance]

    def dialogs_to_ids(self, data):
        return [[self.utterance_to_ids(utt) for utt in dialog] for dialog in data]

    def ids_to_tokens(self, ids):
        """Map ids back to tokens, dropping padding."""
        pad_id = self.token_ids[self.PAD]
        return [self.vocabulary[i] for i in ids if i != pad_id]

    def token_to_id(self, token):
        return self.token_ids[token]

    def get_train_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.train_dialogs, context_size, min_reply_length, max_reply_length)

    def get_validation_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.validation_dialogs, context_size, min_reply_length, max_reply_length)

    def get_test_dataset(self, context_size=2, min_reply_length=None, max_reply_length=None):
        return self.get_dataset(self.test_dialogs, context_size, min_reply_length, max_reply_length)

    def get_dataset(self, dialogs, context_size, min_reply_length, max_reply_length):
        return DPDataset(self, self.dialogs_to_ids(dialogs), context_size, min_reply_length, max_reply_length)

    def get_collator(self, reply_length=None):
        return DPCollator(self.token_ids[self.PAD], reply_length=reply_length)
|
class DPDataLoader(DataLoader):
    """DataLoader for DailyDialog (context, reply) pairs.

    When `dataset` is None, builds a default train dataset (vocabulary 5000,
    context size 2, reply length 5-20). Batches are padded by the corpus
    collator, shuffled, and incomplete final batches are dropped.
    """

    def __init__(self, dataset, batch_size=64):
        # fixed: use identity comparison with None instead of `== None`
        if dataset is None:
            corpus = DPCorpus(vocabulary_limit=5000)
            dataset = corpus.get_train_dataset(2, 5, 20)
        collator = dataset.corpus.get_collator(reply_length=20)
        super().__init__(dataset, batch_size=batch_size, collate_fn=collator,
                         shuffle=True, drop_last=True)
|
class DPDataset(Dataset):
    """Sliding-window (context, reply) pairs extracted from dialogs.

    Each sample's context is the concatenation of `context_size` consecutive
    utterances and its reply is the utterance that follows them; pairs whose
    reply length falls outside [min_reply_length, max_reply_length] are
    skipped.
    """

    def __init__(self, corpus, dialogs, context_size=2, min_reply_length=None, max_reply_length=None):
        self.corpus = corpus
        self.contexts = []
        self.replies = []
        for dialog in dialogs:
            for start in range(len(dialog) - context_size):
                reply = dialog[start + context_size]
                if min_reply_length is not None and len(reply) < min_reply_length:
                    continue
                if max_reply_length is not None and len(reply) > max_reply_length:
                    continue
                context = []
                for utt in dialog[start:start + context_size]:
                    context.extend(utt)
                self.contexts.append(context)
                self.replies.append(reply)

    def __len__(self):
        return len(self.contexts)

    def __getitem__(self, item):
        return (LongTensor(self.contexts[item]), LongTensor(self.replies[item]))
|
class Discriminator(nn.Module):
    """Dual-encoder GRU discriminator scoring (reply, context) pairs.

    A reply and its context are encoded by two independent
    embedding + 2-layer bidirectional GRU stacks; the final hidden states
    (2 layers x 2 directions) of each are projected to `hidden_dim`,
    concatenated, and mapped to a single sigmoid probability.
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, dropout=0.2, device='cpu'):
        super(Discriminator, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.device = device
        # reply encoder
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)
        # 2 layers * 2 directions worth of final hidden states -> hidden_dim
        self.gru2hidden = nn.Linear(((2 * 2) * hidden_dim), hidden_dim)
        self.dropout_linear = nn.Dropout(p=dropout)
        # context encoder (separate weights from the reply encoder)
        self.embeddings2 = nn.Embedding(vocab_size, embedding_dim)
        self.gru2 = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)
        self.gru2hidden2 = nn.Linear(((2 * 2) * hidden_dim), hidden_dim)
        self.dropout_linear2 = nn.Dropout(p=dropout)
        self.hidden2out = nn.Linear((2 * hidden_dim), 1)

    def init_hidden(self, batch_size):
        # zero initial state: (num_layers * num_directions, batch, hidden_dim)
        h = autograd.Variable(torch.zeros(((2 * 2) * 1), batch_size, self.hidden_dim)).to(self.device)
        return h

    def forward(self, reply, context, hidden, hidden2):
        """Score a batch of pairs; returns sigmoid probabilities (batch, 1).

        `reply`/`context` are (batch, seq_len) token-id tensors; `hidden`/
        `hidden2` are initial GRU states from `init_hidden`.
        """
        emb = self.embeddings(reply)
        emb = emb.permute(1, 0, 2)  # -> (seq_len, batch, emb) for the GRU
        (_, hidden) = self.gru(emb, hidden)
        hidden = hidden.permute(1, 0, 2).contiguous()  # (batch, layers*dirs, hidden)
        out = self.gru2hidden(hidden.view((- 1), (4 * self.hidden_dim)))
        out = torch.tanh(out)
        out_reply = self.dropout_linear(out)
        emb = self.embeddings2(context)
        emb = emb.permute(1, 0, 2)
        (_, hidden) = self.gru2(emb, hidden2)
        hidden = hidden.permute(1, 0, 2).contiguous()
        out = self.gru2hidden2(hidden.view((- 1), (4 * self.hidden_dim)))
        out = torch.tanh(out)
        out_context = self.dropout_linear2(out)
        out = self.hidden2out(torch.cat((out_reply, out_context), 1))
        out = torch.sigmoid(out)
        return out

    def batchClassify(self, reply, context):
        """Classify a batch of (reply, context) pairs.

        Inputs:
            reply, context: (batch_size, seq_len) token-id tensors
        Returns:
            out: (batch_size,) tensor of [0, 1] scores
        """
        h = self.init_hidden(reply.size()[0])
        h2 = self.init_hidden(context.size()[0])
        out = self.forward(reply.long(), context.long(), h, h2)
        return out.view((- 1))

    def batchBCELoss(self, inp, target):
        """Binary cross-entropy loss for the discriminator.

        NOTE(review): this calls `self.forward(inp, h)` but `forward` takes
        (reply, context, hidden, hidden2) — this method looks stale and will
        raise a TypeError if called; confirm before use.

        Inputs:
            inp: (batch_size, seq_len)
            target: (batch_size,) binary 1/0
        """
        loss_fn = nn.BCELoss()
        h = self.init_hidden(inp.size()[0])
        out = self.forward(inp, h)
        return loss_fn(out, target)
|
def greedy_match(fileone, filetwo, w2v):
    """Symmetric greedy-matching embedding score between two response files.

    Averages the directional greedy scores in both directions and reports
    (mean, 95% confidence half-width, standard deviation) over lines.
    """
    forward = greedy_score(fileone, filetwo, w2v)
    backward = greedy_score(filetwo, fileone, w2v)
    combined = (forward + backward) / 2.0
    half_width = (1.96 * np.std(combined)) / float(len(combined))
    return (np.mean(combined), half_width, np.std(combined))
|
def greedy_score(fileone, filetwo, w2v):
    """Directional greedy-matching score from `fileone` toward `filetwo`.

    For each aligned line pair, every token embedding from the first file is
    matched to its most similar (dot-product) token embedding from the second
    file; the per-line score is the mean of those maxima. A line scores 0
    when either side has no tokens.

    Args:
        fileone: path to a text file, one space-tokenized sentence per line.
        filetwo: path to a text file aligned line-by-line with `fileone`.
        w2v: embedding lookup exposing `layer1_size` (the embedding dim);
            w2v[tok] must be a torch tensor (``.detach().cpu().numpy()``).

    Returns:
        np.ndarray of per-line scores.
    """
    # Fix: context managers guarantee the file handles are closed (the
    # original leaked both descriptors). Also dropped an unused accumulator.
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        y_count = 0
        x_count = 0
        o = 0.0
        # The leading zero column floors each token's best similarity at 0,
        # exactly as in the original implementation.
        Y = np.zeros((dim, 1))
        for tok in tokens2:
            Y = np.hstack((Y, w2v[tok].detach().cpu().numpy().reshape((dim, 1))))
            y_count += 1
        for tok in tokens1:
            tmp = w2v[tok].detach().cpu().numpy().reshape((1, dim)).dot(Y)
            o += np.max(tmp)
            x_count += 1
        if ((x_count < 1) or (y_count < 1)):
            scores.append(0)
            continue
        o /= float(x_count)
        scores.append(o)
    return np.asarray(scores)
|
def _dimension_extrema(vectors):
    """Per-dimension extreme of a list of vectors: the maximum, unless the
    minimum has a strictly larger absolute value, in which case the minimum."""
    vmax = np.max(vectors, 0)
    vmin = np.min(vectors, 0)
    extrema = []
    for d in range(len(vmax)):
        if np.abs(vmin[d]) > vmax[d]:
            extrema.append(vmin[d])
        else:
            extrema.append(vmax[d])
    return np.array(extrema)

def extrema_score(fileone, filetwo, w2v):
    """Vector-extrema cosine similarity between two aligned response files.

    Each line is represented by its per-dimension extrema over the token
    embeddings; the per-line score is the cosine similarity of the two
    extrema vectors.

    Args:
        fileone, filetwo: paths to line-aligned, space-tokenized text files.
        w2v: embedding lookup; w2v[tok] must be a torch tensor.

    Returns:
        (mean, 95% confidence half-width, std) over per-line scores.
    """
    # Fix: context managers close the files (the original leaked both
    # handles). The inner extrema loops no longer shadow the line index.
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    scores = []
    for line_idx in range(len(r1)):
        tokens1 = r1[line_idx].strip().split(' ')
        tokens2 = r2[line_idx].strip().split(' ')
        X = [w2v[tok].detach().cpu().numpy() for tok in tokens1]
        Y = [w2v[tok].detach().cpu().numpy() for tok in tokens2]
        # NOTE: a near-zero X skips the line entirely while a near-zero Y
        # records a 0 — asymmetric, but preserved from the original metric.
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        X = _dimension_extrema(X)
        Y = _dimension_extrema(Y)
        o = ((np.dot(X, Y.T) / np.linalg.norm(X)) / np.linalg.norm(Y))
        scores.append(o)
    scores = np.asarray(scores)
    return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
|
def average(fileone, filetwo, w2v):
    """Embedding-average cosine similarity between two aligned response files.

    Each line is represented by the sum of its token embeddings; the
    per-line score is the cosine similarity of the two normalized sums.

    Args:
        fileone, filetwo: paths to line-aligned, space-tokenized text files.
        w2v: embedding lookup exposing `layer1_size`; w2v[tok] must be a
            torch tensor.

    Returns:
        (mean, 95% confidence half-width, std) over per-line scores.
    """
    # Fix: context managers guarantee the file handles are closed (the
    # original leaked both descriptors).
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for line_idx in range(len(r1)):
        tokens1 = r1[line_idx].strip().split(' ')
        tokens2 = r2[line_idx].strip().split(' ')
        X = np.zeros((dim,))
        for tok in tokens1:
            X += w2v[tok].detach().cpu().numpy()
        Y = np.zeros((dim,))
        for tok in tokens2:
            Y += w2v[tok].detach().cpu().numpy()
        # NOTE: a near-zero X skips the line while a near-zero Y records 0 —
        # asymmetric, but preserved from the original metric.
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        X = (np.array(X) / np.linalg.norm(X))
        Y = (np.array(Y) / np.linalg.norm(Y))
        # The extra norm divisions below are redundant (X and Y are already
        # unit vectors) but kept to reproduce the original arithmetic exactly.
        o = ((np.dot(X, Y.T) / np.linalg.norm(X)) / np.linalg.norm(Y))
        scores.append(o)
    scores = np.asarray(scores)
    return (np.mean(scores), ((1.96 * np.std(scores)) / float(len(scores))), np.std(scores))
|
def prepare_discriminator_data(pos_samples, neg_samples, gpu=False):
    """Build a shuffled discriminator batch from real and generated samples.

    Args:
        pos_samples: pos_size x seq_len tensor of target (real) sequences.
        neg_samples: neg_size x seq_len tensor of generator (fake) sequences.
        gpu: move the resulting tensors to CUDA when True.

    Returns:
        (inp, target): stacked LongTensor inputs of shape
        (pos_size + neg_size) x seq_len and matching 1/0 labels,
        jointly shuffled.
    """
    num_pos = pos_samples.size()[0]
    num_neg = neg_samples.size()[0]
    inp = torch.cat((pos_samples, neg_samples), 0).type(torch.LongTensor)
    # Label reals 1 and fakes 0, then shuffle inputs and labels together.
    target = torch.ones(num_pos + num_neg)
    target[num_pos:] = 0
    shuffle = torch.randperm(target.size()[0])
    inp = Variable(inp[shuffle])
    target = Variable(target[shuffle])
    if gpu:
        inp = inp.cuda()
        target = target.cuda()
    return (inp, target)
|
def load_data(path='dataset.pickle'):
    """Load the cached training dataset, building and caching it on first use.

    When `path` does not exist yet, the dataset is built from DPCorpus and
    pickled there for subsequent runs. Returns a DPDataLoader over it.
    """
    if os.path.isfile(path):
        with open(path, 'rb') as handle:
            train_dataset = pickle.load(handle)
    else:
        corpus = DPCorpus(vocabulary_limit=VOCAB_SIZE)
        train_dataset = corpus.get_train_dataset(min_reply_length=MIN_SEQ_LEN, max_reply_length=MAX_SEQ_LEN)
        with open(path, 'wb') as handle:
            pickle.dump(train_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return DPDataLoader(train_dataset, batch_size=BATCH_SIZE)
|
class ReplayMemory():
    """Fixed-capacity FIFO buffer of transitions with random sampling."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []

    def push(self, transition):
        """Append a transition, evicting the oldest entry when full."""
        if len(self.memory) == self.capacity:
            del self.memory[0]
        self.memory.append(transition)

    def push_batch(self, transition):
        """Append a batch as a single entry, evicting the oldest when full.

        NOTE(review): identical to push() — the batch is stored whole, not
        unpacked element-wise; confirm whether that is intentional.
        """
        if len(self.memory) == self.capacity:
            del self.memory[0]
        self.memory.append(transition)

    def sample(self, batch_size):
        """Return `batch_size` transitions sampled with replacement."""
        picks = np.random.randint(0, len(self.memory), size=batch_size)
        return [self.memory[p] for p in picks]

    def __len__(self):
        return len(self.memory)
|
class Attention(nn.Module):
    """Dot-product attention over encoder context for decoder outputs.

    Scores each decoder output position against every context position,
    softmax-normalizes the scores, mixes the context accordingly, and blends
    the mix with the original output through a linear layer plus tanh.

    Args:
        dim (int): number of features in the decoder output.

    Inputs:
        output (batch, out_len, dim): decoder output features.
        context (batch, in_len, dim): encoded input features.

    Outputs:
        output (batch, out_len, dim): attended output features.
        attn (batch, out_len, in_len): attention weights.
    """

    def __init__(self, dim):
        super(Attention, self).__init__()
        # Projects the concatenated [mix; output] (2*dim) back down to dim.
        self.linear_out = nn.Linear(dim * 2, dim)
        self.mask = None

    def set_mask(self, mask):
        """Set indices whose attention scores are forced to -inf."""
        self.mask = mask

    def forward(self, output, context):
        batch_size = output.size(0)
        dim = output.size(2)
        in_len = context.size(1)
        # Raw alignment scores: (b, out_len, dim) @ (b, dim, in_len).
        scores = torch.bmm(output, context.transpose(1, 2))
        if self.mask is not None:
            scores.data.masked_fill_(self.mask, -float('inf'))
        attn = F.softmax(scores.view(-1, in_len), dim=1).view(batch_size, -1, in_len)
        # Attention-weighted sum of the context vectors.
        mix = torch.bmm(attn, context)
        # Blend attended context with the decoder output.
        combined = torch.cat((mix, output), dim=2)
        output = torch.tanh(self.linear_out(combined.view(-1, 2 * dim))).view(batch_size, -1, dim)
        return (output, attn)
|
class BaseRNN(nn.Module):
    """Shared base class for the encoder/decoder RNN modules.

    Stores the common hyper-parameters, builds the input dropout layer and
    resolves the recurrent cell class ('lstm' -> nn.LSTM, 'gru' -> nn.GRU).
    Subclasses must override forward().

    Raises:
        ValueError: if `rnn_cell` names an unsupported cell type.
    """
    # Special vocabulary symbols shared by subclasses.
    SYM_MASK = 'MASK'
    SYM_EOS = 'EOS'

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell):
        super(BaseRNN, self).__init__()
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        self.rnn_cell = {'lstm': nn.LSTM, 'gru': nn.GRU}.get(rnn_cell.lower())
        if self.rnn_cell is None:
            raise ValueError('Unsupported RNN Cell: {0}'.format(rnn_cell))
        self.dropout_p = dropout_p

    def forward(self, *args, **kwargs):
        raise NotImplementedError()
|
class EncoderRNN(BaseRNN):
    """RNN encoder: embeds token IDs and runs them through a recurrent cell.

    Args:
        vocab_size (int): size of the vocabulary.
        max_len (int): maximum allowed input sequence length.
        hidden_size (int): number of features in the hidden state (also used
            as the embedding dimension).
        input_dropout_p (float): dropout on the embedded input (default 0).
        dropout_p (float): dropout between recurrent layers (default 0).
        n_layers (int): number of recurrent layers (default 1).
        bidirectional (bool): bidirectional encoder when True (default False).
        rnn_cell (str): 'gru' or 'lstm' (default 'gru').
        variable_lengths (bool): pack/unpack variable-length batches.
        embedding (torch.Tensor): optional pre-trained embedding table of
            shape (vocab_size, hidden_size).
        update_embedding (bool): train the provided embedding when True.

    forward(input_var, input_lengths=None) returns:
        output (batch, seq_len, hidden_size): encoded features.
        hidden (num_layers * num_directions, batch, hidden_size): final state.
    """

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1, bidirectional=False, rnn_cell='gru', variable_lengths=False, embedding=None, update_embedding=True):
        super(EncoderRNN, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell)
        self.variable_lengths = variable_lengths
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        if embedding is not None:
            # Seed with the pre-trained table; optionally freeze it.
            self.embedding.weight = nn.Parameter(embedding)
            self.embedding.weight.requires_grad = update_embedding
        self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout=dropout_p)

    def forward(self, input_var, input_lengths=None):
        """Encode a (batch, seq_len) tensor of token IDs.

        `input_lengths` must be supplied when `variable_lengths` is set so
        padded positions can be excluded via pack_padded_sequence.
        """
        embedded = self.input_dropout(self.embedding(input_var))
        if not self.variable_lengths:
            return self.rnn(embedded)
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
        (output, hidden) = self.rnn(packed)
        (output, _) = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        return (output, hidden)
|
class Seq2seq(nn.Module):
    """Encoder-decoder wrapper with configurable components.

    Runs the encoder over the input sequence, then hands its outputs and
    final hidden state to the decoder together with the optional targets,
    teacher-forcing ratio and sampling flag. The decoder's return value is
    passed through unchanged.
    """

    def __init__(self, encoder, decoder, decode_function=F.log_softmax):
        super(Seq2seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.decode_function = decode_function

    def flatten_parameters(self):
        # Compact the RNN weight memory (e.g. after checkpoint loading).
        self.encoder.rnn.flatten_parameters()
        self.decoder.rnn.flatten_parameters()

    def forward(self, input_variable, input_lengths=None, target_variable=None, teacher_forcing_ratio=0, sample=False):
        (encoder_outputs, encoder_hidden) = self.encoder(input_variable, input_lengths)
        return self.decoder(inputs=target_variable, encoder_hidden=encoder_hidden, encoder_outputs=encoder_outputs, function=self.decode_function, teacher_forcing_ratio=teacher_forcing_ratio, sample=sample)
|
def parse_args():
    """Parse CLI options for testing/evaluating an MMDet model.

    Also mirrors --local_rank into the LOCAL_RANK environment variable and
    folds the deprecated --options into --eval-options.
    """
    cli = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
    cli.add_argument('--out', help='output result file in pickle format')
    cli.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    cli.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    cli.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    cli.add_argument('--show', action='store_true', help='show results')
    cli.add_argument('--show-dir', help='directory where painted images will be saved')
    cli.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    cli.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    cli.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    cli.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    cli.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    cli.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    cli.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    if parsed.options and parsed.eval_options:
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if parsed.options:
        # Keep accepting the deprecated flag but warn and remap it.
        warnings.warn('--options is deprecated in favor of --eval-options')
        parsed.eval_options = parsed.options
    return parsed
|
def main():
    """Run MMDetection test/eval: build the dataset and model from the config,
    optionally re-apply a Fisher pruning hook, run single- or multi-GPU
    inference, then dump/format/evaluate the results per the CLI flags.
    """
    args = parse_args()
    # At least one action must be requested, otherwise there is nothing to do.
    assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Import any extra modules the config asks for (custom plugins etc.).
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # The checkpoint supplies all weights: clear pretrained-backbone settings,
    # including RFP backbones nested inside the neck config(s).
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    # Put the test dataset(s) into test mode; batched inference needs the
    # pipeline to keep images as arrays (replace_ImageToTensor).
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if (samples_per_gpu > 1):
            cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max([ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if (samples_per_gpu > 1):
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    (rank, _) = get_dist_info()
    # Only rank 0 creates the work dir and the timestamped eval-metrics file.
    if ((args.work_dir is not None) and (rank == 0)):
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    # Re-apply the Fisher pruning hook so the architecture matches the pruned
    # checkpoint before weights are loaded.
    if ('custom_hooks' in cfg):
        for hook in cfg.custom_hooks:
            if (hook.type == 'FisherPruningHook'):
                hook_cfg = hook.copy()
                hook_cfg.pop('priority', None)
                from mmcv.runner.hooks import HOOKS
                hook_cls = HOOKS.get(hook_cfg['type'])
                if hasattr(hook_cls, 'after_build_model'):
                    pruning_hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
                    pruning_hook.after_build_model(model)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # Prefer class names stored in the checkpoint; fall back to the dataset's.
    if ('CLASSES' in checkpoint.get('meta', {})):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # Result handling (dump/format/evaluate) happens on rank 0 only.
    if (rank == 0):
        if args.out:
            print(f'''
writing results to {args.out}''')
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.eval_options is None) else args.eval_options)
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # Strip training-time evaluation options that evaluate() rejects.
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if ((args.work_dir is not None) and (rank == 0)):
                mmcv.dump(metric_dict, json_file)
|
def parse_args():
    """Parse CLI options for training a detector.

    Also mirrors --local_rank into the LOCAL_RANK environment variable and
    folds the deprecated --options into --cfg-options.
    """
    cli = argparse.ArgumentParser(description='Train a detector')
    cli.add_argument('config', help='train config file path')
    cli.add_argument('--work-dir', help='the dir to save logs and models')
    cli.add_argument('--resume-from', help='the checkpoint file to resume from')
    cli.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    gpu_group = cli.add_mutually_exclusive_group()
    gpu_group.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    gpu_group.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    cli.add_argument('--seed', type=int, default=None, help='random seed')
    cli.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    cli.add_argument('--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
    cli.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    cli.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    if parsed.options and parsed.cfg_options:
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
    if parsed.options:
        # Keep accepting the deprecated flag but warn and remap it.
        warnings.warn('--options is deprecated in favor of --cfg-options')
        parsed.cfg_options = parsed.options
    return parsed
|
def main():
    """Train an MMDetection detector: resolve config and work dir, set up
    distributed training and logging, build the model/datasets (optionally
    applying a Fisher pruning hook), then launch training.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Import any extra modules the config asks for (custom plugins etc.).
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Work dir priority: CLI flag > config value > ./work_dirs/<config stem>.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # In distributed mode each process drives one GPU.
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # Persist the resolved config next to the logs for reproducibility.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # `meta` is stored alongside checkpoints: environment, config, seed, etc.
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    # Apply the Fisher pruning hook to the freshly built model so training
    # starts from the pruned architecture.
    if ('custom_hooks' in cfg):
        for hook in cfg.custom_hooks:
            if (hook.type == 'FisherPruningHook'):
                hook_cfg = hook.copy()
                hook_cfg.pop('priority', None)
                from mmcv.runner.hooks import HOOKS
                hook_cls = HOOKS.get(hook_cfg['type'])
                if hasattr(hook_cls, 'after_build_model'):
                    pruning_hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
                    pruning_hook.after_build_model(model)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow (train + val) also needs the val dataset, run
    # through the training pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    # Record the mmdet version and class names inside every checkpoint.
    if (cfg.checkpoint_config is not None):
        cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
|
class BMAML():
    """Bayesian MAML with the chaser (follower/leader) meta-loss, as a TF1 graph.

    Each task is adapted by running SVGD updates over `num_particles` copies
    ("particles") of the network weights.  The follower runs a few SVGD steps
    on the follower data; the leader continues from the follower's particles
    with additional steps on extra data.  The meta-loss is the squared distance
    between follower particles and stop-gradient leader particles, so the
    initialization is trained to "chase" the better-adapted leader.
    """
    def __init__(self, dim_input, dim_output, dim_hidden=32, num_layers=4, num_particles=2, max_test_step=5):
        # Network / particle configuration.
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers
        self.num_particles = num_particles
        # Learning rates default to FLAGS but can be overridden via feed_dict
        # (test-time code feeds follow_lr explicitly).
        self.follow_lr = tf.placeholder_with_default(input=FLAGS.follow_lr, name='follow_lr', shape=[])
        self.leader_lr = tf.placeholder_with_default(input=FLAGS.leader_lr, name='leader_lr', shape=[])
        self.meta_lr = tf.placeholder_with_default(input=FLAGS.meta_lr, name='meta_lr', shape=[])
        # Maximum number of inner steps evaluated at test time.
        self.max_test_step = max_test_step
        # Bayesian network supplying weight construction and the forward pass.
        self.bnn = BNN(dim_input=self.dim_input, dim_output=self.dim_output, dim_hidden=self.dim_hidden, num_layers=self.num_layers, is_bnn=True)
        self.construct_network_weights = self.bnn.construct_network_weights
        self.forward_network = self.bnn.forward_network
        # Per-task data placeholders (shapes unspecified; fed batch-by-batch).
        self.follow_x = tf.placeholder(dtype=tf.float32, name='follow_x')
        self.follow_y = tf.placeholder(dtype=tf.float32, name='follow_y')
        self.leader_x = tf.placeholder(dtype=tf.float32, name='leader_x')
        self.leader_y = tf.placeholder(dtype=tf.float32, name='leader_y')
        self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
        self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
        # Lazily created by construct_model() so that the eval graph can reuse
        # the variables created by the training graph.
        self.W_network_particles = None
    def construct_model(self, is_training=True):
        """Build the (training or evaluation) computation graph.

        Called twice in main(): first with is_training=True (creates the
        particle variables and the meta-train op), then with is_training=False
        (reuses the same variables and exposes eval_* tensors).
        """
        print('start model construction')
        with tf.variable_scope('model', reuse=None) as training_scope:
            if (is_training or (self.W_network_particles is None)):
                # One OrderedDict of weight variables per particle.
                self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx)) for p_idx in range(self.num_particles)]
            else:
                # Evaluation graph shares the already-created variables.
                training_scope.reuse_variables()
            if is_training:
                max_follow_step = FLAGS.follow_step
            else:
                # At test time allow evaluating more inner steps than training used.
                max_follow_step = max(FLAGS.follow_step, self.max_test_step)
            def fast_learn_one_task(inputs):
                # Inner loop for a single task (mapped over the task batch by tf.map_fn).
                [follow_x, leader_x, valid_x, follow_y, leader_y, valid_y] = inputs
                # Fresh copies of the particle dicts so SVGD updates do not touch the variables.
                WW_follow = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in self.W_network_particles]
                # Follower: max_follow_step SVGD steps on the follower data.
                [step_follow_weight_var, step_follow_data_var, step_follow_train_llik, step_follow_valid_llik, step_follow_train_loss, step_follow_valid_loss, step_follow_train_pred, step_follow_valid_pred, step_follow_weight_lprior, step_follow_gamma_lprior, step_follow_lambda_lprior, step_follow_lpost, step_follow_kernel_h, WW_follow] = self.update_particle(train_x=follow_x, train_y=follow_y, valid_x=valid_x, valid_y=valid_y, WW=WW_follow, num_updates=max_follow_step, lr=self.follow_lr)
                # Leader: continue from the follower's particles on the leader data.
                WW_leader = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in WW_follow]
                [step_leader_weight_var, step_leader_data_var, step_leader_train_llik, step_leader_valid_llik, step_leader_train_loss, step_leader_valid_loss, step_leader_train_pred, step_leader_valid_pred, step_leader_weight_lprior, step_leader_gamma_lprior, step_leader_lambda_lprior, step_leader_lpost, step_leader_kernel_h, WW_leader] = self.update_particle(train_x=leader_x, train_y=leader_y, valid_x=valid_x, valid_y=valid_y, WW=WW_leader, num_updates=FLAGS.leader_step, lr=self.leader_lr)
                # Chaser loss: squared distance from follower particles to the
                # (stop-gradient) leader particles, excluding the log_lambda /
                # log_gamma noise parameters.
                meta_loss = []
                for p_idx in range(self.num_particles):
                    p_dist_list = []
                    for name in WW_leader[p_idx].keys():
                        if ('log' in name):
                            continue
                        p_dist = tf.square((WW_follow[p_idx][name] - tf.stop_gradient(WW_leader[p_idx][name])))
                        p_dist = tf.reduce_sum(p_dist)
                        p_dist_list.append(p_dist)
                    meta_loss.append(tf.reduce_sum(p_dist_list))
                meta_loss = tf.reduce_sum(meta_loss)
                return [step_follow_weight_lprior, step_follow_gamma_lprior, step_follow_lambda_lprior, step_follow_train_llik, step_follow_valid_llik, step_follow_train_loss, step_follow_valid_loss, step_follow_train_pred, step_follow_valid_pred, step_follow_weight_var, step_follow_data_var, step_follow_lpost, step_follow_kernel_h, step_leader_weight_lprior, step_leader_gamma_lprior, step_leader_lambda_lprior, step_leader_train_llik, step_leader_valid_llik, step_leader_train_loss, step_leader_valid_loss, step_leader_train_pred, step_leader_valid_pred, step_leader_weight_var, step_leader_data_var, step_leader_lpost, step_leader_kernel_h, meta_loss]
            # tf.map_fn needs the dtype structure of fast_learn_one_task's 27
            # outputs: per-step lists for follower (max_follow_step based) and
            # leader (FLAGS.leader_step based), plus the scalar meta_loss.
            out_dtype = [([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * max_follow_step), ([tf.float32] * max_follow_step), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * FLAGS.leader_step), ([tf.float32] * FLAGS.leader_step), tf.float32]
            # Run the inner loop independently for every task in the batch.
            result = tf.map_fn(fast_learn_one_task, elems=[self.follow_x, self.leader_x, self.valid_x, self.follow_y, self.leader_y, self.valid_y], dtype=out_dtype, parallel_iterations=FLAGS.num_tasks)
            [full_step_follow_weight_lprior, full_step_follow_gamma_lprior, full_step_follow_lambda_lprior, full_step_follow_train_llik, full_step_follow_valid_llik, full_step_follow_train_loss, full_step_follow_valid_loss, full_step_follow_train_pred, full_step_follow_valid_pred, full_step_follow_weight_var, full_step_follow_data_var, full_step_follow_lpost, full_step_follow_kernel_h, full_step_leader_weight_lprior, full_step_leader_gamma_lprior, full_step_leader_lambda_lprior, full_step_leader_train_llik, full_step_leader_valid_llik, full_step_leader_train_loss, full_step_leader_valid_loss, full_step_leader_train_pred, full_step_leader_valid_pred, full_step_leader_weight_var, full_step_leader_data_var, full_step_leader_lpost, full_step_leader_kernel_h, full_meta_loss] = result
            if is_training:
                # Training graph: expose per-step diagnostics averaged over the
                # task batch, plus the meta-train op.
                self.total_follow_weight_lprior = [tf.reduce_mean(full_step_follow_weight_lprior[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_gamma_lprior = [tf.reduce_mean(full_step_follow_gamma_lprior[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_lambda_lprior = [tf.reduce_mean(full_step_follow_lambda_lprior[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_train_llik = [tf.reduce_mean(full_step_follow_train_llik[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_valid_llik = [tf.reduce_mean(full_step_follow_valid_llik[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_train_loss = [tf.reduce_mean(full_step_follow_train_loss[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_valid_loss = [tf.reduce_mean(full_step_follow_valid_loss[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_weight_var = [tf.reduce_mean(full_step_follow_weight_var[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_data_var = [tf.reduce_mean(full_step_follow_data_var[j]) for j in range((FLAGS.follow_step + 1))]
                self.total_follow_lpost = [tf.reduce_mean(full_step_follow_lpost[j]) for j in range(FLAGS.follow_step)]
                self.total_follow_kernel_h = [tf.reduce_mean(full_step_follow_kernel_h[j]) for j in range(FLAGS.follow_step)]
                self.total_leader_weight_lprior = [tf.reduce_mean(full_step_leader_weight_lprior[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_gamma_lprior = [tf.reduce_mean(full_step_leader_gamma_lprior[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_lambda_lprior = [tf.reduce_mean(full_step_leader_lambda_lprior[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_train_llik = [tf.reduce_mean(full_step_leader_train_llik[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_valid_llik = [tf.reduce_mean(full_step_leader_valid_llik[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_train_loss = [tf.reduce_mean(full_step_leader_train_loss[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_valid_loss = [tf.reduce_mean(full_step_leader_valid_loss[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_weight_var = [tf.reduce_mean(full_step_leader_weight_var[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_data_var = [tf.reduce_mean(full_step_leader_data_var[j]) for j in range((FLAGS.leader_step + 1))]
                self.total_leader_lpost = [tf.reduce_mean(full_step_leader_lpost[j]) for j in range(FLAGS.leader_step)]
                self.total_leader_kernel_h = [tf.reduce_mean(full_step_leader_kernel_h[j]) for j in range(FLAGS.leader_step)]
                self.total_meta_loss = tf.reduce_mean(full_meta_loss)
                self.total_train_z_list = full_step_follow_train_pred
                self.total_valid_z_list = full_step_follow_valid_pred
                # Collect every particle's weight variables for the outer update.
                update_params_list = []
                update_params_name = []
                for p_idx in range(self.num_particles):
                    for name in self.W_network_particles[0].keys():
                        update_params_name.append([p_idx, name])
                        update_params_list.append(self.W_network_particles[p_idx][name])
                optimizer = tf.train.AdamOptimizer(learning_rate=self.meta_lr)
                gv_list = optimizer.compute_gradients(loss=self.total_meta_loss, var_list=update_params_list)
                if (FLAGS.out_grad_clip > 0):
                    # Element-wise clipping of the outer gradients.
                    gv_list = [(tf.clip_by_value(grad, (- FLAGS.out_grad_clip), FLAGS.out_grad_clip), var) for (grad, var) in gv_list]
                self.metatrain_op = optimizer.apply_gradients(gv_list)
            else:
                # Evaluation graph: only follower-side metrics, up to max_follow_step.
                self.eval_train_llik = [tf.reduce_mean(full_step_follow_train_llik[j]) for j in range((max_follow_step + 1))]
                self.eval_train_loss = [tf.reduce_mean(full_step_follow_train_loss[j]) for j in range((max_follow_step + 1))]
                self.eval_valid_llik = [tf.reduce_mean(full_step_follow_valid_llik[j]) for j in range((max_follow_step + 1))]
                self.eval_valid_loss = [tf.reduce_mean(full_step_follow_valid_loss[j]) for j in range((max_follow_step + 1))]
                self.eval_train_z_list = full_step_follow_train_pred
                self.eval_valid_z_list = full_step_follow_valid_pred
        print('end of model construction')
    def kernel(self, particle_tensor, h=(- 1)):
        """RBF kernel matrix, its gradient term, and bandwidth for SVGD.

        particle_tensor is [num_particles, flat_weight_dim].  When h == -1 the
        bandwidth is chosen by a median/mean heuristic selected by FLAGS.kernel.
        Returns (kernel_matrix, grad_kernel, h).
        """
        euclidean_dists = tf_utils.pdist(particle_tensor)
        pairwise_dists = (tf_utils.squareform(euclidean_dists) ** 2)
        if (h == (- 1)):
            if (FLAGS.kernel == 'org'):
                # Median of squared pairwise distances.
                mean_dist = tf_utils.median(pairwise_dists)
                h = (mean_dist / math.log(self.num_particles))
                h = tf.stop_gradient(h)
            elif (FLAGS.kernel == 'med'):
                # Squared median of (unsquared) distances.
                mean_dist = (tf_utils.median(euclidean_dists) ** 2)
                h = (mean_dist / math.log(self.num_particles))
                h = tf.stop_gradient(h)
            else:
                # Mean-based heuristic; note: no stop_gradient in this branch.
                mean_dist = (tf.reduce_mean(euclidean_dists) ** 2)
                h = (mean_dist / math.log(self.num_particles))
        kernel_matrix = tf.exp(((- pairwise_dists) / h))
        # NOTE: keep_dims is the deprecated TF1 spelling of keepdims.
        kernel_sum = tf.reduce_sum(kernel_matrix, axis=1, keep_dims=True)
        # grad_kernel[i] = sum_j k(x_j, x_i) * (x_i - x_j) / h  (repulsive term).
        grad_kernel = (- tf.matmul(kernel_matrix, particle_tensor))
        grad_kernel += (particle_tensor * kernel_sum)
        grad_kernel /= h
        return (kernel_matrix, grad_kernel, h)
    def diclist2tensor(self, WW):
        """Flatten a list of per-particle weight dicts into a [num_particles, dim] tensor."""
        list_m = []
        for Wm_dic in WW:
            W_vec = tf.concat([tf.reshape(ww, [(- 1)]) for ww in Wm_dic.values()], axis=0)
            list_m.append(W_vec)
        tensor = tf.stack(list_m)
        return tensor
    def tensor2diclist(self, tensor):
        """Inverse of diclist2tensor: rebuild one weight dict per particle row."""
        return [self.bnn.vec2dic(tensor[m]) for m in range(self.num_particles)]
    def update_particle(self, train_x, train_y, valid_x, valid_y, WW, num_updates, lr):
        """Run num_updates SVGD steps over the particle dicts in WW.

        Records per-step diagnostics (log-priors, log-likelihoods, losses,
        predictions, variances, kernel bandwidth, mean log-posterior) and
        returns them together with the updated particles.  Step s_idx ==
        num_updates only records metrics (no update).
        """
        # Per-step diagnostic buffers; metrics exist for num_updates+1 steps,
        # update-only quantities (lpost, kernel_h) for num_updates steps.
        step_weight_lprior = ([None] * (num_updates + 1))
        step_lambda_lprior = ([None] * (num_updates + 1))
        step_gamma_lprior = ([None] * (num_updates + 1))
        step_train_llik = ([None] * (num_updates + 1))
        step_valid_llik = ([None] * (num_updates + 1))
        step_train_loss = ([None] * (num_updates + 1))
        step_valid_loss = ([None] * (num_updates + 1))
        step_train_pred = ([None] * (num_updates + 1))
        step_valid_pred = ([None] * (num_updates + 1))
        step_weight_var = ([None] * (num_updates + 1))
        step_data_var = ([None] * (num_updates + 1))
        step_kernel_h = ([None] * num_updates)
        step_lpost = ([None] * num_updates)
        for s_idx in range((num_updates + 1)):
            train_z_list = []
            valid_z_list = []
            train_llik_list = []
            valid_llik_list = []
            weight_lprior_list = []
            lambda_lprior_list = []
            gamma_lprior_list = []
            weight_var_list = []
            data_var_list = []
            for p_idx in range(self.num_particles):
                # Forward pass and (per-particle) likelihood / prior terms.
                train_z = self.forward_network(x=train_x, W_dict=WW[p_idx])
                valid_z = self.forward_network(x=valid_x, W_dict=WW[p_idx])
                train_llik_list.append(self.bnn.log_likelihood_data(predict_y=train_z, target_y=train_y, log_gamma=WW[p_idx]['log_gamma']))
                valid_llik_list.append(self.bnn.log_likelihood_data(predict_y=valid_z, target_y=valid_y, log_gamma=WW[p_idx]['log_gamma']))
                train_z_list.append(train_z)
                valid_z_list.append(valid_z)
                (weight_lprior, gamma_lprior, lambda_lprior) = self.bnn.log_prior_weight(W_dict=WW[p_idx])
                weight_lprior_list.append(weight_lprior)
                lambda_lprior_list.append(lambda_lprior)
                gamma_lprior_list.append(gamma_lprior)
                # Variances are the reciprocals of the precision parameters.
                weight_var_list.append(tf.reciprocal(tf.exp(WW[p_idx]['log_lambda'])))
                data_var_list.append(tf.reciprocal(tf.exp(WW[p_idx]['log_gamma'])))
            if (s_idx < num_updates):
                # --- one SVGD step ---
                WW_tensor = self.diclist2tensor(WW=WW)
                dWW = []
                for p_idx in range(self.num_particles):
                    # Unnormalized log-posterior of this particle.
                    lpost = weight_lprior_list[p_idx]
                    lpost += lambda_lprior_list[p_idx]
                    lpost += gamma_lprior_list[p_idx]
                    lpost += tf.reduce_sum(train_llik_list[p_idx])
                    dWp = tf.gradients(ys=lpost, xs=list(WW[p_idx].values()))
                    if (p_idx == 0):
                        step_lpost[s_idx] = []
                    step_lpost[s_idx].append(lpost)
                    if FLAGS.stop_grad:
                        # First-order approximation: cut second-order gradients.
                        dWp = [tf.stop_gradient(grad) for grad in dWp]
                    dWW.append(OrderedDict(zip(WW[p_idx].keys(), dWp)))
                step_lpost[s_idx] = tf.reduce_mean(step_lpost[s_idx])
                dWW_tensor = self.diclist2tensor(WW=dWW)
                # SVGD direction: kernel-weighted gradients plus repulsive term.
                [kernel_mat, grad_kernel, kernel_h] = self.kernel(particle_tensor=WW_tensor)
                dWW_tensor = tf.divide((tf.matmul(kernel_mat, dWW_tensor) + grad_kernel), self.num_particles)
                step_kernel_h[s_idx] = kernel_h
                dWW = self.tensor2diclist(tensor=dWW_tensor)
                for p_idx in range(self.num_particles):
                    param_names = []
                    param_vals = []
                    for key in list(WW[p_idx].keys()):
                        if (FLAGS.in_grad_clip > 0):
                            grad = tf.clip_by_value(dWW[p_idx][key], (- FLAGS.in_grad_clip), FLAGS.in_grad_clip)
                        else:
                            grad = dWW[p_idx][key]
                        param_names.append(key)
                        # Gradient ascent on the log-posterior (hence "+").
                        # log_lambda / log_gamma use a scaled learning rate.
                        if ('log' in key):
                            param_vals.append((WW[p_idx][key] + ((FLAGS.lambda_lr * lr) * grad)))
                        else:
                            param_vals.append((WW[p_idx][key] + (lr * grad)))
                    WW[p_idx] = OrderedDict(zip(param_names, param_vals))
            # Per-step metrics, averaged over particles.
            train_z = tf.reduce_mean(train_z_list, 0)
            valid_z = tf.reduce_mean(valid_z_list, 0)
            step_weight_lprior[s_idx] = tf.reduce_mean(weight_lprior_list)
            step_gamma_lprior[s_idx] = tf.reduce_mean(gamma_lprior_list)
            step_lambda_lprior[s_idx] = tf.reduce_mean(lambda_lprior_list)
            step_train_llik[s_idx] = tf.reduce_mean([tf.reduce_mean(train_llik) for train_llik in train_llik_list])
            step_valid_llik[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_llik) for valid_llik in valid_llik_list])
            step_train_loss[s_idx] = tf.reduce_mean(tf.square((train_z - train_y)))
            step_valid_loss[s_idx] = tf.reduce_mean(tf.square((valid_z - valid_y)))
            step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
            step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
            step_weight_var[s_idx] = tf.reduce_mean(weight_var_list)
            step_data_var[s_idx] = tf.reduce_mean(data_var_list)
        return [step_weight_var, step_data_var, step_train_llik, step_valid_llik, step_train_loss, step_valid_loss, step_train_pred, step_valid_pred, step_weight_lprior, step_gamma_lprior, step_lambda_lprior, step_lpost, step_kernel_h, WW]
|
def train(model, dataset, saver, sess, config_str):
    """Meta-train a BMAML model, logging diagnostics and saving the best model.

    Runs FLAGS.num_epochs epochs of meta-training, printing averaged follower /
    leader diagnostics every PRINT_INTERVAL iterations and running a held-out
    evaluation every TEST_PRINT_INTERVAL iterations.  Saves a checkpoint when
    the evaluation loss improves (only after iteration 10000).
    """
    experiment_dir = ((FLAGS.logdir + '/') + config_str)
    train_writer = tf.summary.FileWriter(experiment_dir, sess.graph)
    print('Done initializing, starting training.')
    num_iters_per_epoch = int((FLAGS.train_total_num_tasks / FLAGS.num_tasks))
    if (not FLAGS.finite):
        # Infinite task stream: one freshly sampled batch per "epoch".
        num_iters_per_epoch = 1
    # Accumulators for diagnostics, flushed every PRINT_INTERVAL iterations.
    follow_lpost = []
    follow_weight_lprior = []
    follow_gamma_lprior = []
    follow_lambda_lprior = []
    follow_train_llik = []
    follow_valid_llik = []
    follow_train_loss = []
    follow_valid_loss = []
    follow_weight_var = []
    follow_data_var = []
    follow_kernel_h = []
    leader_lpost = []
    leader_weight_lprior = []
    leader_gamma_lprior = []
    leader_lambda_lprior = []
    leader_train_llik = []
    leader_valid_llik = []
    leader_train_loss = []
    leader_valid_loss = []
    leader_weight_var = []
    leader_data_var = []
    leader_kernel_h = []
    meta_loss = []
    # Evaluation history (persisted to results.pkl and plotted).
    test_itr_list = []
    test_train_loss_list = []
    test_valid_loss_list = []
    best_test_loss = 1000.0
    best_test_iter = 0
    itr = 0
    for e_idx in range(FLAGS.num_epochs):
        for b_idx in range(num_iters_per_epoch):
            itr += 1
            [follow_x, leader_x, valid_x, follow_y, leader_y, valid_y] = dataset.generate_batch(is_training=True, batch_idx=None, inc_follow=True)
            # Exponential meta-LR decay; one decay "unit" per 1% of total iterations.
            meta_lr = (FLAGS.meta_lr * (FLAGS.decay_lr ** (float((itr - 1)) / float(((FLAGS.num_epochs * num_iters_per_epoch) / 100)))))
            feed_in = OrderedDict()
            feed_in[model.meta_lr] = meta_lr
            feed_in[model.follow_x] = follow_x
            feed_in[model.follow_y] = follow_y
            feed_in[model.leader_x] = leader_x
            feed_in[model.leader_y] = leader_y
            feed_in[model.valid_x] = valid_x
            feed_in[model.valid_y] = valid_y
            # Fetch order matters: result indices below assume this exact order
            # (metatrain_op is dropped by the [1:] slice).
            fetch_out = [model.metatrain_op, model.total_follow_lpost, model.total_follow_weight_lprior, model.total_follow_gamma_lprior, model.total_follow_lambda_lprior, model.total_follow_train_llik, model.total_follow_valid_llik, model.total_follow_train_loss, model.total_follow_valid_loss, model.total_follow_weight_var, model.total_follow_data_var, model.total_follow_kernel_h, model.total_leader_lpost, model.total_leader_weight_lprior, model.total_leader_gamma_lprior, model.total_leader_lambda_lprior, model.total_leader_train_llik, model.total_leader_valid_llik, model.total_leader_train_loss, model.total_leader_valid_loss, model.total_leader_weight_var, model.total_leader_data_var, model.total_leader_kernel_h, model.total_meta_loss]
            result = sess.run(fetch_out, feed_in)[1:]
            # result[0..10]: follower diagnostics, result[11..21]: leader, result[22]: meta loss.
            follow_lpost.append(result[0])
            follow_weight_lprior.append(result[1])
            follow_gamma_lprior.append(result[2])
            follow_lambda_lprior.append(result[3])
            follow_train_llik.append(result[4])
            follow_valid_llik.append(result[5])
            follow_train_loss.append(result[6])
            follow_valid_loss.append(result[7])
            follow_weight_var.append(result[8])
            follow_data_var.append(result[9])
            follow_kernel_h.append(result[10])
            leader_lpost.append(result[11])
            leader_weight_lprior.append(result[12])
            leader_gamma_lprior.append(result[13])
            leader_lambda_lprior.append(result[14])
            leader_train_llik.append(result[15])
            leader_valid_llik.append(result[16])
            leader_train_loss.append(result[17])
            leader_valid_loss.append(result[18])
            leader_weight_var.append(result[19])
            leader_data_var.append(result[20])
            leader_kernel_h.append(result[21])
            meta_loss.append(result[22])
            if ((itr % PRINT_INTERVAL) == 0):
                # Average the accumulated per-step vectors over iterations.
                follow_lpost = np.stack(follow_lpost).mean(axis=0)
                follow_weight_lprior = np.stack(follow_weight_lprior).mean(axis=0)
                follow_gamma_lprior = np.stack(follow_gamma_lprior).mean(axis=0)
                follow_lambda_lprior = np.stack(follow_lambda_lprior).mean(axis=0)
                follow_train_llik = np.stack(follow_train_llik).mean(axis=0)
                follow_valid_llik = np.stack(follow_valid_llik).mean(axis=0)
                follow_train_loss = np.stack(follow_train_loss).mean(axis=0)
                follow_valid_loss = np.stack(follow_valid_loss).mean(axis=0)
                follow_weight_var = np.stack(follow_weight_var).mean(axis=0)
                follow_data_var = np.stack(follow_data_var).mean(axis=0)
                follow_kernel_h = np.stack(follow_kernel_h).mean(axis=0)
                leader_lpost = np.stack(leader_lpost).mean(axis=0)
                leader_weight_lprior = np.stack(leader_weight_lprior).mean(axis=0)
                leader_gamma_lprior = np.stack(leader_gamma_lprior).mean(axis=0)
                leader_lambda_lprior = np.stack(leader_lambda_lprior).mean(axis=0)
                leader_train_llik = np.stack(leader_train_llik).mean(axis=0)
                leader_valid_llik = np.stack(leader_valid_llik).mean(axis=0)
                leader_train_loss = np.stack(leader_train_loss).mean(axis=0)
                leader_valid_loss = np.stack(leader_valid_loss).mean(axis=0)
                leader_weight_var = np.stack(leader_weight_var).mean(axis=0)
                leader_data_var = np.stack(leader_data_var).mean(axis=0)
                leader_kernel_h = np.stack(leader_kernel_h).mean(axis=0)
                meta_loss = np.stack(meta_loss).mean(axis=0)
                print('======================================')
                print('exp: ', config_str)
                print('epoch: ', e_idx, ' total iter: ', itr)
                print('--------------------------------------')
                print('follower')
                print('--------------------------------------')
                print('log-posterior: ', follow_lpost)
                print('weight-log-prior: ', follow_weight_lprior)
                print('gamma-log-prior: ', follow_gamma_lprior)
                print('lambda-log-prior: ', follow_lambda_lprior)
                print('train_llik: ', follow_train_llik)
                print('valid_llik: ', follow_valid_llik)
                print('train_loss: ', follow_train_loss)
                print('valid_loss: ', follow_valid_loss)
                print('- - - - - - - - - - - - - - - - - - - ')
                print('data var: ', follow_data_var)
                print('weight var: ', follow_weight_var)
                print('kernel_h: ', follow_kernel_h)
                print('--------------------------------------')
                print('leader')
                print('--------------------------------------')
                print('log-posterior: ', leader_lpost)
                print('weight-log-prior: ', leader_weight_lprior)
                print('gamma-log-prior: ', leader_gamma_lprior)
                print('lambda-log-prior: ', leader_lambda_lprior)
                print('train_llik: ', leader_train_llik)
                print('valid_llik: ', leader_valid_llik)
                print('train_loss: ', leader_train_loss)
                print('valid_loss: ', leader_valid_loss)
                print('- - - - - - - - - - - - - - - - - - - ')
                print('data var: ', leader_data_var)
                print('weight var: ', leader_weight_var)
                print('kernel_h: ', leader_kernel_h)
                print('--------------------------------------')
                print('meta_loss: ', meta_loss)
                print('meta_lr: ', meta_lr)
                print('--------------------------------------')
                print('best_test_loss: ', best_test_loss, '({})'.format(best_test_iter))
                # Reset the accumulators for the next logging window.
                follow_lpost = []
                follow_weight_lprior = []
                follow_gamma_lprior = []
                follow_lambda_lprior = []
                follow_train_llik = []
                follow_valid_llik = []
                follow_train_loss = []
                follow_valid_loss = []
                follow_weight_var = []
                follow_data_var = []
                follow_kernel_h = []
                leader_lpost = []
                leader_weight_lprior = []
                leader_gamma_lprior = []
                leader_lambda_lprior = []
                leader_train_llik = []
                leader_valid_llik = []
                leader_train_loss = []
                leader_valid_loss = []
                leader_weight_var = []
                leader_data_var = []
                leader_kernel_h = []
                meta_loss = []
            if ((itr % TEST_PRINT_INTERVAL) == 0):
                # Held-out evaluation over the whole test set, follower only.
                eval_train_llik_list = []
                eval_valid_llik_list = []
                eval_train_loss_list = []
                eval_valid_loss_list = []
                fetch_out = [model.eval_train_llik[:(FLAGS.follow_step + 1)], model.eval_valid_llik[:(FLAGS.follow_step + 1)], model.eval_train_loss[:(FLAGS.follow_step + 1)], model.eval_valid_loss[:(FLAGS.follow_step + 1)]]
                for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
                    [follow_x, _, valid_x, follow_y, _, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks), inc_follow=True)
                    feed_in = OrderedDict()
                    feed_in[model.follow_x] = follow_x
                    feed_in[model.follow_y] = follow_y
                    feed_in[model.valid_x] = valid_x
                    feed_in[model.valid_y] = valid_y
                    result = sess.run(fetch_out, feed_in)
                    eval_train_llik_list.append(result[0])
                    eval_valid_llik_list.append(result[1])
                    eval_train_loss_list.append(result[2])
                    eval_valid_loss_list.append(result[3])
                eval_train_llik = np.stack(eval_train_llik_list).mean(axis=0)
                eval_valid_llik = np.stack(eval_valid_llik_list).mean(axis=0)
                eval_train_loss = np.stack(eval_train_loss_list).mean(axis=0)
                eval_valid_loss = np.stack(eval_valid_loss_list).mean(axis=0)
                print('======================================')
                print('exp: ', config_str)
                print('epoch: ', e_idx, ' total iter: ', itr)
                print('--------------------------------------')
                print('Eval')
                print('--------------------------------------')
                print('train_llik: ', eval_train_llik)
                print('valid_llik: ', eval_valid_llik)
                print('train_loss: ', eval_train_loss)
                print('valid_loss: ', eval_valid_loss)
                # Track final-step losses over training, persist and plot them.
                test_itr_list.append(itr)
                test_train_loss_list.append(eval_train_loss[(- 1)])
                test_valid_loss_list.append(eval_valid_loss[(- 1)])
                # NOTE(review): file handle from open() is never closed
                # explicitly — consider a with-block.
                pkl.dump([test_itr_list, test_train_loss_list, test_valid_loss_list], open(((experiment_dir + '/') + 'results.pkl'), 'wb'))
                plt.title('valid loss during training')
                plt.plot(test_itr_list, test_valid_loss_list, '-', label='test loss')
                plt.savefig(((experiment_dir + '/') + 'test_loss.png'))
                plt.close()
                if (best_test_loss > test_valid_loss_list[(- 1)]):
                    best_test_loss = test_valid_loss_list[(- 1)]
                    best_test_iter = itr
                    # Only checkpoint after a warm-up period of 10000 iterations.
                    if (itr > 10000):
                        saver.save(sess, ((experiment_dir + '/') + 'best_model'))
|
def test(model, dataset, sess, inner_lr):
    """Evaluate the model on the full test set with a given inner-loop LR.

    Iterates over every test batch, runs the evaluation graph with the
    follower learning rate overridden by `inner_lr`, and returns the
    per-inner-step validation loss averaged over all batches.
    """
    num_batches = int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))
    per_batch_losses = []
    for batch_num in range(num_batches):
        [follow_x, _, valid_x, follow_y, _, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(batch_num * FLAGS.num_tasks), inc_follow=True)
        # Build the feed dict in one shot; follow_lr overrides the default.
        feed_in = OrderedDict([
            (model.follow_lr, inner_lr),
            (model.follow_x, follow_x),
            (model.follow_y, follow_y),
            (model.valid_x, valid_x),
            (model.valid_y, valid_y),
        ])
        per_batch_losses.append(sess.run(model.eval_valid_loss, feed_in))
    # Average across batches, keeping the per-step axis.
    return np.mean(np.array(per_batch_losses), axis=0)
|
def main():
    """Entry point: seed RNGs, build the experiment name, construct the model, train.

    Builds both the training graph and the (variable-sharing) evaluation graph
    before initializing variables, then starts meta-training if FLAGS.train.
    """
    # Seed all RNG sources for reproducibility.
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    tf.set_random_seed(FLAGS.seed)
    if (not os.path.exists(FLAGS.logdir)):
        os.makedirs(FLAGS.logdir)
    # (flag_name, short_label) pairs encoded into the experiment directory name.
    fname_args = []
    if FLAGS.finite:
        fname_args += [('train_total_num_tasks', 'SinusoidFinite')]
        fname_args += [('test_total_num_tasks', 'Test')]
    else:
        fname_args += [('test_total_num_tasks', 'SinusoidInfiniteTest')]
    fname_args += [('num_epochs', 'Epoch'), ('num_tasks', 'T'), ('seed', 'SEED'), ('noise_factor', 'Noise'), ('num_particles', 'M'), ('dim_hidden', 'H'), ('num_layers', 'L'), ('phase', 'PHS'), ('freq', 'FRQ'), ('few_k_shot', 'TrainK'), ('val_k_shot', 'ValidK'), ('in_grad_clip', 'InGrad'), ('out_grad_clip', 'OutGrad'), ('follow_step', 'FStep'), ('leader_step', 'LStep'), ('follow_lr', 'FLr'), ('leader_lr', 'LLr'), ('meta_lr', 'MetaLr'), ('decay_lr', 'DecLr'), ('lambda_lr', 'LmdLr'), ('kernel', 'Kernel'), ('a_g', 'AG'), ('b_g', 'BG'), ('a_l', 'AL'), ('b_l', 'BL')]
    config_str = utils.experiment_string2(FLAGS.flag_values_dict(), fname_args, separator='_')
    # Prefix with a truncated timestamp so experiment dirs sort chronologically.
    config_str = ((str(time.mktime(datetime.now().timetuple()))[:(- 2)] + '_BMAML_CHASE') + config_str)
    print(config_str)
    dataset = SinusoidGenerator()
    dim_output = dataset.dim_output
    dim_input = dataset.dim_input
    model = BMAML(dim_input=dim_input, dim_output=dim_output, dim_hidden=FLAGS.dim_hidden, num_layers=FLAGS.num_layers, num_particles=FLAGS.num_particles, max_test_step=10)
    # First call creates variables (training graph); second call reuses them
    # to expose the eval_* tensors.
    model.construct_model(is_training=True)
    model.construct_model(is_training=False)
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=1)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    if FLAGS.train:
        train(model, dataset, saver, sess, config_str)
|
class BNN(object):
    """Fully-connected (optionally Bayesian) network used by BMAML/EMAML.

    Holds no variables itself; `construct_network_weights` creates a weight
    dict, and the remaining methods are pure functions of such dicts.  When
    `is_bnn` is True the weight dict also carries `log_lambda` (weight
    precision) and `log_gamma` (observation-noise precision) and the
    likelihood/prior methods are available.
    """
    def __init__(self, dim_input, dim_output, dim_hidden, num_layers, is_bnn=True):
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers
        self.is_bnn = is_bnn
    def construct_network_weights(self, scope='network'):
        """Create one weight dict (OrderedDict) of TF variables under `scope`.

        Layout: w1/b1 (input layer), then w2/b2 ... w{L+1}/b{L+1}; when is_bnn,
        log_lambda and log_gamma are appended LAST — dicval2vec/vec2dic rely on
        that insertion order.
        """
        params = OrderedDict()
        fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)
        params['w1'] = tf.get_variable(name=(scope + '_w1'), shape=[self.dim_input, self.dim_hidden], initializer=fc_initializer)
        params['b1'] = tf.Variable(name=(scope + '_b1'), initial_value=tf.random_normal([self.dim_hidden], 0.0, 0.01))
        for l in range(self.num_layers):
            # Hidden layers keep dim_hidden; the last layer maps to dim_output.
            if (l < (self.num_layers - 1)):
                dim_output = self.dim_hidden
            else:
                dim_output = self.dim_output
            params['w{}'.format((l + 2))] = tf.get_variable(name=(scope + '_w{}'.format((l + 2))), shape=[self.dim_hidden, dim_output], initializer=fc_initializer)
            params['b{}'.format((l + 2))] = tf.Variable(name=(scope + '_b{}'.format((l + 2))), initial_value=tf.random_normal([dim_output], 0.0, 0.01))
        if self.is_bnn:
            # Log-precisions initialized near -log(mean), with small jitter.
            init_val = np.random.normal((- np.log(FLAGS.m_l)), 0.001, [1])
            params['log_lambda'] = tf.Variable(name=(scope + '_log_lambda'), initial_value=init_val, dtype=tf.float32)
            print('log_lambda: ', init_val)
            init_val = np.random.normal((- np.log(FLAGS.m_g)), 0.001, [1])
            params['log_gamma'] = tf.Variable(name=(scope + '_log_gamma'), initial_value=init_val, dtype=tf.float32)
            print('log_gamma: ', init_val)
        return params
    def log_likelihood_data(self, predict_y, target_y, log_gamma):
        """Element-wise Gaussian log-likelihood (up to a constant) of the data.

        Raises NotImplementedError when the network is not Bayesian.
        """
        if (not self.is_bnn):
            # BUG FIX: the exception was previously constructed but never raised,
            # so the guard was a silent no-op.
            raise NotImplementedError()
        error_y = (predict_y - target_y)
        log_lik_data = ((0.5 * log_gamma) - ((0.5 * tf.exp(log_gamma)) * tf.square(error_y)))
        return log_lik_data
    def log_prior_weight(self, W_dict):
        """Log-priors: Gaussian on weights, Gamma on the two log-precisions.

        Returns (log_prior_w, log_prior_gamma, log_prior_lambda).  Relies on
        log_lambda/log_gamma being the LAST two entries of W_dict.
        """
        if (not self.is_bnn):
            # BUG FIX: previously missing `raise` (no-op statement).
            raise NotImplementedError()
        W_vec = self.dicval2vec(W_dict)
        log_lambda = tf.reshape(W_vec[(- 2)], (1,))
        log_gamma = tf.reshape(W_vec[(- 1)], (1,))
        W_vec = W_vec[:(- 2)]
        num_params = tf.cast(W_vec.shape[0], tf.float32)
        # Gamma prior on exp(log_gamma); trailing "+ log_gamma" is the log-Jacobian
        # of the change of variables to log-space.
        log_prior_gamma = ((((FLAGS.a_g - 1) * log_gamma) - (FLAGS.b_g * tf.exp(log_gamma))) + log_gamma)
        W_diff = W_vec
        log_prior_w = (((0.5 * num_params) * log_lambda) - ((0.5 * tf.exp(log_lambda)) * tf.reduce_sum((W_diff ** 2))))
        log_prior_lambda = ((((FLAGS.a_l - 1) * log_lambda) - (FLAGS.b_l * tf.exp(log_lambda))) + log_lambda)
        return (log_prior_w, log_prior_gamma, log_prior_lambda)
    def mse_data(self, predict_y, target_y):
        """Per-example squared error, summed over the output dimension (axis 1)."""
        return tf.reduce_sum(tf.square((predict_y - target_y)), axis=1)
    def forward_network(self, x, W_dict):
        """Forward pass: ReLU on all layers except the (linear) output layer."""
        hid = tf.nn.relu((tf.matmul(x, W_dict['w1']) + W_dict['b1']))
        for l in range(self.num_layers):
            hid = (tf.matmul(hid, W_dict['w{}'.format((l + 2))]) + W_dict['b{}'.format((l + 2))])
            if (l < (self.num_layers - 1)):
                hid = tf.nn.relu(hid)
        return hid
    def list2vec(self, list_in):
        """Flatten a list of tensors into a single 1-D tensor."""
        return tf.concat([tf.reshape(ww, [(- 1)]) for ww in list_in], axis=0)
    def vec2dic(self, W_vec):
        """Inverse of dicval2vec: rebuild the weight dict from a flat vector."""
        if self.is_bnn:
            # Peel off the two log-precisions (stored last by construction).
            log_lambda = tf.reshape(W_vec[(- 2)], (1,))
            log_gamma = tf.reshape(W_vec[(- 1)], (1,))
            W_vec = W_vec[:(- 2)]
            W_dic = self.network_weight_vec2dict(W_vec)
            W_dic['log_lambda'] = log_lambda
            W_dic['log_gamma'] = log_gamma
        else:
            W_dic = self.network_weight_vec2dict(W_vec)
        return W_dic
    def network_weight_vec2dict(self, W_vec):
        """Unflatten network weights (w1/b1 ... w{L+1}/b{L+1}) from a flat vector."""
        W_dic = OrderedDict()
        dim_list = (([self.dim_input] + ([self.dim_hidden] * self.num_layers)) + [self.dim_output])
        for l in range((len(dim_list) - 1)):
            (dim_input, dim_output) = (dim_list[l], dim_list[(l + 1)])
            W_dic['w{}'.format((l + 1))] = tf.reshape(W_vec[:(dim_input * dim_output)], [dim_input, dim_output])
            W_dic['b{}'.format((l + 1))] = W_vec[(dim_input * dim_output):((dim_input * dim_output) + dim_output)]
            if (l < (len(dim_list) - 2)):
                # Advance the cursor past this layer's weights and biases.
                W_vec = W_vec[((dim_input * dim_output) + dim_output):]
        return W_dic
    def dicval2vec(self, dic):
        """Flatten all dict values (in insertion order) into one 1-D tensor."""
        return tf.concat([tf.reshape(val, [(- 1)]) for val in dic.values()], axis=0)
|
class EMAML():
    """Ensemble MAML baseline: independent MAML inner loops per particle.

    Unlike BMAML, particles are updated by plain gradient descent on the
    training MSE (no SVGD interaction, no Bayesian priors) and the meta-loss
    is the post-adaptation validation MSE summed over particles.
    """
    def __init__(self, dim_input, dim_output, dim_hidden=32, num_layers=4, num_particles=2, max_test_step=5):
        # Network / ensemble configuration.
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers
        self.num_particles = num_particles
        # Inner/outer learning rates, overridable through feed_dict.
        self.in_lr = tf.placeholder_with_default(input=FLAGS.in_lr, name='in_lr', shape=[])
        self.out_lr = tf.placeholder_with_default(input=FLAGS.out_lr, name='out_lr', shape=[])
        self.max_test_step = max_test_step
        # Plain (non-Bayesian) network: no log_lambda / log_gamma parameters.
        self.bnn = BNN(dim_input=self.dim_input, dim_output=self.dim_output, dim_hidden=self.dim_hidden, num_layers=self.num_layers, is_bnn=False)
        self.construct_network_weights = self.bnn.construct_network_weights
        self.forward_network = self.bnn.forward_network
        # Task data placeholders (shapes unspecified; fed per batch).
        self.train_x = tf.placeholder(dtype=tf.float32, name='train_x')
        self.train_y = tf.placeholder(dtype=tf.float32, name='train_y')
        self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
        self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
        # Created on the first construct_model() call; reused afterwards.
        self.W_network_particles = None
    def construct_model(self, is_training=True):
        """Build the (training or evaluation) computation graph.

        Called once with is_training=True (creates variables and metatrain_op)
        and once with is_training=False (reuses variables, exposes eval_*).
        """
        print('start model construction')
        with tf.variable_scope('model', reuse=None) as training_scope:
            if (is_training or (self.W_network_particles is None)):
                self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx)) for p_idx in range(self.num_particles)]
            else:
                # Evaluation graph shares the training variables.
                training_scope.reuse_variables()
            if is_training:
                max_update_step = FLAGS.in_step
            else:
                # Allow extra inner steps at test time.
                max_update_step = max(FLAGS.in_step, self.max_test_step)
            def fast_learn_one_task(inputs):
                # Inner adaptation for one task (mapped over the batch below).
                [train_x, valid_x, train_y, valid_y] = inputs
                meta_loss = []
                # Fresh copies so inner updates do not touch the variables.
                WW_update = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in self.W_network_particles]
                step_train_loss = ([None] * (max_update_step + 1))
                step_valid_loss = ([None] * (max_update_step + 1))
                step_train_pred = ([None] * (max_update_step + 1))
                step_valid_pred = ([None] * (max_update_step + 1))
                for s_idx in range((max_update_step + 1)):
                    train_z_list = []
                    valid_z_list = []
                    train_mse_list = []
                    valid_mse_list = []
                    for p_idx in range(FLAGS.num_particles):
                        train_z_list.append(self.forward_network(x=train_x, W_dict=WW_update[p_idx]))
                        valid_z_list.append(self.forward_network(x=valid_x, W_dict=WW_update[p_idx]))
                        train_mse_list.append(self.bnn.mse_data(predict_y=train_z_list[(- 1)], target_y=train_y))
                        valid_mse_list.append(self.bnn.mse_data(predict_y=valid_z_list[(- 1)], target_y=valid_y))
                        if (s_idx < max_update_step):
                            # One SGD step on this particle's training MSE.
                            particle_loss = tf.reduce_mean(train_mse_list[(- 1)])
                            dWp = tf.gradients(ys=particle_loss, xs=list(WW_update[p_idx].values()))
                            if FLAGS.stop_grad:
                                # First-order approximation: drop second-order terms.
                                dWp = [tf.stop_gradient(grad) for grad in dWp]
                            dWp = OrderedDict(zip(WW_update[p_idx].keys(), dWp))
                            param_names = []
                            param_vals = []
                            for key in list(WW_update[p_idx].keys()):
                                if (FLAGS.in_grad_clip > 0):
                                    grad = tf.clip_by_value(dWp[key], (- FLAGS.in_grad_clip), FLAGS.in_grad_clip)
                                else:
                                    grad = dWp[key]
                                param_names.append(key)
                                # Gradient descent (hence "-") on the loss.
                                param_vals.append((WW_update[p_idx][key] - (self.in_lr * grad)))
                            WW_update[p_idx] = OrderedDict(zip(param_names, param_vals))
                        else:
                            # Final step: accumulate post-adaptation validation MSE.
                            meta_loss.append(tf.reduce_mean(valid_mse_list[(- 1)]))
                    # Per-step metrics across the ensemble.
                    step_train_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(train_mse) for train_mse in train_mse_list])
                    step_valid_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_mse) for valid_mse in valid_mse_list])
                    step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
                    step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
                meta_loss = tf.reduce_sum(meta_loss)
                return [step_train_loss, step_valid_loss, step_train_pred, step_valid_pred, meta_loss]
            # dtype structure mirroring fast_learn_one_task's 5 outputs.
            out_dtype = [([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), tf.float32]
            result = tf.map_fn(fast_learn_one_task, elems=[self.train_x, self.valid_x, self.train_y, self.valid_y], dtype=out_dtype, parallel_iterations=FLAGS.num_tasks)
            full_step_train_loss = result[0]
            full_step_valid_loss = result[1]
            full_step_train_pred = result[2]
            full_step_valid_pred = result[3]
            full_meta_loss = result[4]
            if is_training:
                # Training graph: batch-averaged metrics and the meta-train op.
                self.total_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((FLAGS.in_step + 1))]
                self.total_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((FLAGS.in_step + 1))]
                self.total_meta_loss = tf.reduce_mean(full_meta_loss)
                self.total_train_z_list = full_step_train_pred
                self.total_valid_z_list = full_step_valid_pred
                update_params_list = []
                update_params_name = []
                for p in range(FLAGS.num_particles):
                    for name in self.W_network_particles[0].keys():
                        update_params_name.append([p, name])
                        update_params_list.append(self.W_network_particles[p][name])
                optimizer = tf.train.AdamOptimizer(learning_rate=self.out_lr)
                gv_list = optimizer.compute_gradients(loss=self.total_meta_loss, var_list=update_params_list)
                if (FLAGS.out_grad_clip > 0):
                    # Element-wise clipping of the outer gradients.
                    gv_list = [(tf.clip_by_value(grad, (- FLAGS.out_grad_clip), FLAGS.out_grad_clip), var) for (grad, var) in gv_list]
                self.metatrain_op = optimizer.apply_gradients(gv_list)
            else:
                # Evaluation graph: metrics for every inner step up to max_update_step.
                self.eval_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((max_update_step + 1))]
                self.eval_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((max_update_step + 1))]
                self.eval_train_z_list = full_step_train_pred
                self.eval_valid_z_list = full_step_valid_pred
        print('end of model construction')
|
def train(model, dataset, saver, sess, config_str):
    """Run the outer meta-training loop and periodically evaluate/checkpoint.

    Alternates meta-gradient updates with console logging every
    PRINT_INTERVAL iterations and an evaluation pass every
    TEST_PRINT_INTERVAL iterations.  The model achieving the best
    evaluation loss so far is saved to ``<logdir>/<config_str>/best_model``.

    Args:
        model: Constructed EMAML model exposing the placeholders
            (train_x/valid_x/train_y/valid_y, out_lr) and ops
            (metatrain_op, total_*_loss, eval_*_loss) fed below.
        dataset: Task sampler with a ``generate_batch(is_training, batch_idx)``
            method returning [train_x, valid_x, train_y, valid_y].
        saver: tf.train.Saver used to persist the best model.
        sess: Live TensorFlow session.
        config_str: Experiment identifier; also the log subdirectory name.
    """
    experiment_dir = ((FLAGS.logdir + '/') + config_str)
    # NOTE(review): train_writer is created for the graph dump but no
    # summaries are ever added to it below — confirm that is intentional.
    train_writer = tf.summary.FileWriter(experiment_dir, sess.graph)
    print('Done initializing, starting training.')
    num_iters_per_epoch = int((FLAGS.train_total_num_tasks / FLAGS.num_tasks))
    # Infinite-task regime: tasks are sampled on the fly, so an "epoch" is
    # a single iteration.
    if (not FLAGS.finite):
        num_iters_per_epoch = 1
    inner_train_loss = []
    inner_valid_loss = []
    meta_loss = []
    test_itr_list = []
    test_valid_loss_list = []
    best_test_loss = 1000.0  # sentinel upper bound; any real loss beats it
    best_test_iter = 0
    itr = 0
    for e_idx in range(FLAGS.num_epochs):
        for b_idx in range(num_iters_per_epoch):
            itr += 1
            [train_x, valid_x, train_y, valid_y] = dataset.generate_batch(is_training=True, batch_idx=None)
            # Exponentially decayed outer-loop learning rate (decays by a
            # factor of FLAGS.decay_lr every 1% of total iterations).
            out_lr = (FLAGS.out_lr * (FLAGS.decay_lr ** (float((itr - 1)) / float(((FLAGS.num_epochs * num_iters_per_epoch) / 100)))))
            feed_in = OrderedDict()
            feed_in[model.out_lr] = out_lr
            feed_in[model.train_x] = train_x
            feed_in[model.valid_x] = valid_x
            feed_in[model.train_y] = train_y
            feed_in[model.valid_y] = valid_y
            fetch_out = [model.metatrain_op, model.total_train_loss, model.total_valid_loss, model.total_meta_loss]
            # [1:] drops the (None) result of the train op, keeping the losses.
            result = sess.run(fetch_out, feed_in)[1:]
            inner_train_loss.append(result[0])
            inner_valid_loss.append(result[1])
            meta_loss.append(result[2])
            if ((itr % PRINT_INTERVAL) == 0):
                # Average accumulated losses since the last print, report, reset.
                inner_train_loss = np.stack(inner_train_loss).mean(axis=0)
                inner_valid_loss = np.stack(inner_valid_loss).mean(axis=0)
                meta_loss = np.stack(meta_loss).mean(axis=0)
                print('======================================')
                print('exp: ', config_str)
                print('epoch: ', e_idx, ' total iter: ', itr)
                print('--------------------------------------')
                print('train_loss: ', inner_train_loss)
                print('valid_loss: ', inner_valid_loss)
                print('--------------------------------------')
                print('meta_loss: ', meta_loss)
                print('out_lr: ', out_lr)
                print('--------------------------------------')
                print('best_test_loss: ', best_test_loss, '({})'.format(best_test_iter))
                inner_train_loss = []
                inner_valid_loss = []
                meta_loss = []
            if ((itr % TEST_PRINT_INTERVAL) == 0):
                # Full evaluation sweep over the held-out task set.
                eval_train_loss_list = []
                eval_valid_loss_list = []
                # Only the first (FLAGS.in_step + 1) adaptation steps are reported.
                fetch_out = [model.eval_train_loss[:(FLAGS.in_step + 1)], model.eval_valid_loss[:(FLAGS.in_step + 1)]]
                for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
                    [train_x, valid_x, train_y, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks))
                    feed_in = OrderedDict()
                    feed_in[model.train_x] = train_x
                    feed_in[model.valid_x] = valid_x
                    feed_in[model.train_y] = train_y
                    feed_in[model.valid_y] = valid_y
                    result = sess.run(fetch_out, feed_in)
                    eval_train_loss_list.append(result[0])
                    eval_valid_loss_list.append(result[1])
                eval_train_loss = np.stack(eval_train_loss_list).mean(axis=0)
                eval_valid_loss = np.stack(eval_valid_loss_list).mean(axis=0)
                print('======================================')
                print('Eval')
                print('--------------------------------------')
                print('exp: ', config_str)
                print('epoch: ', e_idx, ' total iter: ', itr)
                print('--------------------------------------')
                print('train_loss: ', eval_train_loss)
                print('valid_loss: ', eval_valid_loss)
                # Track the final-step validation loss for plotting/selection.
                test_itr_list.append(itr)
                test_valid_loss_list.append(eval_valid_loss[(- 1)])
                pkl.dump([test_itr_list, test_valid_loss_list], open(((experiment_dir + '/') + 'results.pkl'), 'wb'))
                plt.title('valid loss during training')
                plt.plot(test_itr_list, test_valid_loss_list, '-', label='test loss')
                plt.savefig(((experiment_dir + '/') + 'test_loss.png'))
                plt.close()
                # Checkpoint only on improvement of the evaluation loss.
                if (best_test_loss > test_valid_loss_list[(- 1)]):
                    best_test_loss = test_valid_loss_list[(- 1)]
                    best_test_iter = itr
                    saver.save(sess, ((experiment_dir + '/') + 'best_model'))
|
def test(model, dataset, sess, inner_lr):
    """Evaluate the model over the full held-out task set.

    Feeds every evaluation batch through ``model.eval_valid_loss`` with the
    given inner-loop learning rate and returns the per-step validation loss
    averaged over all batches.
    """
    num_batches = int(FLAGS.test_total_num_tasks / FLAGS.num_tasks)
    batch_losses = []
    for batch_num in range(num_batches):
        tr_x, va_x, tr_y, va_y = dataset.generate_batch(is_training=False, batch_idx=batch_num * FLAGS.num_tasks)
        feeds = OrderedDict([
            (model.in_lr, inner_lr),
            (model.train_x, tr_x),
            (model.valid_x, va_x),
            (model.train_y, tr_y),
            (model.valid_y, va_y),
        ])
        batch_losses.append(sess.run(model.eval_valid_loss, feeds))
    # Mean over batches, keeping the per-adaptation-step axis.
    return np.mean(np.array(batch_losses), axis=0)
|
def main():
    """Entry point: build the EMAML model for sinusoid regression and train it."""
    # Seed every RNG in play for reproducibility.
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    tf.set_random_seed(FLAGS.seed)
    if (not os.path.exists(FLAGS.logdir)):
        os.makedirs(FLAGS.logdir)
    # Assemble a descriptive experiment name from selected flags.
    fname_args = []
    if FLAGS.finite:
        fname_args += [('train_total_num_tasks', 'SinusoidFinite')]
        fname_args += [('test_total_num_tasks', 'Test')]
    else:
        fname_args += [('test_total_num_tasks', 'SinusoidInfiniteTest')]
    fname_args += [('num_epochs', 'Epoch'), ('num_tasks', 'T'), ('seed', 'SEED'), ('noise_factor', 'Noise'), ('num_particles', 'M'), ('dim_hidden', 'H'), ('num_layers', 'L'), ('phase', 'PHS'), ('freq', 'FRQ'), ('few_k_shot', 'TrainK'), ('val_k_shot', 'ValidK'), ('in_step', 'InStep'), ('in_grad_clip', 'InGrad'), ('out_grad_clip', 'OutGrad'), ('in_lr', 'InLr'), ('out_lr', 'OutLr'), ('decay_lr', 'DecLr')]
    config_str = utils.experiment_string2(FLAGS.flag_values_dict(), fname_args, separator='_')
    # Prefix with a unix timestamp so reruns get distinct log directories.
    config_str = ((str(time.mktime(datetime.now().timetuple()))[:(- 2)] + '_EMAML') + config_str)
    print(config_str)
    dataset = SinusoidGenerator(split_data=False)
    dim_output = dataset.dim_output
    dim_input = dataset.dim_input
    model = EMAML(dim_input=dim_input, dim_output=dim_output, dim_hidden=FLAGS.dim_hidden, num_layers=FLAGS.num_layers, num_particles=FLAGS.num_particles, max_test_step=10)
    # Build both the training graph and the (longer-horizon) evaluation graph.
    model.construct_model(is_training=True)
    model.construct_model(is_training=False)
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=1)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    tf.train.start_queue_runners()
    if FLAGS.train:
        train(model, dataset, saver, sess, config_str)
|
def pdist(tensor, metric='euclidean'):
    """Compute condensed pairwise distances between the rows of a 2-d tensor.

    Mirrors the vector output of ``scipy.spatial.distance.pdist``: for an
    (m, d) input, returns a 1-d tensor of the m-choose-2 upper-triangular
    distances in row-major order.

    Args:
        tensor: 2-d ``tf.Tensor``/``tf.Variable`` with a static row count.
        metric: Distance metric; only 'euclidean' is implemented.

    Raises:
        ValueError: If the input is not 2-d.
        NotImplementedError: For unsupported metrics.
    """
    assert isinstance(tensor, (tf.Variable, tf.Tensor)), 'tensor_utils.pdist: Input must be a `tensorflow.Tensor` instance.'
    if len(tensor.shape.as_list()) != 2:
        raise ValueError('tensor_utils.pdist: A 2-d tensor must be passed.')
    if metric != 'euclidean':
        raise NotImplementedError("tensor_utils.pdist: Metric '{metric}' currently not supported!".format(metric=metric))
    num_rows = tensor.shape.as_list()[0]
    pair_distances = []
    for row_i in range(num_rows):
        for row_j in range(row_i + 1, num_rows):
            pair_distances.append(tf.norm(tensor[row_i] - tensor[row_j]))
    return tf.convert_to_tensor(pair_distances)
|
def _is_vector(tensor):
    """Return True iff *tensor* has static rank 1 (i.e. is a vector)."""
    rank = len(tensor.shape.as_list())
    return rank == 1
|
def median(tensor):
    """Return the median of all elements in *tensor* (graph construction).

    The tensor is flattened, fully sorted in descending order via
    ``tf.nn.top_k``, and the middle value (or the mean of the two middle
    values for an even count) is returned.  The even/odd branch is decided
    at graph-construction time, so the element count must be static.
    """
    tensor_reshaped = tf.reshape(tensor, [(- 1)])
    # NOTE(review): get_shape() requires the flattened size to be statically
    # known; in TF1 this is a Dimension object — // and % are assumed to
    # behave like ints here, confirm for the TF version in use.
    n_elements = tensor_reshaped.get_shape()[0]
    # Full descending sort; the median position is symmetric, so sort
    # direction does not matter.
    sorted_tensor = tf.nn.top_k(tensor_reshaped, n_elements, sorted=True)
    mid_index = (n_elements // 2)
    if ((n_elements % 2) == 1):
        # Odd count: the middle element is the median.
        return sorted_tensor.values[mid_index]
    # Even count: average the two central elements.
    return ((sorted_tensor.values[(mid_index - 1)] + sorted_tensor.values[mid_index]) / 2)
|
def squareform(tensor):
    """Convert a condensed 1-d pairwise-distance tensor into a symmetric
    square matrix (the vector-to-matrix direction of
    ``scipy.spatial.distance.squareform``).

    Args:
        tensor: 1-d tensor of n-choose-2 elements — the strictly-upper
            triangle of an (n, n) distance matrix in row-major order.

    Returns:
        An (n, n) symmetric tensor with zeros on the diagonal.

    Raises:
        ValueError: If the vector length is not a binomial coefficient.
        NotImplementedError: If the input is not 1-d.
    """
    assert isinstance(tensor, tf.Tensor), 'tensor_utils.squareform: Input must be a `tensorflow.Tensor` instance.'
    tensor_shape = tensor.shape.as_list()
    n_elements = tensor_shape[0]
    if _is_vector(tensor):
        if (n_elements == 0):
            # Degenerate empty input: return a 1x1 zero matrix.
            return tf.zeros((1, 1), dtype=tensor.dtype)
        # Recover n from n*(n-1)/2 == n_elements.
        dimension = int(np.ceil(np.sqrt((n_elements * 2))))
        if ((dimension * (dimension - 1)) != (n_elements * 2)):
            raise ValueError('Incompatible vector size. It must be a binomial coefficient n choose 2 for some integer n >=2.')
        # Partition the n*n flat matrix positions into: diagonal (zeros),
        # strict upper triangle (the condensed distances), and the rest
        # (zeros, mirrored in later via the transpose).
        n_total_elements_matrix = (dimension ** 2)
        n_diagonal_zeros = dimension
        n_fill_zeros = ((n_total_elements_matrix - n_elements) - n_diagonal_zeros)
        condensed_distance_tensor = tf.reshape(tensor, shape=(n_elements, 1))
        diagonal_zeros = tf.zeros(shape=(n_diagonal_zeros, 1), dtype=condensed_distance_tensor.dtype)
        fill_zeros = tf.zeros(shape=(n_fill_zeros, 1), dtype=condensed_distance_tensor.dtype)
        def upper_triangular_indices(dimension):
            """Yield flat (row-major) indices of the strictly-upper-triangular
            entries of a (dimension, dimension) matrix, viewed as a
            dimension*dimension vector.

            Args:
                dimension: Side length of the target square matrix.

            Yields:
                int: Flat index of each upper-triangular position.
            """
            assert (dimension > 0), 'tensor_utils.upper_triangular_indices: Dimension must be positive integer!'
            for row in range(dimension):
                for column in range((row + 1), dimension):
                    element_index = ((dimension * row) + column)
                    (yield element_index)
        all_indices = set(range(n_total_elements_matrix))
        # Diagonal positions of a row-major flat matrix are every (dim+1)-th.
        diagonal_indices = list(range(0, n_total_elements_matrix, (dimension + 1)))
        upper_triangular = list(upper_triangular_indices(dimension))
        remaining_indices = all_indices.difference(set(diagonal_indices).union(upper_triangular))
        # Scatter the three partitions back into one flat vector, then fold
        # it into the matrix shape.
        data = (diagonal_zeros, condensed_distance_tensor, fill_zeros)
        indices = (tuple(diagonal_indices), tuple(upper_triangular), tuple(remaining_indices))
        stitch_vector = tf.dynamic_stitch(data=data, indices=indices)
        upper_triangular = tf.reshape(stitch_vector, (dimension, dimension))
        # Mirror the upper triangle; the diagonal is zero, so adding the
        # transpose does not double-count anything.
        lower_triangular = tf.transpose(upper_triangular)
        return (upper_triangular + lower_triangular)
    else:
        raise NotImplementedError('tensor_utils.squareform: Only 1-d (vector) input is supported!')
|
def get_images(paths, labels, nb_samples=None, shuffle=True):
    """Collect (label, image_path) pairs from the given class directories.

    Args:
        paths: Directory per class; every file inside is treated as an image.
        labels: Label paired with each directory (zipped with *paths*).
        nb_samples: If given, randomly sample this many files per directory;
            otherwise take every file.
        shuffle: Shuffle the combined list in place before returning.

    Returns:
        List of (label, full_image_path) tuples.
    """
    if nb_samples is None:
        pick = lambda entries: entries
    else:
        pick = lambda entries: random.sample(entries, nb_samples)
    images = []
    for label, path in zip(labels, paths):
        for image in pick(os.listdir(path)):
            images.append((label, os.path.join(path, image)))
    if shuffle:
        random.shuffle(images)
    return images
|
def clip_if_not_none(grad, min_value, max_value):
    """Clip *grad* into [min_value, max_value]; pass None through unchanged.

    Useful when some variables have no gradient (tf returns None for them).
    """
    if grad is not None:
        return tf.clip_by_value(grad, min_value, max_value)
    return grad
|
def str2bool(v):
    """Parse a human-friendly boolean string into a bool (case-insensitive).

    Accepts yes/true/t/y/1 as True and no/false/f/n/0 as False.

    Raises:
        argparse.ArgumentTypeError: For any other value (so argparse reports
            a clean usage error when used as a flag `type=`).
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
|
def make_logdir(configs, fname_args=[]):
    """Build a log-directory name from time, host, git hash and config values.

    Args:
        configs: Dict of configuration values.
        fname_args: Config keys to embed in the name; each must exist in
            ``configs``.

    Returns:
        The assembled directory-name string, with '/' replaced by '_'.

    Raises:
        ValueError: If a key in *fname_args* is missing from *configs*.
    """
    this_run_str = (time.strftime('%H%M%S_') + str(socket.gethostname()))
    if is_git_dir():
        this_run_str += ('_git' + git_hash_str())
    for str_arg in fname_args:
        if (str_arg in configs.keys()):
            this_run_str += ((('_' + str_arg.title().replace('_', '')) + '_') + str(configs[str_arg]))
        else:
            raise ValueError(('%s in fname_args does not exist in configs' % str_arg))
    this_run_str = this_run_str.replace('/', '_')
    # BUG FIX: the original returned the undefined name `log_dir`, which
    # raised NameError on every call; return the assembled string instead.
    return this_run_str
|
def experiment_prefix_str(separator=',', hostname=False, git=True):
    """Build a timestamp-based experiment prefix.

    Args:
        separator: Inserted before the git hash when present.
        hostname: Append the machine's hostname when True.
        git: Append the current git hash when True and inside a git repo.

    Returns:
        Prefix string with all '-' characters stripped.
    """
    prefix = time.strftime('%y%m%d_%H%M%S')
    if hostname:
        prefix += str(socket.gethostname())
    if git and is_git_dir():
        prefix += separator + str(git_hash_str())
    return prefix.replace('-', '')
|
def experiment_string2(configs, fname_args=[], separator=','):
    """Concatenate selected config values into an experiment-name suffix.

    Args:
        configs: Dict of configuration values (e.g. FLAGS.flag_values_dict()).
        fname_args: List of (config_key, short_name) pairs; ``short_name``
            may be None, in which case it is derived from the key
            (TitleCase with underscores stripped).
        separator: String placed before each key/value pair.

    Returns:
        The assembled suffix, with '/' replaced by '_'.

    Raises:
        ValueError: If a requested key is missing from *configs*.
    """
    this_run_str = ''
    for (org_arg_str, short_arg_str) in fname_args:
        if short_arg_str is None:
            short_arg_str = org_arg_str.title().replace('_', '')
        if (org_arg_str not in configs.keys()):
            # Typo fix: the message previously read "doesn not exist"; it now
            # matches the wording used by the sibling experiment_string().
            raise ValueError(('%s in fname_args does not exist in configs' % org_arg_str))
        this_run_str += ((separator + short_arg_str) + str(configs[org_arg_str]).title().replace('_', ''))
    this_run_str = this_run_str.replace('/', '_')
    return this_run_str
|
def experiment_string(configs, fname_args=[], separator=','):
    """Build a full experiment-name string: time/git prefix plus config values.

    Args:
        configs: Dict of configuration values.
        fname_args: Config keys to embed as ``KeyName=value`` pairs.
        separator: String placed before each pair.

    Returns:
        The assembled name, with '/' replaced by '_'.

    Raises:
        ValueError: If a key in *fname_args* is missing from *configs*.
    """
    # BUG FIX: the original called the undefined name `expr_prefix_str(configs)`
    # (NameError).  `experiment_prefix_str` is the evident intent; it takes no
    # configs argument, so only the separator is forwarded — confirm against
    # the original author's intent.
    this_run_str = experiment_prefix_str(separator=separator)
    for str_arg in fname_args:
        if (str_arg in configs.keys()):
            this_run_str += (((separator + str_arg.title().replace('_', '')) + '=') + str(configs[str_arg]))
        else:
            raise ValueError(('%s in fname_args does not exist in configs' % str_arg))
    this_run_str = this_run_str.replace('/', '_')
    return this_run_str
|
def is_git_dir():
    """Return True when the current working directory is inside a git repo.

    Probes by running ``git branch`` with all output suppressed; a zero exit
    status means we are in a repository.
    """
    from subprocess import call, STDOUT, DEVNULL
    # BUG FIX: the original used `open(os.devnull, 'w')` and never closed the
    # handle, leaking a file descriptor per call; DEVNULL avoids that.
    return call(['git', 'branch'], stderr=STDOUT, stdout=DEVNULL) == 0
|
def git_hash_str(hash_len=7):
    """Return the first *hash_len* characters of the current git HEAD hash.

    Args:
        hash_len: Number of leading hash characters to keep (default 7,
            git's conventional short-hash length).
    """
    import subprocess
    hash_bytes = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    # BUG FIX: check_output returns bytes; `str(bytes)` in Python 3 yields a
    # "b'...'" literal, corrupting the experiment name (and eating two hash
    # characters).  Decode and strip the trailing newline before truncating.
    return hash_bytes.decode('utf-8').strip()[:hash_len]
|
def multi_collate_fn(batch, samples_per_gpu=1):
    """Collate a batch of :obj:`mmcv.parallel.DataContainer` items into
    per-GPU groups.

    Mainly used by the query-support dataloader; unlike mmcv's ``collate``
    it also accepts ``list[list[DataContainer]]`` (the nested list is
    flattened and the group size scaled accordingly).

    Three DataContainer cases are handled:
      1. ``cpu_only=True`` (e.g. meta data): grouped as nested lists.
      2. ``stack=True`` (e.g. image tensors): padded to a common shape per
         group and stacked via ``default_collate``.
      3. ``stack=False`` (e.g. gt bboxes): grouped as nested lists.

    Args:
        batch (list[list[DataContainer]] | list[DataContainer]): Data of a
            single batch.
        samples_per_gpu (int): The number of samples of a single GPU.
    """
    if (not isinstance(batch, Sequence)):
        raise TypeError(f'{batch.dtype} is not supported.')
    # Nested batch: flatten one level and scale the per-GPU group size.
    if isinstance(batch[0], Sequence):
        samples_per_gpu = (len(batch[0]) * samples_per_gpu)
        batch = sum(batch, [])
    if isinstance(batch[0], DataContainer):
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep raw data grouped per GPU; no tensor work needed.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
            return DataContainer(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
        elif batch[0].stack:
            # Case 2: pad each group's tensors over the trailing `pad_dims`
            # dimensions to the group-wide maximum, then stack.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)
                if (batch[i].pad_dims is not None):
                    ndim = batch[i].dim()
                    assert (ndim > batch[i].pad_dims)
                    # Seed max_shape from the first sample of the group.
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    for dim in range(1, (batch[i].pad_dims + 1)):
                        max_shape[(dim - 1)] = batch[i].size((- dim))
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # Leading (non-padded) dims must agree across the group.
                        for dim in range(0, (ndim - batch[i].pad_dims)):
                            assert (batch[i].size(dim) == sample.size(dim))
                        for dim in range(1, (batch[i].pad_dims + 1)):
                            max_shape[(dim - 1)] = max(max_shape[(dim - 1)], sample.size((- dim)))
                    padded_samples = []
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # F.pad takes (left, right) pairs starting from the
                        # last dim; only the "right" entries are non-zero.
                        pad = [0 for _ in range((batch[i].pad_dims * 2))]
                        for dim in range(1, (batch[i].pad_dims + 1)):
                            pad[((2 * dim) - 1)] = (max_shape[(dim - 1)] - sample.size((- dim)))
                        padded_samples.append(F.pad(sample.data, pad, value=sample.padding_value))
                    stacked.append(default_collate(padded_samples))
                elif (batch[i].pad_dims is None):
                    # No padding requested: stack the raw tensors directly.
                    stacked.append(default_collate([sample.data for sample in batch[i:(i + samples_per_gpu)]]))
                else:
                    raise ValueError('pad_dims should be either None or integers (1-3)')
        else:
            # Case 3: non-stackable payloads, grouped per GPU as plain lists.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif isinstance(batch[0], Sequence):
        # Recurse field-wise over parallel sequences.
        # NOTE(review): these two branches call `collate`, not
        # `multi_collate_fn` — presumably mmcv's collate; confirm intent.
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        # Recurse per dictionary key.
        return {key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]}
    else:
        return default_collate(batch)
|
def build_point_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, **kwargs):
    """Build a PyTorch DataLoader that collates with ``multi_collate_fn``.

    In distributed training each GPU/process gets its own dataloader; in
    non-distributed training one dataloader serves all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Batch size per GPU.
        workers_per_gpu (int): Data-loading subprocesses per GPU.
        num_gpus (int): Number of GPUs (non-distributed only).
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data every epoch.
            Default: True.
        seed (int | None): Seed forwarded to the sampler and workers.
        kwargs: Extra keyword arguments for DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    (rank, world_size) = get_dist_info()
    if dist:
        # Distributed: the sampler shards the dataset, so batch size and
        # worker count stay per-GPU.
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank, seed=seed)
        else:
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False, seed=seed)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # Non-distributed: one loader feeds every GPU, so scale both.
        sampler = (GroupSampler(dataset, samples_per_gpu) if shuffle else None)
        batch_size = (num_gpus * samples_per_gpu)
        num_workers = (num_gpus * workers_per_gpu)
    # Seed each worker deterministically when a seed is supplied.
    init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(multi_collate_fn, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
    return data_loader
|
class PointGenerator(object):
    """Augment a COCO-style annotation file with one point per instance.

    For each annotation, a point is sampled uniformly from the instance
    mask (or from the bounding box when the mask is empty) and stored as
    ``ann['point'] = [x, y, x, y]``.  The augmented JSON is written to
    ``./point_ann/<original basename>``.
    """

    def __init__(self, ann_file):
        # Keep the path so the output file can reuse the input basename.
        self.ann_file = ann_file
        self.coco = COCO(ann_file)
        # Fixed seed makes the sampled points reproducible across runs.
        self.seed = 0

    def generate_points(self):
        """Sample one point per annotation and dump the augmented JSON."""
        save_json = dict()
        save_json['images'] = self.coco.dataset['images']
        save_json['annotations'] = []
        annotations = self.coco.dataset['annotations']
        save_json['categories'] = self.coco.dataset['categories']
        # image_id -> image info, for mask height/width lookup.
        id_info = dict()
        for img_info in self.coco.dataset['images']:
            id_info[img_info['id']] = img_info
        prog_bar = mmcv.ProgressBar(len(annotations))
        with local_numpy_seed(self.seed):
            for ann in annotations:
                prog_bar.update()
                img_info = id_info[ann['image_id']]
                # NOTE(review): segm may be None per the .get default, but the
                # elif below dereferences segm['counts'] — confirm annotations
                # always carry a segmentation.
                segm = ann.get('segmentation', None)
                # Normalize the three COCO segmentation encodings to RLE.
                if isinstance(segm, list):
                    # Polygon(s): rasterize and merge into a single RLE.
                    rles = maskUtils.frPyObjects(segm, img_info['height'], img_info['width'])
                    rle = maskUtils.merge(rles)
                elif isinstance(segm['counts'], list):
                    # Uncompressed RLE.
                    rle = maskUtils.frPyObjects(segm, img_info['height'], img_info['width'])
                else:
                    # Already-compressed RLE.
                    rle = segm
                mask = maskUtils.decode(rle)
                if (mask.sum() > 0):
                    # Sample a foreground pixel uniformly from the mask.
                    (ys, xs) = np.nonzero(mask)
                    point_idx = np.random.randint(len(xs))
                    x1 = int(xs[point_idx])
                    y1 = int(ys[point_idx])
                    ann['point'] = [x1, y1, x1, y1]
                else:
                    # Empty mask: fall back to a uniform point inside the bbox.
                    (x1, y1, w, h) = ann['bbox']
                    x1 = np.random.uniform(x1, (x1 + w))
                    y1 = np.random.uniform(y1, (y1 + h))
                    ann['point'] = [x1, y1, x1, y1]
                save_json['annotations'].append(ann)
        mmcv.mkdir_or_exist('./point_ann/')
        ann_name = self.ann_file.split('/')[(- 1)]
        with open(f'./point_ann/{ann_name}', 'w') as f:
            json.dump(save_json, f)
|
def parse_args():
    """Parse command-line arguments for testing/evaluating a detector.

    Also mirrors ``--local_rank`` into the LOCAL_RANK env var for
    torch.distributed launchers, and folds the deprecated ``--options``
    into ``--eval-options``.
    """
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # NOTE(review): the help strings for --ceph and --vis look copy-pasted
    # from --no-validate and do not describe these flags — confirm intent.
    parser.add_argument('--ceph', action='store_true', help='whether not to evaluate the checkpoint during training')
    parser.add_argument('--vis', action='store_true', help='whether not to evaluate the checkpoint during training')
    parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    parser.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated spelling; forbid mixing, then alias it.
    if (args.options and args.eval_options):
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
|
def main():
    """Entry point for testing/evaluating a point-detection model.

    Loads the config, builds the test dataloader and detector, runs
    single- or multi-GPU inference, and optionally dumps/formats/evaluates
    the results depending on the CLI flags.
    """
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        # SECURITY NOTE(review): eval() on CLI-supplied override values
        # executes arbitrary code; acceptable only for trusted local use.
        for (k, v) in args.cfg_options.items():
            args.cfg_options[k] = eval(v)
        cfg.merge_from_dict(args.cfg_options)
    if args.vis:
        cfg.model.vis = True
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # cudnn benchmark speeds up fixed-size inputs.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Weights come from the checkpoint, so drop any pretrained init paths.
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        # Batched testing needs uniform tensors, not padded DataContainers.
        if (samples_per_gpu > 1):
            cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max([ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if (samples_per_gpu > 1):
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    (rank, _) = get_dist_info()
    # Only rank 0 creates the work dir and owns the metrics file.
    if ((args.work_dir is not None) and (rank == 0)):
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
    dataset = build_dataset(cfg.data.test)
    data_loader = build_point_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # Prefer class names stored in the checkpoint; fall back to the dataset's.
    if ('CLASSES' in checkpoint.get('meta', {})):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = pointdet_single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = pointdet_multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # Result dumping/formatting/evaluation happens on rank 0 only.
    if (rank == 0):
        if args.out:
            print(f'''
writing results to {args.out}''')
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.eval_options is None) else args.eval_options)
        if args.format_only:
            outputs = [item[2] for item in outputs]
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # Strip training-only evaluation keys before calling evaluate().
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if ((args.work_dir is not None) and (rank == 0)):
                mmcv.dump(metric_dict, json_file)
|
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train a detector with the mmcv runner using point-annotation dataloaders.

    Args:
        model: Detector to train (wrapped in MMDataParallel or
            MMDistributedDataParallel below).
        dataset: A dataset or list of datasets; one dataloader is built per
            dataset.
        cfg: Full training config (data, optimizer, runner, hooks, ...).
        distributed: Whether training is distributed across processes.
        validate: Whether to register a periodic evaluation hook.
        timestamp: Run timestamp forwarded to the runner (log naming).
        meta: Extra metadata dict stored with checkpoints.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Backward compatibility: map the deprecated imgs_per_gpu key onto
    # samples_per_gpu (imgs_per_gpu wins when both are set).
    if ('imgs_per_gpu' in cfg.data):
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
        if ('samples_per_gpu' in cfg.data):
            logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    data_loaders = [build_point_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed) for ds in dataset]
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    # Legacy configs may predate the `runner` section; synthesize one.
    if ('runner' not in cfg):
        cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    elif ('total_epochs' in cfg):
        assert (cfg.total_epochs == cfg.runner.max_epochs)
    runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    runner.timestamp = timestamp
    # fp16 training uses a dedicated optimizer hook.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif (distributed and ('type' not in cfg.optimizer_config)):
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
    if distributed:
        # Re-seed the sampler each epoch so shards differ across epochs.
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    if validate:
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        # Batched validation needs uniform tensors, not padded containers.
        if (val_samples_per_gpu > 1):
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
        eval_hook = (PointdetDistEvalHook if distributed else PointdetEvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    # User-defined hooks from the config, with optional priorities.
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), f'Each item in custom_hooks expects dict type, but got {type(hook_cfg)}'
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    # resume_from restores optimizer/epoch state; load_from only weights.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
|
def parse_args():
    """Parse command-line arguments for training a detector.

    Also mirrors ``--local_rank`` into the LOCAL_RANK env var for
    torch.distributed launchers, and folds the deprecated ``--options``
    into ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    # NOTE(review): the help string for --port says 'random seed' — it looks
    # copy-pasted from --seed; confirm the intended description.
    parser.add_argument('--port', type=int, default=20001, help='random seed')
    parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated spelling; forbid mixing, then alias it.
    if (args.options and args.cfg_options):
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
|
def main():
    """Train-script entry point: parse CLI args, load and patch the mmcv
    Config, set up work dir / logging / seeding, build the detector and
    datasets, then hand off to ``train_detector``.

    Side effects: creates ``cfg.work_dir``, dumps the config into it, writes
    a timestamped log file and, for non-'none' launchers, initializes the
    distributed backend.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        for (k, v) in args.cfg_options.items():
            # NOTE(review): eval() on CLI-provided override values executes
            # arbitrary code, and may be redundant if DictAction already
            # parsed them into Python types -- confirm; ast.literal_eval
            # would be the safe alternative.
            args.cfg_options[k] = eval(v)
        cfg.merge_from_dict(args.cfg_options)
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    if cfg.get('cudnn_benchmark', False):
        # Faster for fixed input shapes; kernel selection is non-deterministic.
        torch.backends.cudnn.benchmark = True
    # Work dir priority: CLI flag > config value > derived from config name.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # In distributed mode the world size overrides --gpus/--gpu-ids.
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Keep a copy of the exact config used for this run next to the logs.
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # meta travels with the runner/checkpoints for reproducibility.
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
        # NOTE(review): cfg.seed is only set when --seed is given, yet
        # train_detector reads cfg.seed unconditionally -- verify the config
        # always defines 'seed' otherwise.
        cfg.seed = args.seed
        meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A 2-stage workflow (e.g. [('train', 1), ('val', 1)]) also needs the
    # val split, run through the *train* pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Stamp checkpoints with the mmdet version/git hash and class names.
        cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
|
def parse_args():
    """Parse command-line arguments for the webcam demo.

    Returns:
        argparse.Namespace: Parsed options (config, checkpoint, device,
        camera_id, score_thr).
    """
    parser = argparse.ArgumentParser(description='MMDetection webcam demo')
    # Positional arguments: config first, then the checkpoint to load.
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # Optional knobs with sensible defaults.
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    parser.add_argument('--camera-id', type=int, default=0, help='camera device id')
    parser.add_argument('--score-thr', type=float, default=0.5, help='bbox score threshold')
    return parser.parse_args()
|
def main():
    """Webcam demo loop: grab frames, run the detector, and display results
    until Esc/q/Q is pressed or the camera stops delivering frames.

    Side effects: opens the camera device, creates display windows via
    ``show_result`` and prints a usage hint.
    """
    args = parse_args()
    model = init_detector(args.config, args.checkpoint, device=torch.device('cuda', args.device))
    camera = cv2.VideoCapture(args.camera_id)
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        (ret_val, img) = camera.read()
        if not ret_val:
            # Camera disconnected or frame grab failed: stop instead of
            # feeding a None frame into the detector (fixes a crash).
            break
        result = inference_detector(model, img)
        ch = cv2.waitKey(1)
        if ((ch == 27) or (ch == ord('q')) or (ch == ord('Q'))):
            break
        show_result(img, result, model.CLASSES, score_thr=args.score_thr, wait_time=1)
    # Release the capture device so other processes can open the camera.
    camera.release()
|
def single_gpu_test(model, data_loader, show=False):
    """Run inference over ``data_loader`` on a single GPU.

    Args:
        model (nn.Module): Model to evaluate (wrapped, exposes ``module``).
        data_loader (DataLoader): Yields batches as dicts with an 'img' key.
        show (bool): If True, visualize each result and skip rescaling.

    Returns:
        list: One result per batch, in loader order.
    """
    model.eval()
    collected = []
    progress = mmcv.ProgressBar(len(data_loader.dataset))
    for batch in data_loader:
        with torch.no_grad():
            output = model(return_loss=False, rescale=(not show), **batch)
        collected.append(output)
        if show:
            model.module.show_result(batch, output)
        # Advance the bar once per sample in this batch.
        for _ in range(batch['img'][0].size(0)):
            progress.update()
    return collected
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test a model with multiple gpus and collect the merged results.

    Two collection modes are supported: with ``gpu_collect=True`` results are
    encoded into gpu tensors and gathered over the gpu backend; otherwise
    each rank dumps its part to ``tmpdir`` and rank 0 merges them.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    rank, world_size = get_dist_info()
    total = len(data_loader.dataset)
    # Only rank 0 shows progress; it advances for every rank's samples.
    if rank == 0:
        progress = mmcv.ProgressBar(total)
    partial = []
    for batch in data_loader:
        with torch.no_grad():
            partial.append(model(return_loss=False, rescale=True, **batch))
        if rank == 0:
            n = batch['img'][0].size(0)
            for _ in range(n * world_size):
                progress.update()
    if gpu_collect:
        return collect_results_gpu(partial, total)
    return collect_results_cpu(partial, total, tmpdir)
|
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect per-rank result lists via the filesystem (cpu mode).

    Each rank dumps its partial results to ``tmpdir`` as a pickle; rank 0
    loads all parts, interleaves them back into dataset order and returns
    the merged list.  Non-zero ranks return None.

    Args:
        result_part (list): Results produced by this rank.
        size (int): Total number of dataset samples; trims padded tail
            duplicates after merging.
        tmpdir (str | None): Directory for the part files.  If None, rank 0
            creates a temp dir and broadcasts its path to the other ranks.

    Returns:
        list | None: Merged results on rank 0, None elsewhere.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        # Broadcast the rank-0 temp dir path as a fixed-size byte tensor;
        # 32 is the ASCII space character, so rstrip() below recovers the
        # original path from the padded buffer.
        MAX_LEN = 512
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Every rank writes its own part file; all ranks sync before rank 0
    # reads them back.
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # Samples were sharded round-robin across ranks, so interleaving the
        # parts restores dataset order.  NOTE(review): zip() stops at the
        # shortest part, so a tail sample on a longer shard would be dropped
        # -- presumably the sampler pads shards to equal length; verify.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop padded duplicates beyond the true dataset size.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
|
def collect_results_gpu(result_part, size):
    """Collect per-rank result lists via gpu communication (gpu mode).

    Each rank pickles its partial results into a uint8 cuda tensor, pads all
    tensors to a common length and all_gathers them; rank 0 unpickles and
    interleaves the parts back into dataset order.

    Args:
        result_part (list): Results produced by this rank.
        size (int): Total number of dataset samples; trims padded tail
            duplicates after merging.

    Returns:
        list | None: Merged results on rank 0; implicitly None elsewhere.
    """
    (rank, world_size) = get_dist_info()
    # Serialize this rank's results into a byte tensor on the gpu.
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # Exchange tensor lengths so every rank can pad to the common maximum.
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    shape_max = torch.tensor(shape_list).max()
    # Zero-pad so all_gather sees equally sized tensors on every rank.
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank == 0):
        part_list = []
        for (recv, shape) in zip(part_recv_list, shape_list):
            # Strip the zero padding before unpickling each part.
            part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # Interleave round-robin shards back into dataset order, then drop
        # padded duplicates beyond the true dataset size.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        ordered_results = ordered_results[:size]
        return ordered_results
|
def set_random_seed(seed, deterministic=False):
    """Seed the Python, NumPy and torch (CPU + all CUDA) RNGs.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        # Deterministic kernels at the cost of disabling autotuning.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
|
def parse_losses(losses):
    """Aggregate a dict of raw losses into a total loss and scalar log vars.

    Args:
        losses (dict): Maps loss names to a tensor or a list of tensors.

    Returns:
        tuple: ``(loss, log_vars)`` where ``loss`` is the (tensor) sum of all
        entries whose name contains ``'loss'``, and ``log_vars`` maps every
        entry (plus ``'loss'``) to a Python float, averaged across workers
        when distributed training is initialized.

    Raises:
        TypeError: If a value is neither a tensor nor a list of tensors.
    """
    log_vars = OrderedDict()
    for loss_name, loss_value in losses.items():
        if isinstance(loss_value, torch.Tensor):
            log_vars[loss_name] = loss_value.mean()
        elif isinstance(loss_value, list):
            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(loss_name))
    # Only entries whose name contains 'loss' contribute to the total that
    # is back-propagated; other entries (e.g. accuracies) are logged only.
    loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
    log_vars['loss'] = loss
    # Hoist the loop-invariant distributed check out of the per-key loop.
    reduce_across_workers = dist.is_available() and dist.is_initialized()
    for loss_name, loss_value in log_vars.items():
        if reduce_across_workers:
            # Average the logged value over all workers without touching the
            # graph (clone of .data).
            loss_value = loss_value.data.clone()
            dist.all_reduce(loss_value.div_(dist.get_world_size()))
        log_vars[loss_name] = loss_value.item()
    return (loss, log_vars)
|
def batch_processor(model, data, train_mode):
    """Process a data batch.

    Required as an argument of Runner: defines how one batch is turned into
    losses and log variables.  The first 3 arguments are fixed by Runner's
    contract.

    Args:
        model (nn.Module): A PyTorch model.
        data (dict): The data batch in a dict (must contain 'img').
        train_mode (bool): Training mode or not.  It may be useless for
            some models.

    Returns:
        dict: A dict containing losses and log vars.
    """
    raw_losses = model(**data)
    loss, log_vars = parse_losses(raw_losses)
    return dict(
        loss=loss,
        log_vars=log_vars,
        num_samples=len(data['img'].data))
|
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Wrap the model for (distributed) data-parallel training, build an
    mmcv Runner with optimizer and hooks, optionally attach evaluation, and
    run the configured workflow for ``cfg.total_epochs`` epochs.

    Args:
        model (nn.Module): Detector to train (moved to gpu here).
        dataset (Dataset | list[Dataset]): Training dataset(s); one data
            loader is built per dataset.
        cfg (mmcv.Config): Full training config.
        distributed (bool): Use MMDistributedDataParallel when True.
        validate (bool): Register an evaluation hook on ``cfg.data.val``.
        timestamp (str | None): Run timestamp forwarded to the runner.
        meta (dict | None): Extra info stored into checkpoints.
    """
    logger = get_root_logger(cfg.log_level)
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # NOTE(review): cfg.seed is only assigned in main() when --seed is given;
    # confirm the config always defines 'seed' or this lookup may fail.
    data_loaders = [build_dataloader(ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed) for ds in dataset]
    if distributed:
        # find_unused_parameters is needed when parts of the graph do not
        # receive gradients every iteration.
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir, logger=logger, meta=meta)
    # Keep log/checkpoint filenames aligned with the caller's timestamp.
    runner.timestamp = timestamp
    # Choose the optimizer hook: fp16 > plain distributed > config default.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed:
        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
    if distributed:
        # Re-seed the sampler each epoch so shuffling differs across epochs.
        runner.register_hook(DistSamplerSeedHook())
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # Resuming (optimizer + epoch state) takes precedence over load_from
    # (weights only).
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
# NOTE(review): the lines below are scraped dataset-viewer UI text, not
# Python; commented out so the file stays syntactically valid.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.