code stringlengths 17 6.64M |
|---|
class Trainer(object):
    """Training and validation driver for a medical-imaging MIL model.

    Sets up the device, logging, data loaders, model, loss and optimizer from
    ``opts``, then :meth:`run` executes warm-up, the epoch loop, checkpointing
    and metric logging.
    """

    def __init__(self, opts):
        super(Trainer, self).__init__()
        self.opts = opts
        self.best_acc = 0
        self.start_epoch = 0
        self.max_bsz_cnn_gpu0 = opts.max_bsz_cnn_gpu0
        # Resume only when a checkpoint directory was supplied and exists on disk.
        self.resume = (self.opts.checkpoint if ((self.opts.checkpoint is not None) and os.path.isdir(self.opts.checkpoint)) else None)
        self.global_setter()

    def global_setter(self):
        """Run all setup steps; order matters (device -> data -> model/optim)."""
        self.setup_device()
        self.setup_directories()
        self.setup_logger()
        self.setup_lr_scheduler()
        self.setup_dataloader()
        self.setup_model_optimizer_lossfn()

    def setup_directories(self):
        """Create the save directory if it does not exist."""
        if not os.path.isdir(self.opts.savedir):
            os.makedirs(self.opts.savedir)

    def setup_device(self):
        """Detect GPUs and configure the compute device and cuDNN flags."""
        num_gpus = torch.cuda.device_count()
        self.num_gpus = num_gpus
        if num_gpus > 0:
            print_log_message('Using {} GPUs'.format(num_gpus))
        else:
            print_log_message('Using CPU')
        self.device = torch.device('cuda:0' if num_gpus > 0 else 'cpu')
        self.use_multi_gpu = num_gpus > 1
        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            cudnn.benchmark = True
            cudnn.deterministic = True

    def setup_logger(self):
        """Use TensorBoard's SummaryWriter when importable, else the local fallback."""
        try:
            from torch.utils.tensorboard import SummaryWriter
        except Exception:  # narrowed from bare except; any import failure falls back
            from utilities.summary_writer import SummaryWriter
        self.logger = SummaryWriter(log_dir=self.opts.savedir, comment='Training and Validation logs')

    def setup_lr_scheduler(self):
        """Instantiate the LR scheduler selected in opts."""
        self.lr_scheduler = get_lr_scheduler(self.opts)

    def setup_dataloader(self):
        """Build the frozen base feature extractor and the train/val loaders."""
        from model.base_feature_extractor import BaseFeatureExtractor
        base_feature_extractor = BaseFeatureExtractor(opts=self.opts)
        base_feature_extractor = base_feature_extractor.to(device=self.device)
        if self.use_multi_gpu:
            base_feature_extractor = torch.nn.DataParallel(base_feature_extractor)
        self.base_feature_extractor = base_feature_extractor
        # The extractor is frozen; keep it in eval mode throughout.
        self.base_feature_extractor.eval()
        if self.base_feature_extractor.training:
            print_warning_message('Base feature extractor is in training mode. Moving to evaluation mode')
            self.base_feature_extractor.eval()
        (train_loader, val_loader, diag_classes, class_weights) = get_data_loader(opts=self.opts)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.diag_classes = diag_classes
        self.class_weights = torch.from_numpy(class_weights)

    def setup_model_optimizer_lossfn(self):
        """Build the MI model, loss and optimizer; optionally restore a checkpoint.

        BUGFIX: checkpoint restoration previously ran *before* ``self.mi_model``
        and ``self.optimizer`` were created, so resuming always crashed with an
        AttributeError. The restore now happens after both are constructed, and
        the model state is loaded into the unwrapped module (matching how
        ``save_checkpoint`` stores it).
        """
        odim = (self.base_feature_extractor.module.output_feature_sz if self.use_multi_gpu else self.base_feature_extractor.output_feature_sz)
        mi_model = build_model(opts=self.opts, diag_classes=self.diag_classes, base_feature_odim=odim)
        mi_model = mi_model.to(device=self.device)
        if self.use_multi_gpu:
            mi_model = torch.nn.DataParallel(mi_model)
        self.mi_model = mi_model
        criteria = build_criteria(opts=self.opts, class_weights=self.class_weights.float())
        self.criteria = criteria.to(device=self.device)
        self.optimizer = build_optimizer(model=self.mi_model, opts=self.opts)
        if self.resume is not None:
            (resume_ep, resume_model_state, resume_optim_state, resume_perf) = load_checkpoint(checkpoint_dir=self.opts.checkpoint, device=self.device)
            self.start_epoch = resume_ep
            self.best_acc = resume_perf
            # Checkpoints store the *unwrapped* model's state_dict, so load into
            # .module when running under DataParallel.
            target_model = self.mi_model.module if isinstance(self.mi_model, torch.nn.DataParallel) else self.mi_model
            target_model.load_state_dict(resume_model_state)
            self.optimizer.load_state_dict(resume_optim_state)
            # Move optimizer state tensors (e.g. momentum buffers) to the device.
            for state in self.optimizer.state.values():
                for (k, v) in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device=self.device)
            print_log_message('Resuming from checkpoint saved at {}th epoch'.format(self.start_epoch))

    def training(self, epoch, lr, *args, **kwargs):
        """Run one training epoch and return (avg_accuracy, avg_loss)."""
        train_stats = Statistics()
        self.mi_model.train()
        self.optimizer.zero_grad()
        num_samples = len(self.train_loader)
        epoch_start_time = time.time()
        for (batch_id, batch) in enumerate(self.train_loader):
            (words, true_diag_labels) = batch
            true_diag_labels = true_diag_labels.to(device=self.device)
            pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
            loss = self.criteria(pred_diag_labels, true_diag_labels)
            top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
            loss.backward()
            # Gradient accumulation: step every accum_count batches, and always at epoch end.
            if (((batch_id + 1) % self.opts.accum_count) == 0) or ((batch_id + 1) == len(self.train_loader)):
                self.optimizer.step()
                self.optimizer.zero_grad()
            train_stats.update(loss=loss.item(), acc=top1_acc[0].item())
            if ((batch_id % self.opts.log_interval) == 0) and (batch_id > 0):
                train_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
        return (train_stats.avg_acc(), train_stats.avg_loss())

    def warm_up(self, *args, **kwargs):
        """Linearly ramp the LR from opts.warm_up_min_lr to opts.lr over warm-up steps."""
        self.mi_model.train()
        num_samples = len(self.train_loader)
        # Round warm-up length up to a whole number of epochs.
        warm_up_iterations = int(math.ceil((self.opts.warm_up_iterations * 1.0) / num_samples) * num_samples)
        print_info_message('Warming Up')
        print_log_message('LR will linearly change from {} to {} in about {} steps'.format(self.opts.warm_up_min_lr, self.opts.lr, warm_up_iterations))
        # BUGFIX: start the ramp at opts.warm_up_min_lr (was hard-coded 1e-07),
        # matching the log message above. Default value is unchanged (1e-07).
        lr_list = np.linspace(self.opts.warm_up_min_lr, self.opts.lr, warm_up_iterations)
        epoch_start_time = time.time()
        iteration = -1
        while iteration < warm_up_iterations:
            warm_up_stats = Statistics()
            for (batch_id, batch) in enumerate(self.train_loader):
                if iteration >= warm_up_iterations:
                    break
                iteration += 1
                try:
                    lr_iter = lr_list[iteration]
                except IndexError:
                    # Past the end of the ramp: hold at the target LR.
                    lr_iter = self.opts.lr
                self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=lr_iter)
                (words, true_diag_labels) = batch
                true_diag_labels = true_diag_labels.to(device=self.device)
                pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
                loss = self.criteria(pred_diag_labels, true_diag_labels)
                top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
                loss.backward()
                if (((batch_id + 1) % self.opts.accum_count) == 0) or ((batch_id + 1) == len(self.train_loader)):
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                warm_up_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if ((batch_id % self.opts.log_interval) == 0) and (batch_id > 0):
                    warm_up_stats.output(epoch=-1, batch=iteration, n_batches=warm_up_iterations, start=epoch_start_time, lr=lr_iter)
            gc.collect()
        print_log_message('Warming Up... Done!!!')

    def validation(self, epoch, lr, *args, **kwargs):
        """Run one validation pass and return (avg_accuracy, avg_loss)."""
        val_stats = Statistics()
        self.mi_model.eval()
        num_samples = len(self.val_loader)
        with torch.no_grad():
            epoch_start_time = time.time()
            for (batch_id, batch) in enumerate(self.val_loader):
                (words, true_diag_labels) = batch
                true_diag_labels = true_diag_labels.to(device=self.device)
                pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
                loss = self.criteria(pred_diag_labels, true_diag_labels)
                top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
                val_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if ((batch_id % self.opts.log_interval) == 0) and (batch_id > 0):
                    val_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
            gc.collect()
            avg_acc = val_stats.avg_acc()
            avg_loss = val_stats.avg_loss()
            print_log_message('* Validation Stats')
            print_log_message('* Loss: {:5.2f}, Mean Acc: {:3.2f}'.format(avg_loss, avg_acc))
        return (avg_acc, avg_loss)

    def run(self, *args, **kwargs):
        """Full training loop: optional warm-up, epochs, checkpointing, logging."""
        kwargs['need_attn'] = False
        if self.opts.warm_up:
            self.warm_up(args=args, kwargs=kwargs)
        if self.resume is not None:
            # Replay the scheduler so stateful schedulers (e.g. CyclicLR)
            # reach the internal state matching the resumed epoch.
            for epoch in range(self.start_epoch):
                self.lr_scheduler.step(epoch)
        eval_stats_dict = dict()
        for epoch in range(self.start_epoch, self.opts.epochs):
            epoch_lr = self.lr_scheduler.step(epoch)
            self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=epoch_lr)
            (train_acc, train_loss) = self.training(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
            (val_acc, val_loss) = self.validation(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
            eval_stats_dict[epoch] = val_acc
            gc.collect()
            is_best = val_acc >= self.best_acc
            self.best_acc = max(val_acc, self.best_acc)
            model_state = (self.mi_model.module.state_dict() if isinstance(self.mi_model, torch.nn.DataParallel) else self.mi_model.state_dict())
            optimizer_state = self.optimizer.state_dict()
            save_checkpoint(epoch=epoch, model_state=model_state, optimizer_state=optimizer_state, best_perf=self.best_acc, save_dir=self.opts.savedir, is_best=is_best, keep_best_k_models=self.opts.keep_best_k_models)
            self.logger.add_scalar('LR', round(epoch_lr, 6), epoch)
            self.logger.add_scalar('TrainingLoss', train_loss, epoch)
            self.logger.add_scalar('TrainingAcc', train_acc, epoch)
            self.logger.add_scalar('ValidationLoss', val_loss, epoch)
            self.logger.add_scalar('ValidationAcc', val_acc, epoch)
        # Persist validation stats sorted by accuracy (best first), merging any
        # stats file left behind by a previous run.
        eval_stats_dict_sort = {k: v for (k, v) in sorted(eval_stats_dict.items(), key=(lambda item: item[1]), reverse=True)}
        eval_stats_fname = '{}/val_stats_bag_{}_word_{}_{}_{}'.format(self.opts.savedir, self.opts.bag_size, self.opts.word_size, self.opts.attn_fn, self.opts.attn_type)
        writer = DictWriter(file_name=eval_stats_fname, format='json')
        # BUGFIX: DictWriter appends '.json' to the file name, but the existence
        # check (and the read) previously used the extension-less name, so stats
        # from earlier runs were never merged.
        stats_file_on_disk = '{}.json'.format(eval_stats_fname)
        if not os.path.isfile(stats_file_on_disk):
            writer.write(data_dict=eval_stats_dict_sort)
        else:
            with open(stats_file_on_disk, 'r') as json_file:
                eval_stats_dict_old = json.load(json_file)
            eval_stats_dict_old.update(eval_stats_dict_sort)
            eval_stats_dict_updated = {k: v for (k, v) in sorted(eval_stats_dict_old.items(), key=(lambda item: item[1]), reverse=True)}
            writer.write(data_dict=eval_stats_dict_updated)
        self.logger.close()
|
def build_criteria(opts, class_weights):
    """
    Build the criterion (loss) function.
    :param opts: arguments
    :param class_weights: per-class weight tensor used to counter class imbalance
    :return: Loss function
    """
    criteria = None
    if opts.loss_fn == 'ce':
        if opts.label_smoothing:
            from criterions.cross_entropy import CrossEntropyWithLabelSmoothing
            # NOTE(review): class_weights are not applied on the label-smoothing
            # path — confirm this is intended.
            criteria = CrossEntropyWithLabelSmoothing(ls_eps=opts.label_smoothing_eps)
            print_log_message('Using label smoothing value of : \n\t{}'.format(opts.label_smoothing_eps))
        else:
            criteria = nn.CrossEntropyLoss(weight=class_weights)
            class_wts_str = '\n\t'.join(['{} --> {:.3f}'.format(cl_id, class_weights[cl_id]) for cl_id in range(class_weights.size(0))])
            print_log_message('Using class-weights: \n\t{}'.format(class_wts_str))
    elif opts.loss_fn == 'bce':
        criteria = nn.BCEWithLogitsLoss(pos_weight=class_weights)
        class_wts_str = '\n\t'.join(['{} --> {:.3f}'.format(cl_id, class_weights[cl_id]) for cl_id in range(class_weights.size(0))])
        print_log_message('Using class-weights: \n\t{}'.format(class_wts_str))
    else:
        # BUGFIX: the loss-fn name was never substituted into the message
        # (missing .format call); also fixed the 'critiria' typo.
        print_error_message('{} criteria not yet supported'.format(opts.loss_fn))
    if criteria is None:
        print_error_message('Criteria function cannot be None. Please check')
    return criteria
|
def get_criteria_opts(parser):
    """Register loss-function command-line options on *parser* and return it."""
    criteria_group = parser.add_argument_group('Criteria options')
    criteria_group.add_argument('--loss-fn', default='ce', choices=supported_loss_fns,
                                help='Loss function')
    criteria_group.add_argument('--label-smoothing', action='store_true', default=False,
                                help='Smooth labels or not')
    criteria_group.add_argument('--label-smoothing-eps', default=0.1, type=float,
                                help='Epsilon for label smoothing')
    return parser
|
def get_data_loader(opts):
    """
    Create data loaders.
    :param opts: arguments
    :return: (train_loader, val_loader, diag_classes, class_weights)
    """
    (train_loader, val_loader, diag_classes) = (None, None, 0)
    if opts.dataset == 'bbwsi':
        from data_loader.bbwsi_dataset import BBWSIDataset
        train_dataset = BBWSIDataset(img_dir=opts.img_dir, split_file=opts.train_file, img_extn=opts.img_extn, delimeter=',')
        val_dataset = BBWSIDataset(img_dir=opts.img_dir, split_file=opts.val_file, img_extn=opts.img_extn, delimeter=',')
        diag_classes = train_dataset.n_classes
        bag_word_size = (opts.bag_size, opts.word_size)
        diag_labels = train_dataset.diag_labels
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opts.batch_size, shuffle=True, pin_memory=False, num_workers=opts.data_workers, collate_fn=(lambda batch: gen_collate_fn(batch=batch, bag_word_size=bag_word_size, is_training=True, scale_factor=opts.scale_factor, scale_multipliers=opts.scale_multipliers)))
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=opts.batch_size, shuffle=False, pin_memory=False, num_workers=opts.data_workers, collate_fn=(lambda batch: gen_collate_fn(batch=batch, bag_word_size=bag_word_size, is_training=False)))
    else:
        # print_error_message exits the process, so diag_labels below is only
        # reached on the supported-dataset path.
        print_error_message('{} dataset not supported yet'.format(opts.dataset))
    if opts.class_weights:
        # Inverse-log-frequency class weights to counter class imbalance.
        class_weights = np.histogram(diag_labels, bins=diag_classes)[0]
        class_weights = np.array(class_weights) / sum(class_weights)
        for i in range(diag_classes):
            class_weights[i] = round(np.log(1 / class_weights[i]), 5)
    else:
        # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
        # exact equivalent (float64).
        class_weights = np.ones(diag_classes, dtype=float)
    print_log_message('Bag size: {}, word size: {}'.format(opts.bag_size, opts.word_size))
    return (train_loader, val_loader, diag_classes, class_weights)
|
def get_test_data_loader(opts):
    """
    Create a data loader for test images.
    :param opts: Arguments
    :return: (test_loader, diag_classes, class_names)
    """
    test_loader, diag_classes, class_names = None, 0, None
    if opts.dataset != 'bbwsi':
        print_error_message('{} dataset not supported yet'.format(opts.dataset))
        return (test_loader, diag_classes, class_names)
    from data_loader.bbwsi_dataset import BBWSIDataset
    test_dataset = BBWSIDataset(img_dir=opts.img_dir, split_file=opts.test_file,
                                img_extn=opts.img_extn, delimeter=',')
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=opts.batch_size,
                                              shuffle=False, pin_memory=False,
                                              num_workers=opts.data_workers)
    # NOTE(review): class count is hard-coded to 4 here (unlike get_data_loader,
    # which reads it from the dataset) — confirm this matches the dataset.
    diag_classes = 4
    class_names = test_dataset.class_names
    return (test_loader, diag_classes, class_names)
|
def get_dataset_opts(parser):
    """Register medical-imaging dataset command-line options on *parser*."""
    dataset_group = parser.add_argument_group('Dataset general details')
    dataset_group.add_argument('--img-dir', type=str, default='./data', required=True,
                               help='Dataset location')
    dataset_group.add_argument('--img-extn', type=str, default='tiff',
                               help='Extension of WSIs. Default is tiff')
    dataset_group.add_argument('--dataset', type=str, default='bbwsi', choices=supported_datasets,
                               help='Dataset name')
    dataset_group.add_argument('--train-file', type=str, default='vision_datasets/breast_biopsy_wsi/train.txt',
                               help='Text file with training image ids and labels')
    dataset_group.add_argument('--val-file', type=str, default='vision_datasets/breast_biopsy_wsi/val.txt',
                               help='Text file with validation image ids and labels')
    dataset_group.add_argument('--test-file', type=str, default='vision_datasets/breast_biopsy_wsi/test.txt',
                               help='Text file with testing image ids and labels')
    input_group = parser.add_argument_group('Input details')
    input_group.add_argument('--bag-size', type=int, default=1024,
                             help='Bag size. We use square bags')
    input_group.add_argument('--word-size', type=int, default=256,
                             help='Word size. We use square bags')
    input_group.add_argument('--scale-factor', type=int, default=32,
                             help='Factor by which word size will be increased or decrease. Default is 32 because ImageNet models down-sample the input image by 32')
    input_group.add_argument('--scale-multipliers', type=int, default=[(- 2), (- 1), 0, 1, 2], nargs='+',
                             help='Factor by which word size will be increased or decrease')
    batch_group = parser.add_argument_group('Batching details')
    batch_group.add_argument('--batch-size', type=int, default=1, help='Batch size')
    batch_group.add_argument('--data-workers', type=int, default=1,
                             help='Number of workers for data loading')
    weights_group = parser.add_argument_group('Class-wise weights for loss fn')
    weights_group.add_argument('--class-weights', action='store_true', default=False,
                               help='Compute normalized to address class-imbalance')
    return parser
|
def build_model(opts, diag_classes, base_feature_odim):
    """
    Load the Medical Imaging Model.

    :param opts: Arguments
    :param diag_classes: Number of diagnostic classes
    :param base_feature_odim: Output dimension of base feature extractor such as CNN
    :return: the MI model
    """
    mi_model = None
    if opts.dataset in supported_datasets:
        from model.mi_model_e2e import MIModel
        assert (opts.bag_size % opts.word_size) == 0, 'Bag size should be divisible by word size'
        num_bags_words = opts.bag_size // opts.word_size
        mi_model = MIModel(n_classes=diag_classes, cnn_feature_sz=base_feature_odim, out_features=opts.out_features, num_bags_words=num_bags_words, num_heads=opts.attn_heads, dropout=opts.dropout, attn_type=opts.attn_type, attn_dropout=opts.attn_p, attn_fn=opts.attn_fn)
    else:
        # BUGFIX: the message previously formatted the literal string
        # 'self.opts.dataset' instead of the dataset name.
        print_error_message('Model for this dataset ({}) not yet supported'.format(opts.dataset))
    if mi_model is None:
        print_error_message('Model cannot be None. Please check')
    return mi_model
|
def get_model_opts(parser):
    """Register medical-imaging model command-line options on *parser*."""
    model_group = parser.add_argument_group('Medical Imaging Model Details')
    model_group.add_argument('--out-features', type=int, default=128,
                             help='Number of output features after merging bags and words')
    model_group.add_argument('--checkpoint', type=str, default='',
                             help='Checkpoint directory. If argument files existin this directory, then arguments will be automaticallyloaded from that file')
    model_group.add_argument('--attn-heads', default=2, type=int,
                             help='Number of attention heads')
    model_group.add_argument('--dropout', default=0.4, type=float,
                             help='Dropout value')
    model_group.add_argument('--weights-test', default='', type=str,
                             help='Weights file')
    model_group.add_argument('--max-bsz-cnn-gpu0', type=int, default=100,
                             help='Max. batch size on GPU0')
    model_group.add_argument('--attn-type', type=str, default='l2', choices=['avg', 'l1', 'l2'],
                             help='How to compute attention scores')
    model_group.add_argument('--attn-p', type=float, default=0.2,
                             help='Proability to drop bag and word attention weights')
    model_group.add_argument('--attn-fn', type=str, default='softmax', choices=['tanh', 'sigmoid', 'softmax'],
                             help='Proability to drop bag and word attention weights')
    model_group.add_argument('--keep-best-k-models', default=(- 1), type=int,
                             help='Number of best checkpoints to be saved')
    return parser
|
def build_optimizer(opts, model):
    """
    Create the optimizer over trainable parameters only.
    :param opts: Arguments
    :param model: Medical imaging model
    :return: Optimizer
    """
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = None
    if opts.optim == 'sgd':
        print_info_message('Using SGD optimizer')
        optimizer = optim.SGD(trainable_params, lr=opts.lr, weight_decay=opts.weight_decay)
    elif opts.optim == 'adam':
        print_info_message('Using ADAM optimizer')
        # Fall back to the standard Adam betas when not supplied.
        beta1 = opts.adam_beta1 if opts.adam_beta1 is not None else 0.9
        beta2 = opts.adam_beta2 if opts.adam_beta2 is not None else 0.999
        optimizer = optim.Adam(trainable_params, lr=opts.lr, betas=(beta1, beta2),
                               weight_decay=opts.weight_decay, eps=1e-09)
    else:
        print_error_message('{} optimizer not yet supported'.format(opts.optim))
    if optimizer is None:
        print_error_message('Optimizer cannot be None. Please check')
    return optimizer
|
def update_optimizer(optimizer, lr_value):
    """
    Update the learning rate of the optimizer's first parameter group.
    :param optimizer: Optimizer
    :param lr_value: Learning rate value to be used
    :return: Updated Optimizer
    """
    first_group = optimizer.param_groups[0]
    first_group['lr'] = lr_value
    return optimizer
|
def read_lr_from_optimzier(optimizer):
    """
    Utility to read the current LR value of an optimizer.
    :param optimizer: Optimizer
    :return: learning rate of the first parameter group
    """
    first_group = optimizer.param_groups[0]
    return first_group['lr']
|
def get_optimizer_opts(parser):
    """Register optimizer command-line options on *parser* and return it."""
    optim_group = parser.add_argument_group('Optimizer options')
    optim_group.add_argument('--optim', default='sgd', type=str, choices=supported_optimziers,
                             help='Optimizer')
    optim_group.add_argument('--adam-beta1', default=0.9, type=float,
                             help='Beta1 for ADAM')
    optim_group.add_argument('--adam-beta2', default=0.999, type=float,
                             help='Beta2 for ADAM')
    optim_group.add_argument('--lr', default=0.0005, type=float,
                             help='Initial learning rate for the optimizer')
    optim_group.add_argument('--weight-decay', default=4e-06, type=float,
                             help='Weight decay')
    accum_group = parser.add_argument_group('Optimizer accumulation options')
    accum_group.add_argument('--accum-count', type=int, default=1,
                             help='After how many iterations shall we update the weights')
    return parser
|
class ColorEncoder(object):
    """Maps dataset names to per-class plot colors and line styles."""

    def __init__(self):
        super(ColorEncoder, self).__init__()

    def get_colors(self, dataset_name):
        """Return (class_colors, class_linestyles) for *dataset_name*."""
        if dataset_name != 'bbwsi':
            raise NotImplementedError
        # ColorBrewer-like palette, given as 0-255 RGB and scaled to [0, 1].
        rgb_255 = [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163)]
        class_colors = [(r / 255.0, g / 255.0, b / 255.0) for (r, g, b) in rgb_255]
        class_linestyle = ['solid'] * len(class_colors)
        return (class_colors, class_linestyle)
|
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native Python types."""

    def default(self, obj):
        # Arrays first (they are neither np.integer nor np.floating).
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base class (raises TypeError).
        return super(NumpyEncoder, self).default(obj)
|
class CyclicLR(object):
    """
    Cyclic learning rate with warm restarts: within each cycle of ``cycle_len``
    epochs the LR decays linearly from ``min_lr * cycle_len`` down to ``min_lr``,
    then restarts at the maximum. At the epochs listed in ``steps``, ``min_lr``
    itself is decayed by ``gamma``.
    See https://arxiv.org/abs/1811.11431 for more details.
    """
    def __init__(self, min_lr=0.1, cycle_len=5, steps=[51, 101, 131, 161, 191, 221, 251, 281], gamma=0.5, step=True):
        super(CyclicLR, self).__init__()
        assert (len(steps) > 0), 'Please specify step intervals.'
        assert (0 < gamma <= 1), 'Learing rate decay factor should be between 0 and 1'
        self.min_lr = min_lr  # minimum (base) LR of the cycle; decayed at each step milestone
        self.m = cycle_len  # cycle length; also multiplies min_lr to give the cycle's max LR
        self.steps = steps  # epoch milestones at which min_lr is decayed by gamma
        self.warm_up_interval = 1  # one initial epoch at min_lr before cycling begins
        self.counter = 0  # position within the current cycle
        self.decayFactor = gamma
        self.count_cycles = 0  # epochs counted since the last step decay
        self.step_counter = 0  # index into self.steps
        self.stepping = step  # whether step decay of min_lr is enabled
    def step(self, epoch):
        # Return the LR for *epoch*. Mutates internal counters, so it must be
        # called exactly once per epoch, in increasing epoch order.
        if (((epoch % self.steps[self.step_counter]) == 0) and (epoch > 1) and self.stepping):
            # Reached a decay milestone: shrink min_lr and restart warm-up/cycle counting.
            self.min_lr = (self.min_lr * self.decayFactor)
            self.count_cycles = 0
            if (self.step_counter < (len(self.steps) - 1)):
                self.step_counter += 1
            else:
                # No milestones left; disable further step decay.
                self.stepping = False
        current_lr = self.min_lr
        if (self.count_cycles < self.warm_up_interval):
            # Warm-up epoch: hold at min_lr once, then disable warm-up.
            self.count_cycles += 1
            if (self.count_cycles == self.warm_up_interval):
                self.warm_up_interval = 0
        else:
            # Linear decay from min_lr*m down to min_lr across the cycle,
            # restarting at the maximum when the cycle completes.
            if (self.counter >= self.m):
                self.counter = 0
            current_lr = round(((self.min_lr * self.m) - (self.counter * self.min_lr)), 5)
            self.counter += 1
            self.count_cycles += 1
        return current_lr
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Min. base LR: {}\n'.format(self.min_lr)
        fmt_str += ' Max. base LR: {}\n'.format((self.min_lr * self.m))
        fmt_str += ' Step interval: {}\n'.format(self.steps)
        fmt_str += ' Decay lr at each step by {}\n'.format(self.decayFactor)
        return fmt_str
|
class MultiStepLR(object):
    """
    Fixed LR scheduler with steps: lr = base_lr * gamma^(milestones passed).
    """
    def __init__(self, base_lr=0.1, steps=[30, 60, 90], gamma=0.1, step=True):
        super(MultiStepLR, self).__init__()
        assert (len(steps) >= 1), 'Please specify step intervals.'
        self.base_lr = base_lr
        self.steps = steps
        self.decayFactor = gamma
        self.stepping = step
        print('Using Fixed LR Scheduler')
    def step(self, epoch):
        """Return the LR for *epoch*, rounded to 5 decimal places."""
        # Number of milestones already passed determines the decay exponent.
        num_decays = bisect.bisect(self.steps, epoch)
        return round(self.base_lr * (self.decayFactor ** num_decays), 5)
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Base LR: {}\n'.format(self.base_lr)
        fmt_str += ' Step interval: {}\n'.format(self.steps)
        fmt_str += ' Decay lr at each step by {}\n'.format(self.decayFactor)
        return fmt_str
|
class PolyLR(object):
    """
    Polynomial LR scheduler: lr = base_lr * (1 - epoch/max_epochs)^power.
    """
    def __init__(self, base_lr, max_epochs, power=0.99):
        super(PolyLR, self).__init__()
        assert (0 < power < 1)
        self.base_lr = base_lr
        self.power = power
        self.max_epochs = max_epochs
    def step(self, epoch):
        """Return the LR for *epoch*, rounded to 6 decimal places."""
        remaining = 1 - (float(epoch) / self.max_epochs)
        return round(self.base_lr * (remaining ** self.power), 6)
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Total Epochs: {}\n'.format(self.max_epochs)
        fmt_str += ' Base LR: {}\n'.format(self.base_lr)
        fmt_str += ' Power: {}\n'.format(self.power)
        return fmt_str
|
class LinearLR(object):
    """Linearly decays the LR from base_lr down to 0 over max_epochs."""
    def __init__(self, base_lr, max_epochs):
        super(LinearLR, self).__init__()
        self.base_lr = base_lr
        self.max_epochs = max_epochs
    def step(self, epoch):
        """Return the LR for *epoch*, rounded to 6 decimal places."""
        decayed = self.base_lr - (self.base_lr * (epoch / self.max_epochs))
        return round(decayed, 6)
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Total Epochs: {}\n'.format(self.max_epochs)
        fmt_str += ' Base LR: {}\n'.format(self.base_lr)
        return fmt_str
|
class HybirdLR(object):
    """Cyclic LR for the first clr_max epochs, then a linear decay to 0."""
    def __init__(self, base_lr, clr_max, max_epochs, cycle_len=5):
        super(HybirdLR, self).__init__()
        self.linear_epochs = (max_epochs - clr_max) + 1
        # Cyclic phase never step-decays (gamma=1); it restarts every cycle_len
        # epochs until clr_max.
        self.clr = CyclicLR(min_lr=base_lr, cycle_len=cycle_len, steps=[clr_max], gamma=1)
        self.decay_lr = LinearLR(base_lr=base_lr, max_epochs=self.linear_epochs)
        self.cyclic_epochs = clr_max
        self.base_lr = base_lr
        self.max_epochs = max_epochs
        self.clr_max = clr_max
        self.cycle_len = cycle_len
    def step(self, epoch):
        """Delegate to the cyclic scheduler early on, the linear one afterwards."""
        if epoch < self.cyclic_epochs:
            lr_now = self.clr.step(epoch)
        else:
            lr_now = self.decay_lr.step((epoch - self.cyclic_epochs) + 1)
        return round(lr_now, 6)
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Total Epochs: {}\n'.format(self.max_epochs)
        fmt_str += ' Cycle with length of {}: {}\n'.format(self.cycle_len, int(self.clr_max / self.cycle_len))
        fmt_str += ' Base LR with {} cycle length: {}\n'.format(self.cycle_len, self.base_lr)
        fmt_str += ' Cycle with length of {}: {}\n'.format(self.linear_epochs, 1)
        fmt_str += ' Base LR with {} cycle length: {}\n'.format(self.linear_epochs, self.base_lr)
        return fmt_str
|
class CosineLR(object):
    """Cosine-annealing LR: base_lr * (1 + cos(pi * epoch / max_epochs)) / 2."""
    def __init__(self, base_lr, max_epochs):
        super(CosineLR, self).__init__()
        self.base_lr = base_lr
        self.max_epochs = max_epochs
    def step(self, epoch):
        """Return the LR for *epoch*, rounded to 6 decimal places."""
        cosine_term = 1 + math.cos((math.pi * epoch) / self.max_epochs)
        return round((self.base_lr * cosine_term) / 2, 6)
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Total Epochs: {}\n'.format(self.max_epochs)
        fmt_str += ' Base LR : {}\n'.format(self.base_lr)
        return fmt_str
|
class FixedLR(object):
    """Constant LR scheduler: step() always returns base_lr."""
    def __init__(self, base_lr):
        self.base_lr = base_lr
    def step(self, epoch):
        # The LR never changes, regardless of epoch.
        return self.base_lr
    def __repr__(self):
        fmt_str = (('Scheduler ' + self.__class__.__name__) + '\n')
        fmt_str += ' Base LR : {}\n'.format(self.base_lr)
        return fmt_str
|
def get_lr_scheduler(opts):
    """Instantiate and return the LR scheduler selected by opts.scheduler."""
    def expand_steps(step_size):
        # Normalize to a list; a single value is expanded into its multiples
        # up to (but excluding) opts.epochs.
        sizes = step_size if isinstance(step_size, list) else [step_size]
        if len(sizes) == 1:
            unit = sizes[0]
            return [unit * i for i in range(1, int(math.ceil(opts.epochs / unit)))]
        return sizes

    if opts.scheduler == 'multistep':
        lr_scheduler = MultiStepLR(base_lr=opts.lr, steps=expand_steps(opts.step_size), gamma=opts.lr_decay)
    elif opts.scheduler == 'fixed':
        lr_scheduler = FixedLR(base_lr=opts.lr)
    elif opts.scheduler == 'clr':
        lr_scheduler = CyclicLR(min_lr=opts.lr, cycle_len=opts.cycle_len, steps=expand_steps(opts.step_size), gamma=opts.lr_decay)
    elif opts.scheduler == 'poly':
        lr_scheduler = PolyLR(base_lr=opts.lr, max_epochs=opts.epochs, power=opts.power)
    elif opts.scheduler == 'hybrid':
        lr_scheduler = HybirdLR(base_lr=opts.lr, max_epochs=opts.epochs, clr_max=opts.clr_max, cycle_len=opts.cycle_len)
    elif opts.scheduler == 'linear':
        lr_scheduler = LinearLR(base_lr=opts.lr, max_epochs=opts.epochs)
    else:
        # print_error_message exits the process on unsupported schedulers.
        print_error_message('{} scheduler Not supported'.format(opts.scheduler))
    print_info_message(lr_scheduler)
    return lr_scheduler
|
def get_scheduler_opts(parser):
    """Register learning-rate scheduler command-line options on *parser*."""
    group = parser.add_argument_group('Learning rate scheduler')
    group.add_argument('--scheduler', default='hybrid', choices=supported_schedulers, help='Learning rate scheduler (e.g. fixed, clr, poly)')
    group.add_argument('--step-size', default=[51], type=int, nargs='+', help='Step sizes')
    group.add_argument('--lr-decay', default=0.5, type=float, help='factor by which lr should be decreased')
    group = parser.add_argument_group('CLR relating settings')
    group.add_argument('--cycle-len', default=5, type=int, help='Cycle length')
    group.add_argument('--clr-max', default=61, type=int, help='Max number of epochs for cylic LR before changing last cycle to linear')
    group = parser.add_argument_group('Poly LR related settings')
    group.add_argument('--power', default=0.9, type=float, help='power factor for Polynomial LR')
    group = parser.add_argument_group('Warm-up settings')
    # BUGFIX: type=float was missing, so a CLI-supplied value arrived as a
    # string and broke numeric use (e.g. np.linspace in warm-up).
    group.add_argument('--warm-up-min-lr', default=1e-07, type=float, help='Warm-up minimum lr')
    group.add_argument('--warm-up', action='store_true', default=False, help='Warm-up')
    group.add_argument('--warm-up-iterations', default=2000, type=int, help='Number of warm-up iterations')
    return parser
|
def get_curr_time_stamp():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return time.strftime('%Y-%m-%d %H:%M:%S')
|
def print_error_message(message):
    """Print an ERROR-tagged message and terminate the process with code -1."""
    time_stamp = get_curr_time_stamp()
    error_str = text_colors['error'] + text_colors['bold'] + 'ERROR ' + text_colors['end_color']
    print(f'{time_stamp} - {error_str} - {message}')
    print(f'{time_stamp} - {error_str} - Exiting!!!')
    exit(-1)
|
def print_log_message(message):
    """Print a LOGS-tagged, timestamped message to stdout."""
    time_stamp = get_curr_time_stamp()
    log_str = text_colors['logs'] + text_colors['bold'] + 'LOGS ' + text_colors['end_color']
    print(f'{time_stamp} - {log_str} - {message}')
|
def print_warning_message(message):
    """Print a WARNING-tagged, timestamped message to stdout."""
    time_stamp = get_curr_time_stamp()
    warn_str = text_colors['warning'] + text_colors['bold'] + 'WARNING' + text_colors['end_color']
    print(f'{time_stamp} - {warn_str} - {message}')
|
def print_info_message(message):
    """Print an INFO-tagged, timestamped message to stdout."""
    time_stamp = get_curr_time_stamp()
    info_str = text_colors['info'] + text_colors['bold'] + 'INFO ' + text_colors['end_color']
    print(f'{time_stamp} - {info_str} - {message}')
|
class DictWriter(object):
    """Serializes a dict to disk as CSV, JSON, or plain text.

    The file extension matching the chosen format is appended to *file_name*.
    """

    def __init__(self, file_name, format='csv'):
        super(DictWriter, self).__init__()
        assert format in ['csv', 'json', 'txt']
        self.file_name = '{}.{}'.format(file_name, format)
        self.format = format

    def write(self, data_dict: dict):
        """Write *data_dict* to self.file_name in the configured format."""
        if self.format == 'csv':
            self._write_csv(data_dict)
        elif self.format == 'json':
            self._write_json(data_dict)
        else:
            self._write_txt(data_dict)

    def _write_csv(self, data_dict):
        # One [key, value] row per dict entry.
        import csv
        with open(self.file_name, 'w', newline='') as csv_file:
            csv.writer(csv_file).writerows(data_dict.items())

    def _write_json(self, data_dict):
        import json
        with open(self.file_name, 'w') as fp:
            json.dump(data_dict, fp, indent=4, sort_keys=True)

    def _write_txt(self, data_dict):
        with open(self.file_name, 'w') as txt_file:
            txt_file.writelines('{} : {}\n'.format(key, value) for (key, value) in data_dict.items())
|
class SummaryWriter(object):
    """File-based fallback for torch's TensorBoard SummaryWriter.

    Collects (value, step) pairs per tag, and on close() dumps them via
    DictWriter and best-effort renders one PNG plot per tag with matplotlib.
    """

    def __init__(self, log_dir, format='csv', *args, **kwargs):
        super(SummaryWriter, self).__init__()
        self.summary_dict = dict()
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        self.log_dir = log_dir
        self.file_name = '{}/logs'.format(log_dir)
        self.dict_writer = DictWriter(file_name=self.file_name, format=format)
        # Subsampling interval for x-axis tick labels in the plots.
        self.step = 20

    def add_scalar(self, tag, value, step=None, *args, **kwargs):
        """Record a (value, step) pair under *tag*."""
        if tag not in self.summary_dict:
            self.summary_dict[tag] = [(value, step)]
        else:
            self.summary_dict[tag].append((value, step))

    def close(self, *args, **kwargs):
        """Flush collected scalars to disk and (best-effort) draw plots."""
        self.dict_writer.write(self.summary_dict)
        try:
            from matplotlib import pyplot as plt
            for (k, v) in self.summary_dict.items():
                y_axis = []
                x_axis = []
                for (val, step) in v:
                    y_axis.append(val)
                    if step is not None:
                        x_axis.append(step)
                plt.title(k)
                plt.plot(y_axis)
                if len(x_axis) != 0:
                    assert len(y_axis) == len(x_axis)
                    x_axis = x_axis[0::self.step]
                    plt.xticks(x_axis, rotation=90)
                f_name = '{}/{}.png'.format(self.log_dir, k)
                plt.savefig(f_name, dpi=300, bbox_inches='tight')
                plt.clf()
        except Exception:
            # BUGFIX: narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); plotting stays best-effort.
            print_warning_message('Matplotlib is not installed so unable to draw plots')
|
def save_checkpoint(epoch, model_state, optimizer_state, best_perf, save_dir, is_best, keep_best_k_models=(- 1)):
    """Persist training state to ``save_dir``.

    Writes (1) a resumable 'checkpoint_last.pth' with epoch/optimizer state,
    (2) per-epoch weights 'model_<epoch>.pth', (3) optionally a rolling set of
    at most ``keep_best_k_models`` files named model_best_<epoch>_<perf>.pth,
    and (4) 'model_best.pth' when ``is_best`` is True.

    Args:
        epoch: current epoch number.
        model_state: model state_dict to store.
        optimizer_state: optimizer state_dict to store.
        best_perf: best validation performance so far (rounded to 3 decimals).
        save_dir: destination directory (must already exist).
        is_best: whether this checkpoint is the best seen so far.
        keep_best_k_models: if > 0, cap the number of best-model files kept.
    """
    best_perf = round(best_perf, 3)
    checkpoint = {'epoch': epoch, 'state_dict': model_state, 'optim_dict': optimizer_state, 'best_perf': best_perf}
    ckpt_fname = '{}/checkpoint_last.pth'.format(save_dir)
    torch.save(checkpoint, ckpt_fname)
    ep_ckpt_fname = '{}/model_{}.pth'.format(save_dir, epoch)
    torch.save(checkpoint['state_dict'], ep_ckpt_fname)
    if keep_best_k_models > 0:
        # FIX: the glob pattern was missing .format(save_dir), so previously
        # saved best checkpoints were never found and never pruned.
        checkpoint_files = glob.glob('{}/model_best_*'.format(save_dir))
        n_best_chkpts = len(checkpoint_files)
        if n_best_chkpts >= keep_best_k_models:
            # Group existing best checkpoints by the perf value in the name.
            perf_tie = dict()
            for f_name in checkpoint_files:
                perf = float(f_name.split('/')[-1].split('_')[-1].split('.pth')[0])
                if perf not in perf_tie:
                    perf_tie[perf] = [f_name]
                else:
                    perf_tie[perf].append(f_name)
            min_perf_k_checks = min(list(perf_tie.keys()))
            if best_perf >= min_perf_k_checks:
                # New best replaces the currently-worst stored best model.
                best_ckpt_fname = '{}/model_best_{}_{}.pth'.format(save_dir, epoch, best_perf)
                torch.save(checkpoint['state_dict'], best_ckpt_fname)
                # FIX: 'min_acc' was an undefined name (NameError at runtime).
                min_check_loc = perf_tie[min_perf_k_checks][0]
                if os.path.isfile(min_check_loc):
                    os.remove(min_check_loc)
        else:
            best_ckpt_fname = '{}/model_best_{}_{}.pth'.format(save_dir, epoch, best_perf)
            torch.save(checkpoint['state_dict'], best_ckpt_fname)
    if is_best:
        best_model_fname = '{}/model_best.pth'.format(save_dir)
        torch.save(model_state, best_model_fname)
    print_info_message('Checkpoint saved at: {}'.format(ep_ckpt_fname))
|
def load_checkpoint(checkpoint_dir, device='cpu'):
    """Load 'checkpoint_last.pth' from ``checkpoint_dir``.

    Returns:
        (epoch, model_state_dict, optimizer_state_dict, best_perf)
    """
    ckpt_fname = '{}/checkpoint_last.pth'.format(checkpoint_dir)
    # map_location lets CPU-only machines load GPU-trained checkpoints.
    checkpoint = torch.load(ckpt_fname, map_location=device)
    return (checkpoint['epoch'],
            checkpoint['state_dict'],
            checkpoint['optim_dict'],
            checkpoint['best_perf'])
|
def save_arguments(args, save_loc, json_file_name='arguments.json'):
    """Dump parsed argparse arguments as JSON under ``save_loc``."""
    arg_fname = '{}/{}'.format(save_loc, json_file_name)
    # vars() turns the Namespace into a plain dict for serialization.
    # NOTE(review): DictWriter appends another '.json' to the file name, so
    # the file on disk is 'arguments.json.json' — confirm intended.
    writer = DictWriter(file_name=arg_fname, format='json')
    writer.write(vars(args))
    print_log_message('Arguments are dumped here: {}'.format(arg_fname))
|
def load_arguments(parser, dumped_arg_loc, json_file_name='arguments.json'):
    """Re-parse CLI arguments using defaults loaded from a dumped JSON file."""
    arg_fname = '{}/{}'.format(dumped_arg_loc, json_file_name)
    # Wrap the original parser so its options are kept while defaults change.
    child_parser = argparse.ArgumentParser(parents=[parser], add_help=False)
    with open(arg_fname, 'r') as fp:
        child_parser.set_defaults(**json.load(fp))
    return child_parser.parse_args()
|
def load_arguments_file(parser, arg_fname):
    """Re-parse CLI arguments using defaults taken from the JSON file ``arg_fname``."""
    child_parser = argparse.ArgumentParser(parents=[parser], add_help=False)
    with open(arg_fname, 'r') as fp:
        child_parser.set_defaults(**json.load(fp))
    return child_parser.parse_args()
|
def shuffle_samples(X, y):
    """Shuffle X and y jointly, keeping (sample, label) pairs aligned.

    Returns:
        Tuple of numpy arrays (shuffled X, shuffled y).
    """
    paired = list(zip(X, y))
    np.random.shuffle(paired)
    X_shuffled, y_shuffled = zip(*paired)
    return np.asarray(X_shuffled), np.asarray(y_shuffled)
|
def prepare_dataset(K):
    """Generate a synthetic K-channel time-series clustering dataset.

    Builds 4 clusters of 150 sequences each; every cluster is a random
    mixture of 1-4 sinusoids (with per-channel coefficients/frequencies)
    plus unit Gaussian noise.

    Args:
        K: number of channels per sequence.

    Returns:
        (seq_list, label_list): sequences of shape (L, K) with L = 100
        timesteps, and integer cluster labels 0..3.
    """
    (n_clusters, N, L, dt) = (4, 150, 100, 0.1)
    t = np.arange(0, (L * dt), dt)[:L]
    (seq_list, label_list) = ([], [])
    for i in range(n_clusters):
        # FIX: np.random.random_integers was removed from NumPy; randint's
        # upper bound is exclusive, so randint(1, 5) == random_integers(1, 4).
        n_sinusoids = np.random.randint(1, 5)
        sample_parameters = [[np.random.normal(loc=1, scale=2, size=K), np.random.normal(loc=10, scale=5, size=K)] for _ in range(n_sinusoids)]
        for j in range(N):
            # Sum the cluster's sinusoids per channel and add Gaussian noise.
            # NOTE(review): vstack gives (K, L) and reshape(L, K) interleaves
            # channel/time rather than transposing — confirm intended.
            seq = np.vstack([(np.sum([(coef[k] * np.sin((((2 * np.pi) * freq[k]) * t))) for (coef, freq) in sample_parameters], axis=0) + np.random.randn(L)) for k in range(K)]).reshape(L, K)
            seq_list.append(seq)
            label_list.append(i)
    return (seq_list, label_list)
|
class Dataset(object):
    """Synthetic time-series dataset with an 80/20 train/test split."""

    def __init__(self, dataset):
        # Number of channels per sequence.
        self.K = 3
        if (dataset == 'synthetic'):
            (seq_list, label_list) = prepare_dataset(self.K)
        else:
            assert False, 'does not exists dataset: {}.'.format(dataset)
        # Sequence length (timesteps), taken from the first sample.
        self.L = seq_list[0].shape[0]
        (self.seq_list, self.label_list) = shuffle_samples(seq_list, label_list)
        n_training = int((len(self.seq_list) * 0.8))
        (self.train_seq, self.train_label) = (np.array(self.seq_list[:n_training]), self.label_list[:n_training])
        (self.test_seq, self.test_label) = (np.array(self.seq_list[n_training:]), self.label_list[n_training:])
        print('dataset size: train={}, test={}'.format(len(self.train_seq), len(self.test_seq)))

    def gen_next_batch(self, batch_size, is_train_set=True, epoch=None, iteration=None):
        """Yield (x_batch, y_batch, indices) tuples sampled without replacement
        within a batch (random.sample), bounded by `epoch` or `iteration`.

        Exactly one of epoch / iteration must be provided.
        """
        if (is_train_set == True):
            data, labels = self.train_seq, self.train_label
        else:
            data, labels = self.test_seq, self.test_label
        assert len(data) >= batch_size, 'batch size must be smaller than data size: {}.'.format(len(data))
        if epoch is not None:
            # Enough iterations to cover the dataset `epoch` times.
            until = math.ceil(float(epoch * len(data)) / float(batch_size))
        elif iteration is not None:
            until = iteration
        else:
            assert False, 'epoch or iteration must be set.'
        all_indices = [i for i in range(len(data))]
        batch_count = 0
        while batch_count <= until:
            chosen = random.sample(all_indices, batch_size)
            batch_count += 1
            yield (data[chosen], labels[chosen], chosen)
|
def print_shape(name, tensor):
    """Debug helper: print the shape of a named tensor/array."""
    print(f'shape of {name} is {tensor.shape}')
|
class AutoEncoder(object):
    """Conv + stacked bidirectional-LSTM sequence autoencoder, built as a
    TensorFlow-1 static graph at construction time.

    Expects ``args`` to provide at least: L (sequence length), K (channels),
    kernel_size, P (stride), n_filters_CNN, n_filters_RNN_list.
    """

    def __init__(self, args):
        # Adopt every config entry (L, K, kernel_size, ...) as an attribute.
        self.__dict__ = args.copy()
        # Input batch of sequences: (batch, L timesteps, K channels).
        self.input_ = tf.placeholder(tf.float32, shape=[None, self.L, self.K])
        # Scalar batch size, needed for dynamic reshapes inside the graph.
        self.input_batch_size = tf.placeholder(tf.int32, shape=[])
        self.layers = []
        with tf.name_scope('encoder'):
            self.encoder = self._encoder_network()
        with tf.name_scope('decoder'):
            self.decoder = self._decoder_network()
        with tf.name_scope('ae-train'):
            # Plain MSE reconstruction objective.
            self.loss = tf.losses.mean_squared_error(self.input_, self.decoder)
            # Step-decayed learning rate: x0.1 every 20k global steps.
            learning_rate = tf.train.exponential_decay(learning_rate=0.1, global_step=tf.train.get_or_create_global_step(), decay_steps=20000, decay_rate=0.1, staircase=True)
            self.optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(self.loss)

    def _encoder_network(self):
        """Build the conv (+ pool) and stacked bidirectional-LSTM encoder."""
        print_shape('input', self.input_)
        # Mask of timesteps with any non-zero channel, summed to true lengths.
        used = tf.sign(tf.reduce_max(tf.abs(self.input_), reduction_indices=2))
        self.length = tf.cast(tf.reduce_sum(used, reduction_indices=1), tf.int32)
        if (self.K == 1):
            # Univariate input: 1-D convolution followed by max pooling.
            conv_out = layers.convolution1d(inputs=self.input_, num_outputs=self.n_filters_CNN, kernel_size=self.kernel_size, activation_fn=tf.nn.leaky_relu)
            print_shape('conv out', conv_out)
            max_pooled = tf.layers.max_pooling1d(inputs=conv_out, pool_size=self.kernel_size, strides=self.P)
            print_shape('max pooled', max_pooled)
        else:
            # Multivariate input: strided 2-D convolution over (time, channel).
            W_conv_enc = tf.get_variable('W_conv_enc', shape=[self.kernel_size, self.K, 1, self.n_filters_CNN])
            conv_out = tf.nn.conv2d(input=tf.expand_dims(self.input_, axis=3), filter=W_conv_enc, strides=[1, self.P, self.P, 1], padding='SAME')
            print_shape('conv out', conv_out)
            # NOTE(review): despite the name, this branch only reshapes the
            # conv output — no pooling op is applied; confirm intended.
            max_pooled = tf.reshape(conv_out, shape=[self.input_batch_size, conv_out.shape[1], conv_out.shape[3]])
            print_shape('max pooled', max_pooled)
        # One fw/bw LSTM cell pair per entry of n_filters_RNN_list.
        cell_fw_list = [rnn.LSTMCell(n_filters_RNN) for n_filters_RNN in self.n_filters_RNN_list]
        cell_bw_list = [rnn.LSTMCell(n_filters_RNN) for n_filters_RNN in self.n_filters_RNN_list]
        (encoder, encoder_state_fw, encoder_state_bw) = rnn.stack_bidirectional_dynamic_rnn(cells_fw=cell_fw_list, cells_bw=cell_bw_list, inputs=max_pooled, dtype=tf.float32, time_major=False, scope=None)
        print_shape('encoder', encoder)
        return encoder

    def _decoder_network(self):
        """Upsample the latent sequence and convolve back to (L, K)."""
        if (self.K == 1):
            # Bilinear-resize the latent back toward the input length, then a
            # final 1-D convolution maps to K output channels.
            encoder_tmp = tf.expand_dims(self.encoder, axis=3)
            upsampled_tmp = tf.image.resize_images(encoder_tmp, size=[((self.L + self.kernel_size) - 1), 1])
            upsampled = tf.reshape(upsampled_tmp, shape=[(- 1), upsampled_tmp.shape[1], upsampled_tmp.shape[2]])
            print_shape('upsampled', upsampled)
            decoder = layers.convolution1d(inputs=upsampled, num_outputs=self.K, kernel_size=self.kernel_size, activation_fn=None)
        else:
            # Transposed convolution mirrors the encoder's strided conv.
            encoder_tmp = tf.expand_dims(self.encoder, axis=2)
            print_shape('encoder tmp', encoder_tmp)
            W_conv_dec = tf.get_variable('W_conv_dec', shape=[self.kernel_size, self.K, 1, encoder_tmp.shape[3]])
            decoder_tmp = tf.nn.conv2d_transpose(value=encoder_tmp, filter=W_conv_dec, output_shape=[self.input_batch_size, self.L, self.K, 1], strides=[1, self.P, self.P, 1], padding='SAME')
            decoder = tf.reshape(decoder_tmp, shape=[self.input_batch_size, self.L, self.K])
        print_shape('decoder', decoder)
        return decoder
|
class DeepTemporalClustering(object):
    """Deep Temporal Clustering: an autoencoder latent space refined with a
    Student-t soft-assignment / KL-divergence clustering objective (TF1 graph).

    Expects ``params`` to provide n_clusters, alpha, and every key the wrapped
    AutoEncoder needs.
    """

    def __init__(self, params):
        self.__dict__ = params.copy()
        # KMeans is used once (get_assign_cluster_centers_op) to init mu.
        self.kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
        self.auto_encoder = AutoEncoder(self.__dict__)
        self.z = self.auto_encoder.encoder
        self.y = self.auto_encoder.decoder
        z_shape = self.z.shape
        # Cluster centers live in latent space: (n_clusters, L_tmp, dim).
        self.mu = tf.Variable(tf.zeros(shape=[self.n_clusters, z_shape[1], z_shape[2]]), name='mu')
        with tf.name_scope('distribution'):
            self.q = self._soft_assignment(self.z, self.mu)
            # Target distribution p is computed outside the graph and fed in.
            self.p = tf.placeholder(tf.float32, shape=(None, self.n_clusters))
            self.pred = tf.argmax(self.q, axis=1)
        with tf.name_scope('dtc-train'):
            self.loss_kl = self._kl_divergence(self.p, self.q)
            # Joint objective: clustering KL + weighted reconstruction loss.
            self.loss = (self.loss_kl + (0.01 * self.auto_encoder.loss))
            self.optimizer = tf.train.AdamOptimizer(0.001).minimize(self.loss)

    def _soft_assignment(self, embeddings, cluster_centers):
        """Implemented a soft assignment as the probability of assigning sample i to cluster j.

        Args:
            - embeddings: (N, L_tmp, dim)
            - cluster_centers: (n_clusters, L_tmp, dim)

        Return:
            - q_ij: (N, n_clusters)
        """
        def _pairwise_euclidean_distance(a, b):
            # Frobenius norm over the (L_tmp, dim) axes after broadcasting.
            return tf.norm((tf.expand_dims(a, axis=1) - b), 'euclidean', axis=(2, 3))
        dist = _pairwise_euclidean_distance(embeddings, cluster_centers)
        # Student's t-distribution kernel, normalized per sample.
        q = (1.0 / ((1.0 + ((dist ** 2) / self.alpha)) ** ((self.alpha + 1.0) / 2.0)))
        q = (q / tf.reduce_sum(q, axis=1, keepdims=True))
        return q

    def target_distribution(self, q):
        """Sharpened target distribution p from soft assignments q (numpy)."""
        p = ((q ** 2) / q.sum(axis=0))
        p = (p / p.sum(axis=1, keepdims=True))
        return p

    def _kl_divergence(self, target, pred):
        """Mean per-sample KL(target || pred) across the batch."""
        return tf.reduce_mean(tf.reduce_sum((target * tf.log((target / pred))), axis=1))

    def get_assign_cluster_centers_op(self, features):
        """Fit KMeans on the flattened latent features and return the op that
        assigns the resulting centers to mu."""
        print('Start training KMeans')
        kmeans = self.kmeans.fit(features.reshape(len(features), (- 1)))
        print('Finish training KMeans')
        return tf.assign(self.mu, kmeans.cluster_centers_.reshape(self.n_clusters, features.shape[1], features.shape[2]))
|
class InferenceLearnedModel():
    """Restore trained AE and DTC checkpoints and run inference/visualization.

    Decoded sequences are computed from both the pretrained autoencoder and
    the fully-optimized DTC model so the two reconstructions can be compared.
    """

    def __init__(self, args):
        self.__dict__ = args.copy()
        self.data = Dataset(self.dataset)
        model = DeepTemporalClustering(params={'n_clusters': 4, 'L': self.data.L, 'K': self.data.K, 'n_filters_CNN': 100, 'kernel_size': 10, 'P': 10, 'n_filters_RNN_list': [50, 50], 'alpha': 1.0})
        ae_ckpt_path = os.path.join('ae_ckpt', 'model.ckpt')
        saver = tf.train.Saver(var_list=tf.trainable_variables())
        # First pass: reconstructions from the autoencoder-only checkpoint.
        with tf.Session() as sess:
            saver.restore(sess, ae_ckpt_path)
            init_decoded_list = sess.run(model.auto_encoder.decoder, feed_dict={model.auto_encoder.input_: self.data.seq_list, model.auto_encoder.input_batch_size: len(self.data.seq_list)})
        dc_ckpt_path = os.path.join('dtc_ckpt', 'model.ckpt')
        # Second pass: reconstructions, cluster ids and latents from the DTC
        # checkpoint.
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, dc_ckpt_path)
            (decoded_list, self.cluster_list, self.z_list) = sess.run([model.auto_encoder.decoder, model.pred, model.z], feed_dict={model.auto_encoder.input_: self.data.seq_list, model.auto_encoder.input_batch_size: len(self.data.seq_list)})
        self.decoded_list_list = [init_decoded_list, decoded_list]

    def plot_decoded_sequences(self, stop_idx=None):
        """Write one figure (original + both reconstructions) and the predicted
        cluster id per sequence into ./_fig/dat<i>/.

        Args:
            stop_idx: plot only the first stop_idx sequences (all when None).
        """
        savedir = os.path.join('.', '_fig')
        # Start from a clean output directory every run.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.mkdir(savedir)
        print('writing ...')
        N = (len(self.data.seq_list) if (stop_idx == None) else stop_idx)
        p = ProgressBar(maxval=N).start()
        for idx in range(N):
            _dir = 'dat{}'.format(idx)
            os.mkdir(os.path.join(savedir, _dir))
            (fig, ax) = plt.subplots(nrows=(1 + len(self.decoded_list_list)), ncols=1, figsize=(20, 15), sharex=True, sharey=True)
            fig.suptitle('[{}] upper: original sequence, lower: decoded sequence'.format(_dir), fontsize=10)
            plt.subplots_adjust(hspace=0.2)
            X = self.data.seq_list[idx]
            # Row 0 is the original; rows 1..n show the reconstructions.
            ax[0].plot(X)
            for (i, decoded_list) in enumerate(self.decoded_list_list):
                decoded = decoded_list[idx]
                ax[(i + 1)].plot(decoded)
            plt.savefig(os.path.join(savedir, _dir, 'result.png'))
            plt.close()
            with open(os.path.join(savedir, _dir, 'cluster.txt'), 'w') as fo:
                fo.write('{}'.format(self.cluster_list[idx]))
            if (idx == (N - 1)):
                break
            p.update((idx + 1))
        p.finish()
|
def main():
    """Run inference with the learned DTC model and visualize the results."""
    cli_args = generate_args()
    inference = InferenceLearnedModel(cli_args)
    inference.plot_decoded_sequences(stop_idx=10)
    # 2-D t-SNE embedding of the flattened latent vectors.
    tsne_params = {'dimensions': 2, 'perplexity': 30.0, 'theta': 0.5, 'rand_seed': (- 1)}
    tsne = BHTSNE(tsne_params)
    tsne.fit_and_plot(inference.z_list.reshape((len(inference.z_list), (- 1))), inference.data.label_list, inference.cluster_list)
|
def print_result(cur, total, loss_all_train, loss_seq_train, loss_train, loss_all_val, loss_seq_val, loss_val):
    """Print one progress line with train/val (total, seq, latent) losses."""
    template = '{0:d} / {1:d}\t train ({2:5.3f}, {3:5.3f}, {4:5.3f})\t val({5:5.3f}, {6:5.3f}, {7:5.3f})\t in order (total, seq, lat)'
    print(template.format(cur, total, loss_all_train, loss_seq_train, loss_train, loss_all_val, loss_seq_val, loss_val))
|
def train(args, batch_size=8, finetune_iteration=100, optimization_iteration=100, pretrained_ae_ckpt_path=None):
    """Two-stage DTC training: (1) autoencoder finetuning unless a pretrained
    AE checkpoint is supplied, then (2) joint clustering + reconstruction
    optimization initialized from KMeans cluster centers.

    Args:
        args: dict with at least 'dataset' and 'n_clusters'.
        batch_size: minibatch size for both stages.
        finetune_iteration: number of AE finetuning iterations.
        optimization_iteration: number of DTC optimization epochs.
        pretrained_ae_ckpt_path: skip stage 1 and restore this checkpoint.
    """
    dataset = args['dataset']
    data = Dataset(dataset)
    model = DeepTemporalClustering(params={'n_clusters': args['n_clusters'], 'L': data.L, 'K': data.K, 'n_filters_CNN': 100, 'kernel_size': 10, 'P': 10, 'n_filters_RNN_list': [50, 50], 'alpha': 1.0})
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=None)
    # NOTE(review): finetune_iteration < 100 makes this 0 and the modulo
    # below would raise ZeroDivisionError — confirm expected inputs.
    log_interval = int((finetune_iteration / 100))
    if (pretrained_ae_ckpt_path == None):
        # Stage 1: reconstruction-only finetuning of the autoencoder.
        ae_ckpt_path = os.path.join('ae_ckpt', 'model.ckpt')
        print('Parameter(AE) finetuning')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for (_iter, (batch_seq, batch_label, _)) in enumerate(data.gen_next_batch(batch_size=batch_size, iteration=finetune_iteration)):
                (_, loss) = sess.run([model.auto_encoder.optimizer, model.auto_encoder.loss], feed_dict={model.auto_encoder.input_: batch_seq, model.auto_encoder.input_batch_size: batch_size})
                if ((_iter % log_interval) == 0):
                    print('[AE-finetune] iter: {}\tloss: {}'.format(_iter, loss))
            saver.save(sess, ae_ckpt_path)
    else:
        ae_ckpt_path = pretrained_ae_ckpt_path
    dec_ckpt_path = os.path.join('dtc_ckpt', 'model.ckpt')
    # Stage 2: clustering optimization starting from the AE weights.
    print('Parameter(DTC) optimization')
    saver = tf.train.Saver(var_list=tf.trainable_variables())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, ae_ckpt_path)
        # Initialize cluster centers mu via KMeans on the latent features.
        z = sess.run(model.z, feed_dict={model.auto_encoder.input_: data.train_seq, model.auto_encoder.input_batch_size: len(data.train_seq)})
        assign_mu_op = model.get_assign_cluster_centers_op(z)
        _ = sess.run(assign_mu_op)
        log_interval = int((optimization_iteration / 10))
        for cur_epoch in range(optimization_iteration):
            # Recompute the sharpened target distribution once per epoch.
            q = sess.run(model.q, feed_dict={model.auto_encoder.input_: data.train_seq, model.auto_encoder.input_batch_size: len(data.train_seq)})
            p = model.target_distribution(q)
            (loss_train_list, loss_kl_train_list, loss_seq_train_list) = ([], [], [])
            for (_iter, (batch_seq, batch_label, batch_idxs)) in enumerate(data.gen_next_batch(batch_size=batch_size, epoch=1)):
                # Slice the precomputed targets to this batch's sample indices.
                batch_p = p[batch_idxs]
                (_, loss, loss_kl, loss_seq, pred, decoder) = sess.run([model.optimizer, model.loss, model.loss_kl, model.auto_encoder.loss, model.pred, model.y], feed_dict={model.auto_encoder.input_: batch_seq, model.auto_encoder.input_batch_size: len(batch_seq), model.p: batch_p})
                loss_train_list.append(loss)
                loss_kl_train_list.append(loss_kl)
                loss_seq_train_list.append(loss_seq)
            if ((cur_epoch % 10) == 0):
                # Periodic validation on the held-out split.
                q = sess.run(model.q, feed_dict={model.auto_encoder.input_: data.test_seq, model.auto_encoder.input_batch_size: len(data.test_seq)})
                p = model.target_distribution(q)
                (loss_val, loss_kl_val, loss_seq_val) = sess.run([model.loss, model.loss_kl, model.auto_encoder.loss], feed_dict={model.auto_encoder.input_: data.test_seq, model.auto_encoder.input_batch_size: len(data.test_seq), model.p: p})
                print_result(cur_epoch, optimization_iteration, loss, loss_seq, loss_kl, loss_val, loss_seq_val, loss_kl_val)
        saver.save(sess, dec_ckpt_path)
|
def main():
    """Entry point: train the DTC model with CLI-provided arguments.

    NOTE(review): this redefines the earlier main(); only this definition
    survives at import time.
    """
    train(generate_args())
|
class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter pr. channel to compute the attention value for a single timestep.
    """

    def __init__(self, return_attention=False, **kwargs):
        self.init = initializers.get('uniform')
        # Masked timesteps are zeroed out of the attention weights in call().
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expect (batch, timesteps, channels).
        self.input_spec = [InputSpec(ndim=3)]
        assert (len(input_shape) == 3)
        # One scalar attention parameter per channel.
        self.W = self.add_weight(shape=(input_shape[2], 1), name='{}_W'.format(self.name), initializer=self.init)
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, x, mask=None):
        # Per-timestep attention logits, reshaped to (batch, timesteps).
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        # Numerically-stable softmax over time: subtract the max before exp.
        ai = K.exp((logits - K.max(logits, axis=(- 1), keepdims=True)))
        if (mask is not None):
            # Zero the attention on padded timesteps.
            mask = K.cast(mask, K.floatx())
            ai = (ai * mask)
        # Epsilon guards against division by zero when everything is masked.
        att_weights = (ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon()))
        weighted_input = (x * K.expand_dims(att_weights))
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result

    def get_output_shape_for(self, input_shape):
        # Keras-1 compatibility alias for compute_output_shape.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            # (pooled features, attention weights over timesteps).
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        # The time axis is collapsed here, so no mask propagates downstream.
        if isinstance(input_mask, list):
            return ([None] * len(input_mask))
        else:
            return None
|
def elsa_doc_model(hidden_dim=64, dropout=0.5, mode='train'):
    """Build the ELSA document classifier over two attention-pooled inputs.

    NOTE(review): relies on module-level nb_maxlen / nb_feature globals.
    """
    english_input = Input(shape=(nb_maxlen[0], nb_feature[1]), dtype='float32')
    english_repr = AttentionWeightedAverage()(english_input)
    other_input = Input(shape=(nb_maxlen[1], nb_feature[0]), dtype='float32')
    other_repr = AttentionWeightedAverage()(other_input)
    hidden = concatenate([other_repr, english_repr])
    hidden = Dense(hidden_dim, activation='selu')(hidden)
    if mode == 'train':
        # Dropout is only inserted into the training graph.
        hidden = Dropout(dropout)(hidden)
    prediction = Dense(1, activation='sigmoid', name='softmax')(hidden)
    return Model(inputs=[other_input, english_input], outputs=prediction)
|
def elsa_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_dim=300, embed_l2=1e-06, return_attention=False, load_embedding=False, pre_embedding=None, high=False, LSTM_hidden=512, LSTM_drop=0.5):
    """
    Returns the DeepMoji architecture uninitialized and
    without using the pretrained model weights.
    # Arguments:
        nb_classes: Number of classes in the dataset.
        nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
        maxlen: Maximum length of a token.
        feature_output: If True the model returns the penultimate
                        feature vector rather than Softmax probabilities
                        (defaults to False).
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.
        embed_l2: L2 regularization for the embedding layerl.
        high: use or not the highway network
    # Returns:
        Model with the given parameters.
    """
    class NonMasking(Layer):
        # Pass-through layer that drops the incoming Keras mask so that
        # downstream layers which reject masks still accept the tensor.
        def __init__(self, **kwargs):
            self.supports_masking = True
            super(NonMasking, self).__init__(**kwargs)

        def build(self, input_shape):
            input_shape = input_shape

        def compute_mask(self, input, input_mask=None):
            # Swallow the mask.
            return None

        def call(self, x, mask=None):
            return x

        def get_output_shape_for(self, input_shape):
            return input_shape

    model_input = Input(shape=(maxlen,), dtype='int32')
    embed_reg = (L1L2(l2=embed_l2) if (embed_l2 != 0) else None)
    if ((not load_embedding) and (pre_embedding is None)):
        # Train the embedding from scratch.
        embed = Embedding(input_dim=nb_tokens, output_dim=embed_dim, mask_zero=True, input_length=maxlen, embeddings_regularizer=embed_reg, name='embedding')
    else:
        # Initialize from the provided pretrained matrix (still trainable).
        embed = Embedding(input_dim=nb_tokens, output_dim=embed_dim, mask_zero=True, input_length=maxlen, weights=[pre_embedding], embeddings_regularizer=embed_reg, trainable=True, name='embedding')
    if high:
        x = NonMasking()(embed(model_input))
    else:
        x = embed(model_input)
    x = Activation('tanh')(x)
    if (embed_dropout_rate != 0):
        embed_drop = SpatialDropout1D(embed_dropout_rate, name='embed_drop')
        x = embed_drop(x)
    # Two stacked BiLSTMs; their outputs and the embedding are concatenated
    # (skip connections) before the attention pooling layer.
    lstm_0_output = Bidirectional(LSTM(LSTM_hidden, return_sequences=True, dropout=LSTM_drop), name='bi_lstm_0')(x)
    lstm_1_output = Bidirectional(LSTM(LSTM_hidden, return_sequences=True, dropout=LSTM_drop), name='bi_lstm_1')(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])
    if high:
        x = TimeDistributed(Highway(activation='tanh', name='high'))(x)
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=return_attention)(x)
    if return_attention:
        (x, weights) = x
    if (not feature_output):
        if (final_dropout_rate != 0):
            x = Dropout(final_dropout_rate)(x)
        # Softmax head for multiclass, single sigmoid unit for binary.
        if (nb_classes > 2):
            outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
        else:
            outputs = [Dense(1, activation='sigmoid', name='softmax')(x)]
    else:
        # Return the penultimate feature vector instead of probabilities.
        outputs = [x]
    if return_attention:
        outputs.append(weights)
    return Model(inputs=[model_input], outputs=outputs)
|
class VocabBuilder():
    """ Create vocabulary with words extracted from sentences as fed from a
    word generator.
    """

    def __init__(self, word_gen):
        # defaultdict so unseen words start at 0; seeded with the special
        # tokens so they always exist in the vocabulary.
        self.word_counts = defaultdict((lambda : 0), {})
        self.word_length_limit = 30
        for token in SPECIAL_TOKENS:
            # Ensure special tokens survive the length filter below.
            assert (len(token) < self.word_length_limit)
            self.word_counts[token] = 0
        self.word_gen = word_gen

    def count_words_in_sentence(self, words):
        """ Generates word counts for all tokens in the given sentence.

        # Arguments:
            words: Tokenized sentence whose words should be counted.
        """
        for word in words:
            if ((0 < len(word)) and (len(word) <= self.word_length_limit)):
                try:
                    self.word_counts[word] += 1
                except KeyError:
                    self.word_counts[word] = 1

    def save_vocab(self, path=None):
        """ Saves the vocabulary into a file.

        # Arguments:
            path: Where the vocabulary should be saved. If not specified, a
                randomly generated filename is used instead.
        """
        dtype = [('word', '|S{}'.format(self.word_length_limit)), ('count', 'int')]
        # FIX: dict.items() is a view in Python 3; materialize it for np.array.
        np_dict = np.array(list(self.word_counts.items()), dtype=dtype)
        # Sort by count, descending (in-place sort of the reversed view).
        np_dict[::(- 1)].sort(order='count')
        data = np_dict
        if (path is None):
            path = str(uuid.uuid4())
        np.savez_compressed(path, data=data)
        print('Saved dict to {}'.format(path))

    def get_next_word(self):
        """ Returns next tokenized sentence from the word generator.

        # Returns:
            List of strings, representing the next tokenized sentence.
        """
        # FIX: iterator.next() was removed in Python 3; use the next() builtin.
        return next(iter(self.word_gen))

    def count_all_words(self):
        """ Generates word counts for all words in all sentences of the word
        generator.
        """
        for words in self.word_gen:
            words = json.loads(words)
            self.count_words_in_sentence(words)
|
class MasterVocab():
    """ Combines vocabularies.
    """

    def __init__(self):
        # word -> normalized combined count.
        self.master_vocab = {}

    def populate_master_vocab(self, vocab_path, min_words=1, force_appearance=None):
        """ Populates the master vocabulary using all vocabularies found in the
        given path. Vocabularies should be named *.npz. Expects the
        vocabularies to be numpy arrays with counts. Normalizes the counts
        and combines them.

        # Arguments:
            vocab_path: Path containing vocabularies to be combined.
            min_words: Minimum amount of occurences a word must have in order
                to be included in the master vocabulary.
            force_appearance: Optional vocabulary filename that will be added
                to the master vocabulary no matter what. This vocabulary must
                be present in vocab_path.
        """
        paths = glob.glob((vocab_path + '*.npz'))
        sizes = {path: 0 for path in paths}
        dicts = {path: {} for path in paths}
        for path in paths:
            np_data = np.load(path)['data']
            for entry in np_data:
                (word, count) = entry
                if (count < min_words):
                    continue
                if is_special_token(word):
                    continue
                dicts[path][word] = count
            sizes[path] = sum(dicts[path].values())
            print('Overall word count for {} -> {}'.format(path, sizes[path]))
            print('Overall word number for {} -> {}'.format(path, len(dicts[path])))
        vocab_of_max_size = max(sizes, key=sizes.get)
        max_size = sizes[vocab_of_max_size]
        print('Min: {}, {}, {}'.format(sizes, vocab_of_max_size, max_size))
        if (force_appearance is not None):
            force_appearance_path = [p for p in paths if (force_appearance in p)][0]
            force_appearance_vocab = deepcopy(dicts[force_appearance_path])
            print(force_appearance_path)
        else:
            (force_appearance_path, force_appearance_vocab) = (None, None)
        for path in paths:
            # Scale each vocab so it contributes as if it were the largest one.
            normalization_factor = (max_size / sizes[path])
            print('Norm factor for path {} -> {}'.format(path, normalization_factor))
            for word in dicts[path]:
                if is_special_token(word):
                    print('SPECIAL - ', word)
                    continue
                normalized_count = (dicts[path][word] * normalization_factor)
                # Only keep words present in the forced vocabulary, if any
                # (replaces the discarded try/except KeyError lookup).
                if ((force_appearance_vocab is not None) and (word not in force_appearance_vocab)):
                    continue
                if (word in self.master_vocab):
                    self.master_vocab[word] += normalized_count
                else:
                    self.master_vocab[word] = normalized_count
        print('Size of master_dict {}'.format(len(self.master_vocab)))
        print('Hashes for master dict: {}'.format(len([w for w in self.master_vocab if ('#' in w[0])])))

    def save_vocab(self, path_count, path_vocab, word_limit=100000):
        """ Saves the master vocabulary into a file.
        """
        words = OrderedDict()
        # Special tokens first, flagged with count -1.
        for token in SPECIAL_TOKENS:
            words[token] = (- 1)
        desc_order = OrderedDict(sorted(self.master_vocab.items(), key=(lambda kv: kv[1]), reverse=True))
        words.update(desc_order)
        # FIX: dict.items() is a view in Python 3; materialize before np.array.
        np_vocab = np.array(list(words.items()), dtype=[('word', '|S30'), ('count', 'float')])
        counts = []
        final_words = OrderedDict()
        for (i, w) in enumerate(words.keys()):
            try:
                # FIX: Python-3 str has no .decode(); in the original every
                # word hit the bare except and final_words stayed empty.
                key = w.decode('utf8') if isinstance(w, bytes) else str(w)
                final_words.update({key: i})
            except UnicodeDecodeError:
                print(w, i)
                continue
            word_limit -= 1
            counts.append(np_vocab[i])
            if (word_limit == 0):
                break
        np.savez_compressed(path_count, counts=counts)
        print(len(final_words), len(counts), sorted(self.master_vocab.items(), key=(lambda kv: kv[1]), reverse=True)[:10])
        with open(path_vocab, 'w') as f:
            f.write(json.dumps(final_words, indent=4, separators=(',', ': ')))
|
def all_words_in_sentences(sentences):
    """ Extracts all unique words from a given list of sentences.

    # Arguments:
        sentences: List or word generator of sentences to be processed.

    # Returns:
        List of all unique words contained in the given sentences,
        in first-seen order.
    """
    if isinstance(sentences, WordGenerator):
        sentences = [s for (s, _) in sentences]
    # Track membership in a set for O(1) lookups; the original scanned the
    # result list for every word, which is O(n^2) overall.
    seen = set()
    vocab = []
    for sentence in sentences:
        for word in sentence:
            if word not in seen:
                seen.add(word)
                vocab.append(word)
    return vocab
|
def extend_vocab_in_file(vocab, max_tokens=10000, vocab_path=VOCAB_PATH):
    """ Extends JSON-formatted vocabulary with words from vocab that are not
    present in the current vocabulary. Adds up to max_tokens words.
    Overwrites file in vocab_path.

    # Arguments:
        new_vocab: Vocabulary to be added. MUST have word_counts populated, i.e.
            must have run count_all_words() previously.
        max_tokens: Maximum number of words to be added.
        vocab_path: Path to the vocabulary json which is to be extended.
    """
    try:
        with open(vocab_path, 'r') as f:
            current_vocab = json.load(f)
    except IOError:
        print('Vocabulary file not found, expected at ' + vocab_path)
        return
    extend_vocab(current_vocab, vocab, max_tokens)
    # Write the merged vocabulary back in a stable, readable layout.
    with open(vocab_path, 'w') as f:
        json.dump(current_vocab, f, sort_keys=True, indent=4, separators=(',', ': '))
|
def extend_vocab(current_vocab, new_vocab, max_tokens=10000):
    """ Extends current vocabulary with words from vocab that are not
    present in the current vocabulary. Adds up to max_tokens words.

    # Arguments:
        current_vocab: Current dictionary of tokens.
        new_vocab: Vocabulary to be added. MUST have word_counts populated, i.e.
            must have run count_all_words() previously.
        max_tokens: Maximum number of words to be added.

    # Returns:
        How many new tokens have been added.
    """
    if (max_tokens < 0):
        max_tokens = 10000
    # Consider the most frequent new words first.
    words = OrderedDict()
    desc_order = OrderedDict(sorted(new_vocab.word_counts.items(), key=(lambda kv: kv[1]), reverse=True))
    words.update(desc_order)
    # Idiom fix: len(d) and "in d" instead of materializing d.keys().
    base_index = len(current_vocab)
    added = 0
    for word in words:
        if (added >= max_tokens):
            break
        if (word not in current_vocab):
            current_vocab[word] = (base_index + added)
            added += 1
    return added
|
def is_special_token(word):
    """Return True when ``word`` exactly matches one of the SPECIAL_TOKENS."""
    # Membership test replaces the manual flag-and-break loop; "in" uses the
    # same == comparison element-wise, so behavior is identical.
    return word in SPECIAL_TOKENS
|
def mostly_english(words, english, pct_eng_short=0.5, pct_eng_long=0.6, ignore_special_tokens=True, min_length=2):
    """ Ensure text meets threshold for containing English words.

    Returns (is_valid, n_counted_words, n_english_words).
    """
    if english is None:
        # No reference vocabulary: accept everything.
        return (True, 0, 0)
    n_words = 0
    n_english = 0
    for w in words:
        # Skip short tokens, pure punctuation and (optionally) special tokens.
        if len(w) < min_length or punct_word(w):
            continue
        if ignore_special_tokens and is_special_token(w):
            continue
        n_words += 1
        if w in english:
            n_english += 1
    if n_words < 2:
        # Too few countable words to judge.
        return (True, n_words, n_english)
    # Short texts get a laxer threshold than longer ones.
    threshold = pct_eng_short if n_words < 5 else pct_eng_long
    return (n_english >= (n_words * threshold), n_words, n_english)
|
def correct_length(words, min_words, max_words, ignore_special_tokens=True):
    """ Ensure text meets threshold for containing English words
    and that it's within the min and max words limits. """
    # None bounds default to "no effective limit".
    lower = 0 if min_words is None else min_words
    upper = 99999 if max_words is None else max_words
    n_words = 0
    for w in words:
        # Punctuation-only and (optionally) special tokens are not counted.
        if punct_word(w):
            continue
        if ignore_special_tokens and is_special_token(w):
            continue
        n_words += 1
    return lower <= n_words <= upper
|
def punct_word(word, punctuation=string.punctuation):
    """Return True when every character of ``word`` is punctuation.

    Note: an empty word vacuously returns True (all() over an empty iterable).
    """
    # Generator avoids the intermediate list; the redundant
    # "True if ... else False" ternary is dropped.
    return all(c in punctuation for c in word)
|
def load_non_english_user_set():
    """Load the set of known non-English user ids from 'uids.npz'."""
    return set(np.load('uids.npz')['data'])
|
def non_english_user(userid, non_english_user_set):
    """Return True when ``userid`` (coerced to int) is a known non-English user."""
    return int(userid) in non_english_user_set
|
def separate_emojis_and_text(text):
    """Split text into (emoji_chars, non_emoji_chars), each joined to a string.

    NOTE(review): relies on emoji.UNICODE_EMOJI, which newer versions of the
    emoji package renamed — confirm the installed version exposes it.
    """
    emoji_part = ''.join(c for c in text if c in emoji.UNICODE_EMOJI)
    text_part = ''.join(c for c in text if c not in emoji.UNICODE_EMOJI)
    return (emoji_part, text_part)
|
def extract_emojis(text, wanted_emojis):
    """Return the list of wanted emoji characters appearing in ``text``."""
    cleaned = remove_variation_selectors(text)
    return [ch for ch in cleaned if ch in wanted_emojis]
|
def remove_variation_selectors(text):
    """ Remove styling glyph variants for Unicode characters.
    For instance, remove skin color from emojis.
    """
    for selector in VARIATION_SELECTORS:
        text = text.replace(selector, u'')
    return text
|
def shorten_word(word):
    """ Shorten groupings of 3+ identical consecutive chars to 2, e.g. '!!!!' --> '!!'

    Non-ASCII words are returned unchanged.
    """
    isascii = (lambda s: (len(s) == len(s.encode())))
    # FIX: the original tested the lambda object itself ("not isascii"),
    # which is always False, so non-ASCII words were never skipped.
    if not isascii(word):
        return word
    if (len(word) < 3):
        return word
    # Group consecutive identical characters; collapse runs of >= 3 down to 2.
    letter_groups = [list(g) for (k, g) in groupby(word)]
    triple_or_more = [''.join(g) for g in letter_groups if (len(g) >= 3)]
    if (len(triple_or_more) == 0):
        return word
    short_word = word
    for trip in triple_or_more:
        short_word = short_word.replace(trip, (trip[0] * 2))
    return short_word
|
def detect_special_tokens(word):
    """Map numbers, @-mentions and URLs to their special-token placeholders."""
    try:
        int(word)
    except ValueError:
        # Not a number: check for mention / URL patterns instead.
        if AtMentionRegex.findall(word):
            return SPECIAL_TOKENS[2]
        if urlRegex.findall(word):
            return SPECIAL_TOKENS[3]
        return word
    # Parsed as an integer: use the number placeholder.
    return SPECIAL_TOKENS[4]
|
def process_word(word):
    """ Shortening and converting the word to a special token if relevant.
    """
    return detect_special_tokens(shorten_word(word))
|
def remove_control_chars(text):
    """Strip characters matched by CONTROL_CHAR_REGEX from ``text``."""
    cleaned = CONTROL_CHAR_REGEX.sub('', text)
    return cleaned
|
def convert_nonbreaking_space(text):
    """Replace literal and escaped non-breaking-space artifacts with a space."""
    # Order matters: longer escaped forms are replaced before shorter ones.
    artifacts = [u'\\\\xc2', u'\\xc2', u'Â', u'\\\\xa0', u'\\xa0', u'\xa0']
    for artifact in artifacts:
        text = text.replace(artifact, u' ')
    return text
|
def convert_linebreaks(text):
    """Replace raw/escaped line breaks and <br> with the EOL special token."""
    eol = ((u' ' + SPECIAL_TOKENS[5]) + u' ')
    for marker in [u'\\\\n', u'\\n', u'\n', u'\\\\r', u'\\r', u'\r', '<br>']:
        text = text.replace(marker, eol)
    return text
|
def tokenize(text):
    """Splits given input string into a list of tokens.

    # Arguments:
        text: Input string to be tokenized.

    # Returns:
        List of strings (tokens).
    """
    # Drop matches that are empty or whitespace-only.
    return [token for token in RE_PATTERN.findall(text) if token.strip()]
|
def check_ascii(word):
    """Return True when ``word`` contains only ASCII characters.

    Accepts both str and bytes. FIX: the original Python-2 code called
    word.decode('ascii'), which raises an uncaught AttributeError for
    Python-3 str instead of returning a result.
    """
    try:
        if isinstance(word, bytes):
            word.decode('ascii')
        else:
            word.encode('ascii')
        return True
    except (UnicodeDecodeError, UnicodeEncodeError):
        return False
|
class TDrumorGCN(torch.nn.Module):
    """Two-layer GCN branch over ``data.edge_index`` with root-feature
    enhancement: after each conv, every node is concatenated with the
    (pre-conv) features of its graph's root node. Output is mean-pooled
    per graph.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(TDrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # conv2 consumes [conv1 output || root features], hence hid + in.
        self.conv2 = GCNConv((hid_feats + in_feats), out_feats)

    def forward(self, data):
        (x, edge_index) = (data.x, data.edge_index)
        # Keep pre-conv copies for the root-feature skip connections.
        x1 = cp.copy(x.float())
        x = self.conv1(x, edge_index)
        x2 = cp.copy(x)
        rootindex = data.root_index
        root_extend = torch.zeros(len(data.batch), x1.size(1)).to(rootindex.device)
        batch_size = (max(data.batch) + 1)
        # Broadcast each graph's root features to all of its nodes.
        for num_batch in range(batch_size):
            index = torch.eq(data.batch, num_batch)
            root_extend[index] = x1[rootindex[num_batch]]
        x = torch.cat((x, root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Second enhancement uses the post-conv1 features of the roots.
        root_extend = torch.zeros(len(data.batch), x2.size(1)).to(rootindex.device)
        for num_batch in range(batch_size):
            index = torch.eq(data.batch, num_batch)
            root_extend[index] = x2[rootindex[num_batch]]
        x = torch.cat((x, root_extend), 1)
        # Graph-level readout: mean over each graph's nodes.
        x = scatter_mean(x, data.batch, dim=0)
        return x
|
class BUrumorGCN(torch.nn.Module):
    """Bottom-up rumor-propagation GCN branch (BiGCN).

    Identical structure to the top-down branch but convolves over the
    reversed (bottom-up) edge index ``data.BU_edge_index``.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # conv2 consumes conv1's output concatenated with the root-enhanced
        # input features, hence hid_feats + in_feats.
        self.conv2 = GCNConv(hid_feats + in_feats, out_feats)

    def forward(self, data):
        feats, edges = data.x, data.BU_edge_index
        raw_feats = cp.copy(feats.float())
        feats = self.conv1(feats, edges)
        hidden_feats = cp.copy(feats)
        roots = data.root_index
        n_graphs = max(data.batch) + 1

        # Root enhancement #1: broadcast each graph's root *input* features.
        enhanced = torch.zeros(len(data.batch), raw_feats.size(1)).to(roots.device)
        for g in range(n_graphs):
            members = torch.eq(data.batch, g)
            enhanced[members] = raw_feats[roots[g]]
        feats = torch.cat((feats, enhanced), 1)

        feats = F.relu(feats)
        feats = F.dropout(feats, training=self.training)
        feats = self.conv2(feats, edges)
        feats = F.relu(feats)

        # Root enhancement #2: broadcast each graph's root *hidden* features.
        enhanced = torch.zeros(len(data.batch), hidden_feats.size(1)).to(roots.device)
        for g in range(n_graphs):
            members = torch.eq(data.batch, g)
            enhanced[members] = hidden_feats[roots[g]]
        feats = torch.cat((feats, enhanced), 1)

        # Per-graph mean pooling over nodes.
        return scatter_mean(feats, data.batch, dim=0)
|
class Net(torch.nn.Module):
    """BiGCN rumor classifier: concatenates top-down and bottom-up branch
    embeddings and applies a linear layer with log-softmax output."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super(Net, self).__init__()
        self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
        self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
        # Each branch outputs (out_feats + hid_feats) features per graph.
        self.fc = torch.nn.Linear((out_feats + hid_feats) * 2, 2)

    def forward(self, data):
        td_emb = self.TDrumorGCN(data)
        bu_emb = self.BUrumorGCN(data)
        combined = torch.cat((td_emb, bu_emb), 1)
        return F.log_softmax(self.fc(combined), dim=1)
|
def compute_test(loader, verbose=False):
    """Evaluate the global `model` over `loader`.

    Returns (eval_deep metrics, summed NLL loss). Relies on the module
    globals `model`, `args` and `eval_deep`.
    """
    model.eval()
    total_loss = 0.0
    out_log = []
    with torch.no_grad():
        for batch in loader:
            if not args.multi_gpu:
                batch = batch.to(args.device)
            preds = model(batch)
            if args.multi_gpu:
                targets = torch.cat([d.y for d in batch]).to(preds.device)
            else:
                targets = batch.y
            if verbose:
                print(F.softmax(preds, dim=1).cpu().numpy())
            out_log.append([F.softmax(preds, dim=1), targets])
            total_loss += F.nll_loss(preds, targets).item()
    return (eval_deep(out_log, loader), total_loss)
|
class Net(torch.nn.Module):
    """Two-layer GAT news classifier with optional concatenation of the
    root (news) node features before the classifier head.

    Reads the module globals `dataset` (num_features) and `args`
    (num_classes, nhid) at construction time.
    """

    def __init__(self, concat=False):
        super(Net, self).__init__()
        self.num_features = dataset.num_features
        self.num_classes = args.num_classes
        self.nhid = args.nhid
        self.concat = concat
        self.conv1 = GATConv(self.num_features, self.nhid * 2)
        self.conv2 = GATConv(self.nhid * 2, self.nhid * 2)
        # FIX: fc1 was constructed twice when concat=True (the first
        # instance was immediately overwritten, leaving dead parameters);
        # construct it exactly once.
        self.fc1 = Linear(self.nhid * 2, self.nhid)
        if self.concat:
            self.fc0 = Linear(self.num_features, self.nhid)
        self.fc2 = Linear(self.nhid, self.num_classes)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.selu(self.conv1(x, edge_index))
        x = F.selu(self.conv2(x, edge_index))
        x = F.selu(global_mean_pool(x, batch))
        x = F.selu(self.fc1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        if self.concat:
            # The first node of each graph is the news/root node.
            news = torch.stack([data.x[(data.batch == idx).nonzero().squeeze()[0]]
                                for idx in range(data.num_graphs)])
            news = F.relu(self.fc0(news))
            x = torch.cat([x, news], dim=1)
            x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=-1)
|
@torch.no_grad()
def compute_test(loader, verbose=False):
    """Evaluate the global `model` over `loader`.

    Returns (eval_deep metrics, summed NLL loss). Relies on the module
    globals `model`, `args` and `eval_deep`.
    """
    model.eval()
    total_loss = 0.0
    out_log = []
    for batch in loader:
        if not args.multi_gpu:
            batch = batch.to(args.device)
        preds = model(batch)
        if args.multi_gpu:
            targets = torch.cat([d.y.unsqueeze(0) for d in batch]).squeeze().to(preds.device)
        else:
            targets = batch.y
        if verbose:
            print(F.softmax(preds, dim=1).cpu().numpy())
        out_log.append([F.softmax(preds, dim=1), targets])
        total_loss += F.nll_loss(preds, targets).item()
    return (eval_deep(out_log, loader), total_loss)
|
class Model(torch.nn.Module):
    """Single-conv GNN (GCN/SAGE/GAT) news classifier with optional
    concatenation of the root node features before the classifier head."""

    def __init__(self, args, concat=False):
        super(Model, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.dropout_ratio = args.dropout_ratio
        self.model = args.model
        self.concat = concat
        if self.model == 'gcn':
            self.conv1 = GCNConv(self.num_features, self.nhid)
        elif self.model == 'sage':
            self.conv1 = SAGEConv(self.num_features, self.nhid)
        elif self.model == 'gat':
            self.conv1 = GATConv(self.num_features, self.nhid)
        else:
            # FIX: previously an unknown model name silently left conv1
            # undefined and failed later with an opaque AttributeError.
            raise ValueError("unknown model type: {!r} (expected 'gcn', 'sage' or 'gat')".format(self.model))
        if self.concat:
            self.lin0 = torch.nn.Linear(self.num_features, self.nhid)
            self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
        self.lin2 = torch.nn.Linear(self.nhid, self.num_classes)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_attr = None
        x = F.relu(self.conv1(x, edge_index, edge_attr))
        x = gmp(x, batch)
        if self.concat:
            # The first node of each graph holds the news/root features.
            news = torch.stack([data.x[(data.batch == idx).nonzero().squeeze()[0]]
                                for idx in range(data.num_graphs)])
            news = F.relu(self.lin0(news))
            x = torch.cat([x, news], dim=1)
            x = F.relu(self.lin1(x))
        return F.log_softmax(self.lin2(x), dim=-1)
|
@torch.no_grad()
def compute_test(loader, verbose=False):
    """Evaluate the global `model` over `loader`.

    Returns (eval_deep metrics, summed NLL loss). Relies on the module
    globals `model`, `args` and `eval_deep`.
    """
    model.eval()
    loss_sum = 0.0
    logged = []
    for batch in loader:
        if not args.multi_gpu:
            batch = batch.to(args.device)
        out = model(batch)
        if args.multi_gpu:
            labels = torch.cat([d.y.unsqueeze(0) for d in batch]).squeeze().to(out.device)
        else:
            labels = batch.y
        if verbose:
            print(F.softmax(out, dim=1).cpu().numpy())
        logged.append([F.softmax(out, dim=1), labels])
        loss_sum += F.nll_loss(out, labels).item()
    return (eval_deep(logged, loader), loss_sum)
|
class GNN(torch.nn.Module):
    """Three DenseSAGEConv layers, each followed by batch-norm.

    The forward pass returns the concatenation of all three layer
    activations along the channel axis, optionally projected back to
    ``out_channels`` by a linear layer (``lin=True``).
    """

    def __init__(self, in_channels, hidden_channels, out_channels, normalize=False, lin=True):
        super(GNN, self).__init__()
        self.conv1 = DenseSAGEConv(in_channels, hidden_channels, normalize)
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, normalize)
        self.bn2 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv3 = DenseSAGEConv(hidden_channels, out_channels, normalize)
        self.bn3 = torch.nn.BatchNorm1d(out_channels)
        if lin is True:
            # Projects the concatenated [h1 | h2 | h3] features.
            self.lin = torch.nn.Linear(2 * hidden_channels + out_channels, out_channels)
        else:
            self.lin = None

    def bn(self, i, x):
        """Apply batch-norm layer *i* to a (batch, nodes, channels) tensor."""
        b, n, c = x.size()
        flat = x.view(-1, c)  # BatchNorm1d expects 2-D input
        normed = getattr(self, 'bn{}'.format(i))(flat)
        return normed.view(b, n, c)

    def forward(self, x, adj, mask=None):
        batch_size, num_nodes, in_channels = x.size()
        h0 = x
        h1 = self.bn(1, F.relu(self.conv1(h0, adj, mask)))
        h2 = self.bn(2, F.relu(self.conv2(h1, adj, mask)))
        h3 = self.bn(3, F.relu(self.conv3(h2, adj, mask)))
        out = torch.cat([h1, h2, h3], dim=-1)
        if self.lin is not None:
            out = F.relu(self.lin(out))
        return out
|
class Net(torch.nn.Module):
    """DiffPool graph classifier: two pooling stages plus a final embedding
    GNN, followed by a two-layer MLP head.

    forward() returns (log-probabilities, total link loss, total entropy
    loss). Reads the module global `max_nodes` at construction time.
    """

    def __init__(self, in_channels=3, num_classes=6):
        super(Net, self).__init__()
        pool_nodes = ceil(0.25 * max_nodes)
        self.gnn1_pool = GNN(in_channels, 64, pool_nodes)
        self.gnn1_embed = GNN(in_channels, 64, 64, lin=False)
        pool_nodes = ceil(0.25 * pool_nodes)
        # Embedding GNNs output 3*64 channels (concat of three layers).
        self.gnn2_pool = GNN(3 * 64, 64, pool_nodes)
        self.gnn2_embed = GNN(3 * 64, 64, 64, lin=False)
        self.gnn3_embed = GNN(3 * 64, 64, 64, lin=False)
        self.lin1 = torch.nn.Linear(3 * 64, 64)
        self.lin2 = torch.nn.Linear(64, num_classes)

    def forward(self, x, adj, mask=None):
        assign = self.gnn1_pool(x, adj, mask)
        h = self.gnn1_embed(x, adj, mask)
        h, adj, link1, ent1 = dense_diff_pool(h, adj, assign, mask)
        assign = self.gnn2_pool(h, adj)
        h = self.gnn2_embed(h, adj)
        h, adj, link2, ent2 = dense_diff_pool(h, adj, assign)
        h = self.gnn3_embed(h, adj)
        h = h.mean(dim=1)  # global mean pooling over nodes
        h = F.relu(self.lin1(h))
        logits = self.lin2(h)
        return (F.log_softmax(logits, dim=-1), link1 + link2, ent1 + ent2)
|
def train():
    """Run one training epoch over the global `train_loader`.

    Returns (eval_deep metrics, mean per-sample loss). Relies on the module
    globals `model`, `train_loader`, `device`, `optimizer` and `eval_deep`.
    """
    model.train()
    total_loss = 0
    logged = []
    for batch in train_loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out, _, _ = model(batch.x, batch.adj, batch.mask)
        logged.append([F.softmax(out, dim=1), batch.y])
        loss = F.nll_loss(out, batch.y.view(-1))
        loss.backward()
        # Weight each batch's loss by its sample count.
        total_loss += batch.y.size(0) * loss.item()
        optimizer.step()
    return (eval_deep(logged, train_loader), total_loss / len(train_loader.dataset))
|
@torch.no_grad()
def test(loader):
    """Evaluate the global DiffPool `model` over `loader`.

    Returns (eval_deep metrics, summed sample-weighted NLL loss).
    """
    model.eval()
    loss_sum = 0
    logged = []
    for batch in loader:
        batch = batch.to(device)
        out, _, _ = model(batch.x, batch.adj, batch.mask)
        logged.append([F.softmax(out, dim=1), batch.y])
        loss_sum += batch.y.size(0) * F.nll_loss(out, batch.y.view(-1)).item()
    return (eval_deep(logged, loader), loss_sum)
|
def read_file(folder, name, dtype=None):
    """Read `<folder>/<name>.txt` as a comma-separated tensor."""
    path = osp.join(folder, '{}.txt'.format(name))
    array = read_txt_array(path, sep=',', dtype=dtype)
    return array
|
def split(data, batch):
    """PyG utility: compute per-graph slice offsets for a batched graph.

    Given the node-to-graph assignment vector `batch`, builds cumulative
    node/edge offsets, rebases `edge_index` so each graph's node ids start
    at 0, and returns ``(data, slices)`` suitable for dataset collation.
    """
    # Cumulative node counts per graph, prefixed with 0.
    node_offsets = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
    node_offsets = torch.cat([torch.tensor([0]), node_offsets])
    row, _ = data.edge_index
    # Each edge belongs to the graph of its source node.
    edge_offsets = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
    edge_offsets = torch.cat([torch.tensor([0]), edge_offsets])
    # Rebase node indices so every graph starts at node 0.
    data.edge_index -= node_offsets[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()

    slices = {'edge_index': edge_offsets}
    if data.x is not None:
        slices['x'] = node_offsets
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_offsets
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            # Node-level labels slice exactly like nodes.
            slices['y'] = node_offsets
        else:
            # Graph-level labels: one entry per graph.
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
    return (data, slices)
|
def read_graph_data(folder, feature):
    """PyG utility: build a (data, slices) pair from raw FakeNewsNet files.

    Loads the sparse node-feature matrix, edge list, node-to-graph mapping
    and graph labels from `folder`, adds self-loops, coalesces duplicate
    edges and splits the result into per-graph slices.
    """
    node_attributes = sp.load_npz(folder + f'new_{feature}_feature.npz')
    edge_index = read_file(folder, 'A', torch.long).t()
    node_graph_id = np.load(folder + 'node_graph_id.npy')
    graph_labels = np.load(folder + 'graph_labels.npy')

    x = torch.from_numpy(node_attributes.todense()).to(torch.float)
    node_graph_id = torch.from_numpy(node_graph_id).to(torch.long)
    y = torch.from_numpy(graph_labels).to(torch.long)
    # Remap labels onto a dense 0..k-1 range.
    _, y = y.unique(sorted=True, return_inverse=True)

    num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)
    edge_attr = None
    edge_index, edge_attr = add_self_loops(edge_index, edge_attr)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes, num_nodes)

    data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
    return split(data, node_graph_id)
|
class ToUndirected():
    """PyG transform: symmetrize edge_index and coalesce duplicate edges.

    Note: edge attributes are discarded (reset to None).
    """

    def __init__(self):
        pass

    def __call__(self, data):
        undirected = to_undirected(data.edge_index, data.x.size(0))
        num_nodes = undirected.max().item() + 1 if data.x is None else data.x.size(0)
        undirected, attr = coalesce(undirected, None, num_nodes, num_nodes)
        data.edge_index = undirected
        data.edge_attr = attr
        return data
|
class DropEdge():
    """Edge-dropout transform from BiGCN (Bian et al.).

    1) Builds a top-down edge index with a random `tddroprate` fraction of
       edges removed, and a bottom-up edge index (reversed edges) with a
       `budroprate` fraction removed.
    2) Attaches the root node's features and index (node 0) to the data.
    Code adapted from https://github.com/TianBian95/BiGCN/blob/master/Process/dataset.py
    """

    def __init__(self, tddroprate, budroprate):
        self.tddroprate = tddroprate
        self.budroprate = budroprate

    def __call__(self, data):
        edge_index = data.edge_index

        # Top-down edges: keep a random (1 - tddroprate) fraction.
        if self.tddroprate > 0:
            src = list(edge_index[0])
            dst = list(edge_index[1])
            keep = sorted(random.sample(range(len(src)), int(len(src) * (1 - self.tddroprate))))
            td_edges = [list(np.array(src)[keep]), list(np.array(dst)[keep])]
        else:
            td_edges = edge_index

        # Bottom-up edges: reverse direction, then optionally drop.
        bu_src = list(edge_index[1])
        bu_dst = list(edge_index[0])
        if self.budroprate > 0:
            keep = sorted(random.sample(range(len(bu_src)), int(len(bu_src) * (1 - self.budroprate))))
            bu_edges = [list(np.array(bu_src)[keep]), list(np.array(bu_dst)[keep])]
        else:
            bu_edges = [bu_src, bu_dst]

        data.edge_index = torch.LongTensor(td_edges)
        data.BU_edge_index = torch.LongTensor(bu_edges)
        # Node 0 is the root (source post) of each propagation tree.
        data.root = torch.FloatTensor(data.x[0])
        data.root_index = torch.LongTensor([0])
        return data
|
class FNNDataset(InMemoryDataset):
    """Graph dataset built upon FakeNewsNet data.

    Args:
        root (string): Root directory where the dataset should be saved.
        name (string): Dataset name (first three letters are used in the
            processed file name).
        feature (string): Node-feature variant to load (e.g. 'spacy').
        empty (bool): If True, skip loading the processed tensors.
        transform / pre_transform / pre_filter: standard PyG dataset hooks;
            see torch_geometric.data.InMemoryDataset.
    """

    def __init__(self, root, name, feature='spacy', empty=False, transform=None,
                 pre_transform=None, pre_filter=None):
        self.name = name
        self.root = root
        self.feature = feature
        super(FNNDataset, self).__init__(root, transform, pre_transform, pre_filter)
        if not empty:
            (self.data, self.slices, self.train_idx,
             self.val_idx, self.test_idx) = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        # <root>/<name>/raw/
        return osp.join(self.root, self.name, 'raw/')

    @property
    def processed_dir(self):
        # <root>/<name>/processed/
        return osp.join(self.root, self.name, 'processed/')

    @property
    def num_node_attributes(self):
        return 0 if self.data.x is None else self.data.x.size(1)

    @property
    def raw_file_names(self):
        return ['{}.npy'.format(n) for n in ('node_graph_id', 'graph_labels')]

    @property
    def processed_file_names(self):
        if self.pre_filter is None:
            return f'{self.name[:3]}_data_{self.feature}.pt'
        return f'{self.name[:3]}_data_{self.feature}_prefiler.pt'

    def download(self):
        raise NotImplementedError('Must indicate valid location of raw data. No download allowed')

    def process(self):
        self.data, self.slices = read_graph_data(self.raw_dir, self.feature)
        if self.pre_filter is not None:
            kept = [g for g in (self.get(i) for i in range(len(self))) if self.pre_filter(g)]
            self.data, self.slices = self.collate(kept)
        if self.pre_transform is not None:
            transformed = [self.pre_transform(self.get(i)) for i in range(len(self))]
            self.data, self.slices = self.collate(transformed)
        # Fixed train/val/test splits shipped alongside the raw data.
        self.train_idx = torch.from_numpy(np.load(self.raw_dir + 'train_idx.npy')).to(torch.long)
        self.val_idx = torch.from_numpy(np.load(self.raw_dir + 'val_idx.npy')).to(torch.long)
        self.test_idx = torch.from_numpy(np.load(self.raw_dir + 'test_idx.npy')).to(torch.long)
        torch.save((self.data, self.slices, self.train_idx, self.val_idx, self.test_idx),
                   self.processed_paths[0])

    def __repr__(self):
        return '{}({})'.format(self.name, len(self))
|
class FdGars(Algorithm):
    """FdGars fraudster detector (TF1 static-graph implementation).

    Builds one GCN per adjacency view (`meta` relations), reshapes the
    per-view node encodings into a single [nodes, encoding] matrix, and
    trains a sigmoid cross-entropy classifier over mini-batches of node
    indices with Adam and gradient clipping.
    """
    def __init__(self, session, nodes, class_size, gcn_output1, gcn_output2, meta, embedding, encoding):
        # NOTE(review): gcn_output2 is accepted but never stored or used —
        # confirm whether the second GCN hidden size was meant to be wired in.
        self.nodes = nodes
        self.meta = meta
        self.class_size = class_size
        self.gcn_output1 = gcn_output1
        self.embedding = embedding
        self.encoding = encoding
        # Graph inputs: adjacency stack 'a', node features 'x', mini-batch
        # node indices, one-hot labels, and optimizer hyper-parameters.
        self.placeholders = {'a': tf.placeholder(tf.float32, [self.meta, self.nodes, self.nodes], 'adj'), 'x': tf.placeholder(tf.float32, [self.nodes, self.embedding], 'nxf'), 'batch_index': tf.placeholder(tf.int32, [None], 'index'), 't': tf.placeholder(tf.float32, [None, self.class_size], 'labels'), 'lr': tf.placeholder(tf.float32, [], 'learning_rate'), 'mom': tf.placeholder(tf.float32, [], 'momentum'), 'num_features_nonzero': tf.placeholder(tf.int32)}
        (loss, probabilities) = self.forward_propagation()
        (self.loss, self.probabilities) = (loss, probabilities)
        # L2 regularization over every trainable variable.
        self.l2 = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        self.pred = tf.one_hot(tf.argmax(self.probabilities, 1), class_size)
        print(self.pred.shape)
        self.correct_prediction = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.placeholders['t'], 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        self.optimizer = tf.train.AdamOptimizer(self.placeholders['lr'])
        # Clip gradients to [-5, 5] before applying them.
        gradients = self.optimizer.compute_gradients((self.loss + self.l2))
        capped_gradients = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in gradients if (grad is not None)]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')
    def forward_propagation(self):
        """Build the GCN-embedding and classification graph.

        Returns:
            (loss, probabilities): sigmoid cross-entropy loss tensor and
            per-class sigmoid probabilities for the batch nodes.
        """
        with tf.variable_scope('gcn'):
            gcn_emb = []
            # One GCN per adjacency view; each yields a flattened encoding.
            for i in range(self.meta):
                gcn_out = tf.reshape(GCN(self.placeholders, self.gcn_output1, self.embedding, self.encoding, index=i).embedding(), [1, (self.nodes * self.encoding)])
                gcn_emb.append(gcn_out)
            gcn_emb = tf.concat(gcn_emb, 0)
            gcn_emb = tf.reshape(gcn_emb, [self.nodes, self.encoding])
            print('GCN embedding over!')
        with tf.variable_scope('classification'):
            # Select the embeddings of the batch nodes via one-hot matmul.
            batch_data = tf.matmul(tf.one_hot(self.placeholders['batch_index'], self.nodes), gcn_emb)
            # NOTE(review): a softmax output is fed into sigmoid_cross_entropy
            # as 'logits' — unusual, but left as-is to preserve behavior.
            logits = tf.nn.softmax(batch_data)
            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.placeholders['t'], logits=logits)
        return (loss, tf.nn.sigmoid(logits))
    def train(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Run one optimization step; returns (loss, acc, pred, prob)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        outs = self.sess.run([self.train_op, self.loss, self.accuracy, self.pred, self.probabilities], feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return (loss, acc, pred, prob)
    def test(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Evaluate without updating weights; returns (acc, pred, probs, tags)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        (acc, pred, probabilities, tags) = self.sess.run([self.accuracy, self.pred, self.probabilities, self.correct_prediction], feed_dict=feed_dict)
        return (acc, pred, probabilities, tags)
|
def arg_parser():
    """Parse command-line arguments for FdGars training.

    Fixes over the original: numeric options now declare correct types.
    ``--momentum`` was ``type=int`` with a float default, so passing
    ``--momentum 0.9`` on the command line raised; ``--learning_rate`` and
    the layer-size options had no type and parsed as ``str``; the
    learning-rate help text described an unrelated option.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--hidden1', type=int, default=16, help='Number of units in GCN hidden layer 1.')
    parser.add_argument('--hidden2', type=int, default=16, help='Number of units in GCN hidden layer 2.')
    parser.add_argument('--gcn_output', type=int, default=4, help='gcn output size.')
    args = parser.parse_args()
    return args
|
def set_env(args):
    """Reset the default TF graph and seed numpy/TF RNGs for reproducibility."""
    tf.reset_default_graph()
    seed = args.seed
    np.random.seed(seed)
    tf.set_random_seed(seed)
|
def get_data(ix, int_batch, train_size):
    """Slice a batch from the module globals train_data/train_label.

    The window is clamped so the final batch is always full-sized (it may
    overlap the previous batch).
    """
    start = ix
    if start + int_batch >= train_size:
        start = train_size - int_batch
        end = train_size
    else:
        end = start + int_batch
    return (train_data[start:end], train_label[start:end])
|
def load_data(args):
    """Load the dataset selected by ``args.dataset_str``.

    Returns:
        (adj_list, features, train_data, train_label, test_data,
        test_label, paras) where paras = [node_size, node_embedding,
        class_size, train_size].

    Raises:
        ValueError: for unsupported dataset names. (The original fell
        through and raised an opaque NameError on unbound locals.)
    """
    if args.dataset_str != 'dblp':
        raise ValueError('Unsupported dataset: {!r}'.format(args.dataset_str))
    adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return (adj_list, features, train_data, train_label, test_data, test_label, paras)
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train FdGars for ``args.epoch_num`` epochs, then evaluate on the test split.

    ``paras`` is [node_size, node_embedding, class_size, train_size] as
    produced by load_data().
    """
    with tf.Session() as sess:
        adj_data = [normalize_adj(adj) for adj in adj_list]
        meta_size = len(adj_list)
        net = FdGars(session=sess, class_size=paras[2], gcn_output1=args.hidden1,
                     gcn_output2=args.hidden2, meta=meta_size, nodes=paras[0],
                     embedding=paras[1], encoding=args.gcn_output)
        sess.run(tf.global_variables_initializer())
        # FIX: time.clock() was removed in Python 3.8; use perf_counter().
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                (batch_data, batch_label) = get_data(index, args.batch_size, paras[3])
                (loss, acc, pred, prob) = net.train(features, adj_data, batch_label,
                                                    batch_data, args.learning_rate, args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        (test_acc, test_pred, test_probabilities, test_tags) = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
class GAS(Algorithm):
    """GAS (GCN-based Anti-Spam) review-spam detector, TF1 graph mode.

    Aggregates a heterogeneous user-review-item graph (concatenation +
    attention aggregators), concatenates the pieces with a homogeneous
    comment-graph signal, and classifies reviews with a sigmoid
    cross-entropy head trained by Adam with gradient clipping.
    """
    def __init__(self, session, nodes, class_size, embedding_i, embedding_u, embedding_r, h_u_size, h_i_size, encoding1, encoding2, encoding3, encoding4, gcn_dim, meta=1, concat=True, **kwargs):
        super().__init__(**kwargs)
        self.meta = meta
        self.nodes = nodes
        self.class_size = class_size
        # Initial embedding widths for items / users / reviews.
        self.embedding_i = embedding_i
        self.embedding_u = embedding_u
        self.embedding_r = embedding_r
        # Output widths of the successive aggregation/encoding stages.
        self.encoding1 = encoding1
        self.encoding2 = encoding2
        self.encoding3 = encoding3
        self.encoding4 = encoding4
        self.gcn_dim = gcn_dim
        # Concatenated-neighbor input widths for the attention aggregator.
        self.h_i_size = h_i_size
        self.h_u_size = h_u_size
        self.concat = concat
        self.build_placeholders()
        (loss, probabilities) = self.forward_propagation()
        (self.loss, self.probabilities) = (loss, probabilities)
        # L2 regularization over every trainable variable.
        self.l2 = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        self.pred = tf.one_hot(tf.argmax(self.probabilities, 1), class_size)
        print(self.pred.shape)
        self.correct_prediction = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.t, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        # Clip gradients to [-5, 5] before applying them.
        gradients = self.optimizer.compute_gradients((self.loss + self.l2))
        capped_gradients = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in gradients if (grad is not None)]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')
    def build_placeholders(self):
        """Declare all graph inputs: the six bipartite adjacency lists, the
        homogeneous comment adjacency, initial review/user/item embeddings,
        batch indices, one-hot labels and optimizer hyper-parameters."""
        self.user_review_adj = tf.placeholder(tf.float32, [None, None], 'adjlist1')
        self.user_item_adj = tf.placeholder(tf.float32, [None, None], 'adjlist2')
        self.item_review_adj = tf.placeholder(tf.float32, [None, None], 'adjlist3')
        self.item_user_adj = tf.placeholder(tf.float32, [None, None], 'adjlist4')
        self.review_user_adj = tf.placeholder(tf.float32, [None], 'adjlist5')
        self.review_item_adj = tf.placeholder(tf.float32, [None], 'adjlist6')
        self.homo_adj = tf.placeholder(tf.float32, [self.nodes, self.nodes], 'comment_adj')
        self.review_vecs = tf.placeholder(tf.float32, [None, None], 'init_embedding1')
        self.user_vecs = tf.placeholder(tf.float32, [None, None], 'init_embedding2')
        self.item_vecs = tf.placeholder(tf.float32, [None, None], 'init_embedding3')
        self.batch_index = tf.placeholder(tf.int32, [None], 'index')
        self.t = tf.placeholder(tf.float32, [None, self.class_size], 'labels')
        self.lr = tf.placeholder(tf.float32, [], 'learning_rate')
        self.mom = tf.placeholder(tf.float32, [], 'momentum')
    def forward_propagation(self):
        """Build the aggregation and classification graph.

        Returns:
            (loss, probabilities): sigmoid cross-entropy loss tensor and
            per-class sigmoid probabilities for the batch reviews.
        """
        with tf.variable_scope('hete_gcn'):
            # Concatenate review/user/item embeddings along review edges.
            r_aggregator = ConcatenationAggregator(input_dim=((self.embedding_r + self.embedding_u) + self.embedding_i), output_dim=self.encoding1, review_item_adj=self.review_item_adj, review_user_adj=self.review_user_adj, review_vecs=self.review_vecs, user_vecs=self.user_vecs, item_vecs=self.item_vecs)
            h_r = r_aggregator(inputs=None)
            # Attention aggregation producing user and item representations.
            iu_aggregator = AttentionAggregator(input_dim1=self.h_u_size, input_dim2=self.h_i_size, output_dim=self.encoding3, hid_dim=self.encoding2, user_review_adj=self.user_review_adj, user_item_adj=self.user_item_adj, item_review_adj=self.item_review_adj, item_user_adj=self.item_user_adj, review_vecs=self.review_vecs, user_vecs=self.user_vecs, item_vecs=self.item_vecs, concat=True)
            (h_u, h_i) = iu_aggregator(inputs=None)
            print('Nodes embedding over!')
        with tf.variable_scope('homo_gcn'):
            # NOTE(review): this branch only aliases review_vecs; `x` is
            # never used below — confirm whether a GCN pass was intended.
            x = self.review_vecs
            print('Comment graph embedding over!')
        with tf.variable_scope('classification'):
            # Concatenate hetero embeddings with the homo adjacency rows.
            concatenator = GASConcatenation(review_user_adj=self.review_user_adj, review_item_adj=self.review_item_adj, review_vecs=h_r, homo_vecs=self.homo_adj, user_vecs=h_u, item_vecs=h_i)
            concated_hr = concatenator(inputs=None)
            # Select the batch rows via one-hot matmul.
            batch_data = tf.matmul(tf.one_hot(self.batch_index, self.nodes), concated_hr)
            W = tf.get_variable(name='weights', shape=[(((self.encoding1 + (2 * self.encoding2)) + (2 * self.nodes)) + self.nodes), self.class_size], initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='bias', shape=[1, self.class_size], initializer=tf.zeros_initializer())
            # NOTE(review): this transpose is an identity permutation and its
            # result is discarded — appears to be a no-op.
            tf.transpose(batch_data, perm=[0, 1])
            logits = (tf.matmul(batch_data, W) + b)
            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.t, logits=logits)
        return (loss, tf.nn.sigmoid(logits))
    def train(self, h, adj_info, t, b, learning_rate=0.01, momentum=0.9):
        """Run one optimization step; returns (loss, acc, pred, prob)."""
        feed_dict = {self.user_review_adj: adj_info[0], self.user_item_adj: adj_info[1], self.item_review_adj: adj_info[2], self.item_user_adj: adj_info[3], self.review_user_adj: adj_info[4], self.review_item_adj: adj_info[5], self.homo_adj: adj_info[6], self.review_vecs: h[0], self.user_vecs: h[1], self.item_vecs: h[2], self.t: t, self.batch_index: b, self.lr: learning_rate, self.mom: momentum}
        outs = self.sess.run([self.train_op, self.loss, self.accuracy, self.pred, self.probabilities], feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return (loss, acc, pred, prob)
    def test(self, h, adj_info, t, b):
        """Evaluate without updating weights; returns (acc, pred, probs, tags)."""
        feed_dict = {self.user_review_adj: adj_info[0], self.user_item_adj: adj_info[1], self.item_review_adj: adj_info[2], self.item_user_adj: adj_info[3], self.review_user_adj: adj_info[4], self.review_item_adj: adj_info[5], self.homo_adj: adj_info[6], self.review_vecs: h[0], self.user_vecs: h[1], self.item_vecs: h[2], self.t: t, self.batch_index: b}
        (acc, pred, probabilities, tags) = self.sess.run([self.accuracy, self.pred, self.probabilities, self.correct_prediction], feed_dict=feed_dict)
        return (acc, pred, probabilities, tags)
|
def arg_parser():
    """Parse command-line arguments for GAS training.

    Fixes over the original: the flag ``'--review_num sample'`` contained a
    space, making it impossible to pass on the command line (and its dest
    held a space); ``--momentum`` was ``type=int`` with a float default;
    ``--learning_rate`` had no type and a wrong help text.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='example', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--review_num_sample', type=int, default=7, help='review number.')
    parser.add_argument('--gcn_dim', type=int, default=5, help='gcn layer size.')
    parser.add_argument('--encoding1', type=int, default=64)
    parser.add_argument('--encoding2', type=int, default=64)
    parser.add_argument('--encoding3', type=int, default=64)
    parser.add_argument('--encoding4', type=int, default=64)
    args = parser.parse_args()
    return args
|
def set_env(args):
    """Clear the default TF graph and seed the numpy and TF RNGs."""
    tf.reset_default_graph()
    rng_seed = args.seed
    np.random.seed(rng_seed)
    tf.set_random_seed(rng_seed)
|
def get_data(ix, int_batch, train_size):
    """Return a (data, label) batch from the module globals
    train_data/train_label, clamping the window so the final batch is
    always full-sized (it may overlap the previous one)."""
    if ix + int_batch >= train_size:
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    batch_slice = slice(ix, end)
    return (train_data[batch_slice], train_label[batch_slice])
|
def load_data(args):
    """Load the GAS example dataset.

    Returns:
        (adj_list, features, train_data, train_label, test_data,
        test_label, paras) where paras packs node/embedding sizes, class
        size, train size and the attention-aggregator input widths.

    Raises:
        ValueError: for unsupported dataset names. (The original fell
        through and raised an opaque NameError on unbound locals.)
    """
    if args.dataset_str != 'example':
        raise ValueError('Unsupported dataset: {!r}'.format(args.dataset_str))
    adj_list, features, train_data, train_label, test_data, test_label = load_data_gas()
    node_embedding_r = features[0].shape[1]  # review embedding width
    node_embedding_u = features[1].shape[1]  # user embedding width
    node_embedding_i = features[2].shape[1]  # item embedding width
    node_size = features[0].shape[0]
    # Concatenated-neighbor input widths for the attention aggregator.
    h_u_size = adj_list[0].shape[1] * (node_embedding_r + node_embedding_u)
    h_i_size = adj_list[2].shape[1] * (node_embedding_r + node_embedding_i)
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding_r, node_embedding_u, node_embedding_i,
             class_size, train_size, h_u_size, h_i_size]
    return (adj_list, features, train_data, train_label, test_data, test_label, paras)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.