code
stringlengths
17
6.64M
def validationXXreverse(args, model, device, data_loader, batch_size):
    """Run one validation pass for a two-stream (forward/reverse) model.

    Each batch yields a pair ``(data, data_r)``; both streams are fed to the
    model together with two fresh hidden states. Per-batch loss/accuracy are
    weighted by batch size and averaged over the whole dataset.

    :return: tuple ``(mean_accuracy, mean_loss)``.
    """
    logger.info('Starting Validation')
    model.eval()
    running_loss = 0
    running_acc = 0
    with torch.no_grad():
        for fwd, rev in data_loader:
            fwd = fwd.float().unsqueeze(1).to(device)
            rev = rev.float().unsqueeze(1).to(device)
            # Fresh hidden states sized to the actual batch (last batch may be short).
            h1 = model.init_hidden1(len(fwd))
            h2 = model.init_hidden2(len(fwd))
            acc, loss, h1, h2 = model(fwd, rev, h1, h2)
            running_loss += len(fwd) * loss
            running_acc += len(fwd) * acc
    n_samples = len(data_loader.dataset)
    running_loss /= n_samples
    running_acc /= n_samples
    logger.info('===> Validation set: Average loss: {:.4f}\tAccuracy: {:.4f}\n'.format(running_loss, running_acc))
    return (running_acc, running_loss)
def validation_spk(args, cdc_model, spk_model, device, data_loader, batch_size, frame_window):
    """Validate a speaker classifier built on top of frozen CDC features.

    Runs ``cdc_model.predict`` to extract per-frame features, classifies them
    with ``spk_model``, and accumulates summed NLL loss and correct-prediction
    counts, normalized by ``len(dataset) * frame_window`` frames.

    :return: tuple ``(mean_accuracy, mean_loss)``.
    """
    logger.info('Starting Validation')
    cdc_model.eval()
    spk_model.eval()
    total_loss = 0
    total_acc = 0
    with torch.no_grad():
        for data, target in data_loader:
            data = data.float().unsqueeze(1).to(device)
            target = target.to(device)
            hidden = cdc_model.init_hidden(len(data))
            output, hidden = cdc_model.predict(data, hidden)
            # Flatten frames into rows of 256-dim features for the classifier.
            data = output.contiguous().view((-1, 256))
            target = target.view((-1,))
            output = spk_model.forward(data)
            # FIX: `size_average=False` is deprecated in torch.nn.functional;
            # `reduction='sum'` is the documented equivalent (sums over frames).
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            total_acc += pred.eq(target.view_as(pred)).sum().item()
    total_loss /= (len(data_loader.dataset) * frame_window)
    total_acc /= ((1.0 * len(data_loader.dataset)) * frame_window)
    logger.info('===> Validation set: Average loss: {:.4f}\tAccuracy: {:.4f}\n'.format(total_loss, total_acc))
    return (total_acc, total_loss)
def validation(args, model, device, data_loader, batch_size):
    """Evaluate a model on a validation loader of single-stream sequences.

    Batches are transposed to (batch, channels, time) before the forward
    pass; per-batch metrics are weighted by batch size and averaged.

    :return: tuple ``(mean_accuracy, mean_loss)``.
    """
    logger.info('Starting Validation')
    model.eval()
    loss_sum = 0
    acc_sum = 0
    with torch.no_grad():
        for batch in data_loader:
            batch = batch.float().transpose(1, 2).to(device)
            state = model.init_hidden(len(batch), use_gpu=torch.cuda.is_available())
            acc, loss, state = model(batch, state)
            loss_sum += len(batch) * loss
            acc_sum += len(batch) * acc
    dataset_size = len(data_loader.dataset)
    loss_sum /= dataset_size
    acc_sum /= dataset_size
    logger.info('===> Validation set: Average loss: {:.4f}\tAccuracy: {:.4f}\n'.format(loss_sum, acc_sum))
    return (acc_sum, loss_sum)
class DeepSVDDConf(DetectorConfig):
    """Configuration for the Deep SVDD anomaly detector.

    Bundles the Deep SVDD objective/optimization settings (``objective``,
    ``nu``, ``lr``, ``n_epochs``, ...), the autoencoder-pretraining settings
    (``ae_*``), and the network-shape settings (``input_channels``,
    ``hidden_size``, ``kernel_size``, ...) into one config object.
    """
    # Normalize inputs to zero mean / unit variance by default.
    _default_transform = MeanVarNormalize()

    @initializer
    def __init__(self, net_name='merlion', xp_path='./results/deepsvdd', load_model='./results/deepsvdd/deepsvdd.pkl', objective='one-class', nu=0.1, device='cpu', seed=(- 1), optimizer_name='adam', lr=0.001, n_epochs=300, lr_milestone=None, batch_size=32, weight_decay=0.001, pretrain=True, ae_optimizer_name='adam', ae_lr=0.001, ae_n_epochs=300, ae_lr_milestone=None, ae_batch_size=32, ae_weight_decay=1e-06, n_jobs_dataloader=0, normal_class=0, input_channels=None, final_out_channels=None, sequence_length=8, n_layers=3, dropout=0.1, hidden_size=5, kernel_size=3, stride=1, **kwargs):
        # NOTE(review): @initializer presumably auto-assigns every argument to a
        # same-named attribute (the body never does it explicitly) — confirm
        # against the decorator's implementation before relying on this.
        super(DeepSVDDConf, self).__init__(**kwargs)
class DeepSVDDModule(object):
    """A class for the Deep SVDD method.

    Attributes:
        objective: A string specifying the Deep SVDD objective (either 'one-class' or 'soft-boundary').
        nu: Deep SVDD hyperparameter nu (must be 0 < nu <= 1).
        R: Hypersphere radius R.
        c: Hypersphere center c.
        net_name: A string indicating the name of the neural network to use.
        net: The neural network \\phi.
        ae_net: The autoencoder network corresponding to \\phi for network weights pretraining.
        trainer: DeepSVDDTrainer to train a Deep SVDD model.
        optimizer_name: A string indicating the optimizer to use for training the Deep SVDD network.
        ae_trainer: AETrainer to train an autoencoder in pretraining.
        ae_optimizer_name: A string indicating the optimizer to use for pretraining the autoencoder.
        results: A dictionary to save the results.
    """

    def __init__(self, config, objective: str='one-class', nu: float=0.1):
        """Inits DeepSVDD with one of the two objectives and hyperparameter nu."""
        assert (objective in ('one-class', 'soft-boundary')), "Objective must be either 'one-class' or 'soft-boundary'."
        self.objective = objective
        assert ((0 < nu) & (nu <= 1)), 'For hyperparameter nu, it must hold: 0 < nu <= 1.'
        self.nu = nu
        # Hypersphere radius and center; c is learned lazily by the trainer.
        self.R = 0.0
        self.c = None
        self.net_name = None
        self.net = None
        self.trainer = None
        self.optimizer_name = None
        self.ae_net = None
        self.ae_trainer = None
        self.ae_optimizer_name = None
        self.config = config
        self.results = {'train_time': None, 'test_auc': None, 'test_time': None, 'test_scores': None}

    def set_network(self, net_name):
        """Builds the neural network \\phi."""
        # NOTE(review): net_name is stored but the network is built from
        # self.config (which carries its own net_name) — confirm both agree.
        self.net_name = net_name
        self.net = build_network(self.config)

    def train(self, dataset: BaseADDataset, optimizer_name: str='adam', lr: float=0.001, n_epochs: int=50, lr_milestones: tuple=(), batch_size: int=128, weight_decay: float=1e-06, device: str='cuda', n_jobs_dataloader: int=0):
        """Trains the Deep SVDD model on the training data."""
        self.optimizer_name = optimizer_name
        self.trainer = DeepSVDDTrainer(self.objective, self.R, self.c, self.nu, optimizer_name, lr=lr, n_epochs=n_epochs, lr_milestones=lr_milestones, batch_size=batch_size, weight_decay=weight_decay, device=device, n_jobs_dataloader=n_jobs_dataloader)
        self.net = self.trainer.train(dataset, self.net)
        # Pull the learned radius/center back to plain Python values so the
        # model can be serialized without torch tensors.
        self.R = float(self.trainer.R.cpu().data.numpy())
        self.c = self.trainer.c.cpu().data.numpy().tolist()
        self.results['train_time'] = self.trainer.train_time

    def test(self, dataset: BaseADDataset, device: str='cuda', n_jobs_dataloader: int=0):
        """Tests the Deep SVDD model on the test data."""
        # Reuses an existing trainer if one was created by train(); otherwise
        # builds a fresh one from the stored R/c.
        if (self.trainer is None):
            self.trainer = DeepSVDDTrainer(self.objective, self.R, self.c, self.nu, device=device, n_jobs_dataloader=n_jobs_dataloader)
        self.trainer.test(dataset, self.net)
        return self.trainer.test_scores

    def pretrain(self, dataset: BaseADDataset, optimizer_name: str='adam', lr: float=0.001, n_epochs: int=100, lr_milestones: tuple=(), batch_size: int=128, weight_decay: float=1e-06, device: str='cuda', n_jobs_dataloader: int=0):
        """Pretrains the weights for the Deep SVDD network \\phi via autoencoder."""
        self.ae_net = build_autoencoder(self.config)
        self.ae_optimizer_name = optimizer_name
        self.ae_trainer = AETrainer(optimizer_name, lr=lr, n_epochs=n_epochs, lr_milestones=lr_milestones, batch_size=batch_size, weight_decay=weight_decay, device=device, n_jobs_dataloader=n_jobs_dataloader)
        self.ae_net = self.ae_trainer.train(dataset, self.ae_net)
        self.init_network_weights_from_pretraining()

    def init_network_weights_from_pretraining(self):
        """Initialize the Deep SVDD network weights from the encoder weights of the pretraining autoencoder."""
        net_dict = self.net.state_dict()
        ae_net_dict = self.ae_net.state_dict()
        # Copy only the autoencoder parameters whose names exist in \phi
        # (i.e. the encoder part); decoder weights are dropped.
        ae_net_dict = {k: v for (k, v) in ae_net_dict.items() if (k in net_dict)}
        net_dict.update(ae_net_dict)
        self.net.load_state_dict(net_dict)

    def save_model(self, export_model, save_ae=True):
        """Save Deep SVDD model (R, c, net weights, optionally AE weights) to export_model."""
        net_dict = self.net.state_dict()
        ae_net_dict = (self.ae_net.state_dict() if save_ae else None)
        torch.save({'R': self.R, 'c': self.c, 'net_dict': net_dict, 'ae_net_dict': ae_net_dict}, export_model)

    def load_model(self, model_path, load_ae=False):
        """Load Deep SVDD model from model_path."""
        model_dict = torch.load(model_path)
        self.R = model_dict['R']
        self.c = model_dict['c']
        self.net.load_state_dict(model_dict['net_dict'])
        if load_ae:
            if (self.ae_net is None):
                self.ae_net = build_autoencoder(self.config)
            self.ae_net.load_state_dict(model_dict['ae_net_dict'])

    def save_results(self, export_json):
        """Save results dict to a JSON-file."""
        with open(export_json, 'w') as fp:
            json.dump(self.results, fp)
class BaseADDataset(ABC):
    """Anomaly detection dataset base class.

    Subclasses populate ``train_set``/``test_set`` and implement ``loaders``.
    """

    def __init__(self, root: str):
        super().__init__()
        self.root = root  # root path to the data
        self.n_classes = 2  # 0: normal, 1: outlier
        self.normal_classes = self.outlier_classes = None  # tuples of class labels
        self.train_set = self.test_set = None  # must be of type torch.utils.data.Dataset

    @abstractmethod
    def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int=0) -> (DataLoader, DataLoader):
        """Implement data loaders of type torch.utils.data.DataLoader for train_set and test_set."""
        pass

    def __repr__(self):
        return type(self).__name__
class BaseNet(nn.Module):
    """Base class for all neural networks in this package."""

    def __init__(self):
        super().__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.rep_dim = None  # dimensionality of the learned representation

    def forward(self, *input):
        """Forward pass logic.

        :return: Network output
        """
        raise NotImplementedError

    def summary(self):
        """Log the number of trainable parameters and the module structure."""
        trainable = [p for p in self.parameters() if p.requires_grad]
        n_params = sum(np.prod(p.size()) for p in trainable)
        self.logger.info('Trainable parameters: {}'.format(n_params))
        self.logger.info(self)
class BaseTrainer(ABC):
    """Trainer base class holding the common optimization hyperparameters."""

    def __init__(self, optimizer_name: str, lr: float, n_epochs: int, lr_milestones: tuple, batch_size: int, weight_decay: float, device: str, n_jobs_dataloader: int):
        super().__init__()
        # Optimizer settings.
        self.optimizer_name = optimizer_name
        self.lr = lr
        self.weight_decay = weight_decay
        # Schedule settings.
        self.n_epochs = n_epochs
        self.lr_milestones = lr_milestones
        # Data / execution settings.
        self.batch_size = batch_size
        self.device = device
        self.n_jobs_dataloader = n_jobs_dataloader

    @abstractmethod
    def train(self, dataset: BaseADDataset, net: BaseNet) -> BaseNet:
        """Implement train method that trains the given network using the train_set of dataset.

        :return: Trained net
        """
        pass

    @abstractmethod
    def test(self, dataset: BaseADDataset, net: BaseNet):
        """Implement test method that evaluates the test_set of dataset on the given network."""
        pass
class TorchvisionDataset(BaseADDataset):
    """TorchvisionDataset class for datasets already implemented in torchvision.datasets."""

    def __init__(self, root: str):
        super().__init__(root)

    def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int=0) -> (DataLoader, DataLoader):
        """Wrap train_set/test_set in DataLoaders and return them as a pair."""
        make = lambda ds, shuffle: DataLoader(dataset=ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
        return (make(self.train_set, shuffle_train), make(self.test_set, shuffle_test))
def build_network(config):
    """Builds the neural network selected by ``config.net_name``.

    :return: the constructed network, or None if no branch matched.
    """
    # FIX: the set of known names must be a tuple, not the string 'merlion'.
    # With a string, `in` performs a SUBSTRING check, so any substring such
    # as 'lion' or 'mer' passed the assert and fell through to return None.
    implemented_networks = ('merlion',)
    assert (config.net_name in implemented_networks)
    net = None
    if (config.net_name == 'merlion'):
        net = Merlion_MLP(config)
    return net
def build_autoencoder(config):
    """Builds the corresponding autoencoder network for ``config.net_name``.

    :return: the constructed autoencoder, or None if no branch matched.
    """
    # FIX: must be a tuple, not the string 'merlion' — `x in 'merlion'` is a
    # substring test, so e.g. 'lion' incorrectly passed the assert.
    implemented_networks = ('merlion',)
    assert (config.net_name in implemented_networks)
    ae_net = None
    if (config.net_name == 'merlion'):
        ae_net = Merlion_MLP_Autoencoder(config)
    return ae_net
def build_network(config):
    """Builds the neural network selected by ``config.net_name``.

    Supported names: 'merlion' (MLP) and 'lstmae' (LSTM encoder).
    """
    supported = ('merlion', 'lstmae')
    assert (config.net_name in supported)
    if (config.net_name == 'merlion'):
        return Merlion_MLP(config)
    if (config.net_name == 'lstmae'):
        return LSTMEncoder(config, config.device)
    return None
def build_autoencoder(config):
    """Builds the autoencoder matching ``config.net_name``.

    Supported names: 'merlion' (MLP AE) and 'lstmae' (LSTM AE).
    """
    supported = ('merlion', 'lstmae')
    assert (config.net_name in supported)
    if (config.net_name == 'merlion'):
        return Merlion_MLP_Autoencoder(config)
    if (config.net_name == 'lstmae'):
        return LSTMAutoEncoder(config, config.device)
    return None
class AETrainer(BaseTrainer):
    """Trainer for the autoencoder used to pretrain the Deep SVDD network."""

    def __init__(self, optimizer_name: str='adam', lr: float=0.001, n_epochs: int=150, lr_milestones: tuple=(), batch_size: int=128, weight_decay: float=1e-06, device: str='cpu', n_jobs_dataloader: int=0):
        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device, n_jobs_dataloader)

    def train(self, dataset: BaseADDataset, ae_net: BaseNet):
        """Train the autoencoder on reconstruction MSE and return the trained net."""
        ae_net = ae_net.to(self.device, dtype=torch.double)
        train_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
        # Adam regardless of optimizer_name; 'amsgrad' only toggles the flag.
        optimizer = optim.Adam(ae_net.parameters(), lr=self.lr, weight_decay=self.weight_decay, amsgrad=(self.optimizer_name == 'amsgrad'))
        ae_net.train()
        for epoch in range(self.n_epochs):
            loss_epoch = 0.0
            n_batches = 0
            for data in train_loader:
                # NOTE(review): here each batch is the inputs directly; test()
                # below unpacks (inputs, labels, idx) — the two paths expect
                # differently-shaped dataset items. Verify against the dataset.
                inputs = data
                inputs = inputs.to(self.device)
                optimizer.zero_grad()
                outputs = ae_net(inputs)
                # Per-sample squared reconstruction error, summed over all
                # non-batch dimensions; the loss is its batch mean.
                scores = torch.sum(((outputs - inputs) ** 2), dim=tuple(range(1, outputs.dim())))
                loss = torch.mean(scores)
                loss.backward()
                optimizer.step()
                loss_epoch += loss.item()
                n_batches += 1
        return ae_net

    def test(self, dataset: BaseADDataset, ae_net: BaseNet):
        """Evaluate reconstruction error on the test set and compute ROC-AUC."""
        ae_net = ae_net.to(self.device, dtype=torch.double)
        test_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
        loss_epoch = 0.0
        n_batches = 0
        start_time = time.time()
        idx_label_score = []
        ae_net.eval()
        with torch.no_grad():
            for data in test_loader:
                (inputs, labels, idx) = data
                inputs = inputs.to(self.device)
                outputs = ae_net(inputs)
                scores = torch.sum(((outputs - inputs) ** 2), dim=tuple(range(1, outputs.dim())))
                loss = torch.mean(scores)
                # Keep (sample index, label, anomaly score) triples for AUC.
                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(), labels.cpu().data.numpy().tolist(), scores.cpu().data.numpy().tolist()))
                loss_epoch += loss.item()
                n_batches += 1
        (_, labels, scores) = zip(*idx_label_score)
        labels = np.array(labels)
        scores = np.array(scores)
        # NOTE(review): auc and test_time are computed but never stored or
        # returned — callers cannot observe them. Likely an oversight.
        auc = roc_auc_score(labels, scores)
        test_time = (time.time() - start_time)
class DeepSVDDTrainer(BaseTrainer):
    """Trainer implementing the Deep SVDD objectives ('one-class' / 'soft-boundary')."""

    def __init__(self, objective, R, c, nu: float, optimizer_name: str='adam', lr: float=0.001, n_epochs: int=150, lr_milestones: tuple=(), batch_size: int=128, weight_decay: float=1e-06, device: str='cuda', n_jobs_dataloader: int=0):
        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device, n_jobs_dataloader)
        assert (objective in ('one-class', 'soft-boundary')), "Objective must be either 'one-class' or 'soft-boundary'."
        self.objective = objective
        # Hypersphere radius R and center c as tensors on the target device;
        # c stays None until initialized from data in train().
        self.R = torch.tensor(R, device=self.device)
        self.c = (torch.tensor(c, device=self.device) if (c is not None) else None)
        self.nu = nu
        # For soft-boundary: number of epochs before R starts being updated.
        self.warm_up_n_epochs = 10
        self.train_time = None
        self.test_auc = None
        self.test_time = None
        self.test_scores = None

    def train(self, dataset: BaseADDataset, net: BaseNet):
        """Train the network to map data close to the hypersphere center c."""
        net = net.to(self.device, dtype=torch.double)
        train_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay, amsgrad=(self.optimizer_name == 'amsgrad'))
        if (self.c is None):
            self.c = self.init_center_c(train_loader, net)
        start_time = time.time()
        net.train()
        for epoch in range(self.n_epochs):
            loss_epoch = 0.0
            n_batches = 0
            for inputs in train_loader:
                inputs = inputs.to(self.device)
                optimizer.zero_grad()
                outputs = net(inputs)
                # Squared distance of each sample's embedding to the center.
                dist = torch.sum(((outputs - self.c) ** 2), dim=1)
                if (self.objective == 'soft-boundary'):
                    # Penalize only distances beyond the current radius.
                    scores = (dist - (self.R ** 2))
                    loss = ((self.R ** 2) + ((1 / self.nu) * torch.mean(torch.max(torch.zeros_like(scores), scores))))
                else:
                    loss = torch.mean(dist)
                loss.backward()
                optimizer.step()
                # After warm-up, update R to the (1-nu)-quantile of distances.
                if ((self.objective == 'soft-boundary') and (epoch >= self.warm_up_n_epochs)):
                    self.R.data = torch.tensor(get_radius(dist, self.nu), device=self.device)
                loss_epoch += loss.item()
                n_batches += 1
        self.train_time = (time.time() - start_time)
        return net

    def test(self, dataset: BaseADDataset, net: BaseNet):
        """Score the test set; returns per-batch anomaly-score arrays."""
        # NOTE(review): train() casts the net to double but test() casts to
        # float32 — confirm this dtype asymmetry is intentional.
        net = net.to(self.device, dtype=torch.float32)
        test_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.n_jobs_dataloader, shuffle=False)
        start_time = time.time()
        idx_label_score = []
        net.eval()
        with torch.no_grad():
            for inputs in test_loader:
                inputs = inputs.to(self.device, dtype=torch.float32)
                outputs = net(inputs)
                dist = torch.sum(((outputs - self.c) ** 2), dim=1)
                if (self.objective == 'soft-boundary'):
                    scores = (dist - (self.R ** 2))
                else:
                    scores = dist
                # One (batch_size, 1)-shaped score array per batch.
                idx_label_score.append(scores.detach().cpu().numpy().reshape((inputs.shape[0], (- 1))))
        self.test_time = (time.time() - start_time)
        self.test_scores = idx_label_score
        return self.test_scores

    def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
        """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
        n_samples = 0
        c = torch.zeros(net.hidden_size, device=self.device)
        net.eval()
        with torch.no_grad():
            for data in train_loader:
                inputs = data
                inputs = inputs.to(self.device)
                outputs = net(inputs)
                n_samples += outputs.shape[0]
                c += torch.sum(outputs, dim=0)
        c /= n_samples
        # Push near-zero coordinates away from 0 so trivial (all-zero)
        # solutions are avoided.
        c[((abs(c) < eps) & (c < 0))] = (- eps)
        c[((abs(c) < eps) & (c > 0))] = eps
        return c
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances.

    ``dist`` holds squared distances; the quantile is taken over their
    square roots (i.e. over actual distances).
    """
    distances = np.sqrt(dist.clone().data.cpu().numpy())
    return np.quantile(distances, (1 - nu))
class OCSVMConf(DetectorConfig):
    """Configuration for the OC-SVM anomaly detector (kernel/nu/degree/gamma
    mirror sklearn's OneClassSVM parameters; sequence_len is the window size).
    """
    # Normalize inputs to zero mean / unit variance by default.
    _default_transform = MeanVarNormalize()

    @initializer
    def __init__(self, kernel='rbf', nu=0.005, degree=3, gamma='scale', sequence_len=32, **kwargs):
        # NOTE(review): @initializer presumably auto-assigns each argument to a
        # same-named attribute — confirm against the decorator's implementation.
        super(OCSVMConf, self).__init__(**kwargs)
class OCSVM(DetectorBase):
    """The OC-SVM-based time series anomaly detector."""
    config_class = OCSVMConf

    def __init__(self, config: OCSVMConf):
        super().__init__(config)
        # Copy hyperparameters out of the config; the classifier itself is
        # built lazily in _train().
        self.kernel = config.kernel
        self.nu = config.nu
        self.degree = config.degree
        self.gamma = config.gamma
        self.sequence_length = config.sequence_len
        self.clf = None

    def _build_model(self):
        # sklearn One-Class SVM with the configured hyperparameters.
        clf = OneClassSVM(kernel=self.kernel, nu=self.nu, degree=self.degree, gamma=self.gamma)
        return clf

    def _train(self, X):
        """Fit a fresh OneClassSVM.

        :param X: The input time series, a numpy array.
        """
        self.clf = self._build_model()
        self.clf.fit(X)

    def _detect(self, X):
        """Score samples via the fitted SVM's signed decision function.

        :param X: The input time series, a numpy array.
        """
        return self.clf.decision_function(X)

    def _get_sequence_len(self):
        # Window length used by batch_detect when slicing the series.
        return self.sequence_length

    def train(self, train_data: TimeSeries, anomaly_labels: TimeSeries=None, train_config=None, post_rule_train_config=None) -> TimeSeries:
        """Train a multivariate time series anomaly detector.

        :param train_data: A `TimeSeries` of metric values to train the model.
        :param anomaly_labels: A `TimeSeries` indicating which timestamps are
            anomalous. Optional.
        :param train_config: Additional training configs, if needed. Only
            required for some models.
        :param post_rule_train_config: The config to use for training the
            model's post-rule. The model's default post-rule train config is
            used if none is supplied here.

        :return: A `TimeSeries` of the model's anomaly scores on the training
            data.
        """
        train_data = self.train_pre_process(train_data, require_even_sampling=False, require_univariate=False)
        train_df = train_data.align().to_pd()
        self._train(train_df.values)
        # Score the training data with the freshly fitted model.
        scores = batch_detect(self, train_df.values)
        train_scores = TimeSeries({'anom_score': UnivariateTimeSeries(train_data.time_stamps, scores)})
        self.train_post_rule(anomaly_scores=train_scores, anomaly_labels=anomaly_labels, post_rule_train_config=post_rule_train_config)
        return train_scores

    def get_anomaly_score(self, time_series: TimeSeries, time_series_prev: TimeSeries=None) -> TimeSeries:
        """
        :param time_series: The `TimeSeries` we wish to predict anomaly scores for.
        :param time_series_prev: A `TimeSeries` immediately preceding ``time_series``.
        :return: A univariate `TimeSeries` of anomaly scores
        """
        (time_series, time_series_prev) = self.transform_time_series(time_series, time_series_prev)
        # Prepend the preceding window (if given) so the detector has context,
        # then keep only the scores for the requested timestamps.
        ts = ((time_series_prev + time_series) if (time_series_prev is not None) else time_series)
        scores = batch_detect(self, ts.align().to_pd().values)
        timestamps = time_series.time_stamps
        return TimeSeries({'anom_score': UnivariateTimeSeries(timestamps, scores[(- len(timestamps)):])})
def set_requires_grad(model, dict_, requires_grad=True):
    """Toggle ``requires_grad`` on the model parameters whose names are in ``dict_``."""
    for name, parameter in model.named_parameters():
        if name in dict_:
            parameter.requires_grad = requires_grad
def fix_randomness(SEED):
    """Seed every RNG in use (python, numpy, torch CPU/CUDA) for reproducibility."""
    # Torch CPU and CUDA generators (the CUDA call is a no-op without a GPU).
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    # Python and numpy generators.
    random.seed(SEED)
    np.random.seed(SEED)
    # Force deterministic cuDNN kernels.
    torch.backends.cudnn.deterministic = True
def epoch_time(start_time, end_time):
    """Split an elapsed interval into whole (minutes, seconds).

    :param start_time: epoch start timestamp (seconds).
    :param end_time: epoch end timestamp (seconds).
    :return: tuple ``(minutes, seconds)`` as ints.
    """
    delta = end_time - start_time
    minutes = int(delta / 60)
    seconds = int(delta - minutes * 60)
    return (minutes, seconds)
def _calc_metrics(pred_labels, true_labels, log_dir, home_path):
    """Persist raw labels and write a classification report + confusion matrix.

    Saves predicted/true label arrays under ``log_dir/labels``, an Excel
    classification report (values scaled to percent, plus Cohen's kappa and
    accuracy columns) and a torch-serialized confusion matrix under
    ``home_path/log_dir``.
    """
    y_pred = np.array(pred_labels).astype(int)
    y_true = np.array(true_labels).astype(int)

    # Dump the raw label arrays for later inspection.
    labels_save_path = os.path.join(log_dir, 'labels')
    os.makedirs(labels_save_path, exist_ok=True)
    np.save(os.path.join(labels_save_path, 'predicted_labels.npy'), y_pred)
    np.save(os.path.join(labels_save_path, 'true_labels.npy'), y_true)

    report = classification_report(y_true, y_pred, digits=6, output_dict=True)
    cm = confusion_matrix(y_true, y_pred)
    df = pd.DataFrame(report)
    df['cohen'] = cohen_kappa_score(y_true, y_pred)
    df['accuracy'] = accuracy_score(y_true, y_pred)
    df = df * 100  # express everything in percent

    # Derive the experiment/mode names from the log directory layout.
    exp_name = os.path.split(os.path.dirname(log_dir))[-1]
    training_mode = os.path.basename(log_dir)

    file_name = f'{exp_name}_{training_mode}_classification_report.xlsx'
    df.to_excel(os.path.join(home_path, log_dir, file_name))

    cm_file_name = f'{exp_name}_{training_mode}_confusion_matrix.torch'
    torch.save(cm, os.path.join(home_path, log_dir, cm_file_name))
def _logger(logger_name, level=logging.DEBUG): '\n Method to return a custom logger with the given name and level\n ' logger = logging.getLogger(logger_name) logger.setLevel(level) format_string = '%(message)s' log_format = logging.Formatter(format_string) console_handler = logging.StreamHandler(sys.stdout) console_handler.setFormatter(log_format) logger.addHandler(console_handler) file_handler = logging.FileHandler(logger_name, mode='a') file_handler.setFormatter(log_format) logger.addHandler(file_handler) return logger
def copy_Files(destination, data_type):
    """Ensure the ``model_files`` backup directory exists under ``destination``.

    ``data_type`` is accepted for interface compatibility but unused here.
    """
    backup_dir = os.path.join(destination, 'model_files')
    os.makedirs(backup_dir, exist_ok=True)
class Config(object):
    """Model/training configuration (1-channel input, 2 classes)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 1
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 128
        self.num_classes = 2
        self.dropout = 0.35
        self.features_len = 24
        # Optimization.
        self.num_epoch = 40
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 128
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 0.001, 0.001
        self.max_seg = 5
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 100, 10
class Config(object):
    """Model/training configuration (9-channel input, 6 classes)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 9
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 128
        self.num_classes = 6
        self.dropout = 0.35
        self.features_len = 18
        # Optimization.
        self.num_epoch = 40
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 128
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 1.1, 0.8
        self.max_seg = 8
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 100, 6
class Config(object):
    """Model/training configuration (windowed univariate input, 32 out-channels)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 1
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 32
        self.num_classes = 2
        self.dropout = 0.35
        self.features_len = 4
        # Windowing.
        self.window_size, self.time_step = 18, 18
        # Optimization.
        self.num_epoch = 40
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 64
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 1.1, 0.8
        self.max_seg = 8
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 100, 2
class Config(object):
    """Model/training configuration (windowed univariate input, 8 out-channels)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 1
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 8
        self.num_classes = 2
        self.dropout = 0.35
        self.features_len = 4
        # Windowing.
        self.window_size, self.time_step = 18, 18
        # Optimization.
        self.num_epoch = 40
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading — note drop_last is False in this variant.
        self.drop_last = False
        self.batch_size = 64
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 1.1, 0.8
        self.max_seg = 8
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 100, 2
class Config(object):
    """Model/training configuration (18-channel input, sliding window step 1)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 18
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 128
        self.num_classes = 2
        self.dropout = 0.35
        self.features_len = 7
        # Windowing.
        self.window_size, self.time_step = 18, 1
        # Optimization.
        self.num_epoch = 40
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 64
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 1.1, 0.8
        self.max_seg = 8
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 100, 2
class Config(object):
    """Model/training configuration (wide kernel, 3 classes, with corruption)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 1
        self.kernel_size = 32
        self.stride = 4
        self.final_out_channels = 128
        self.features_len = 162
        self.num_classes = 3
        self.dropout = 0.35
        # Input-corruption probability for augmentation.
        self.corruption_prob = 0.3
        # Optimization.
        self.num_epoch = 40
        self.batch_size = 64
        self.optimizer = 'adam'
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 2, 0.1
        self.max_seg = 5
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 64, 50
class Config(object):
    """Model/training configuration (kernel 25 / stride 3, 5 classes)."""
    def __init__(self):
        # Encoder architecture.
        self.input_channels = 1
        self.final_out_channels = 128
        self.num_classes = 5
        self.dropout = 0.35
        self.kernel_size = 25
        self.stride = 3
        self.features_len = 127
        # Optimization.
        self.num_epoch = 40
        self.optimizer = 'adam'
        self.beta1, self.beta2 = 0.9, 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 128
        # Sub-configurations.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class augmentations(object):
    """Jitter/scaling/permutation augmentation hyperparameters."""
    def __init__(self):
        self.jitter_scale_ratio, self.jitter_ratio = 1.5, 2
        self.max_seg = 12
class Context_Cont_configs(object):
    """Contextual-contrasting hyperparameters."""
    def __init__(self):
        self.use_cosine_similarity = True
        self.temperature = 0.2
class TC(object):
    """Temporal-contrasting hyperparameters."""
    def __init__(self):
        self.hidden_dim, self.timesteps = 64, 50
class Load_Dataset(Dataset):
    """Wraps a ``{'samples': ..., 'labels': ...}`` dict as a torch Dataset.

    Normalizes sample shape to (N, channels, time) and, in 'self_supervised'
    mode, precomputes two augmented views of every sample.
    """

    def __init__(self, dataset, config, training_mode):
        super(Load_Dataset, self).__init__()
        self.training_mode = training_mode
        X_train = dataset['samples']
        y_train = dataset['labels']
        # Ensure a 3-D (N, C, T) layout: add a trailing axis if 2-D, then move
        # the smallest dimension (assumed to be channels — TODO confirm) to axis 1.
        if (len(X_train.shape) < 3):
            X_train = X_train.unsqueeze(2)
        if (X_train.shape.index(min(X_train.shape)) != 1):
            X_train = X_train.permute(0, 2, 1)
        # NOTE(review): unsqueeze/permute above are tensor methods, yet this
        # branch handles np.ndarray input — a numpy array entering the two
        # shape fixes would fail first. Verify what callers actually pass.
        if isinstance(X_train, np.ndarray):
            self.x_data = torch.from_numpy(X_train)
            self.y_data = torch.from_numpy(y_train).long()
        else:
            self.x_data = X_train
            self.y_data = y_train
        self.len = X_train.shape[0]
        # Precompute both augmented views once for contrastive pretraining.
        if (training_mode == 'self_supervised'):
            (self.aug1, self.aug2) = DataTransform(self.x_data, config)

    def __getitem__(self, index):
        # Self-supervised mode returns the two augmentations; otherwise the
        # raw sample is repeated to keep a uniform 4-tuple interface.
        if (self.training_mode == 'self_supervised'):
            return (self.x_data[index], self.y_data[index], self.aug1[index], self.aug2[index])
        else:
            return (self.x_data[index], self.y_data[index], self.x_data[index], self.x_data[index])

    def __len__(self):
        return self.len
def data_generator(data_path, configs, training_mode):
    """Build train/val/test DataLoaders from ``train.pt``/``val.pt``/``test.pt``.

    Each file is a dict consumed by Load_Dataset. Only the training loader
    shuffles; only train/val honor ``configs.drop_last``.
    """
    datasets = {}
    for split in ('train', 'val', 'test'):
        raw = torch.load(os.path.join(data_path, f'{split}.pt'))
        datasets[split] = Load_Dataset(raw, configs, training_mode)

    train_loader = torch.utils.data.DataLoader(dataset=datasets['train'], batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    valid_loader = torch.utils.data.DataLoader(dataset=datasets['val'], batch_size=configs.batch_size, shuffle=False, drop_last=configs.drop_last, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=datasets['test'], batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, valid_loader, test_loader)
def data_generator1(time_series, time_series_label, configs, training_mode):
    """Build train/val/test DataLoaders from a raw series + labels.

    Augments the series, tiles the labels to match, windows both into
    subsequences, splits 81/9/10 (train/val/test), transposes windows to
    channels-first, and wraps each split in a Load_Dataset/DataLoader.
    """
    time_series = time_series.to_numpy()
    time_series_label = time_series_label.to_numpy()
    augments = ts_augmentation(time_series)
    time_series = np.concatenate([time_series, augments], axis=0)
    # Repeat labels so they cover the original + augmented copies.
    time_series_label = np.concatenate(([time_series_label] * (len(time_series) // len(time_series_label))), axis=0)
    time_series = subsequences(time_series, configs.window_size, configs.time_step)
    time_series_label = subsequences(time_series_label, configs.window_size, configs.time_step)
    (x_train, x_test, y_train, y_test) = train_test_split(time_series, time_series_label, test_size=0.1, random_state=42)
    (x_train, x_val, y_train, y_val) = train_test_split(x_train, y_train, test_size=0.1, random_state=42)
    # FIX: ndarray.swapaxes returns a view and does NOT modify in place.
    # The original discarded the results, so the windows were never
    # transposed to channels-first; assign them (this mirrors the
    # transpose((0, 2, 1)) used in data_generator4).
    x_train = x_train.swapaxes(1, 2)
    x_val = x_val.swapaxes(1, 2)
    x_test = x_test.swapaxes(1, 2)
    x_train = torch.from_numpy(x_train)
    x_val = torch.from_numpy(x_val)
    x_test = torch.from_numpy(x_test)
    train_dat_dict = dict()
    train_dat_dict['samples'] = x_train
    train_dat_dict['labels'] = torch.from_numpy(y_train)
    val_dat_dict = dict()
    val_dat_dict['samples'] = x_val
    val_dat_dict['labels'] = torch.from_numpy(y_val)
    test_dat_dict = dict()
    test_dat_dict['samples'] = x_test
    test_dat_dict['labels'] = torch.from_numpy(y_test)
    train_dataset = Load_Dataset(train_dat_dict, configs, training_mode)
    valid_dataset = Load_Dataset(val_dat_dict, configs, training_mode)
    test_dataset = Load_Dataset(test_dat_dict, configs, training_mode)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=configs.drop_last, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, valid_loader, test_loader)
def data_generator4(train_data, test_data, train_labels, test_labels, configs, training_mode):
    # Build train/val/test DataLoaders for anomaly detection from TimeSeries
    # objects (Merlion-style: they support `+`, `.to_pd()`). Also returns the
    # number of labeled anomaly windows in the test set.
    train_time_series_ts = train_data
    test_time_series_ts = test_data
    # Normalize with mean/variance statistics computed over train + test jointly.
    mvn = MeanVarNormalize()
    mvn.train((train_time_series_ts + test_time_series_ts))
    (bias, scale) = (mvn.bias, mvn.scale)
    train_time_series = train_time_series_ts.to_pd().to_numpy()
    train_time_series = ((train_time_series - bias) / scale)
    test_time_series = test_time_series_ts.to_pd().to_numpy()
    test_time_series = ((test_time_series - bias) / scale)
    train_labels = train_labels.to_pd().to_numpy()
    test_labels = test_labels.to_pd().to_numpy()
    # Each anomaly segment contributes two label transitions, hence the /2.
    test_anomaly_window_num = int((len(np.where((test_labels[1:] != test_labels[:(- 1)]))[0]) / 2))
    # Slice the series (and per-point labels) into sliding windows.
    train_x = subsequences(train_time_series, configs.window_size, configs.time_step)
    test_x = subsequences(test_time_series, configs.window_size, configs.time_step)
    train_y = subsequences(train_labels, configs.window_size, configs.time_step)
    test_y = subsequences(test_labels, configs.window_size, configs.time_step)
    # Collapse per-point labels to one label per window: a window is flagged
    # anomalous if any point among its first `time_step` positions is anomalous.
    train_y_window = np.zeros(train_x.shape[0])
    test_y_window = np.zeros(test_x.shape[0])
    train_anomaly_window_num = 0
    for (i, item) in enumerate(train_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            train_anomaly_window_num += 1
            train_y_window[i] = 1
        else:
            train_y_window[i] = 0
    for (i, item) in enumerate(test_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            test_y_window[i] = 1
        else:
            test_y_window[i] = 0
    # Chronological (unshuffled) split keeps the validation set after training in time.
    (train_x, val_x, train_y, val_y) = train_test_split(train_x, train_y_window, test_size=0.2, shuffle=False)
    # Swap to channels-first -- presumably (N, T, C) -> (N, C, T) for Conv1d;
    # TODO confirm the layout produced by `subsequences`.
    train_x = train_x.transpose((0, 2, 1))
    val_x = val_x.transpose((0, 2, 1))
    test_x = test_x.transpose((0, 2, 1))
    train_dat_dict = dict()
    train_dat_dict['samples'] = train_x
    train_dat_dict['labels'] = train_y
    val_dat_dict = dict()
    val_dat_dict['samples'] = val_x
    val_dat_dict['labels'] = val_y
    test_dat_dict = dict()
    test_dat_dict['samples'] = test_x
    test_dat_dict['labels'] = test_y_window
    train_dataset = Load_Dataset(train_dat_dict, configs, training_mode)
    val_dataset = Load_Dataset(val_dat_dict, configs, training_mode)
    test_dataset = Load_Dataset(test_dat_dict, configs, training_mode)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, val_loader, test_loader, test_anomaly_window_num)
class NTXentLoss(torch.nn.Module):
    """Normalized temperature-scaled cross-entropy (NT-Xent) loss, SimCLR-style.

    Given two batches of projections `zis`/`zjs` (two augmented views of the
    same `batch_size` samples), each sample's other view is its positive pair
    and the remaining 2*batch_size - 2 rows are its negatives.
    """

    def __init__(self, device, batch_size, temperature, use_cosine_similarity):
        super(NTXentLoss, self).__init__()
        self.batch_size = batch_size
        self.temperature = temperature
        self.device = device
        self.softmax = torch.nn.Softmax(dim=(- 1))
        # Boolean mask selecting all pairs that are neither self- nor positive-pairs.
        self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)
        self.similarity_function = self._get_similarity_function(use_cosine_similarity)
        # 'sum' reduction: the total is divided by 2N manually in forward().
        self.criterion = torch.nn.CrossEntropyLoss(reduction='sum')

    def _get_similarity_function(self, use_cosine_similarity):
        # Choose between cosine similarity and a plain dot product.
        if use_cosine_similarity:
            self._cosine_similarity = torch.nn.CosineSimilarity(dim=(- 1))
            return self._cosine_simililarity
        else:
            return self._dot_simililarity

    def _get_correlated_mask(self):
        # diag: self-pairs; l1/l2: the two positive-pair diagonals at +/- batch_size.
        diag = np.eye((2 * self.batch_size))
        l1 = np.eye((2 * self.batch_size), (2 * self.batch_size), k=(- self.batch_size))
        l2 = np.eye((2 * self.batch_size), (2 * self.batch_size), k=self.batch_size)
        mask = torch.from_numpy(((diag + l1) + l2))
        # Invert: True marks the negative pairs we keep.
        mask = (1 - mask).type(torch.bool)
        return mask.to(self.device)

    @staticmethod
    def _dot_simililarity(x, y):
        # (N, 1, C) . (1, C, 2N) contracted over 2 dims -> (N, 2N) pairwise dot products.
        v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2)
        return v

    def _cosine_simililarity(self, x, y):
        # Broadcasts to an (N, 2N) matrix of pairwise cosine similarities.
        v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))
        return v

    def forward(self, zis, zjs):
        # Stack both views: rows 0..N-1 are zjs, rows N..2N-1 are zis.
        representations = torch.cat([zjs, zis], dim=0)
        similarity_matrix = self.similarity_function(representations, representations)
        # Positive similarities live on the +/- batch_size off-diagonals.
        l_pos = torch.diag(similarity_matrix, self.batch_size)
        r_pos = torch.diag(similarity_matrix, (- self.batch_size))
        positives = torch.cat([l_pos, r_pos]).view((2 * self.batch_size), 1)
        negatives = similarity_matrix[self.mask_samples_from_same_repr].view((2 * self.batch_size), (- 1))
        # Column 0 holds each row's positive logit, so the target class is always 0.
        logits = torch.cat((positives, negatives), dim=1)
        logits /= self.temperature
        labels = torch.zeros((2 * self.batch_size)).to(self.device).long()
        loss = self.criterion(logits, labels)
        return (loss / (2 * self.batch_size))
class base_Model(nn.Module):
    """Three-block 1-D CNN encoder with a linear classification head.

    forward() returns both the class logits and the final conv feature map so
    the features can be reused for contrastive / one-class objectives.
    """

    def __init__(self, configs):
        super(base_Model, self).__init__()
        # Block 1: configurable kernel/stride, near-'same' padding of kernel_size//2,
        # followed by pooling and dropout.
        self.conv_block1 = nn.Sequential(nn.Conv1d(configs.input_channels, 32, kernel_size=configs.kernel_size, stride=configs.stride, bias=False, padding=(configs.kernel_size // 2)), nn.BatchNorm1d(32), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1), nn.Dropout(configs.dropout))
        # Blocks 2-3: fixed kernel 8, widening channels up to final_out_channels.
        self.conv_block2 = nn.Sequential(nn.Conv1d(32, 64, kernel_size=8, stride=1, bias=False, padding=4), nn.BatchNorm1d(64), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        self.conv_block3 = nn.Sequential(nn.Conv1d(64, configs.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4), nn.BatchNorm1d(configs.final_out_channels), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        # configs.features_len must equal the temporal length produced by the
        # conv stack for the chosen window size -- TODO confirm against configs.
        model_output_dim = configs.features_len
        self.logits = nn.Linear((model_output_dim * configs.final_out_channels), configs.num_classes)

    def forward(self, x_in):
        # x_in: assumed (batch, input_channels, seq_len) -- confirm with the data pipeline.
        x = self.conv_block1(x_in)
        x = self.conv_block2(x)
        x = self.conv_block3(x)
        # Flatten (B, C, T) -> (B, C*T) for the linear head.
        x_flat = x.reshape(x.shape[0], (- 1))
        logits = self.logits(x_flat)
        return (logits, x)
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config):
    """Fine-tune *model* under the one-class objective, then evaluate.

    The hypersphere center is initialised from a forward pass over the training
    data; the radius `length` starts at 0 and (for the soft-boundary objective)
    is re-estimated during training. After the last epoch, thresholds are
    selected on the validation/test scores and affiliation plus
    revised-point-adjusted metrics are reported.

    Returns (test scores, test affiliation metrics, test RPA score object,
    UCR-style reasonable-accuracy accumulator, binary predictions).
    """
    logger.debug('one-class classfication fine-tune started ....')
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Initialise the hypersphere center as the mean representation of the train set.
    center = torch.zeros(config.final_out_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    # Radius of the hypersphere; updated inside model_train for soft-boundary.
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        scheduler.step(train_loss)
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())
    # Threshold selection + metrics computed on scores from the final epoch.
    (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
    (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
    if (config.dataset == 'UCR'):
        # UCR series contain a single anomaly; check the detection is "reasonable".
        score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f} 
''')
    else:
        score_reasonable = reasonable_accumulator(1, 0)
    logger.debug('\n################## Training is Done! #########################')
    val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
    val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
    val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
    print('Valid affiliation-metrics')
    logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f} 
''')
    print('Valid RAP F1')
    logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f} 
''')
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f} 
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f} 
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """Run one training epoch of the one-class objective.

    Returns (per-timestep targets, per-timestep anomaly scores, mean batch
    loss, radius) where the radius may have been re-estimated per batch under
    the 'soft-boundary' objective once past the warm-up epochs.
    """
    model.train()
    batch_losses = []
    epoch_targets, epoch_scores = [], []
    for batch_idx, (data, target, aug1, aug2) in enumerate(train_loader):
        data = data.float().to(device)
        target = target.long().to(device)
        model_optimizer.zero_grad()
        _, features = model(data)
        # Flatten (B, C, T) -> (B*T, C) so every time step is scored against the center.
        features = features.permute(0, 2, 1).reshape(-1, config.final_out_channels)
        loss, score = train(features, center, length, epoch, config, device)
        # After the warm-up period, re-estimate the radius from the batch scores.
        if config.objective == 'soft-boundary' and epoch >= config.freeze_length_epoch:
            length = torch.tensor(get_radius(score, config.nu), device=device)
        batch_losses.append(loss.item())
        loss.backward()
        model_optimizer.step()
        epoch_targets.extend(target.reshape(-1).detach().cpu().numpy())
        epoch_scores.extend(score.detach().cpu().numpy())
    mean_loss = torch.tensor(batch_losses).mean()
    return (epoch_targets, epoch_scores, mean_loss, length)
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window in *test_dl* against the hypersphere center (no gradients).

    Returns (targets as np.ndarray, anomaly scores list, mean loss, all
    flattened feature projections concatenated into one tensor).
    """
    model.eval()
    batch_losses = []
    collected_targets, collected_scores = [], []
    projections = []
    with torch.no_grad():
        for data, target, aug1, aug2 in test_dl:
            data = data.float().to(device)
            target = target.long().to(device)
            _, features = model(data)
            # Flatten (B, C, T) -> (B*T, C): one score per time step.
            features = features.permute(0, 2, 1).reshape(-1, config.final_out_channels)
            loss, score = train(features, center, length, epoch, config, device)
            batch_losses.append(loss.item())
            collected_targets.extend(target.reshape(-1).detach().cpu().numpy())
            collected_scores.extend(score.detach().cpu().numpy())
            projections.append(features)
    mean_loss = torch.tensor(batch_losses).mean()
    all_projection = torch.cat(projections, dim=0)
    return (np.array(collected_targets), collected_scores, mean_loss, all_projection)
def train(feature1, center, length, epoch, config, device):
    """Compute the one-class loss and per-sample anomaly scores.

    Scores are cosine distances (1 - cosine similarity) between L2-normalised
    features and the normalised hypersphere center. Under the 'soft-boundary'
    objective only distances beyond the current radius *length* are penalised,
    scaled by 1/nu; otherwise the plain mean distance is minimised.
    """
    unit_center = F.normalize(center.unsqueeze(0), dim=1)
    unit_features = F.normalize(feature1, dim=1)
    score = 1 - F.cosine_similarity(unit_features, unit_center, eps=1e-06)
    if config.objective == 'soft-boundary':
        # Hinge on the distance overshoot beyond the radius.
        overshoot = score - length
        penalty = torch.mean(torch.max(torch.zeros_like(overshoot), overshoot))
        loss = length + (1 / config.nu) * penalty
    else:
        loss = torch.mean(score)
    return (loss, score)
def center_c(train_loader, model, device, center, config, eps=0.1):
    'Initialize hypersphere center c as the mean from an initial forward pass on the data.'
    n_samples = 0
    # NOTE: c aliases the caller's tensor, and `c += ...` mutates it in place;
    # the function also returns it, and the caller rebinds the result.
    c = center
    model.eval()
    with torch.no_grad():
        for data in train_loader:
            (data, target, aug1, aug2) = data
            data = data.float().to(device)
            (_, features) = model(data)
            # Flatten (B, C, T) -> (B*T, C) so every time step contributes to the mean.
            features = features.permute(0, 2, 1)
            features = features.reshape((- 1), config.final_out_channels)
            n_samples += features.shape[0]
            c += torch.sum(features, dim=0)
    c /= n_samples
    # Push near-zero coordinates away from 0 (to +/- eps) so that trivial
    # all-zero solutions are avoided when scoring distances to the center.
    c[((abs(c) < eps) & (c < 0))] = (- eps)
    c[((abs(c) < eps) & (c > 0))] = eps
    return c
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    flat_distances = dist.reshape(-1).clone().data.cpu().numpy()
    return np.quantile(flat_distances, 1 - nu)
def Trainer(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, train_dl, valid_dl, test_dl, device, logger, config, experiment_log_dir, training_mode):
    """TS-TCC-style training driver.

    Trains for config.num_epoch epochs, saves a final checkpoint under
    <experiment_log_dir>/saved_models/ckp_last.pt, and -- unless running in
    'self_supervised' mode -- steps the LR scheduler on validation loss and
    reports test loss/accuracy at the end.
    """
    logger.debug('Training started ....')
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    for epoch in range(1, (config.num_epoch + 1)):
        (train_loss, train_acc) = model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_dl, config, device, training_mode)
        (valid_loss, valid_acc, _, _) = model_evaluate(model, temporal_contr_model, valid_dl, device, training_mode)
        if (training_mode != 'self_supervised'):
            # Validation loss is not meaningful in self-supervised mode, so the
            # scheduler only runs for supervised/fine-tune modes.
            scheduler.step(valid_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} | Train Accuracy : {train_acc:2.4f}
Valid Loss : {valid_loss:.4f} | Valid Accuracy : {valid_acc:2.4f} ''')
    os.makedirs(os.path.join(experiment_log_dir, 'saved_models'), exist_ok=True)
    # Persist both the encoder and the temporal-contrasting head.
    chkpoint = {'model_state_dict': model.state_dict(), 'temporal_contr_model_state_dict': temporal_contr_model.state_dict()}
    torch.save(chkpoint, os.path.join(experiment_log_dir, 'saved_models', f'ckp_last.pt'))
    if (training_mode != 'self_supervised'):
        logger.debug('\nEvaluate on the Test set:')
        (test_loss, test_acc, _, _) = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)
        logger.debug(f'Test loss :{test_loss:0.4f} | Test Accuracy : {test_acc:0.4f}')
    logger.debug('\n################## Training is Done! #########################')
def model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_loader, config, device, training_mode):
    """One TS-TCC training epoch.

    In 'self_supervised' mode the loss combines the two cross-view temporal
    contrasting losses with an NT-Xent contextual contrasting loss; in any
    other mode it is standard cross-entropy on the labels. Returns
    (mean loss, mean accuracy); accuracy is 0 when self-supervised.
    """
    total_loss = []
    total_acc = []
    model.train()
    temporal_contr_model.train()
    for (batch_idx, (data, target, aug1, aug2)) in enumerate(train_loader):
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        model_optimizer.zero_grad()
        temp_cont_optimizer.zero_grad()
        if (training_mode == 'self_supervised'):
            (predictions1, features1) = model(aug1)
            (predictions2, features2) = model(aug2)
            # Normalize features before cross-view temporal contrasting.
            features1 = F.normalize(features1, dim=1)
            features2 = F.normalize(features2, dim=1)
            # Each direction predicts the other view; both losses are kept.
            (temp_cont_loss1, temp_cont_lstm_feat1) = temporal_contr_model(features1, features2)
            (temp_cont_loss2, temp_cont_lstm_feat2) = temporal_contr_model(features2, features1)
            zis = temp_cont_lstm_feat1
            zjs = temp_cont_lstm_feat2
        else:
            output = model(data)
        if (training_mode == 'self_supervised'):
            # Fixed weighting between temporal (lambda1) and contextual (lambda2) terms.
            lambda1 = 1
            lambda2 = 0.7
            nt_xent_criterion = NTXentLoss(device, config.batch_size, config.Context_Cont.temperature, config.Context_Cont.use_cosine_similarity)
            loss = (((temp_cont_loss1 + temp_cont_loss2) * lambda1) + (nt_xent_criterion(zis, zjs) * lambda2))
        else:
            (predictions, features) = output
            loss = criterion(predictions, target)
            total_acc.append(target.eq(predictions.detach().argmax(dim=1)).float().mean())
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        temp_cont_optimizer.step()
    total_loss = torch.tensor(total_loss).mean()
    if (training_mode == 'self_supervised'):
        # No labels are used, so accuracy is not defined in this mode.
        total_acc = 0
    else:
        total_acc = torch.tensor(total_acc).mean()
    return (total_loss, total_acc)
def model_evaluate(model, temporal_contr_model, test_dl, device, training_mode):
    """Evaluate on *test_dl*.

    In 'self_supervised' mode nothing is scored (returns zeros and empty
    lists); otherwise returns (mean cross-entropy loss, mean accuracy,
    predicted labels, true labels) with the label arrays as flat numpy arrays.
    """
    model.eval()
    temporal_contr_model.eval()
    total_loss = []
    total_acc = []
    criterion = nn.CrossEntropyLoss()
    outs = np.array([])
    trgs = np.array([])
    with torch.no_grad():
        for (data, target, _, _) in test_dl:
            (data, target) = (data.float().to(device), target.long().to(device))
            if (training_mode == 'self_supervised'):
                # No forward pass needed: there is nothing to score in this mode.
                pass
            else:
                output = model(data)
            if (training_mode != 'self_supervised'):
                (predictions, features) = output
                loss = criterion(predictions, target)
                total_acc.append(target.eq(predictions.detach().argmax(dim=1)).float().mean())
                total_loss.append(loss.item())
            if (training_mode != 'self_supervised'):
                # Index of the max logit = predicted class label.
                pred = predictions.max(1, keepdim=True)[1]
                outs = np.append(outs, pred.cpu().numpy())
                trgs = np.append(trgs, target.data.cpu().numpy())
    if (training_mode != 'self_supervised'):
        total_loss = torch.tensor(total_loss).mean()
    else:
        total_loss = 0
    if (training_mode == 'self_supervised'):
        total_acc = 0
        return (total_loss, total_acc, [], [])
    else:
        total_acc = torch.tensor(total_acc).mean()
        return (total_loss, total_acc, outs, trgs)
def ad_predict(target, scores, mode, nu):
    """Turn anomaly *scores* into binary predictions and compute metrics.

    mode selects the thresholding strategy:
      * 'one-anomaly': flag only the maximum-score point(s) (UCR-style, one anomaly);
      * 'fix': threshold at the (100*(1-nu))-th percentile of the scores;
      * otherwise: sweep candidate anomaly ratios 0.1%..30% and keep the
        threshold with the best F1.

    Returns (affiliation metrics dict, accumulated TSAD score, binary predictions).
    """
    # With no labeled anomalies, affiliation metrics are undefined (set to 0 below).
    if_aff = np.count_nonzero(target)
    if (if_aff != 0):
        events_gt = convert_vector_to_events(target)
    target = TimeSeries.from_pd(pd.DataFrame(target))
    # Standardize the scores, guarding against a zero standard deviation.
    scores = np.array(scores)
    mean = np.mean(scores)
    std = np.std(scores)
    if (std != 0):
        scores = ((scores - mean) / std)
    if (mode == 'one-anomaly'):
        # Flag only points attaining the maximum score -- but only if that
        # maximum is (nearly) unique; >10 ties likely indicate a flat score.
        mount = 0
        threshold = np.max(scores, axis=0)
        max_number = np.sum((scores == threshold))
        predict = np.zeros(len(scores))
        if (max_number <= 10):
            for (index, r2) in enumerate(scores):
                if (r2.item() >= threshold):
                    predict[index] = 1
                    mount += 1
        if (if_aff != 0):
            events_pred = convert_vector_to_events(predict)
            Trange = (0, len(predict))
            affiliation_max = pr_from_events(events_pred, events_gt, Trange)
        else:
            affiliation_max = dict()
            affiliation_max['precision'] = 0
            affiliation_max['recall'] = 0
        predict_ts = TimeSeries.from_pd(pd.DataFrame(predict))
        score_max = accumulate_tsad_score(ground_truth=target, predict=predict_ts)
    elif (mode == 'fix'):
        # Fixed threshold: the top `nu` fraction of scores are anomalies.
        detect_nu = (100 * (1 - nu))
        threshold = np.percentile(scores, detect_nu)
        mount = 0
        predict = np.zeros(len(scores))
        for (index, r2) in enumerate(scores):
            if (r2.item() > threshold):
                predict[index] = 1
                mount += 1
        if (if_aff != 0):
            events_pred = convert_vector_to_events(predict)
            Trange = (0, len(predict))
            affiliation_max = pr_from_events(events_pred, events_gt, Trange)
        else:
            affiliation_max = dict()
            affiliation_max['precision'] = 0
            affiliation_max['recall'] = 0
        predict_ts = TimeSeries.from_pd(pd.DataFrame(predict))
        score_max = accumulate_tsad_score(ground_truth=target, predict=predict_ts)
    else:
        # Threshold sweep over candidate anomaly percentages 0.001..0.3.
        # NOTE(review): here the percentile is (100 - detect_nu) with detect_nu
        # expressed in percent units, unlike the 'fix' branch's convention --
        # confirm this asymmetry is intended.
        nu_list = (np.arange(1, 301) / 1000.0)
        (f1_list, score_list, f1_list2, affiliation_list) = ([], [], [], [])
        for detect_nu in nu_list:
            threshold = np.percentile(scores, (100 - detect_nu))
            mount = 0
            predict = np.zeros(len(scores))
            for (index, r2) in enumerate(scores):
                if (r2.item() > threshold):
                    predict[index] = 1
                    mount += 1
            if (if_aff != 0):
                events_pred = convert_vector_to_events(predict)
                Trange = (0, len(predict))
                dic = pr_from_events(events_pred, events_gt, Trange)
                # Harmonic mean of affiliation precision/recall.
                affiliation_f1 = ((2 * (dic['precision'] * dic['recall'])) / (dic['precision'] + dic['recall']))
                f1_list2.append(affiliation_f1)
            else:
                dic = dict()
                dic['precision'] = 0
                dic['recall'] = 0
                f1_list2.append(0)
            affiliation_list.append(dic)
            predict_ts = TimeSeries.from_pd(pd.DataFrame(predict))
            score = accumulate_tsad_score(ground_truth=target, predict=predict_ts)
            f1 = score.f1(ScoreType.RevisedPointAdjusted)
            f1_list.append(f1)
            score_list.append(score)
        # Best threshold by affiliation F1 ...
        index_max1 = np.argmax(f1_list2, axis=0)
        affiliation_max = affiliation_list[index_max1]
        nu_max1 = nu_list[index_max1]
        print('Best affiliation quantile:', nu_max1)
        # ... and, independently, best by revised-point-adjusted F1.
        index_max2 = np.argmax(f1_list, axis=0)
        score_max = score_list[index_max2]
        nu_max2 = nu_list[index_max2]
        print('Best anomaly quantile:', nu_max2)
        # The returned binary predictions use the affiliation-optimal threshold.
        threshold = np.percentile(scores, (100 - nu_max1))
        mount = 0
        predict = np.zeros(len(scores))
        for (index, r2) in enumerate(scores):
            if (r2.item() > threshold):
                predict[index] = 1
                mount += 1
    return (affiliation_max, score_max, predict)
class reasonable_accumulator():
    """Running tally of UCR-style 'reasonable detection' outcomes.

    Tracks how many time series were evaluated (`cnt`) and how many of them
    were detected reasonably (`correct_num`). Accumulators can be merged
    with `+`.
    """

    def __init__(self, cnt=0, correct_num=0):
        # cnt: number of series evaluated; correct_num: number judged correct.
        self.cnt = cnt
        self.correct_num = correct_num

    def __add__(self, acc):
        # Merging two accumulators simply sums both tallies.
        return reasonable_accumulator(
            cnt=self.cnt + acc.cnt,
            correct_num=self.correct_num + acc.correct_num,
        )

    def get_all_metrics(self):
        """Return the aggregate accuracy as a dict."""
        return {'accuracy': self.correct_num / self.cnt}
def tsad_reasonable(ground_truth, predict, time_step):
    '''
    Check whether the first predicted anomaly point lands "reasonably" close
    to the labeled anomaly, following the UCR Time Series Anomaly Detection
    evaluation of:

    `Lu, Yue, et al. "Matrix Profile XXIV: Scaling Time Series Anomaly Detection to Trillions of Datapoints and
    Ultra-fast Arriving Data Streams." Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and
    Data Mining. 2022. <https://www.cs.ucr.edu/~eamonn/DAMP_long_version.pdf>`.

    A detection counts as correct when the first predicted anomaly index P
    falls inside the labeled anomaly widened on both sides by
    max(anomaly length, 100 // time_step). Returns a reasonable_accumulator
    with cnt=1 and correct_num in {0, 1}.
    '''
    if (isinstance(ground_truth, TimeSeries) and isinstance(predict, TimeSeries)):
        assert ((ground_truth.dim == 1) and (predict.dim == 1)), 'Can only evaluate anomaly scores when ground truth and prediction are single-variable time series.'
        ground_truth = ground_truth.univariates[ground_truth.names[0]]
        ys = list(map(int, ground_truth.np_values.astype(bool)))
        predict = predict.univariates[predict.names[0]]
        ys_pred = list(map(int, predict.np_values.astype(bool)))
    elif (isinstance(ground_truth, np.ndarray) and isinstance(predict, np.ndarray)):
        ys = list(map(int, ground_truth.astype(bool)))
        ys_pred = list(map(int, predict.astype(bool)))
    else:
        # BUG FIX: previously an unsupported input type fell through with `ys`
        # undefined, raising a confusing NameError below. Fail fast instead.
        raise TypeError('ground_truth and predict must both be TimeSeries or both be numpy arrays.')
    # Locate the first labeled anomalous run [begin, end).
    (begin, end) = ((- 1), (- 1))
    P = (- 100)  # first predicted anomaly index; sentinel when nothing is predicted
    for (idx, val) in enumerate(ys):
        if (idx and (ys[(idx - 1)] == 0) and (ys[idx] == 1)):
            begin = idx
        if (idx and (ys[(idx - 1)] == 1) and (ys[idx] == 0)):
            end = idx
            break
    # BUG FIX: when the labeled anomaly extends to the end of the series there
    # is no trailing 1->0 transition, so `end` stayed -1 and the acceptance
    # window below became empty/negative. Close the run at the series end.
    if ((begin != (- 1)) and (end == (- 1))):
        end = len(ys)
    for (idx, val) in enumerate(ys_pred):
        if (val > 0):
            P = idx
            break
    L = (end - begin)
    L_limit = int((100 / time_step))
    # Accept P anywhere inside the anomaly widened by max(L, L_limit) per side.
    if (min((begin - L), (begin - L_limit)) <= P <= max((end + L), (end + L_limit))):
        return reasonable_accumulator(1, 1)
    return reasonable_accumulator(1, 0)
def get_dataset(dataset_name: str, rootdir: str=None) -> TSADBaseDataset:
    '''
    Look up and construct a dataset loader by name.

    :param dataset_name: the name of the dataset to load, formatted as
        ``<name>`` or ``<name>_<subset>``, e.g. ``IOPsCompetition``
        or ``NAB_realAWSCloudwatch``
    :param rootdir: the directory where the desired dataset is stored. Not
        required if the package :py:mod:`ts_datasets` is installed in editable
        mode, i.e. with flag ``-e``.
    :return: the data loader for the desired dataset (and subset) desired
    '''
    name_subset = dataset_name.split('_', maxsplit=1)
    # Every exported class except the base class and this function is a dataset.
    valid_datasets = set(__all__).difference({'TSADBaseDataset', 'get_dataset'})
    if name_subset[0] not in valid_datasets:
        raise KeyError(f'Dataset should be formatted as <name> or <name>_<subset>, where <name> is one of {valid_datasets}. Got {dataset_name} instead.')
    cls = globals()[name_subset[0]]
    has_subset = len(name_subset) == 2
    if has_subset and not hasattr(cls, 'valid_subsets'):
        raise ValueError(f'Dataset {name_subset[0]} does not have any subsets, but attempted to load subset {name_subset[1]} by specifying dataset name {dataset_name}.')
    kwargs = dict(subset=name_subset[1]) if has_subset else dict()
    return cls(rootdir=rootdir, **kwargs)
class TSADBaseDataset(BaseDataset):
    # Class docstring is composed from shared module-level fragments.
    __doc__ = ((_intro_docstr + _main_fns_docstr) + _extra_note)

    @property
    def max_lead_sec(self):
        '''
        The maximum number of seconds an anomaly may be detected early, for
        this dataset. ``None`` signifies no early detections allowed, or that
        the user may override this value with something better suited for their
        purposes.
        '''
        return None

    @property
    def max_lag_sec(self):
        '''
        The maximum number of seconds after the start of an anomaly, that we
        consider detections to be accurate (and not ignored for being too late).
        ``None`` signifies that any detection in the window is acceptable, or
        that the user may override this value with something better suited for
        their purposes.
        '''
        return None

    def describe(self):
        """Print summary statistics about the anomalies in this dataset."""
        anom_bds = []          # (start, end) boundaries of each anomalous run
        anom_locs = []         # fractional position of each anomaly within its series
        anom_in_trainval = []  # whether the anomaly falls in the train/val split
        for (ts, md) in self:
            # Indices where the anomaly label flips (start or end of a run).
            boundaries = (md.anomaly.iloc[1:] != md.anomaly.values[:(- 1)])
            boundaries = boundaries[boundaries].index
            if (len(boundaries) == 0):
                continue
            ts_len = (ts.index[(- 1)] - ts.index[0])
            # Run that starts at the very beginning of the series.
            if md.anomaly.iloc[0]:
                anom_bds.append((ts.index[0], boundaries[0]))
                anom_locs.append(((boundaries[0] - ts.index[0]) / ts_len))
                anom_in_trainval.append(True)
            # Interior runs between consecutive label flips.
            for (t0, tf) in zip(boundaries[:(- 1)], boundaries[1:]):
                if md.anomaly[t0]:
                    anom_bds.append((t0, tf))
                    anom_locs.append(((tf - ts.index[0]) / ts_len))
                    anom_in_trainval.append(bool(md.trainval[t0]))
            # Run that extends to the end of the series.
            if md.anomaly[boundaries[(- 1)]]:
                anom_bds.append((boundaries[(- 1)], ts.index[(- 1)]))
                anom_locs.append(1.0)
                anom_in_trainval.append(False)
        print(('=' * 80))
        print(f'Time series in dataset have average length {int(np.mean([len(ts) for (ts, md) in self]))}.')
        print(f'Time series in dataset have {(len(anom_bds) / len(self)):.1f} anomalies on average.')
        print(f'{((sum(anom_in_trainval) / len(anom_in_trainval)) * 100):.1f}% of anomalies are in the train/val split of their respective time series.')
        print(f'Anomalies in dataset have average length {pd.Timedelta(np.mean([(tf - t0) for (t0, tf) in anom_bds]))}.')
        print(f'Average anomaly occurs {(np.mean(anom_locs) * 100):.1f}% (+/- {(np.std(anom_locs) * 100):.1f}%) of the way through its respective time series.')
        print(('=' * 80))
class IOpsCompetition(TSADBaseDataset):
    '''
    Wrapper to load the dataset used for the final round of the IOPs competition
    (http://iops.ai/competition_detail/?competition_id=5).

    The dataset contains 29 time series of KPIs gathered from large tech
    companies (Alibaba, Sogou, Tencent, Baidu, and eBay). These time series are
    sampled at either 1min or 5min intervals, and are split into train and test
    sections.

    Note that the original competition prohibited algorithms which directly
    hard-coded the KPI ID to set model parameters. So training a new model for
    each time series was against competition rules. They did, however, allow
    algorithms which analyzed each time series (in an automated way), and used
    the results of that automated analysis to perform algorithm/model selection.
    '''

    def __init__(self, rootdir=None):
        '''
        :param rootdir: The root directory at which the dataset can be found.
        '''
        super().__init__()
        if (rootdir is None):
            # Default to <merlion_root>/data/iops_competition relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'iops_competition')
        train = os.path.join(rootdir, 'phase2_train.csv')
        test = os.path.join(rootdir, 'phase2_test.csv')
        # Extract phase2.zip when the CSVs are not yet present.
        if ((not os.path.isfile(train)) or (not os.path.isfile(test))):
            z = os.path.join(rootdir, 'phase2.zip')
            if os.path.isfile(z):
                with zipfile.ZipFile(z, 'r') as zip_ref:
                    zip_ref.extractall(rootdir)
            else:
                # NOTE(review): the message mentions phase2_ground_truth.hdf but
                # the code checks for phase2_test.csv -- confirm which is intended.
                raise FileNotFoundError(f'Directory {rootdir} contains neither the extracted files phase2_train.csv and phase2_ground_truth.hdf, nor the compressed archive phase2.zip')
        (train_df, test_df) = (pd.read_csv(train), pd.read_csv(test))
        for df in [train_df, test_df]:
            df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
            # Rename the third column to 'anomaly' and coerce it to boolean.
            new_columns = df.columns.values
            new_columns[2] = 'anomaly'
            df.columns = new_columns
            df['anomaly'] = df['anomaly'].astype(bool)
        self.kpi_ids = sorted(train_df['KPI ID'].unique())
        # One (time_series, metadata) pair per KPI; train rows get trainval=True.
        for kpi in self.kpi_ids:
            train = train_df[(train_df['KPI ID'] == kpi)].drop(columns='KPI ID')
            train.insert(3, 'trainval', np.ones(len(train), dtype=bool))
            test = test_df[(test_df['KPI ID'] == kpi)].drop(columns='KPI ID')
            test.insert(3, 'trainval', np.zeros(len(test), dtype=bool))
            full = pd.concat([train, test]).set_index('timestamp')
            md_cols = ['anomaly', 'trainval']
            self.metadata.append(full[md_cols])
            self.time_series.append(full[[c for c in full.columns if (c not in md_cols)]])

    @property
    def max_lag_sec(self):
        '''
        The IOps competition allows anomalies to be detected up to 35min after
        they start. We are currently not using this, but we are leaving the
        override here as a placeholder, if we want to change it later.
        '''
        return None
class MSL(TSADBaseDataset):
    '''
    Soil Moisture Active Passive (SMAP) satellite and Mars Science Laboratory (MSL) rover Datasets.
    SMAP and MSL are two realworld public datasets, which are two real-world datasets expert-labeled by NASA.

    - source: https://github.com/khundman/telemanom
    '''
    # MSL and SMAP ship together in one archive, hence the shared URL/folder.
    url = 'https://www.dropbox.com/s/uv9ojw353qwzqht/SMAP.tar.gz?dl=1'

    def __init__(self, subset=None, rootdir=None):
        # `subset` is accepted for API uniformity but unused: MSL has no subsets.
        super().__init__()
        if (rootdir is None):
            # Default to <merlion_root>/data/smap relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'smap')
        download(_logger, rootdir, MSL.url, 'SMAP')
        # Convert raw telemanom files into pickles, then load the MSL portion.
        preprocess(_logger, os.path.join(rootdir, 'SMAP'), dataset='MSL')
        (df, metadata) = combine_train_test_datasets(*load_data(os.path.join(rootdir, 'SMAP'), 'MSL'))
        self.time_series.append(df)
        self.metadata.append(metadata)
class SMAP(TSADBaseDataset):
    '''
    Soil Moisture Active Passive (SMAP) satellite and Mars Science Laboratory (MSL) rover Datasets.
    SMAP and MSL are two realworld public datasets, which are two real-world datasets expert-labeled by NASA.

    - source: https://github.com/khundman/telemanom
    '''
    # MSL and SMAP ship together in one archive, hence the shared URL/folder.
    url = 'https://www.dropbox.com/s/uv9ojw353qwzqht/SMAP.tar.gz?dl=1'

    def __init__(self, subset=None, rootdir=None):
        # `subset` is accepted for API uniformity but unused: SMAP has no subsets.
        super().__init__()
        if (rootdir is None):
            # Default to <merlion_root>/data/smap relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'smap')
        download(_logger, rootdir, SMAP.url, 'SMAP')
        # Convert raw telemanom files into pickles, then load the SMAP portion.
        preprocess(_logger, os.path.join(rootdir, 'SMAP'), dataset='SMAP')
        (df, metadata) = combine_train_test_datasets(*load_data(os.path.join(rootdir, 'SMAP'), 'SMAP'))
        self.time_series.append(df)
        self.metadata.append(metadata)
def preprocess(logger, data_folder, dataset):
    """Convert raw telemanom files for *dataset* ('MSL' or 'SMAP') into pickles.

    Produces <dataset>_train.pkl, <dataset>_test.pkl and
    <dataset>_test_label.pkl inside *data_folder*; no-op if all three exist.
    """
    if (os.path.exists(os.path.join(data_folder, f'{dataset}_test_label.pkl')) and os.path.exists(os.path.join(data_folder, f'{dataset}_train.pkl')) and os.path.exists(os.path.join(data_folder, f'{dataset}_test.pkl'))):
        return
    logger.info(f'Preprocessing {dataset}')
    with open(os.path.join(data_folder, 'labeled_anomalies.csv'), 'r') as f:
        csv_reader = csv.reader(f, delimiter=',')
        res = [row for row in csv_reader][1:]  # drop the header row
    res = sorted(res, key=(lambda k: k[0]))  # stable channel order by channel id
    labels = []
    # Keep only channels belonging to this spacecraft; channel P-2 is excluded.
    data_info = [row for row in res if ((row[1] == dataset) and (row[0] != 'P-2'))]
    for row in data_info:
        anomalies = ast.literal_eval(row[2])  # e.g. "[[4690, 4774], ...]"
        length = int(row[(- 1)])
        label = np.zeros([length], dtype=bool)
        for anomaly in anomalies:
            # Anomaly ranges are inclusive of the end index.
            label[anomaly[0]:(anomaly[1] + 1)] = True
        labels.extend(label)
    labels = np.asarray(labels)
    with open(os.path.join(data_folder, f'{dataset}_test_label.pkl'), 'wb') as f:
        pickle.dump(labels, f)
    # Concatenate per-channel .npy arrays in the same channel order as the labels.
    for category in ['train', 'test']:
        data = []
        for row in data_info:
            data.extend(np.load(os.path.join(data_folder, category, (row[0] + '.npy'))))
        data = np.asarray(data)
        with open(os.path.join(data_folder, f'{dataset}_{category}.pkl'), 'wb') as f:
            pickle.dump(data, f)
def load_data(directory, dataset):
    """Load the pickled train/test arrays and test labels for *dataset*.

    Returns (train DataFrame, test DataFrame, test labels as ints), reading
    the '<dataset>_train.pkl', '<dataset>_test.pkl' and
    '<dataset>_test_label.pkl' files written by preprocess().
    """
    def _unpickle(suffix):
        with open(os.path.join(directory, f'{dataset}_{suffix}.pkl'), 'rb') as fh:
            return pickle.load(fh)

    test_data = _unpickle('test')
    test_labels = _unpickle('test_label')
    train_data = _unpickle('train')
    return (pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int))
class SMD(TSADBaseDataset):
    '''
    The Server Machine Dataset (SMD) is a new 5-week-long dataset from
    a large Internet company collected and made publicly available.
    It contains data from 28 server machines and each machine is monitored by 33 metrics.
    SMD is divided into training set and testing set of equal size.

    - source: https://github.com/NetManAIOps/OmniAnomaly
    '''
    filename = 'ServerMachineDataset'
    url = 'https://www.dropbox.com/s/x53ph5cru62kv0f/ServerMachineDataset.tar.gz?dl=1'
    # 28 machines in 3 groups: machine-1-1..1-8, 2-1..2-9, 3-1..3-11.
    valid_subsets = (([f'machine-1-{i}' for i in range(1, 9)] + [f'machine-2-{i}' for i in range(1, 10)]) + [f'machine-3-{i}' for i in range(1, 12)])

    def __init__(self, subset='all', rootdir=None):
        super().__init__()
        # Normalize `subset` to a list of machine names.
        if (subset == 'all'):
            subset = self.valid_subsets
        elif (type(subset) == str):
            assert (subset in self.valid_subsets), f'subset should be in {self.valid_subsets}, but got {subset}'
            subset = [subset]
        if (rootdir is None):
            # Default to <merlion_root>/data/smd relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'smd')
        download(_logger, rootdir, SMD.url, SMD.filename)
        # One (time_series, metadata) pair per requested machine.
        for s in subset:
            (df, metadata) = combine_train_test_datasets(*SMD._load_data(directory=os.path.join(rootdir, SMD.filename), sequence_name=s))
            self.time_series.append(df)
            self.metadata.append(metadata)

    @staticmethod
    def _load_data(directory, sequence_name):
        """Read the comma-separated train/test/label text files for one machine."""
        with open(os.path.join(directory, 'test', f'{sequence_name}.txt'), 'r') as f:
            test_data = np.genfromtxt(f, dtype=np.float32, delimiter=',')
        with open(os.path.join(directory, 'test_label', f'{sequence_name}.txt'), 'r') as f:
            test_labels = np.genfromtxt(f, dtype=np.float32, delimiter=',')
        with open(os.path.join(directory, 'train', f'{sequence_name}.txt'), 'r') as f:
            train_data = np.genfromtxt(f, dtype=np.float32, delimiter=',')
        return (pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int))
def combine_train_test_datasets(train_df, test_df, test_labels):
    """Stack the train and test frames into one minute-spaced time series.

    :param train_df: training-split values (labels implicitly all-normal)
    :param test_df: test-split values
    :param test_labels: per-row anomaly labels for the test split
    :return: ``(df, metadata)`` where ``metadata`` has a boolean ``trainval``
        column marking the train/validation rows and an ``anomaly`` column.
    """
    # Column names must be strings for downstream consumers.
    for frame in (train_df, test_df):
        frame.columns = [str(col) for col in frame.columns]
    combined = pd.concat([train_df, test_df]).reset_index()
    if 'index' in combined:
        combined.drop(columns=['index'], inplace=True)
    # Synthesize a timestamp index at one-minute granularity.
    combined.index = pd.to_datetime(combined.index * 60, unit='s')
    combined.index.rename('timestamp', inplace=True)
    n_train = train_df.shape[0]
    labels = np.concatenate([np.zeros(n_train, dtype=int), test_labels])
    meta = pd.DataFrame(
        {'trainval': combined.index < combined.index[n_train], 'anomaly': labels},
        index=combined.index,
    )
    return combined, meta
def download(logger, datapath, url, filename):
    """Download a ``.tar.gz`` archive to ``datapath`` and extract it.

    :param logger: logger used for progress messages
    :param datapath: directory where the archive is stored and extracted
    :param url: direct-download URL of the archive
    :param filename: base name (without extension) for the local archive file

    A ``_SUCCESS`` marker file is created after extraction, so repeated calls
    are no-ops once the data is in place.
    """
    os.makedirs(datapath, exist_ok=True)
    # BUG FIX: the archive path previously ignored the ``filename`` argument.
    compressed_file = os.path.join(datapath, f'{filename}.tar.gz')
    if not os.path.exists(compressed_file):
        logger.info('Downloading ' + url)
        with requests.get(url, stream=True) as r:
            with open(compressed_file, 'wb') as f:
                # Stream in 16 MiB chunks to bound memory usage.
                for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
                    if chunk:
                        f.write(chunk)
                        f.flush()
    if not os.path.exists(os.path.join(datapath, '_SUCCESS')):
        logger.info(f'Uncompressing {compressed_file}')
        # Context manager guarantees the archive handle is closed even on error.
        with tarfile.open(compressed_file, 'r:gz') as tar:
            tar.extractall(path=datapath)
        Path(os.path.join(datapath, '_SUCCESS')).touch()
class Synthetic(TSADBaseDataset):
    """
    Wrapper to load a synthetically generated dataset.
    The dataset was generated using three base time series, each of which
    was separately injected with shocks, spikes, dips and level shifts, making
    a total of 15 time series (including the base time series without anomalies).
    Subsets are defined by the base time series used ("horizontal",
    "seasonal", "upward_downward"), or the type of injected anomaly ("shock",
    "spike", "dip", "level"). The "anomaly" subset refers to all time series with
    injected anomalies (12) while "base" refers to all time series without them (3).
    """

    base_ts_subsets = ['horizontal', 'seasonal', 'upward_downward']
    anomaly_subsets = ['shock', 'spike', 'dip', 'level', 'trend']
    valid_subsets = ((['anomaly', 'all', 'base'] + base_ts_subsets) + anomaly_subsets)

    def __init__(self, subset='anomaly', rootdir=None):
        """
        :param subset: which group of the 15 series to load (see class docstring).
        :param rootdir: directory containing the CSV files; defaults to
            ``<merlion root>/data/synthetic_anomaly``.
        """
        super().__init__()
        assert (subset in self.valid_subsets), f'subset should be in {self.valid_subsets}, but got {subset}'
        self.subset = subset
        if (rootdir is None):
            # Resolve the default data directory relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'synthetic_anomaly')
        csvs = sorted(glob.glob(f'{rootdir}/*.csv'))
        # Files with injected anomalies have "anom" in their name.
        if (subset == 'base'):
            csvs = [csv for csv in csvs if ('anom' not in os.path.basename(csv))]
        elif (subset != 'all'):
            csvs = [csv for csv in csvs if ('anom' in os.path.basename(csv))]
        # Narrow further when the subset names a base series or an anomaly type.
        if (subset in (self.base_ts_subsets + self.anomaly_subsets)):
            csvs = [csv for csv in csvs if (subset in os.path.basename(csv))]
        for csv in csvs:
            df = pd.read_csv(csv)
            df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
            df = df.set_index('timestamp')
            # First column holds the values; a second column (if present) is the label.
            ts = df[df.columns[0:1]]
            # The first half of each series is designated train/validation.
            metadata = pd.DataFrame({'anomaly': (df['anomaly'].astype(bool) if (df.shape[1] > 1) else ([False] * len(df))), 'trainval': [(j < (len(df) * 0.5)) for j in range(len(df))]}, index=df.index)
            self.time_series.append(ts)
            self.metadata.append(metadata)
class UCR(TSADBaseDataset):
    """
    Data loader for the Hexagon ML/UC Riverside Time Series Anomaly Archive.

    See `here <https://compete.hexagon-ml.com/practice/competition/39/>`_ for details.

    Hoang Anh Dau, Eamonn Keogh, Kaveh Kamgar, Chin-Chia Michael Yeh, Yan Zhu,
    Shaghayegh Gharghabi, Chotirat Ann Ratanamahatana, Yanping Chen, Bing Hu,
    Nurjahan Begum, Anthony Bagnall , Abdullah Mueen, Gustavo Batista, & Hexagon-ML (2019).
    The UCR Time Series Classification Archive. URL https://www.cs.ucr.edu/~eamonn/time_series_data_2018/
    """

    def __init__(self, rootdir=None):
        """
        :param rootdir: directory where the dataset is (or will be) stored;
            defaults to ``<merlion root>/data/ucr``.
        """
        super().__init__()
        if rootdir is None:
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'ucr')
        self.download(rootdir)
        # ``time_series`` holds file names here; files are read lazily in __getitem__.
        self.time_series = sorted(glob.glob(os.path.join(rootdir, 'UCR_TimeSeriesAnomalyDatasets2021', 'FilesAreInHere', 'UCR_Anomaly_FullData', '*.txt')))

    def __getitem__(self, i):
        """Lazily load series ``i``; returns ``(df, metadata)``."""
        fname = self.time_series[i]
        # File names end with ..._<name>_<split>_<anom_start>_<anom_end>.txt
        split, anom_start, anom_end = [int(x) for x in fname[:-len('.txt')].split('_')[-3:]]
        name = fname.split('_')[-4]
        arr = np.loadtxt(fname)
        index = pd.date_range(start=0, periods=len(arr), freq='1min')
        df = pd.DataFrame({name: arr}, index=index)
        # FIX: removed unused ``trainval``/``anomaly`` locals that duplicated the
        # expressions below. The anomaly window is padded by 100 points on each
        # side, matching the archive's evaluation convention.
        metadata = pd.DataFrame(
            {
                'anomaly': [(anom_start - 100) <= j <= (anom_end + 100) for j in range(len(arr))],
                'trainval': [j < split for j in range(len(arr))],
            },
            index=index,
        )
        return df, metadata

    def download(self, rootdir):
        """Download and unzip the archive into ``rootdir`` if not already present."""
        filename = 'UCR_TimeSeriesAnomalyDatasets2021.zip'
        # BUG FIX: the URL previously omitted the archive file name.
        url = f'https://www.cs.ucr.edu/~eamonn/time_series_data_2018/{filename}'
        os.makedirs(rootdir, exist_ok=True)
        compressed_file = os.path.join(rootdir, filename)
        if not os.path.exists(compressed_file):
            logger.info('Downloading ' + url)
            with requests.get(url, stream=True) as r:
                with open(compressed_file, 'wb') as f:
                    # Stream in 16 MiB chunks to bound memory usage.
                    for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
                        if chunk:
                            f.write(chunk)
                            f.flush()
        if not os.path.isfile(os.path.join(rootdir, '_SUCCESS')):
            logger.info(f'Uncompressing {compressed_file}')
            with zipfile.ZipFile(compressed_file, 'r') as zip_ref:
                zip_ref.extractall(rootdir)
            Path(os.path.join(rootdir, '_SUCCESS')).touch()
class BaseDataset():
    # The class docstring is assembled from module-level fragments shared by
    # all dataset wrappers.
    __doc__ = (_intro_docstr + _main_fns_docstr)

    time_series: list
    """
    A list of all individual time series contained in the dataset. Iterating over
    the dataset will iterate over this list. Note that for some large datasets,
    ``time_series`` may be a list of filenames, which are read lazily either during
    iteration, or whenever ``__getitem__`` is invoked.
    """

    metadata: list
    """
    A list containing the metadata for all individual time series in the dataset.
    """

    def __init__(self):
        self.subset = None
        self.time_series = []
        self.metadata = []

    def __getitem__(self, i) -> Tuple[pd.DataFrame, pd.DataFrame]:
        # Returns the (time series, metadata) pair for index ``i``.
        return (self.time_series[i], self.metadata[i])

    def __len__(self):
        return len(self.time_series)

    def __iter__(self):
        # Generator over (time series, metadata) pairs, delegating to __getitem__
        # so lazy-loading subclasses behave correctly.
        return (self[i] for i in range(len(self)))

    def describe(self):
        """Print basic summary information for every time series in the dataset."""
        for ts_df in self.time_series:
            print(f'length of the data: {len(ts_df)}')
            print(f'timestamp index name: {ts_df.index.name}')
            print(f'number of data columns: {len(ts_df.columns)}')
            print('data columns names (the first 20): ')
            print(ts_df.columns[:20])
            print(f'number of null entries: {ts_df.isnull().sum()}')
def get_dataset(dataset_name: str, rootdir: str = None) -> BaseDataset:
    """
    :param dataset_name: the name of the dataset to load, formatted as
        ``<name>`` or ``<name>_<subset>``, e.g. ``EnergyPower`` or ``M4_Hourly``
    :param rootdir: the directory where the desired dataset is stored. Not
        required if the package :py:mod:`ts_datasets` is installed in editable
        mode, i.e. with flag ``-e``.
    :return: the data loader for the desired dataset (and subset) desired
    """
    parts = dataset_name.split('_', maxsplit=1)
    valid_datasets = set(__all__).difference({'get_dataset'})
    # The dataset class is looked up by name in this module's namespace.
    if parts[0] not in valid_datasets:
        raise KeyError(f'Dataset should be formatted as <name> or <name>_<subset>, where <name> is one of {valid_datasets}. Got {dataset_name} instead.')
    cls = globals()[parts[0]]
    has_subset = len(parts) == 2
    if has_subset and not hasattr(cls, 'valid_subsets'):
        raise ValueError(f'Dataset {parts[0]} does not have any subsets, but attempted to load subset {parts[1]} by specifying dataset name {dataset_name}.')
    kwargs = dict(subset=parts[1]) if has_subset else dict()
    return cls(rootdir=rootdir, **kwargs)
class EnergyPower(BaseDataset):
    """
    Wrapper to load the open source energy grid power usage dataset.

    - source: https://www.kaggle.com/robikscube/hourly-energy-consumption
    - contains one 10-variable time series
    """

    def __init__(self, rootdir=None):
        """
        :param rootdir: The root directory at which the dataset can be found.
        """
        super().__init__()
        if (rootdir is None):
            # Resolve the default data directory relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'multivariate', 'energy_power')
        # NOTE(review): this check assumes '/' path separators — may fail on Windows.
        assert ('energy_power' in rootdir.split('/')[(- 1)]), 'energy_power should be found as the last level of the directory for this dataset'
        dsetdirs = [rootdir]
        extension = 'csv.gz'
        fnames = sum([sorted(glob.glob(f'{d}/*.{extension}')) for d in dsetdirs], [])
        assert (len(fnames) == 1), f'rootdir {rootdir} does not contain dataset file.'
        # Truncate all columns to a common starting timestamp.
        start_timestamp = '2014-01-01 00:00:00'
        for (i, fn) in enumerate(sorted(fnames)):
            df = pd.read_csv(fn, index_col='Datetime', parse_dates=True)
            df = df[(df.index >= start_timestamp)]
            # NOTE(review): rationale for dropping the NI and PJM_Load columns is
            # not visible here — presumably incomplete coverage; confirm.
            df.drop(['NI', 'PJM_Load'], axis=1, inplace=True)
            df.index.rename('timestamp', inplace=True)
            assert isinstance(df.index, pd.DatetimeIndex)
            df.sort_index(inplace=True)
            self.time_series.append(df)
            # Everything up to 2018-01-01 is the train/validation split.
            self.metadata.append({'trainval': pd.Series((df.index <= '2018-01-01 00:00:00'), index=df.index), 'start_timestamp': start_timestamp})
class SeattleTrail(BaseDataset):
    """
    Wrapper to load the open source Seattle Trail pedestrian/bike traffic
    dataset.

    - source: https://www.kaggle.com/city-of-seattle/seattle-burke-gilman-trail
    - contains one 5-variable time series
    """

    def __init__(self, rootdir=None):
        """
        :param rootdir: The root directory at which the dataset can be found.
        """
        super().__init__()
        if rootdir is None:
            # Resolve the default data directory relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'multivariate', 'seattle_trail')
        assert 'seattle_trail' in rootdir.split('/')[-1], 'seattle_trail should be found as the last level of the directory for this dataset'
        data_files = sorted(glob.glob(f'{rootdir}/*.csv'))
        assert len(data_files) == 1, f'rootdir {rootdir} does not contain dataset file.'
        for path in data_files:
            frame = pd.read_csv(path)
            # Re-index by timestamp and drop the original Date column.
            frame['timestamp'] = pd.to_datetime(frame['Date'])
            frame.set_index('timestamp', inplace=True)
            frame.drop('Date', axis=1, inplace=True)
            assert isinstance(frame.index, pd.DatetimeIndex)
            frame.sort_index(inplace=True)
            self.time_series.append(frame)
            # Everything up to 2019-01-01 is the train/validation split.
            self.metadata.append({'trainval': pd.Series(frame.index <= '2019-01-01 00:00:00', index=frame.index), 'quantile_clip': 300})
class SolarPlant(BaseDataset):
    """
    Wrapper to load the open source solar plant power dataset.

    - source: https://www.nrel.gov/grid/solar-power-data.html
    - contains one 405-variable time series

    .. note::

        The loader currently only includes the first 100 (of 405) variables.
    """

    def __init__(self, rootdir=None, num_columns=100):
        """
        :param rootdir: The root directory at which the dataset can be found.
        :param num_columns: indicates how many univariate columns should be returned
        """
        super().__init__()
        if (rootdir is None):
            # Resolve the default data directory relative to this file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'multivariate', 'solar_plant')
        # NOTE(review): this check assumes '/' path separators — may fail on Windows.
        assert ('solar_plant' in rootdir.split('/')[(- 1)]), 'solar_plant should be found as the last level of the directory for this dataset'
        fnames = glob.glob(f'{rootdir}/*.csv')
        # If only the zipped archive is present, extract it first.
        if ((len(fnames) == 0) and os.path.isfile(f'{rootdir}/merged.zip')):
            with zipfile.ZipFile(f'{rootdir}/merged.zip', 'r') as zip_ref:
                zip_ref.extractall(rootdir)
            fnames = glob.glob(f'{rootdir}/*.csv')
        assert (len(fnames) == 1), f'rootdir {rootdir} does not contain dataset file.'
        for (i, fn) in enumerate(sorted(fnames)):
            df = pd.read_csv(fn)
            df['timestamp'] = pd.to_datetime(df['Datetime'])
            df.set_index('timestamp', inplace=True)
            df.drop(['LocalTime', 'Datetime'], axis=1, inplace=True)
            # Keep only the first ``num_columns`` power columns.
            num_columns = min(num_columns, len(df.columns))
            cols = [f'Power_{i}' for i in range(num_columns)]
            df = df[cols]
            assert isinstance(df.index, pd.DatetimeIndex)
            df.sort_index(inplace=True)
            self.time_series.append(df)
            # Everything up to 2006-10-01 is the train/validation split.
            self.metadata.append({'trainval': pd.Series((df.index <= '2006-10-01 00:00:00'), index=df.index), 'granularity': '30min', 'aggregation': 'Sum'})
class affiliation_accumulator():
    """Accumulates affiliation-based precision/recall across time series."""

    def __init__(self, cnt=0, precision=0, recall=0, individual_precision_probabilities=0, individual_recall_probabilities=0, individual_precision_distances=0, individual_recall_distances=0):
        self.cnt = cnt
        self.precision = precision
        self.recall = recall
        self.individual_precision_probabilities = individual_precision_probabilities
        self.individual_recall_probabilities = individual_recall_probabilities
        self.individual_precision_distances = individual_precision_distances
        self.individual_recall_distances = individual_recall_distances

    def __add__(self, other):
        # Only the aggregate fields participate in accumulation; the
        # ``individual_*`` fields keep their default values in the sum.
        return affiliation_accumulator(
            cnt=self.cnt + other.cnt,
            precision=self.precision + other.precision,
            recall=self.recall + other.recall,
        )

    def get_all_metrics(self):
        """Return mean precision/recall over all accumulated series."""
        return {'precision': self.precision / self.cnt, 'recall': self.recall / self.cnt}
def tsad_affiliation(ground_truth: TimeSeries, predict: TimeSeries):
    """
    Computes the components required to compute multiple different types of
    performance metrics for time series anomaly detection.

    :param ground_truth: single-variable ``TimeSeries`` or numpy array of labels
    :param predict: single-variable ``TimeSeries`` or numpy array of predictions
    :return: an ``affiliation_accumulator`` holding this series' contribution
    """
    if isinstance(ground_truth, TimeSeries):
        assert (ground_truth.dim == 1) and (predict.dim == 1), 'Can only evaluate anomaly scores when ground truth and prediction are single-variable time series.'
        ground_truth = ground_truth.univariates[ground_truth.names[0]]
        ys = list(map(int, ground_truth.np_values.astype(bool)))
        predict = predict.univariates[predict.names[0]]
        ys_pred = list(map(int, predict.np_values.astype(bool)))
    elif isinstance(ground_truth, np.ndarray):
        ys = list(map(int, ground_truth.astype(bool)))
        ys_pred = list(map(int, predict.astype(bool)))
    else:
        # BUG FIX: previously an unsupported type fell through and later raised
        # an opaque NameError on ``ys``.
        raise TypeError(f'Unsupported input type {type(ground_truth).__name__} for tsad_affiliation')
    # BUG FIX: removed the broad ``except Exception`` handler that called the
    # IPython ``embed()`` debug shell and then referenced an undefined
    # ``result``; errors now propagate to the caller.
    events_pred = convert_vector_to_events(ys_pred)
    events_gt = convert_vector_to_events(ys)
    if len(events_gt) == 0:
        # No labeled anomalies: nothing to score.
        return affiliation_accumulator(cnt=0)
    if len(events_pred) == 0:
        # Anomalies exist but nothing was predicted: count the miss.
        return affiliation_accumulator(cnt=1)
    result = pr_from_events(events_pred, events_gt, Trange=(0, max([max(x) for x in events_pred] + [max(x) for x in events_gt])))
    result['cnt'] = 1
    return affiliation_accumulator(**result)
class reasonable_accumulator():
    """Tallies, per time series, whether the first alarm was "reasonable"."""

    def __init__(self, cnt=0, correct_num=0):
        self.cnt = cnt
        self.correct_num = correct_num

    def __add__(self, other):
        combined_cnt = self.cnt + other.cnt
        combined_correct = self.correct_num + other.correct_num
        return reasonable_accumulator(cnt=combined_cnt, correct_num=combined_correct)

    def get_all_metrics(self):
        """Return the fraction of series whose detection was reasonable."""
        return {'accuracy': self.correct_num / self.cnt}
def tsad_reasonable(ground_truth, predict):
    """
    Computes the components required to compute multiple different types of
    performance metrics for time series anomaly detection.
    """
    if isinstance(ground_truth, TimeSeries) and isinstance(predict, TimeSeries):
        assert (ground_truth.dim == 1) and (predict.dim == 1), 'Can only evaluate anomaly scores when ground truth and prediction are single-variable time series.'
        ground_truth = ground_truth.univariates[ground_truth.names[0]]
        ys = list(map(int, ground_truth.np_values.astype(bool)))
        predict = predict.univariates[predict.names[0]]
        ys_pred = list(map(int, predict.np_values.astype(bool)))
    elif isinstance(ground_truth, np.ndarray) and isinstance(predict, np.ndarray):
        ys = list(map(int, ground_truth.astype(bool)))
        ys_pred = list(map(int, predict.astype(bool)))
    # Locate the first labeled anomaly window [begin, end). Transitions are
    # detected from index 1 onward (index 0 is deliberately skipped, matching
    # the original ``if idx and ...`` guard).
    begin = end = -1
    for pos in range(1, len(ys)):
        if ys[pos - 1] == 0 and ys[pos] == 1:
            begin = pos
        if ys[pos - 1] == 1 and ys[pos] == 0:
            end = pos
            break
    # First index at which the detector fires (-100 if it never does).
    P = -100
    for pos, flag in enumerate(ys_pred):
        if flag > 0:
            P = pos
            break
    # The first alarm is "reasonable" if it falls within the anomaly window
    # padded on each side by max(window length, 100) points.
    L = end - begin
    lo = min(begin - L, begin - 100)
    hi = max(end + L, end + 100)
    return reasonable_accumulator(1, 1) if lo <= P <= hi else reasonable_accumulator(1, 0)
def evaluate_predictions(model_names, dataset, all_model_preds, metric: TSADMetric, pointwise_metric: TSADMetric, point_adj_metric: TSADMetric, tune_on_test=False, unsupervised=False, debug=False):
    """Score pre-computed model predictions on every series in ``dataset``.

    For each time series the (optionally ensembled) models' post rules are
    re-trained on the chosen split, then scored under three accounting
    schemes: revised point-adjusted, pointwise, and point-adjusted.

    :param model_names: names of the models whose predictions are evaluated
    :param dataset: benchmark dataset; iterating yields (time series, metadata)
    :param all_model_preds: all_model_preds[m][i] is model m's prediction frame
        for series i, with columns "y" and "trainval"
    :param metric: tuning metric for the revised point-adjusted scheme
    :param pointwise_metric: tuning metric for the pointwise scheme
    :param point_adj_metric: tuning metric for the point-adjusted scheme
    :param tune_on_test: tune post rules on the test split instead of train
    :param unsupervised: disable metric-based (label-using) tuning
    :param debug: also print pointwise/point-adjusted/NAB scores
    :return: ``(score_rpa, score_pw, score_pa)`` accumulated over all series
    """
    (scores_rpa, scores_pw, scores_pa) = ([], [], [])
    # UCR gets special-case evaluation unless we are tuning on test labels.
    use_ucr_eval = (isinstance(dataset, UCR) and (unsupervised or (not tune_on_test)))
    for (i, (true, md)) in enumerate(tqdm(dataset)):
        # Ground truth for tuning (train or test split) and for final scoring.
        idx = ((~ md.trainval) if tune_on_test else md.trainval)
        true_train = df_to_merlion(true[idx], md[idx], get_ground_truth=True)
        true_test = df_to_merlion(true[(~ md.trainval)], md[(~ md.trainval)], get_ground_truth=True)
        # One pass per scheme: (use a simple threshold?, tuning metric, accumulator list).
        for (acc_id, (simple_threshold, opt_metric, scores)) in enumerate([((use_ucr_eval and (not tune_on_test)), metric, scores_rpa), (True, pointwise_metric, scores_pw), (True, point_adj_metric, scores_pa)]):
            if ((acc_id > 0) and use_ucr_eval):
                # For UCR the three schemes coincide; alias the first result.
                scores_pw = scores_rpa
                scores_pa = scores_rpa
                continue
            if (i >= min((len(p) for p in all_model_preds))):
                # At least one model has no prediction for this series.
                break
            # Gather each model's predictions, split into tuning/test portions.
            pred = [model_preds[i] for model_preds in all_model_preds]
            pred_train = [(p[(~ p['trainval'])] if tune_on_test else p[p['trainval']]) for p in pred]
            pred_train = [TimeSeries.from_pd(p['y']) for p in pred_train]
            pred_test = [p[(~ p['trainval'])] for p in pred]
            pred_test = [TimeSeries.from_pd(p['y']) for p in pred_test]
            # Re-create each model and train its calibrator + post rule.
            models = []
            for (name, train, og_pred) in zip(model_names, pred_train, pred):
                (m, prtc) = get_model(model_name=name, dataset=dataset, metric=opt_metric, tune_on_test=tune_on_test, unsupervised=unsupervised)
                # Thresholding is applied per-model only when not ensembling.
                m.config.enable_threshold = (len(model_names) == 1)
                if simple_threshold:
                    m.threshold = m.threshold.to_simple_threshold()
                if (tune_on_test and (not unsupervised)):
                    # Calibrate on the train split even though tuning uses test.
                    m.calibrator.train(TimeSeries.from_pd(og_pred['y'][og_pred['trainval']]))
                m.train_post_rule(anomaly_scores=train, anomaly_labels=true_train, post_rule_train_config=prtc)
                models.append(m)
            # Early/late detection windows; fall back to the models' suppression
            # windows when the dataset doesn't define a max lead time.
            (early, delay) = (dataset.max_lead_sec, dataset.max_lag_sec)
            if (early is None):
                leads = [getattr(m.threshold, 'suppress_secs', delay) for m in models]
                leads = [dt for dt in leads if (dt is not None)]
                early = (None if (len(leads) == 0) else max(leads))
            if (len(models) == 1):
                model = models[0]
                pred_test_raw = pred_test[0]
            else:
                # Build an ensemble: apply each model's post rule, train the
                # combiner, then threshold the combined score.
                threshold = dataset_to_threshold(dataset, tune_on_test)
                ensemble_threshold_train_config = dict(metric=(opt_metric if tune_on_test else None), max_early_sec=early, max_delay_sec=delay, unsup_quantile=None)
                model = DetectorEnsemble(models=models)
                use_m = [(len(p) > 1) for p in zip(models, pred_train)]
                pred_train = [m.post_rule(p) for (m, p, use) in zip(models, pred_train, use_m) if use]
                pred_test = [m.post_rule(p) for (m, p, use) in zip(models, pred_test, use_m) if use]
                pred_train = model.train_combiner(pred_train, true_train)
                if simple_threshold:
                    model.threshold = model.threshold.to_simple_threshold()
                model.threshold.alm_threshold = threshold
                model.train_post_rule(pred_train, true_train, ensemble_threshold_train_config)
                pred_test_raw = model.combiner(pred_test, true_test)
            if ((acc_id == 0) and use_ucr_eval and (not unsupervised)):
                # UCR convention: keep only the single highest-magnitude score.
                df = pred_test_raw.to_pd()
                df[(np.abs(df) < df.max())] = 0
                pred_test = TimeSeries.from_pd(df)
            else:
                pred_test = model.post_rule(pred_test_raw)
            score = accumulate_tsad_score(true_test, pred_test, max_early_sec=early, max_delay_sec=delay)
            if ((acc_id == 0) and use_ucr_eval):
                # Collapse UCR results to a single hit/miss per series.
                n_anom = (score.num_tp_anom + score.num_fn_anom)
                if (n_anom == 0):
                    (score.num_tp_anom, score.num_fn_anom, score.num_fp) = (0, 0, 0)
                elif (score.num_tp_anom > 0):
                    (score.num_tp_anom, score.num_fn_anom, score.num_fp) = (1, 0, 0)
                else:
                    (score.num_tp_anom, score.num_fn_anom, score.num_fp) = (0, 1, 1)
            scores.append(score)
    # Aggregate per-series scores into one accumulator per scheme.
    score_rpa = sum(scores_rpa, ScoreAcc())
    score_pw = sum(scores_pw, ScoreAcc())
    score_pa = sum(scores_pa, ScoreAcc())
    if (tune_on_test and (not unsupervised)):
        # Greedy pruning: drop whole series (highest-FP first) whenever doing
        # so improves the aggregate F1 for the corresponding scheme.
        for s in sorted(scores_rpa, key=(lambda x: x.num_fp), reverse=True):
            stype = ScoreType.RevisedPointAdjusted
            sprime = copy.deepcopy(score_rpa)
            sprime.num_tp_anom -= s.num_tp_anom
            sprime.num_fn_anom += s.num_tp_anom
            sprime.num_fp -= s.num_fp
            sprime.tp_score -= s.tp_score
            sprime.fp_score -= s.fp_score
            if (score_rpa.f1(stype) < sprime.f1(stype)):
                # Also remove this series' detection-delay bookkeeping.
                for (duration, delay) in zip(s.tp_anom_durations, s.tp_detection_delays):
                    sprime.tp_anom_durations.remove(duration)
                    sprime.tp_detection_delays.remove(delay)
                score_rpa = sprime
        for s in sorted(scores_pw, key=(lambda x: x.num_fp), reverse=True):
            stype = ScoreType.Pointwise
            sprime = copy.deepcopy(score_pw)
            sprime.num_tp_pointwise -= s.num_tp_pointwise
            sprime.num_fn_pointwise += s.num_tp_pointwise
            sprime.num_fp -= s.num_fp
            if (score_pw.f1(stype) < sprime.f1(stype)):
                score_pw = sprime
        for s in sorted(scores_pa, key=(lambda x: x.num_fp), reverse=True):
            stype = ScoreType.PointAdjusted
            sprime = copy.deepcopy(score_pa)
            sprime.num_tp_point_adj -= s.num_tp_point_adj
            sprime.num_fn_point_adj += s.num_tp_point_adj
            sprime.num_fp -= s.num_fp
            if (score_pa.f1(stype) < sprime.f1(stype)):
                score_pa = sprime
    # Render a negative mean-time-to-detect with an explicit leading minus.
    mttd = score_rpa.mean_time_to_detect()
    if (mttd < pd.to_timedelta(0)):
        mttd = f'-{(- mttd)}'
    print()
    print('Revised point-adjusted metrics')
    print(f'F1 score: {score_rpa.f1(ScoreType.RevisedPointAdjusted):.4f}')
    print(f'Precision: {score_rpa.precision(ScoreType.RevisedPointAdjusted):.4f}')
    print(f'Recall: {score_rpa.recall(ScoreType.RevisedPointAdjusted):.4f}')
    print()
    print(f'Mean Time To Detect Anomalies: {mttd}')
    print(f'Mean Detected Anomaly Duration: {score_rpa.mean_detected_anomaly_duration()}')
    print(f'Mean Anomaly Duration: {score_rpa.mean_anomaly_duration()}')
    print()
    if debug:
        print('Pointwise metrics')
        print(f'F1 score: {score_pw.f1(ScoreType.Pointwise):.4f}')
        print(f'Precision: {score_pw.precision(ScoreType.Pointwise):.4f}')
        print(f'Recall: {score_pw.recall(ScoreType.Pointwise):.4f}')
        print()
        print('Point-adjusted metrics')
        print(f'F1 score: {score_pa.f1(ScoreType.PointAdjusted):.4f}')
        print(f'Precision: {score_pa.precision(ScoreType.PointAdjusted):.4f}')
        print(f'Recall: {score_pa.recall(ScoreType.PointAdjusted):.4f}')
        print()
        print('NAB Scores')
        print(f'NAB Score (balanced): {score_rpa.nab_score():.4f}')
        print(f'NAB Score (high precision): {score_rpa.nab_score(fp_weight=0.22):.4f}')
        print(f'NAB Score (high recall): {score_rpa.nab_score(fn_weight=2.0):.4f}')
        print()
    return (score_rpa, score_pw, score_pa)
def df_to_merlion(df: pd.DataFrame, md: pd.DataFrame, get_ground_truth=False, transform=None) -> TimeSeries:
    """Converts a pandas dataframe time series to the Merlion format.

    :param df: the raw time series values
    :param md: per-timestamp metadata; must contain an "anomaly" column
    :param get_ground_truth: if True, return the anomaly labels from ``md``
        instead of the data itself
    :param transform: optional callable applied to the resulting TimeSeries
    :return: the converted (and optionally transformed) TimeSeries
    """
    # FIX: removed the unreachable ``if False and 'changepoint' in md`` branch
    # that would have OR-ed changepoint labels into the ground truth.
    series = md['anomaly'] if get_ground_truth else df
    time_series = TimeSeries.from_pd(series)
    if transform is not None:
        time_series = transform(time_series)
    return time_series
def dataset_to_threshold(dataset: TSADBaseDataset, tune_on_test=False):
    """Return the default alarm threshold for a benchmark dataset.

    Falls back to the dataset's own ``default_threshold`` attribute (if any),
    and finally to 3.
    """
    if isinstance(dataset, IOpsCompetition):
        return 2.25
    if isinstance(dataset, NAB):
        return 3.5
    if isinstance(dataset, Synthetic):
        return 2
    if isinstance(dataset, MSL):
        return 3.0
    if isinstance(dataset, SMAP):
        return 3.5
    if isinstance(dataset, SMD):
        # SMD uses a slightly lower threshold when tuning on the test split.
        return 2.5 if tune_on_test else 3
    if hasattr(dataset, 'default_threshold'):
        return dataset.default_threshold
    return 3
def get_model(model_name: str, dataset: TSADBaseDataset, metric: TSADMetric, tune_on_test=False, unsupervised=False) -> Tuple[DetectorBase, dict]:
    """Instantiate a benchmark model and its post-rule training config.

    :param model_name: key into the benchmark config JSON; "alias" entries are
        followed until a concrete model is reached
    :param dataset: the dataset being benchmarked; selects dataset-specific
        config overrides and the default alarm threshold
    :param metric: metric used to tune the post rule (ignored if unsupervised)
    :param tune_on_test: whether post rules are tuned on the test split
    :param unsupervised: if True, metric-based tuning is disabled
    :return: ``(model, post_rule_train_config)``
    """
    with open(CONFIG_JSON, 'r') as f:
        config_dict = json.load(f)
    if (model_name not in config_dict):
        raise NotImplementedError(f'Benchmarking not implemented for model {model_name}. Valid model names are {list(config_dict.keys())}')
    # Follow alias chains until we reach a concrete model entry.
    while ('alias' in config_dict[model_name]):
        model_name = config_dict[model_name]['alias']
    # Model kwargs: defaults overridden by any dataset-specific section.
    model_configs = config_dict[model_name]['config']
    model_type = config_dict[model_name].get('model_type', model_name)
    model_kwargs = model_configs['default']
    model_kwargs.update(model_configs.get(type(dataset).__name__, {}))
    model = ModelFactory.create(name=model_type, **model_kwargs)
    # Post-rule training config: defaults -> dataset overrides -> the model's
    # built-in defaults if nothing was configured.
    post_rule_train_configs = config_dict[model_name].get('post_rule_train_config', {})
    d = post_rule_train_configs.get('default', {})
    d.update(post_rule_train_configs.get(type(dataset).__name__, {}))
    if (len(d) == 0):
        d = copy.copy(model._default_post_rule_train_config)
    d['metric'] = (None if unsupervised else metric)
    d.update({'max_early_sec': dataset.max_lead_sec, 'max_delay_sec': dataset.max_lag_sec})
    # Seed the model's alarm threshold with the dataset default.
    t = dataset_to_threshold(dataset, tune_on_test)
    model.threshold.alm_threshold = t
    d['unsup_quantile'] = None
    return (model, d)
def resolve_model_name(model_name: str):
    """Follow "alias" links in the benchmark config until a concrete model name is reached."""
    with open(CONFIG_JSON, 'r') as f:
        config_dict = json.load(f)
    if model_name not in config_dict:
        raise NotImplementedError(f'Benchmarking not implemented for model {model_name}. Valid model names are {list(config_dict.keys())}')
    while 'alias' in config_dict[model_name]:
        alias = config_dict[model_name]['alias']
        # Guard against a trivial self-referencing alias loop.
        assert model_name != alias, 'Alias name cannot be the same as the model name'
        model_name = alias
    return model_name
def read_model_predictions(dataset: TSADBaseDataset, model_dir: str):
    """
    Returns a list of lists all_preds, where all_preds[i] is the model's raw
    anomaly scores for time series i in the dataset.
    """
    csv = os.path.join('results', 'anomaly', model_dir, f'pred_{dataset_to_name(dataset)}.csv.gz')
    preds = pd.read_csv(csv, dtype={'trainval': bool, 'idx': int})
    preds['timestamp'] = to_pd_datetime(preds['timestamp'])
    # Split the flat frame into one timestamp-indexed frame per series index.
    per_series = []
    for series_idx in sorted(preds['idx'].unique()):
        per_series.append(preds[preds['idx'] == series_idx].set_index('timestamp'))
    return per_series
def dataset_to_name(dataset: TSADBaseDataset):
    """Return a unique name for the dataset, including its subset when set."""
    base = type(dataset).__name__
    if dataset.subset is None:
        return base
    return f'{base}_{dataset.subset}'
class ScheduledOptim(object):
    """A simple wrapper class for learning rate scheduling (Noam warmup schedule)."""

    def __init__(self, optimizer, n_warmup_steps):
        """
        :param optimizer: the wrapped optimizer (torch-style interface)
        :param n_warmup_steps: number of warmup steps before the LR decays
        """
        self.optimizer = optimizer
        # Model dimension used in the schedule's d_model**-0.5 scale factor.
        self.d_model = 128
        self.n_warmup_steps = n_warmup_steps
        self.n_current_steps = 0
        # Step increment; doubled by increase_delta().
        self.delta = 1

    def state_dict(self):
        # BUG FIX: the inner optimizer's state dict was computed but never
        # returned, so this method always yielded None.
        return self.optimizer.state_dict()

    def step(self):
        """Step by the inner optimizer"""
        self.optimizer.step()

    def zero_grad(self):
        """Zero out the gradients by the inner optimizer"""
        self.optimizer.zero_grad()

    def increase_delta(self):
        """Double the per-call step increment."""
        self.delta *= 2

    def update_learning_rate(self):
        """Learning rate scheduling per step; returns the new learning rate."""
        self.n_current_steps += self.delta
        # lr = d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
        new_lr = (np.power(self.d_model, (- 0.5)) * np.min([np.power(self.n_current_steps, (- 0.5)), (np.power(self.n_warmup_steps, (- 1.5)) * self.n_current_steps)]))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = new_lr
        return new_lr
class Timer(object):
    """Simple wall-clock stopwatch."""

    def __init__(self):
        self.__start = time.time()

    def start(self):
        """Reset the stopwatch."""
        self.__start = time.time()

    def get_time(self, restart=True, format=False):
        """Return elapsed time, optionally restarting and/or formatting it.

        :param restart: reset the stopwatch after reading it
        :param format: return a ``timedelta`` instead of raw seconds
        """
        now = time.time()
        elapsed = now - self.__start
        if restart:
            self.__start = now
        return self.format(elapsed) if format else elapsed

    def format(self, seconds):
        """Render a number of seconds as a whole-second timedelta."""
        return datetime.timedelta(seconds=int(seconds))

    def print(self, name):
        """Print ``name`` followed by the elapsed time (and restart)."""
        print(name, self.get_time())