# code stringlengths 17 6.64M |
# |---|
class augmentations(object):
    # Data-augmentation hyperparameters (scaling/jitter variant).
    # NOTE(review): this class name is redefined further down the file;
    # at import time the last definition wins — confirm which is intended.
    def __init__(self):
        # Amplitude scaling factor for the "strong" augmentation.
        self.scale_ratio = 0.8
        # Magnitude of the random jitter noise.
        self.jitter_ratio = 0.2
|
class Config(object):
    """Hyperparameters for the IOpsCompetition dataset experiments."""
    def __init__(self):
        self.dataset = 'IOpsCompetition'
        # --- encoder architecture ---
        self.input_channels = 1          # univariate series
        self.kernel_size = 4
        self.stride = 1
        self.final_out_channels = 32
        self.num_classes = 2             # normal vs. anomaly
        self.dropout = 0.45
        self.features_len = 4
        # --- sliding-window construction ---
        self.window_size = 16            # subsequence length
        self.time_step = 2               # window stride
        # --- training schedule ---
        self.num_epoch = 40
        self.freeze_length_epoch = 20    # epoch from which the radius starts updating
        self.change_center_epoch = 10    # center re-estimated before this epoch
        self.nu = 0.01                   # slack fraction for the soft boundary
        self.center_eps = 0.15           # keeps center coordinates away from zero
        # --- Adam optimizer settings ---
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.lr = 0.0001
        self.drop_last = False
        self.batch_size = 256
        # Thresholding strategy and one-class objective variant.
        self.threshold_determine = 'floating'
        self.objective = 'soft-boundary'
        # Sub-configurations (classes defined elsewhere in this module).
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
|
class augmentations(object):
    # Data-augmentation hyperparameters (jitter/permutation variant).
    # NOTE(review): redefines the earlier `augmentations` class; the last
    # definition in the file is the one actually used at runtime.
    def __init__(self):
        # Scaling factor used by the jitter-and-scale augmentation.
        self.jitter_scale_ratio = 1.5
        # Magnitude of the random jitter noise.
        self.jitter_ratio = 0.4
        # Maximum number of segments for the permutation augmentation.
        self.max_seg = 4
|
class Context_Cont_configs(object):
    """Settings for the contextual-contrasting loss."""

    def __init__(self):
        # Softmax temperature used by the contrastive objective.
        self.temperature = 0.2
        # Compare embeddings with cosine similarity instead of dot product.
        self.use_cosine_similarity = True
|
class TC(object):
    """Settings for the temporal-contrasting module."""

    def __init__(self):
        # Hidden width of the temporal-contrasting head.
        self.hidden_dim = 64
        # Number of future timesteps predicted contrastively.
        self.timesteps = 2
|
class Config(object):
    """Hyperparameters for the UCR dataset experiments.

    NOTE(review): redefines the earlier `Config`; only the last definition
    is visible after import.
    """
    def __init__(self):
        self.dataset = 'UCR'
        # --- encoder architecture ---
        self.input_channels = 1          # univariate series
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 64
        self.num_classes = 2             # normal vs. anomaly
        self.dropout = 0.45
        self.features_len = 10
        # --- sliding-window construction ---
        self.window_size = 64            # subsequence length
        self.time_step = 4               # window stride
        # --- training schedule ---
        self.num_epoch = 40
        self.freeze_length_epoch = 10    # epoch from which the radius starts updating
        self.nu = 0.01                   # slack fraction for the soft boundary
        self.center_eps = 0.1            # keeps center coordinates away from zero
        # --- Adam optimizer settings ---
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.lr = 0.0003
        self.drop_last = True
        self.batch_size = 128
        # One-class objective and detection-threshold strategy.
        self.objective = 'one-class'
        self.threshold_determine = 'one-anomaly'
        self.detect_nu = 0.0005          # quantile used at detection time
        # Sub-configurations (classes defined elsewhere in this module).
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
|
class augmentations(object):
    # Data-augmentation hyperparameters (UCR variant).  This is the last
    # definition of `augmentations` visible here, so it shadows the earlier ones.
    def __init__(self):
        # Scaling factor used by the jitter-and-scale augmentation.
        self.jitter_scale_ratio = 0.8
        # Magnitude of the random jitter noise.
        self.jitter_ratio = 0.2
        # Maximum number of segments for the permutation augmentation.
        self.max_seg = 8
|
class Context_Cont_configs(object):
    """Contextual-contrasting loss settings (UCR variant)."""

    def __init__(self):
        # Temperature for the contrastive softmax.
        self.temperature = 0.2
        # Use cosine similarity when comparing embeddings.
        self.use_cosine_similarity = True
|
class TC(object):
    """Temporal-contrasting module settings (UCR variant)."""

    def __init__(self):
        # Hidden width of the temporal-contrasting head.
        self.hidden_dim = 100
        # Number of future timesteps predicted contrastively.
        self.timesteps = 2
|
class Load_Dataset(Dataset):
    """Wraps a ``{'samples', 'labels'}`` dict as a torch ``Dataset``.

    Accepts numpy arrays or tensors.  2-D sample arrays are expanded to
    ``(N, L, 1)``.  When ``config`` has an ``augmentation`` attribute, two
    augmented views are precomputed via ``DataTransform`` and returned by
    ``__getitem__`` alongside the raw sample and label.
    """
    def __init__(self, dataset, config):
        super(Load_Dataset, self).__init__()
        X_train = dataset['samples']
        y_train = dataset['labels']
        # BUG FIX: convert numpy input to tensors *before* unsqueezing —
        # np.ndarray has no .unsqueeze, so the original order crashed on
        # 2-D numpy input.  3-D input behavior is unchanged.
        if isinstance(X_train, np.ndarray):
            X_train = torch.from_numpy(X_train)
            y_train = torch.from_numpy(y_train).long()
        if len(X_train.shape) < 3:
            # Add a trailing channel dimension: (N, L) -> (N, L, 1).
            X_train = X_train.unsqueeze(2)
        self.x_data = X_train
        self.y_data = y_train
        self.len = X_train.shape[0]
        if hasattr(config, 'augmentation'):
            # Precompute two augmented views for contrastive training.
            (self.aug1, self.aug2) = DataTransform(self.x_data, config)

    def __getitem__(self, index):
        if hasattr(self, 'aug1'):
            return (self.x_data[index], self.y_data[index], self.aug1[index], self.aug2[index])
        else:
            return (self.x_data[index], self.y_data[index])

    def __len__(self):
        return self.len
|
def data_generator1(train_data, test_data, train_labels, test_labels, configs):
    """Build train/val/test DataLoaders of sliding windows with window-level labels.

    A window is labeled anomalous (1) when any of its first ``time_step``
    points is anomalous.  Returns
    ``(train_loader, val_loader, test_loader, test_anomaly_window_num)``.
    """
    train_time_series_ts = train_data
    test_time_series_ts = test_data
    # Normalize both splits with statistics fitted on train+test combined.
    mvn = MeanVarNormalize()
    mvn.train((train_time_series_ts + test_time_series_ts))
    (bias, scale) = (mvn.bias, mvn.scale)
    train_time_series = train_time_series_ts.to_pd().to_numpy()
    train_time_series = ((train_time_series - bias) / scale)
    test_time_series = test_time_series_ts.to_pd().to_numpy()
    test_time_series = ((test_time_series - bias) / scale)
    train_labels = train_labels.to_pd().to_numpy()
    test_labels = test_labels.to_pd().to_numpy()
    # Each anomaly segment contributes two 0/1 transitions, hence the /2.
    test_anomaly_window_num = int((len(np.where((test_labels[1:] != test_labels[:(- 1)]))[0]) / 2))
    # Slice the series (and labels) into overlapping windows.
    train_x = subsequences(train_time_series, configs.window_size, configs.time_step)
    test_x = subsequences(test_time_series, configs.window_size, configs.time_step)
    train_y = subsequences(train_labels, configs.window_size, configs.time_step)
    test_y = subsequences(test_labels, configs.window_size, configs.time_step)
    train_y_window = np.zeros(train_x.shape[0])
    test_y_window = np.zeros(test_x.shape[0])
    train_anomaly_window_num = 0
    # Window label = 1 iff any of the first ``time_step`` points is anomalous.
    for (i, item) in enumerate(train_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            train_anomaly_window_num += 1
            train_y_window[i] = 1
        else:
            train_y_window[i] = 0
    for (i, item) in enumerate(test_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            test_y_window[i] = 1
        else:
            test_y_window[i] = 0
    # Chronological (non-shuffled) 80/20 train/validation split.
    (train_x, val_x, train_y, val_y) = train_test_split(train_x, train_y_window, test_size=0.2, shuffle=False)
    # (N, L, C) -> (N, C, L) so Conv1d sees channels first.
    train_x = train_x.transpose((0, 2, 1))
    val_x = val_x.transpose((0, 2, 1))
    test_x = test_x.transpose((0, 2, 1))
    train_dat_dict = dict()
    train_dat_dict['samples'] = train_x
    train_dat_dict['labels'] = train_y
    val_dat_dict = dict()
    val_dat_dict['samples'] = val_x
    val_dat_dict['labels'] = val_y
    test_dat_dict = dict()
    test_dat_dict['samples'] = test_x
    test_dat_dict['labels'] = test_y_window
    train_dataset = Load_Dataset(train_dat_dict, configs)
    val_dataset = Load_Dataset(val_dat_dict, configs)
    test_dataset = Load_Dataset(test_dat_dict, configs)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, val_loader, test_loader, test_anomaly_window_num)
|
def data_generator2(train_data, test_data, train_labels, test_labels, configs):
    """Build train/val/test DataLoaders of sliding windows with per-point labels.

    Unlike ``data_generator1``, labels are kept as windowed label sequences
    rather than collapsed to one label per window.  Returns
    ``(train_loader, val_loader, test_loader, test_anomaly_window_num)``.
    """
    train_time_series_ts = train_data
    test_time_series_ts = test_data
    # Normalize both splits with statistics fitted on train+test combined.
    mvn = MeanVarNormalize()
    mvn.train((train_time_series_ts + test_time_series_ts))
    (bias, scale) = (mvn.bias, mvn.scale)
    train_time_series = train_time_series_ts.to_pd().to_numpy()
    train_time_series = ((train_time_series - bias) / scale)
    test_time_series = test_time_series_ts.to_pd().to_numpy()
    test_time_series = ((test_time_series - bias) / scale)
    train_labels = train_labels.to_pd().to_numpy()
    test_labels = test_labels.to_pd().to_numpy()
    # Each anomaly segment contributes two 0/1 transitions, hence the /2.
    test_anomaly_window_num = int((len(np.where((test_labels[1:] != test_labels[:(- 1)]))[0]) / 2))
    # Slice the series (and labels) into overlapping windows.
    train_x = subsequences(train_time_series, configs.window_size, configs.time_step)
    test_x = subsequences(test_time_series, configs.window_size, configs.time_step)
    train_y = subsequences(train_labels, configs.window_size, configs.time_step)
    test_y = subsequences(test_labels, configs.window_size, configs.time_step)
    # Chronological (non-shuffled) 80/20 train/validation split.
    (train_x, val_x, train_y, val_y) = train_test_split(train_x, train_y, test_size=0.2, shuffle=False)
    # (N, L, C) -> (N, C, L) so Conv1d sees channels first.
    train_x = train_x.transpose((0, 2, 1))
    val_x = val_x.transpose((0, 2, 1))
    test_x = test_x.transpose((0, 2, 1))
    train_dat_dict = dict()
    train_dat_dict['samples'] = train_x
    train_dat_dict['labels'] = train_y
    val_dat_dict = dict()
    val_dat_dict['samples'] = val_x
    val_dat_dict['labels'] = val_y
    test_dat_dict = dict()
    test_dat_dict['samples'] = test_x
    test_dat_dict['labels'] = test_y
    train_dataset = Load_Dataset(train_dat_dict, configs)
    val_dataset = Load_Dataset(val_dat_dict, configs)
    test_dataset = Load_Dataset(test_dat_dict, configs)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, val_loader, test_loader, test_anomaly_window_num)
|
class base_Model(nn.Module):
    """CNN encoder with a projection head for one-class representation learning.

    ``forward`` uses only conv_block1/conv_block2 and ``projection_head``;
    conv_block3, the LSTMs, ``output_layer`` and ``project`` are constructed
    but not called here — presumably used by other training variants
    (NOTE(review): confirm whether they are dead weight in this file).
    """
    def __init__(self, configs, device):
        super(base_Model, self).__init__()
        self.input_channels = configs.input_channels
        self.final_out_channels = configs.final_out_channels
        self.features_len = configs.features_len
        self.project_channels = configs.project_channels
        self.hidden_size = configs.hidden_size
        self.window_size = configs.window_size
        self.device = device
        self.num_layers = configs.num_layers
        self.kernel_size = configs.kernel_size
        self.stride = configs.stride
        self.dropout = configs.dropout
        # Two conv stages used by forward(); a third is built but unused there.
        self.conv_block1 = nn.Sequential(nn.Conv1d(self.input_channels, 32, kernel_size=self.kernel_size, stride=self.stride, bias=False, padding=(self.kernel_size // 2)), nn.BatchNorm1d(32), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1), nn.Dropout(self.dropout))
        self.conv_block2 = nn.Sequential(nn.Conv1d(32, self.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4), nn.BatchNorm1d(self.final_out_channels), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        self.conv_block3 = nn.Sequential(nn.Conv1d(64, self.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4), nn.BatchNorm1d(self.final_out_channels), nn.ReLU(), nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        # Sequence models, not called by this forward().
        self.encoder = nn.LSTM(self.final_out_channels, self.hidden_size, batch_first=True, num_layers=self.num_layers, bias=False, dropout=self.dropout)
        self.decoder = nn.LSTM(self.final_out_channels, self.hidden_size, batch_first=True, num_layers=self.num_layers, bias=False, dropout=self.dropout)
        self.output_layer = nn.Linear(self.hidden_size, self.final_out_channels)
        self.project = nn.Linear((self.final_out_channels * self.features_len), self.project_channels, bias=False)
        # MLP mapping flattened conv features to the projection space.
        self.projection_head = nn.Sequential(nn.Linear((self.final_out_channels * self.features_len), ((self.final_out_channels * self.features_len) // 2)), nn.BatchNorm1d(((self.final_out_channels * self.features_len) // 2)), nn.ReLU(inplace=True), nn.Linear(((self.final_out_channels * self.features_len) // 2), self.project_channels))
    def init_hidden_state(self, batch_size):
        # Zeroed (h, c) LSTM states on the model's device.
        h = torch.zeros((self.num_layers, batch_size, self.hidden_size)).to(self.device)
        c = torch.zeros((self.num_layers, batch_size, self.hidden_size)).to(self.device)
        return (h, c)
    def forward(self, x_in):
        # Diagnostic only: warn when the input already contains NaNs.
        if torch.isnan(x_in).any():
            print('tensor contain nan')
        x = self.conv_block1(x_in)
        x = self.conv_block2(x)
        # (N, C, L) -> (N, L, C) -> flattened (N, L*C) for the MLP head.
        hidden = x.permute(0, 2, 1)
        hidden = hidden.reshape(hidden.size(0), (- 1))
        project = self.projection_head(hidden)
        return project
|
class base_Model(nn.Module):
    """CNN encoder with a projection head for one-class representation learning.

    ``forward`` runs conv_block1 -> conv_block2 -> projection_head; the
    remaining submodules (conv_block3, LSTMs, output_layer, project) are
    constructed for architectural parity but not invoked here.
    """

    def __init__(self, configs, device):
        super(base_Model, self).__init__()
        self.input_channels = configs.input_channels
        self.final_out_channels = configs.final_out_channels
        self.features_len = configs.features_len
        self.project_channels = configs.project_channels
        self.hidden_size = configs.hidden_size
        self.window_size = configs.window_size
        self.device = device
        self.num_layers = configs.num_layers
        self.kernel_size = configs.kernel_size
        self.stride = configs.stride
        self.dropout = configs.dropout
        self.conv_block1 = nn.Sequential(
            nn.Conv1d(self.input_channels, 32, kernel_size=self.kernel_size,
                      stride=self.stride, bias=False, padding=(self.kernel_size // 2)),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
            nn.Dropout(self.dropout),
        )
        self.conv_block2 = nn.Sequential(
            nn.Conv1d(32, self.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4),
            nn.BatchNorm1d(self.final_out_channels),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )
        self.conv_block3 = nn.Sequential(
            nn.Conv1d(64, self.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4),
            nn.BatchNorm1d(self.final_out_channels),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )
        self.encoder = nn.LSTM(self.final_out_channels, self.hidden_size, batch_first=True,
                               num_layers=self.num_layers, bias=False, dropout=self.dropout)
        self.decoder = nn.LSTM(self.final_out_channels, self.hidden_size, batch_first=True,
                               num_layers=self.num_layers, bias=False, dropout=self.dropout)
        self.output_layer = nn.Linear(self.hidden_size, self.final_out_channels)
        flat_dim = self.final_out_channels * self.features_len
        self.project = nn.Linear(flat_dim, self.project_channels, bias=False)
        self.projection_head = nn.Sequential(
            nn.Linear(flat_dim, flat_dim // 2),
            nn.BatchNorm1d(flat_dim // 2),
            nn.ReLU(inplace=True),
            nn.Linear(flat_dim // 2, self.project_channels),
        )

    def init_hidden_state(self, batch_size):
        """Return zeroed (h, c) LSTM states on the model's device."""
        shape = (self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(shape).to(self.device), torch.zeros(shape).to(self.device))

    def forward(self, x_in):
        # Diagnostic only: warn when the input already contains NaNs.
        if torch.isnan(x_in).any():
            print('tensor contain nan')
        features = self.conv_block2(self.conv_block1(x_in))
        # (N, C, L) -> (N, L, C) -> flattened (N, L*C) for the MLP head.
        flat = features.permute(0, 2, 1).reshape(features.size(0), (- 1))
        return self.projection_head(flat)
|
class EarlyStopping():
    """Early-stops training when the tracked score stops improving.

    Args:
        save_path: directory where the best checkpoint is written.
        idx: index embedded in the checkpoint file name.
        patience: epochs to wait after the last improvement (default 10).
        verbose: print a message whenever a checkpoint is saved.
        delta: minimum change in ``score.correct_num`` that counts as an
            improvement (default 0).
    """

    def __init__(self, save_path, idx, patience=10, verbose=False, delta=0):
        self.save_path = save_path
        self.idx = idx
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.best_affiliation = None
        self.best_rpa_score = None
        self.early_stop = False
        self.delta = delta

    def __call__(self, score, affiliation, rpa_score, model):
        if (self.best_score is not None
                and score.correct_num < (self.best_score.correct_num + self.delta)):
            # No meaningful improvement: spend one unit of patience.
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
            return
        # First call, or a genuine improvement: remember it and checkpoint.
        self.best_score = score
        self.best_affiliation = affiliation
        self.best_rpa_score = rpa_score
        self.save_checkpoint(score, model)
        self.counter = 0

    def save_checkpoint(self, score, model):
        """Persist the model weights as ``<idx>_best_network.pth`` in save_path."""
        if self.verbose:
            print(f'score decreased ({self.best_score.correct_num:.6f} --> {score.correct_num:.6f}). Saving model ...')
        path = os.path.join(self.save_path, (str(self.idx).zfill(2) + '_best_network.pth'))
        torch.save(model.state_dict(), path)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, idx):
    """Train the one-class model, logging metrics and early-stopping on UCR.

    Returns ``(test_score_origin, test_affiliation, test_score,
    score_reasonable, predict)``.
    NOTE(review): this function is redefined later in the file with an extra
    ``experiment_log_dir`` parameter; the later definition shadows this one.
    """
    logger.debug('Training started ....')
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Initialize the hypersphere center from a forward pass over the data.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Keep re-estimating the center during the warm-up epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())
        # For UCR, evaluate every epoch and early-stop on the accuracy metric.
        if (config.dataset == 'UCR'):
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    logger.debug('\n################## Training is Done! #########################')
    if (config.dataset == 'UCR'):
        # Report the best (early-stopped) metrics rather than the last epoch's.
        score_reasonable = early_stopping.best_score
        test_affiliation = early_stopping.best_affiliation
        test_score = early_stopping.best_rpa_score
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        # Non-UCR datasets: evaluate once with the final model state.
        (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
        (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
        score_reasonable = reasonable_accumulator(1, 0)
        val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
        val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
        val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
        print('Valid affiliation-metrics')
        logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
        print('Valid RAP F1')
        logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """Run one training epoch with contrastive augmented views.

    Returns ``(all_target, all_predict, mean_loss, length)``; ``length`` is
    the soft-boundary radius, updated from the score quantile once
    ``epoch >= config.freeze_length_epoch``.
    """
    total_loss = []
    (all_target, all_predict) = ([], [])
    model.train()
    for (data, target, aug1, aug2) in train_loader:
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        # Stack the raw window with its two augmented views into one batch.
        all_data = torch.cat((data, aug1, aug2), dim=0)
        model_optimizer.zero_grad()
        (feature1, feature_dec1) = model(all_data)
        (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            # Update the hypersphere radius to the (1-nu)-quantile of scores.
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        all_target.extend(target.detach().cpu().numpy())
        all_predict.extend(score.detach().cpu().numpy())
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window in ``test_dl`` without gradient tracking.

    Returns ``(all_target, all_predict, mean_loss, all_projection)`` where
    ``all_projection`` stacks the encoder features of every batch.
    """
    model.eval()
    total_loss = []
    (all_target, all_predict) = ([], [])
    all_projection = []
    with torch.no_grad():
        for (data, target, aug1, aug2) in test_dl:
            (data, target) = (data.float().to(device), target.long().to(device))
            (feature1, feature_dec1) = model(data)
            (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
            total_loss.append(loss.item())
            target = target.reshape((- 1))
            all_target.extend(target.detach().cpu().numpy())
            all_predict.extend(score.detach().cpu().numpy())
            all_projection.append(feature1)
    total_loss = torch.tensor(total_loss).mean()
    all_projection = torch.cat(all_projection, dim=0)
    all_target = np.array(all_target)
    return (all_target, all_predict, total_loss, all_projection)
|
def train(feature1, feature_dec1, center, length, epoch, config, device):
    """Compute the one-class loss and per-sample anomaly scores.

    Scores are the summed cosine distances of both feature views from the
    (normalized) center; the loss adds a variance regularizer that pushes
    each feature dimension's std toward >= 1.
    """
    c = F.normalize(center.unsqueeze(0), dim=1)
    f_enc = F.normalize(feature1, dim=1)
    f_dec = F.normalize(feature_dec1, dim=1)
    # Cosine distance (1 - similarity) of each view from the center.
    dist_enc = 1 - F.cosine_similarity(f_enc, c, eps=1e-06)
    dist_dec = 1 - F.cosine_similarity(f_dec, c, eps=1e-06)
    # Hinge on the per-dimension standard deviation (collapse prevention).
    std_enc = torch.sqrt(f_enc.var([0]) + 0.0001)
    std_dec = torch.sqrt(f_dec.var([0]) + 0.0001)
    hinge_enc = torch.max(torch.zeros_like(std_enc), 1 - std_enc)
    hinge_dec = torch.max(torch.zeros_like(std_dec), 1 - std_dec)
    loss_sigam = torch.mean((hinge_enc + hinge_dec) / 2)
    score = dist_enc + dist_dec
    if config.objective == 'soft-boundary':
        # Penalize only the part of the score outside the radius ``length``.
        overflow = score - length
        loss_oc = length + (1 / config.nu) * torch.mean(torch.max(torch.zeros_like(overflow), overflow))
    else:
        loss_oc = torch.mean(score)
    loss = (config.omega1 * loss_oc) + (config.omega2 * loss_sigam)
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    n_samples = 0
    c = center
    model.eval()
    with torch.no_grad():
        for batch in train_loader:
            data, _target, aug1, aug2 = batch
            data = data.float().to(device)
            aug1, aug2 = aug1.float().to(device), aug2.float().to(device)
            # Feed the raw window together with both augmented views.
            combined = torch.cat((data, aug1, aug2), dim=0)
            outputs, dec = model(combined)
            n_samples += outputs.shape[0]
            c += torch.cat((outputs, dec), dim=0).sum(dim=0)
    # Two feature views per sample, hence the factor of 2.
    c /= (2 * n_samples)
    # Clamp near-zero coordinates away from 0 so distances stay informative.
    c[(abs(c) < eps) & (c < 0)] = (- eps)
    c[(abs(c) < eps) & (c > 0)] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    flat = dist.reshape((- 1)).clone().data.cpu().numpy()
    return np.quantile(flat, 1 - nu)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, experiment_log_dir, idx):
    """Train the one-class model (second variant, without augmented batches).

    Same flow as the earlier ``Trainer`` but takes ``experiment_log_dir``
    (unused in the body) and always recomputes the final metrics with
    ``ad_predict`` after the loop instead of reusing early-stopping bests.
    Returns ``(test_score_origin, test_affiliation, test_score,
    score_reasonable, predict)``.
    """
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Initialize the hypersphere center from a forward pass over the data.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Keep re-estimating the center during the warm-up epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())
        # For UCR, evaluate every epoch and early-stop on the accuracy metric.
        if (config.dataset == 'UCR'):
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    # Final evaluation with the last model state (not the early-stopped best).
    (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
    (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
    if (config.dataset == 'UCR'):
        score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        score_reasonable = reasonable_accumulator(1, 0)
    logger.debug('\n################## Training is Done! #########################')
    val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
    val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
    val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
    print('Valid affiliation-metrics')
    logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
    print('Valid RAP F1')
    logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """Run one training epoch (variant without augmented views).

    The loader still yields 4-tuples; the augmented views are ignored here.
    Returns ``(all_target, all_predict, mean_loss, length)``; ``length`` is
    the soft-boundary radius, updated from the score quantile once
    ``epoch >= config.freeze_length_epoch``.
    """
    total_loss = []
    (all_target, all_predict) = ([], [])
    model.train()
    for (data, target, aug1, aug2) in train_loader:
        (data, target) = (data.float().to(device), target.long().to(device))
        model_optimizer.zero_grad()
        (feature1, feature_dec1) = model(data)
        (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            # Update the hypersphere radius to the (1-nu)-quantile of scores.
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        all_target.extend(target.detach().cpu().numpy())
        all_predict.extend(score.detach().cpu().numpy())
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window in ``test_dl`` without gradient tracking.

    Returns ``(all_target, all_predict, mean_loss, all_projection)`` where
    ``all_projection`` stacks the encoder features of every batch.
    """
    model.eval()
    total_loss = []
    (all_target, all_predict) = ([], [])
    all_projection = []
    with torch.no_grad():
        for (data, target, aug1, aug2) in test_dl:
            (data, target) = (data.float().to(device), target.long().to(device))
            (feature1, feature_dec1) = model(data)
            (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
            total_loss.append(loss.item())
            target = target.reshape((- 1))
            all_target.extend(target.detach().cpu().numpy())
            all_predict.extend(score.detach().cpu().numpy())
            all_projection.append(feature1)
    total_loss = torch.tensor(total_loss).mean()
    all_projection = torch.cat(all_projection, dim=0)
    all_target = np.array(all_target)
    return (all_target, all_predict, total_loss, all_projection)
|
def train(feature1, feature_dec1, center, length, epoch, config, device):
    """Compute the one-class loss and per-sample anomaly scores.

    Scores are the summed cosine distances of both feature views from the
    (normalized) center; the loss adds a variance regularizer that pushes
    each feature dimension's std toward >= 1.
    """
    center = center.unsqueeze(0)
    center = F.normalize(center, dim=1)
    feature1 = F.normalize(feature1, dim=1)
    feature_dec1 = F.normalize(feature_dec1, dim=1)
    distance1 = F.cosine_similarity(feature1, center, eps=1e-06)
    distance_dec1 = F.cosine_similarity(feature_dec1, center, eps=1e-06)
    distance1 = (1 - distance1)
    distance_dec1 = (1 - distance_dec1)
    sigma_aug1 = torch.sqrt((feature1.var([0]) + 0.0001))
    # BUG FIX: the variance regularizer must act on the decoder *features*,
    # not on the scalar distance values; this matches the sibling
    # implementation of this function earlier in the file.
    sigma_aug2 = torch.sqrt((feature_dec1.var([0]) + 0.0001))
    sigma_loss1 = torch.max(torch.zeros_like(sigma_aug1), (1 - sigma_aug1))
    sigma_loss2 = torch.max(torch.zeros_like(sigma_aug2), (1 - sigma_aug2))
    loss_sigam = torch.mean(((sigma_loss1 + sigma_loss2) / 2))
    score = (distance1 + distance_dec1)
    if (config.objective == 'soft-boundary'):
        # Penalize only the part of the score outside the radius ``length``.
        diff1 = (score - length)
        loss_oc = (length + ((1 / config.nu) * torch.mean(torch.max(torch.zeros_like(diff1), diff1))))
    else:
        loss_oc = torch.mean(score)
    loss = ((config.omega1 * loss_oc) + (config.omega2 * loss_sigam))
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    n_samples = 0
    c = center
    model.eval()
    with torch.no_grad():
        for data, _target, _aug1, _aug2 in train_loader:
            data = data.float().to(device)
            outputs, dec = model(data)
            n_samples += outputs.shape[0]
            c += torch.cat((outputs, dec), dim=0).sum(dim=0)
    # Two feature views per sample, hence the factor of 2.
    c /= (2 * n_samples)
    # Clamp near-zero coordinates away from 0 so distances stay informative.
    c[(abs(c) < eps) & (c < 0)] = (- eps)
    c[(abs(c) < eps) & (c > 0)] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    values = dist.reshape((- 1)).clone().data.cpu().numpy()
    quantile = 1 - nu
    return np.quantile(values, quantile)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, experiment_log_dir, idx):
    """Full train/eval loop: init hypersphere center, train the one-class
    objective, log val/test losses each epoch, and report affiliation /
    revised-point-adjusted (RPA) metrics at the end."""
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    # Reduce LR when the training loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Hypersphere center, estimated from an initial forward pass.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    # Radius R ("length"); updated inside model_train for soft-boundary.
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Re-estimate the center during the first change_center_epoch epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())  # NOTE: stores the *validation* loss
        if (config.dataset == 'UCR'):
            # UCR: threshold scores each epoch and early-stop on accuracy.
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    logger.debug('\n################## Training is Done! #########################')
    if (config.dataset == 'UCR'):
        # Report the best-epoch metrics captured by early stopping.
        score_reasonable = early_stopping.best_score
        test_affiliation = early_stopping.best_affiliation
        test_score = early_stopping.best_rpa_score
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        # Non-UCR: threshold once on the final epoch's scores.
        (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
        (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
        score_reasonable = reasonable_accumulator(1, 0)
        val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
        val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
        val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
        print('Valid affiliation-metrics')
        logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
        print('Valid RAP F1')
        logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    # Final test metrics (best-epoch scores for UCR, last-epoch otherwise).
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """One training epoch: forward raw window plus both augmentations in a
    single stacked batch, optimize the one-class loss, and (soft-boundary
    only) refresh the radius from the score quantile after the freeze period.
    Returns (targets, scores, mean loss, radius)."""
    (total_loss, total_f1, total_precision, total_recall) = ([], [], [], [])
    (all_target, all_predict) = ([], [])
    model.train()
    for (batch_idx, (data, target, aug1, aug2)) in enumerate(train_loader):
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        # Stack original and augmented views into one forward pass.
        all_data = torch.cat((data, aug1, aug2), dim=0)
        model_optimizer.zero_grad()
        feature1 = model(all_data)
        (loss, score) = train(feature1, center, length, epoch, config, device)
        # Update radius R from this batch's scores before the gradient step.
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        predict = score.detach().cpu().numpy()
        target = target.detach().cpu().numpy()
        all_target.extend(target)
        all_predict.extend(predict)
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window in test_dl without gradients.

    Returns (targets ndarray, score list, mean loss, stacked projections).
    """
    model.eval()
    losses, targets, scores, projections = [], [], [], []
    with torch.no_grad():
        for batch, labels, _aug1, _aug2 in test_dl:
            batch = batch.float().to(device)
            labels = labels.long().to(device)
            feats = model(batch)
            loss, score = train(feats, center, length, epoch, config, device)
            losses.append(loss.item())
            scores.extend(score.detach().cpu().numpy())
            targets.extend(labels.reshape(-1).detach().cpu().numpy())
            projections.append(feats)
    mean_loss = torch.tensor(losses).mean()
    return (np.array(targets), scores, mean_loss, torch.cat(projections, dim=0))
|
def train(feature1, center, length, epoch, config, device):
    """One-class loss/score for a single projection batch against the center."""
    center = F.normalize(center.unsqueeze(0), dim=1)
    feature1 = F.normalize(feature1, dim=1)
    # anomaly score: cosine distance to the center
    score = 1 - F.cosine_similarity(feature1, center, eps=1e-06)
    # variance regularizer: discourage per-dimension feature collapse
    sigma = torch.sqrt(feature1.var([0]) + 0.0001)
    loss_sigam = torch.max(torch.zeros_like(sigma), 1 - sigma).mean()
    if config.objective == 'soft-boundary':
        overshoot = score - length
        loss_oc = length + (1 / config.nu) * torch.max(torch.zeros_like(overshoot), overshoot).mean()
    else:
        loss_oc = score.mean()
    loss = (config.omega1 * loss_oc) + (config.omega2 * loss_sigam)
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    count = 0
    c = center  # accumulated in place
    model.eval()
    with torch.no_grad():
        for batch, _target, aug_a, aug_b in train_loader:
            batch = batch.float().to(device)
            aug_a, aug_b = aug_a.float().to(device), aug_b.float().to(device)
            # raw window plus both augmentations in one stacked batch
            stacked = torch.cat((batch, aug_a, aug_b), dim=0)
            outputs = model(stacked)
            count += outputs.shape[0]
            c += outputs.sum(dim=0)
    c /= count
    # keep coordinates away from zero so cosine distances stay stable
    neg_mask = (c.abs() < eps) & (c < 0)
    pos_mask = (c.abs() < eps) & (c > 0)
    c[neg_mask] = -eps
    c[pos_mask] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    values = dist.flatten().detach().cpu().numpy()
    return np.quantile(values, 1.0 - nu)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, experiment_log_dir, idx):
    """Full train/eval loop: init hypersphere center, train the one-class
    objective, log val/test losses each epoch, and report affiliation /
    revised-point-adjusted (RPA) metrics at the end."""
    logger.debug('Training started ....')
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    # Reduce LR when the training loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Hypersphere center, estimated from an initial forward pass.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    # Radius R ("length"); updated inside model_train for soft-boundary.
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Re-estimate the center during the first change_center_epoch epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())  # NOTE: stores the *validation* loss
        if (config.dataset == 'UCR'):
            # UCR: threshold scores each epoch and early-stop on accuracy.
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    logger.debug('\n################## Training is Done! #########################')
    if (config.dataset == 'UCR'):
        # Report the best-epoch metrics captured by early stopping.
        score_reasonable = early_stopping.best_score
        test_affiliation = early_stopping.best_affiliation
        test_score = early_stopping.best_rpa_score
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        # Non-UCR: threshold once on the final epoch's scores.
        (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
        (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
        score_reasonable = reasonable_accumulator(1, 0)
        val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
        val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
        val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
        print('Valid affiliation-metrics')
        logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
        print('Valid RAP F1')
        logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    # Final test metrics (best-epoch scores for UCR, last-epoch otherwise).
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """One training epoch. The model returns (encoder, decoder) projections;
    both feed the one-class loss. Soft-boundary radius is refreshed from the
    score quantile once the freeze period has passed.
    Returns (targets, scores, mean loss, radius)."""
    (total_loss, total_f1, total_precision, total_recall) = ([], [], [], [])
    (all_target, all_predict) = ([], [])
    model.train()
    for (batch_idx, (data, target, aug1, aug2)) in enumerate(train_loader):
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        # Raw window plus both augmentations in one stacked forward pass.
        all_data = torch.cat((data, aug1, aug2), dim=0)
        model_optimizer.zero_grad()
        (feature1, feature_dec1) = model(all_data)
        (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
        # Update radius R from this batch's scores before the gradient step.
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        predict = score.detach().cpu().numpy()
        target = target.detach().cpu().numpy()
        all_target.extend(target)
        all_predict.extend(predict)
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window in test_dl without gradients.

    Returns (targets ndarray, score list, mean loss, stacked encoder projections).
    """
    model.eval()
    losses, targets, scores, projections = [], [], [], []
    with torch.no_grad():
        for batch, labels, _aug1, _aug2 in test_dl:
            batch = batch.float().to(device)
            labels = labels.long().to(device)
            feats, feats_dec = model(batch)
            loss, score = train(feats, feats_dec, center, length, epoch, config, device)
            losses.append(loss.item())
            scores.extend(score.detach().cpu().numpy())
            targets.extend(labels.reshape(-1).detach().cpu().numpy())
            projections.append(feats)
    mean_loss = torch.tensor(losses).mean()
    return (np.array(targets), scores, mean_loss, torch.cat(projections, dim=0))
|
def train(feature1, feature_dec1, center, length, epoch, config, device):
    """Loss/score from encoder-decoder agreement; the center argument is
    accepted but unused in this variant (center-free ablation)."""
    feature1 = F.normalize(feature1, dim=1)
    feature_dec1 = F.normalize(feature_dec1, dim=1)
    # anomaly score: cosine distance between the two projections
    score = 1 - F.cosine_similarity(feature1, feature_dec1, eps=1e-06)
    # variance regularizer on the encoder projection only
    sigma = torch.sqrt(feature1.var([0]) + 0.0001)
    loss_sigam = torch.max(torch.zeros_like(sigma), 1 - sigma).mean()
    if config.objective == 'soft-boundary':
        overshoot = score - length
        loss_oc = length + (1 / config.nu) * torch.max(torch.zeros_like(overshoot), overshoot).mean()
    else:
        loss_oc = torch.mean(score)
    loss = (config.omega1 * loss_oc) + (config.omega2 * loss_sigam)
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    total = 0
    c = center  # in-place accumulation onto the provided tensor
    model.eval()
    with torch.no_grad():
        for item in train_loader:
            raw, _target, aug_a, aug_b = item
            raw = raw.float().to(device)
            aug_a, aug_b = aug_a.float().to(device), aug_b.float().to(device)
            combined = torch.cat((raw, aug_a, aug_b), dim=0)
            enc, dec = model(combined)
            total += enc.shape[0]
            # encoder and decoder projections both contribute to the mean
            c += torch.cat((enc, dec), dim=0).sum(dim=0)
    c /= 2 * total
    # nudge near-zero coordinates to +/-eps
    c[(c.abs() < eps) & (c < 0)] = -eps
    c[(c.abs() < eps) & (c > 0)] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    # flatten, detach from autograd, and compute the quantile in numpy
    arr = dist.reshape(-1).detach().cpu().numpy()
    return np.quantile(arr, 1 - nu)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, experiment_log_dir, idx):
    """Full train/eval loop: init hypersphere center, train the one-class
    objective, log val/test losses each epoch, and report affiliation /
    revised-point-adjusted (RPA) metrics at the end."""
    logger.debug('Training started ....')
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    # Reduce LR when the training loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Hypersphere center, estimated from an initial forward pass.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    # Radius R ("length"); updated inside model_train for soft-boundary.
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Re-estimate the center during the first change_center_epoch epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())  # NOTE: stores the *validation* loss
        if (config.dataset == 'UCR'):
            # UCR: threshold scores each epoch and early-stop on accuracy.
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    logger.debug('\n################## Training is Done! #########################')
    if (config.dataset == 'UCR'):
        # Report the best-epoch metrics captured by early stopping.
        score_reasonable = early_stopping.best_score
        test_affiliation = early_stopping.best_affiliation
        test_score = early_stopping.best_rpa_score
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        # Non-UCR: threshold once on the final epoch's scores.
        (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
        (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
        score_reasonable = reasonable_accumulator(1, 0)
        val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
        val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
        val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
        print('Valid affiliation-metrics')
        logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
        print('Valid RAP F1')
        logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    # Final test metrics (best-epoch scores for UCR, last-epoch otherwise).
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """One training epoch (encoder+decoder projections against the center).
    Soft-boundary radius is refreshed from the score quantile once the
    freeze period has passed. Returns (targets, scores, mean loss, radius)."""
    (total_loss, total_f1, total_precision, total_recall) = ([], [], [], [])
    (all_target, all_predict) = ([], [])
    model.train()
    for (batch_idx, (data, target, aug1, aug2)) in enumerate(train_loader):
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        # Raw window plus both augmentations in one stacked forward pass.
        all_data = torch.cat((data, aug1, aug2), dim=0)
        model_optimizer.zero_grad()
        (feature1, feature_dec1) = model(all_data)
        (loss, score) = train(feature1, feature_dec1, center, length, epoch, config, device)
        # Update radius R from this batch's scores before the gradient step.
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        predict = score.detach().cpu().numpy()
        target = target.detach().cpu().numpy()
        all_target.extend(target)
        all_predict.extend(predict)
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window of test_dl with the frozen model.

    Returns (targets ndarray, score list, mean loss, stacked encoder projections).
    """
    model.eval()
    batch_losses = []
    targets, scores, projections = [], [], []
    with torch.no_grad():
        for window, labels, _aug1, _aug2 in test_dl:
            window = window.float().to(device)
            labels = labels.long().to(device)
            enc, dec = model(window)
            loss, score = train(enc, dec, center, length, epoch, config, device)
            batch_losses.append(loss.item())
            scores.extend(score.detach().cpu().numpy())
            targets.extend(labels.reshape(-1).detach().cpu().numpy())
            projections.append(enc)
    return (np.array(targets), scores, torch.tensor(batch_losses).mean(), torch.cat(projections, dim=0))
|
def train(feature1, feature_dec1, center, length, epoch, config, device):
    """Loss/score against the center, without the variance regularizer
    (sigma-free ablation): the loss is the one-class term alone."""
    c = F.normalize(center.unsqueeze(0), dim=1)
    enc = F.normalize(feature1, dim=1)
    dec = F.normalize(feature_dec1, dim=1)
    # summed cosine distance of both projections to the center
    score = (1 - F.cosine_similarity(enc, c, eps=1e-06)) + (1 - F.cosine_similarity(dec, c, eps=1e-06))
    if config.objective == 'soft-boundary':
        diff = score - length
        loss = length + (1 / config.nu) * torch.mean(torch.max(torch.zeros_like(diff), diff))
    else:
        loss = torch.mean(score)
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    seen = 0
    c = center  # accumulate in place on the caller's tensor
    model.eval()
    with torch.no_grad():
        for window, _labels, view_a, view_b in train_loader:
            window = window.float().to(device)
            view_a, view_b = view_a.float().to(device), view_b.float().to(device)
            stacked = torch.cat((window, view_a, view_b), dim=0)
            enc, dec = model(stacked)
            seen += enc.shape[0]
            c += torch.cat((enc, dec), dim=0).sum(dim=0)
    # mean over both projection sets
    c /= 2 * seen
    # keep every coordinate at least eps away from zero
    c[(c.abs() < eps) & (c < 0)] = -eps
    c[(c.abs() < eps) & (c > 0)] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    quantile_level = 1 - nu
    return np.quantile(dist.reshape(-1).detach().cpu().numpy(), quantile_level)
|
def Trainer(model, model_optimizer, train_dl, val_dl, test_dl, device, logger, config, experiment_log_dir, idx):
    """Full train/eval loop: init hypersphere center, train the one-class
    objective, log val/test losses each epoch, and report affiliation /
    revised-point-adjusted (RPA) metrics at the end."""
    logger.debug('Training started ....')
    save_path = ('./best_network/' + config.dataset)
    os.makedirs(save_path, exist_ok=True)
    early_stopping = EarlyStopping(save_path, idx)
    # Reduce LR when the training loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    (all_epoch_train_loss, all_epoch_test_loss) = ([], [])
    # Hypersphere center, estimated from an initial forward pass.
    center = torch.zeros(config.project_channels, device=device)
    center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
    # Radius R ("length"); updated inside model_train for soft-boundary.
    length = torch.tensor(0, device=device)
    for epoch in range(1, (config.num_epoch + 1)):
        (train_target, train_score, train_loss, length) = model_train(model, model_optimizer, train_dl, center, length, config, device, epoch)
        (val_target, val_score_origin, val_loss, all_projection) = model_evaluate(model, val_dl, center, length, config, device, epoch)
        (test_target, test_score_origin, test_loss, all_projection) = model_evaluate(model, test_dl, center, length, config, device, epoch)
        # Re-estimate the center during the first change_center_epoch epochs.
        if (epoch < config.change_center_epoch):
            center = center_c(train_dl, model, device, center, config, eps=config.center_eps)
        scheduler.step(train_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} |
Valid Loss : {val_loss:.4f} |
Test Loss : {test_loss:.4f} |
''')
        all_epoch_train_loss.append(train_loss.item())
        all_epoch_test_loss.append(val_loss.item())  # NOTE: stores the *validation* loss
        if (config.dataset == 'UCR'):
            # UCR: threshold scores each epoch and early-stop on accuracy.
            (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
            (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
            score_reasonable = tsad_reasonable(test_target, predict, config.time_step)
            test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
            test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
            test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
            print('Test accuracy metrics')
            logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
            early_stopping(score_reasonable, test_affiliation, test_score, model)
            print('Test affiliation-metrics')
            logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
            print('Test RAP F1')
            logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
            if early_stopping.early_stop:
                print('Early stopping')
                break
    logger.debug('\n################## Training is Done! #########################')
    if (config.dataset == 'UCR'):
        # Report the best-epoch metrics captured by early stopping.
        score_reasonable = early_stopping.best_score
        test_affiliation = early_stopping.best_affiliation
        test_score = early_stopping.best_rpa_score
        print('Test accuracy metrics')
        logger.debug(f'''Test accuracy: {score_reasonable.correct_num:2.4f}
''')
    else:
        # Non-UCR: threshold once on the final epoch's scores.
        (val_affiliation, val_score, _) = ad_predict(val_target, val_score_origin, config.threshold_determine, config.detect_nu)
        (test_affiliation, test_score, predict) = ad_predict(test_target, test_score_origin, config.threshold_determine, config.detect_nu)
        score_reasonable = reasonable_accumulator(1, 0)
        val_f1 = val_score.f1(ScoreType.RevisedPointAdjusted)
        val_precision = val_score.precision(ScoreType.RevisedPointAdjusted)
        val_recall = val_score.recall(ScoreType.RevisedPointAdjusted)
        print('Valid affiliation-metrics')
        logger.debug(f'''Test precision: {val_affiliation['precision']:2.4f} | Test recall: {val_affiliation['recall']:2.4f}
''')
        print('Valid RAP F1')
        logger.debug(f'''Valid F1: {val_f1:2.4f} | Valid precision: {val_precision:2.4f} | Valid recall: {val_recall:2.4f}
''')
    # Final test metrics (best-epoch scores for UCR, last-epoch otherwise).
    test_f1 = test_score.f1(ScoreType.RevisedPointAdjusted)
    test_precision = test_score.precision(ScoreType.RevisedPointAdjusted)
    test_recall = test_score.recall(ScoreType.RevisedPointAdjusted)
    print('Test affiliation-metrics')
    logger.debug(f'''Test precision: {test_affiliation['precision']:2.4f} | Test recall: {test_affiliation['recall']:2.4f}
''')
    print('Test RAP F1')
    logger.debug(f'''Test F1: {test_f1:2.4f} | Test precision: {test_precision:2.4f} | Test recall: {test_recall:2.4f}
''')
    return (test_score_origin, test_affiliation, test_score, score_reasonable, predict)
|
def model_train(model, model_optimizer, train_loader, center, length, config, device, epoch):
    """One training epoch (two-augmentation variant): the two augmented
    views are forwarded separately and compared to the center; the raw
    window itself is not forwarded. Returns (targets, scores, mean loss, radius)."""
    (total_loss, total_f1, total_precision, total_recall) = ([], [], [], [])
    (all_target, all_predict) = ([], [])
    model.train()
    for (batch_idx, (data, target, aug1, aug2)) in enumerate(train_loader):
        (data, target) = (data.float().to(device), target.long().to(device))
        (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
        model_optimizer.zero_grad()
        # One forward pass per augmented view.
        feature1 = model(aug1)
        feature2 = model(aug2)
        (loss, score) = train(feature1, feature2, center, length, epoch, config, device)
        # Update radius R from this batch's scores before the gradient step.
        if ((config.objective == 'soft-boundary') and (epoch >= config.freeze_length_epoch)):
            length = torch.tensor(get_radius(score, config.nu), device=device)
        total_loss.append(loss.item())
        loss.backward()
        model_optimizer.step()
        target = target.reshape((- 1))
        predict = score.detach().cpu().numpy()
        target = target.detach().cpu().numpy()
        all_target.extend(target)
        all_predict.extend(predict)
    total_loss = torch.tensor(total_loss).mean()
    return (all_target, all_predict, total_loss, length)
|
def model_evaluate(model, test_dl, center, length, config, device, epoch):
    """Score every window of test_dl via its two augmented views.

    Returns (targets ndarray, score list, mean loss, stacked view-1 projections).
    """
    model.eval()
    losses, targets, scores, projections = [], [], [], []
    with torch.no_grad():
        for window, labels, view_a, view_b in test_dl:
            window = window.float().to(device)
            labels = labels.long().to(device)
            view_a, view_b = view_a.float().to(device), view_b.float().to(device)
            feats_a = model(view_a)
            feats_b = model(view_b)
            loss, score = train(feats_a, feats_b, center, length, epoch, config, device)
            losses.append(loss.item())
            scores.extend(score.detach().cpu().numpy())
            targets.extend(labels.reshape(-1).detach().cpu().numpy())
            projections.append(feats_a)
    return (np.array(targets), scores, torch.tensor(losses).mean(), torch.cat(projections, dim=0))
|
def train(feature1, feature2, center, length, epoch, config, device):
    """Compute the one-class loss and anomaly score from two augmented views.

    feature1 / feature2: projections of the two augmentations, shape (B, D)
        -- assumed 2-D, TODO confirm against the model.
    center: hypersphere center, shape (D,).
    length: radius R (scalar tensor) for the soft-boundary objective.
    Returns (loss, score) with score the per-sample anomaly score.
    """
    # Cosine distance of both views to the (normalized) center.
    center = F.normalize(center.unsqueeze(0), dim=1)
    feature1 = F.normalize(feature1, dim=1)
    feature2 = F.normalize(feature2, dim=1)
    distance1 = 1 - F.cosine_similarity(feature1, center, eps=1e-06)
    distance2 = 1 - F.cosine_similarity(feature2, center, eps=1e-06)
    # Variance ("sigma") regularizer keeps feature dimensions from collapsing.
    # BUG FIX: the second term previously took the variance of the scalar
    # per-sample distance (distance2) instead of the second view's features,
    # inconsistent with sigma_aug1; use feature2.var like feature1.var.
    sigma_aug1 = torch.sqrt(feature1.var([0]) + 0.0001)
    sigma_aug2 = torch.sqrt(feature2.var([0]) + 0.0001)
    sigma_loss1 = torch.max(torch.zeros_like(sigma_aug1), 1 - sigma_aug1)
    sigma_loss2 = torch.max(torch.zeros_like(sigma_aug2), 1 - sigma_aug2)
    loss_sigam = torch.mean((sigma_loss1 + sigma_loss2) / 2)
    # Anomaly score: summed cosine distances of both views to the center.
    score = distance1 + distance2
    if config.objective == 'soft-boundary':
        # Soft-boundary Deep SVDD: penalize only scores beyond the radius.
        diff1 = score - length
        loss_oc = length + (1 / config.nu) * torch.mean(torch.max(torch.zeros_like(diff1), diff1))
    else:
        loss_oc = torch.mean(score)
    loss = (config.omega1 * loss_oc) + (config.omega2 * loss_sigam)
    return (loss, score)
|
def center_c(train_loader, model, device, center, config, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data.

    Both augmented views are forwarded; c is the mean over all their
    projections, accumulated in place on the provided center tensor.
    Coordinates too close to zero are pushed to +/- eps so cosine
    distances to the center stay well-defined.
    """
    n_samples = 0
    c = center
    model.eval()
    with torch.no_grad():
        for (data, target, aug1, aug2) in train_loader:
            data = data.float().to(device)
            # FIX: aug1/aug2 were previously converted to the device twice;
            # one conversion is sufficient.
            (aug1, aug2) = (aug1.float().to(device), aug2.float().to(device))
            outputs1 = model(aug1)
            outputs2 = model(aug2)
            n_samples += outputs1.shape[0]
            all_feature = torch.cat((outputs1, outputs2), dim=0)
            c += torch.sum(all_feature, dim=0)
    c /= (2 * n_samples)
    c[((abs(c) < eps) & (c < 0))] = (- eps)
    c[((abs(c) < eps) & (c > 0))] = eps
    return c
|
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    scores = dist.flatten().detach().cpu().numpy()
    # nu is the expected anomaly fraction, so take the (1 - nu) quantile
    return np.quantile(scores, 1 - nu)
|
class CPCConf(DetectorConfig):
    """Configuration for the CPC (Contrastive Predictive Coding) detector.

    NOTE(review): the @initializer decorator presumably auto-assigns every
    keyword argument of __init__ to an instance attribute -- confirm
    against its definition.
    """
    # Class-level default input transform (one shared instance).
    _default_transform = MeanVarNormalize()

    @initializer
    def __init__(self, logging_dir='./results/cpc', epochs=150, n_warmup_steps=100, batch_size=256, sequence_length=16, timestep=2, masked_frames=0, cuda=torch.cuda.is_available(), seed=1, log_interval=50, **kwargs):
        # NOTE: the `cuda` default is evaluated once at import time.
        # Remaining keyword arguments configure the DetectorConfig base.
        super(CPCConf, self).__init__(**kwargs)
|
class ForwardLibriSpeechRawXXreverseDataset(data.Dataset):
    """Yields (utt_id, waveform, time-reversed waveform) for each listed utterance."""

    def __init__(self, raw_file, list_file):
        """raw_file: HDF5 archive (e.g. train-clean-100.h5);
        list_file: text file with one utterance id per line."""
        self.raw_file = raw_file
        self.utts = []
        with open(list_file) as f:
            utt_ids = [line.strip() for line in f.readlines()]
        self.h5f = h5py.File(self.raw_file, 'r')
        for utt in utt_ids:
            utt_len = self.h5f[utt].shape[0]  # lookup also verifies the id exists
            self.utts.append(utt)

    def __len__(self):
        """Denotes the total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        original = self.h5f[utt_id][:]
        # forward copy plus a contiguous reversed copy
        return (utt_id, self.h5f[utt_id][:], original[::-1].copy())
|
class ForwardLibriSpeechReverseRawDataset(data.Dataset):
    def __init__(self, raw_file, list_file):
        """Dataset yielding (utt_id, time-reversed waveform) per utterance.

        raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: text file with one utterance id per line, e.g. list/training.txt
        """
        self.raw_file = raw_file
        self.utts = []
        with open(list_file) as handle:
            ids = [line.strip() for line in handle.readlines()]
        self.h5f = h5py.File(self.raw_file, 'r')
        for utt in ids:
            # touch the archive entry so missing ids fail fast at construction
            utt_len = self.h5f[utt].shape[0]
            self.utts.append(utt)

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        forward = self.h5f[utt_id][:]
        return (utt_id, forward[::-1].copy())
|
class ForwardLibriSpeechRawDataset(data.Dataset):
    def __init__(self, raw_file, list_file):
        """Dataset yielding (utt_id, full waveform) per utterance.

        raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: text file with one utterance id per line, e.g. list/training.txt
        """
        self.raw_file = raw_file
        self.utts = []
        with open(list_file) as handle:
            ids = [line.strip() for line in handle.readlines()]
        self.h5f = h5py.File(self.raw_file, 'r')
        for utt in ids:
            # touch the archive entry so missing ids fail fast at construction
            utt_len = self.h5f[utt].shape[0]
            self.utts.append(utt)

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        return (utt_id, self.h5f[utt_id][:])
|
class ReverseRawDataset(data.Dataset):
    def __init__(self, raw_file, list_file, audio_window):
        """Dataset of randomly-cropped, time-reversed raw audio windows.

        raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: text file with one utterance id per line, e.g. list/training.txt
        audio_window: number of samples per training window, e.g. 20480
        """
        self.raw_file = raw_file
        self.audio_window = audio_window
        self.utts = []
        with open(list_file) as f:
            temp = f.readlines()
        temp = [x.strip() for x in temp]
        self.h5f = h5py.File(self.raw_file, 'r')
        for i in temp:
            utt_len = self.h5f[i].shape[0]
            # keep only utterances long enough to sample a full window
            if utt_len > 20480:
                self.utts.append(i)
        # NOTE: a dead commented-out spk2idx-parsing block (kept as a bare
        # string literal in the original) was removed here.

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        utt_len = self.h5f[utt_id].shape[0]
        # sample a random window start so each epoch sees different crops
        start = np.random.randint(utt_len - self.audio_window + 1)
        original = self.h5f[utt_id][start:start + self.audio_window]
        return original[::-1].copy()
|
class ForwardDatasetSITWSilence(data.Dataset):
    """Dataset for forward-passing SITW recordings without VAD."""

    def __init__(self, wav_file):
        """wav_file: Kaldi-style wav.scp with lines '<utt-id> <wav-path>',
        e.g. /export/c01/jlai/thesis/data/sitw_dev_enroll/wav.scp
        """
        self.wav_file = wav_file
        with open(wav_file) as handle:
            lines = handle.readlines()
        self.utts = [line.strip().split(' ')[0] for line in lines]
        self.wavs = [line.strip().split(' ')[1] for line in lines]

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        (sample_rate, samples) = wavfile.read(self.wavs[index])
        return (self.utts[index], samples)
|
class ForwardDatasetSwbdSreSilence(data.Dataset):
    """Dataset for forward-passing swbd_sre or sre16 recordings without VAD."""

    def __init__(self, wav_dir, scp_file):
        """wav_dir: directory prefix prepended verbatim to each utterance entry,
        e.g. /export/c01/jlai/thesis/data/swbd_sre_combined/wav/
        scp_file: one utterance (relative wav path) per line.
        """
        self.wav_dir = wav_dir
        with open(scp_file) as handle:
            self.utts = [line.strip() for line in handle.readlines()]

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        # plain string concatenation: wav_dir is expected to end with '/'
        (sample_rate, samples) = wavfile.read(self.wav_dir + utt_id)
        return (utt_id, samples)
|
class RawDatasetSwbdSreOne(data.Dataset):
    """Dataset for swbd_sre with VAD; yields ONE random voiced segment per recording."""

    def __init__(self, raw_file, list_file):
        """raw_file: h5 archive, e.g. swbd_sre_combined_20k_20480.h5
        list_file: segment keys of the form '<utt>-<n>', e.g. list/training3.txt
        """
        self.raw_file = raw_file
        with open(list_file) as f:
            all_utt = [x.strip() for x in f.readlines()]
        # count how many voiced segments each unique utterance has
        self.utts = defaultdict(lambda: 0)
        for seg in all_utt:
            count = seg.split('-')[-1]
            utt_uniq = seg[:-(len(count) + 1)]  # strip the trailing '-<n>' suffix
            self.utts[utt_uniq] += 1

    def __len__(self):
        """Total number of unique utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        # BUG FIX: dict.keys() is not subscriptable in Python 3; materialize a list
        utt_id = list(self.utts.keys())[index]
        count = self.utts[utt_id]
        select = randint(1, count)  # choose one of this utterance's segments at random
        h5f = h5py.File(self.raw_file, 'r')
        return h5f[utt_id + '-' + str(select)][:]
|
class RawDatasetSwbdSreSilence(data.Dataset):
    """Dataset for swbd_sre without VAD; yields one random voiced/unvoiced window per recording."""

    def __init__(self, raw_file, list_file, audio_window):
        """raw_file: h5 archive, e.g. swbd_sre_combined_20k_20480.h5
        list_file: utterance ids, e.g. list/training2.txt or list/val2.txt
        audio_window: number of samples per window
        """
        self.raw_file = raw_file
        self.audio_window = audio_window
        with open(list_file) as handle:
            self.utts = [line.strip() for line in handle.readlines()]

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        h5f = h5py.File(self.raw_file, 'r')
        utt_len = h5f[utt_id].shape[0]
        # uniformly sample a window start position
        start = np.random.randint(utt_len - self.audio_window + 1)
        return h5f[utt_id][start:start + self.audio_window]
|
class RawDatasetSwbdSre(data.Dataset):
    """Dataset for swbd_sre with VAD; yields the full segment for each listed key."""

    def __init__(self, raw_file, list_file):
        """raw_file: h5 archive, e.g. swbd_sre_combined_20k_20480.h5
        list_file: segment keys, one per line, e.g. list/training.txt
        """
        self.raw_file = raw_file
        with open(list_file) as handle:
            self.utts = [line.strip() for line in handle.readlines()]

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        h5f = h5py.File(self.raw_file, 'r')
        return h5f[self.utts[index]][:]
|
class RawDatasetSpkClass(data.Dataset):
    def __init__(self, raw_file, list_file, index_file, audio_window, frame_window):
        """Speaker-classification dataset over randomly-cropped raw audio windows.

        raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: utterance ids, one per line, e.g. list/training.txt
        index_file: spk2idx mapping with lines '<speaker> <int-label>'
        audio_window: samples per crop, e.g. 20480
        frame_window: number of frames the label is repeated over
        """
        self.raw_file = raw_file
        self.audio_window = audio_window
        self.frame_window = frame_window
        with open(list_file) as handle:
            self.utts = [line.strip() for line in handle.readlines()]
        with open(index_file) as handle:
            entries = [line.strip() for line in handle.readlines()]
        self.spk2idx = {}
        for entry in entries:
            fields = entry.split(' ')
            self.spk2idx[fields[0]] = int(fields[1])

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        h5f = h5py.File(self.raw_file, 'r')
        utt_len = h5f[utt_id].shape[0]
        start = np.random.randint(utt_len - self.audio_window + 1)
        # the speaker id is the first dash-separated field of the utterance id
        speaker = utt_id.split('-')[0]
        label = torch.tensor(self.spk2idx[speaker])
        return (h5f[utt_id][start:start + self.audio_window], label.repeat(self.frame_window))
|
class RawXXreverseDataset(data.Dataset):
    """RawDataset variant returning each crop twice: (x, x_reversed)."""

    def __init__(self, raw_file, list_file, audio_window):
        """raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: utterance ids, one per line, e.g. list/training.txt
        audio_window: samples per crop, e.g. 20480
        """
        self.raw_file = raw_file
        self.audio_window = audio_window
        self.utts = []
        with open(list_file) as handle:
            ids = [line.strip() for line in handle.readlines()]
        self.h5f = h5py.File(self.raw_file, 'r')
        for utt in ids:
            # keep only utterances long enough to sample a full window
            if self.h5f[utt].shape[0] > 20480:
                self.utts.append(utt)

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        utt_len = self.h5f[utt_id].shape[0]
        start = np.random.randint(utt_len - self.audio_window + 1)
        original = self.h5f[utt_id][start:start + self.audio_window]
        return (original, original[::-1].copy())
|
class RawDataset(data.Dataset):
    def __init__(self, raw_file, list_file, audio_window):
        """Dataset of raw audio windows sampled uniformly at random from each utterance.

        raw_file: h5 archive, e.g. train-clean-100.h5
        list_file: utterance ids, one per line, e.g. list/training.txt
        audio_window: samples per crop, e.g. 20480
        """
        self.raw_file = raw_file
        self.audio_window = audio_window
        self.utts = []
        with open(list_file) as f:
            temp = f.readlines()
        temp = [x.strip() for x in temp]
        self.h5f = h5py.File(self.raw_file, 'r')
        for i in temp:
            utt_len = self.h5f[i].shape[0]
            # keep only utterances long enough to sample a full window
            if utt_len > 20480:
                self.utts.append(i)
        # NOTE: a dead commented-out spk2idx-parsing block (kept as a bare
        # string literal in the original) was removed here.

    def __len__(self):
        """Total number of utterances."""
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        utt_len = self.h5f[utt_id].shape[0]
        # sample a random window start so each epoch sees different crops
        start = np.random.randint(utt_len - self.audio_window + 1)
        return self.h5f[utt_id][start:start + self.audio_window]
|
def forwardXXreverse(args, cpc_model, device, data_loader, output_ark, output_scp):
    """Forward each utterance and its time-reversed copy through the CPC model,
    writing the resulting feature matrices to a Kaldi ark/scp pair via copy-feats.

    data_loader yields (utt_id, waveform, reversed waveform) triples; batch size
    is expected to be 1 (utt_id[0] is used as the output key).
    """
    logger.info('Starting Forward Passing')
    cpc_model.eval()
    ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:' + output_ark + ',' + output_scp
    with torch.no_grad():
        with ko.open_or_fd(ark_scp_output, 'wb') as f:
            for [utt_id, data, data_r] in data_loader:
                data = data.float().unsqueeze(1).to(device)
                data_r = data_r.float().unsqueeze(1).to(device)
                data = data.contiguous()
                # BUG FIX: was 'data_r = data.contiguous()', which silently
                # replaced the reversed input with the forward one
                data_r = data_r.contiguous()
                hidden1 = cpc_model.init_hidden1(len(data))
                hidden2 = cpc_model.init_hidden2(len(data))
                output = cpc_model.predict(data, data_r, hidden1, hidden2)
                mat = output.squeeze(0).cpu().numpy()
                ko.write_mat(f, mat, key=utt_id[0])
|
def forward_dct(args, cpc_model, device, data_loader, output_ark, output_scp, dct_dim=24):
    """Forward pass with a DCT applied to the CPC features before writing.

    Each feature matrix is compressed to its first `dct_dim` DCT-II coefficients
    and streamed into a Kaldi ark/scp pair via copy-feats.
    """
    logger.info('Starting Forward Passing')
    cpc_model.eval()
    ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:' + output_ark + ',' + output_scp
    with torch.no_grad():
        with ko.open_or_fd(ark_scp_output, 'wb') as f:
            for [utt_id, data] in data_loader:
                batch = data.float().unsqueeze(1).to(device)
                batch = batch.contiguous()
                hidden = cpc_model.init_hidden(len(batch))
                (output, hidden) = cpc_model.predict(batch, hidden)
                mat = output.squeeze(0).cpu().numpy()
                dct_mat = fft.dct(mat, type=2, n=dct_dim)
                ko.write_mat(f, dct_mat, key=utt_id[0])
|
def forward(cpc_model, device, data_loader, output_ark, output_scp):
    """Forward pass of raw audio through the CPC model, writing features to ark/scp.

    data_loader yields (utt_id, waveform) pairs; batch size is expected to be 1
    (utt_id[0] is used as the output key).
    """
    logger.info('Starting Forward Passing')
    cpc_model.eval()
    ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:' + output_ark + ',' + output_scp
    with torch.no_grad():
        with ko.open_or_fd(ark_scp_output, 'wb') as f:
            for [utt_id, data] in data_loader:
                batch = data.float().unsqueeze(1).to(device)
                batch = batch.contiguous()
                hidden = cpc_model.init_hidden(len(batch), use_gpu=False)
                (output, hidden) = cpc_model.predict(batch, hidden)
                mat = output.squeeze(0).cpu().numpy()
                ko.write_mat(f, mat, key=utt_id[0])
|
class UnsupportedDataType(Exception):
    """Raised when writing a vector/matrix whose dtype is not float32/float64."""
    pass
|
class UnknownVectorHeader(Exception):
    """Raised when a binary Kaldi vector header is neither 'FV ' nor 'DV '."""
    pass
|
class UnknownMatrixHeader(Exception):
    """Raised when a binary Kaldi matrix header is not a recognized type."""
    pass
|
class BadSampleSize(Exception):
    """Raised when the element sample size is not 4 or 8 bytes."""
    pass
|
class BadInputFormat(Exception):
    """Raised when input ends unexpectedly (e.g. EOF inside an ascii matrix)."""
    pass
|
class SubprocessFailed(Exception):
    """Raised (from a watcher thread) when a piped subprocess exits non-zero."""
    pass
|
def open_or_fd(file, mode='rb'):
    """fd = open_or_fd(file)
    Open a plain file, gzipped file, or pipe, or pass an fd straight through.

    Strips a Kaldi 'ark:'/'scp:' style prefix (with optional read-option
    flags) and honours a trailing ':offset' suffix by seeking to that byte
    position after opening.
    """
    offset = None
    try:
        # strip 'ark:'/'scp:' prefix with optional option flags (e.g. 'ark,t:')
        if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # a trailing ':<digits>' is a byte offset into the file
        if re.search(':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        if file[-1] == '|':
            fd = popen(file[:-1], 'rb')  # input pipe
        elif file[0] == '|':
            fd = popen(file[1:], 'wb')   # output pipe
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' was already an opened descriptor; forward it unchanged
        fd = file
    if offset is not None:
        fd.seek(int(offset))
    return fd
|
def popen(cmd, mode='rb'):
    """Run 'cmd' through the shell and return a pipe to its stdout or stdin.

    mode 'r'/'rb': read the command's stdout (text/binary);
    mode 'w'/'wb': write to the command's stdin (text/binary).
    A watcher thread raises SubprocessFailed if the command exits non-zero.
    """
    if not isinstance(cmd, str):
        raise TypeError('invalid cmd type (%s, expected string)' % type(cmd))
    import subprocess, io, threading

    def cleanup(proc, cmd):
        # runs in the watcher thread; surfaces a failed child process
        ret = proc.wait()
        if ret > 0:
            raise SubprocessFailed('cmd %s returned %d !' % (cmd, ret))
        return

    if mode not in ('r', 'rb', 'w', 'wb'):
        raise ValueError('invalid mode %s' % mode)
    reading = mode in ('r', 'rb')
    if reading:
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
    threading.Thread(target=cleanup, args=(proc, cmd)).start()
    pipe = proc.stdout if reading else proc.stdin
    if mode in ('r', 'w'):
        return io.TextIOWrapper(pipe)  # text modes get a decoding wrapper
    return pipe
|
def read_key(fd):
    """[key] = read_key(fd)
    Read the next utterance key from an opened ark/stream descriptor.

    Returns the key string, or None at end of stream. Keys must consist of
    [A-Za-z0-9_./-] characters and are terminated by a space or EOF.
    """
    chars = []
    while True:
        ch = fd.read(1).decode()
        if ch == '' or ch == ' ':  # EOF or key/value separator
            break
        chars.append(ch)
    key = ''.join(chars).strip()
    if key == '':
        return None
    assert re.match('^[\\.\\/a-zA-Z0-9_-]+$', key) != None
    return key
|
def read_ali_ark(file_or_fd):
    """Alias to 'read_vec_int_ark()' — alignments are stored as integer vectors."""
    return read_vec_int_ark(file_or_fd)
|
def read_vec_int_ark(file_or_fd):
    """generator(key, vec) = read_vec_int_ark(file_or_fd)
    Yield (key, vector<int>) tuples read from an ark file/stream.

    file_or_fd: ark, gzipped ark, pipe or opened file descriptor.

    Read ark to a dictionary:
        d = { u: d for u, d in kaldi_io.read_vec_int_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        key = read_key(fd)
        while key:
            yield (key, read_vec_int(fd))
            key = read_key(fd)
    finally:
        # only close descriptors we opened ourselves
        if fd is not file_or_fd:
            fd.close()
|
def read_vec_int(file_or_fd):
    """[int-vec] = read_vec_int(file_or_fd)
    Read a Kaldi integer vector from ascii or binary input.
    """
    fd = open_or_fd(file_or_fd)
    flag = fd.read(2).decode()
    if flag == '\x00B':
        # binary: int8 size marker, int32 length, then (int8 size, int32 value) pairs
        assert fd.read(1).decode() == '\x04'
        vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
        records = np.frombuffer(fd.read(vec_size * 5), dtype=[('size', 'int8'), ('value', 'int32')], count=vec_size)
        assert records[0]['size'] == 4
        ans = records[:]['value']
    else:
        # ascii: the rest of the line, optionally wrapped in '[' ... ']'
        tokens = (flag + fd.readline().decode()).strip().split()
        try:
            tokens.remove('[')
            tokens.remove(']')
        except ValueError:
            pass
        ans = np.array(tokens, dtype=int)
    if fd is not file_or_fd:
        fd.close()
    return ans
|
def write_vec_int(file_or_fd, v, key=''):
    """write_vec_int(f, v, key='')
    Write a binary Kaldi int32 vector to a filename or stream.

    file_or_fd: filename or opened binary descriptor,
    v: the vector to be stored,
    key: optional utterance id, written before the vector for ark files.
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    if sys.version_info[0] == 3:
        assert fd.mode == 'wb'
    try:
        if key != '':
            fd.write((key + ' ').encode())  # ark entries are '<key> <vector>'
        fd.write('\x00B'.encode())  # binary flag
        fd.write('\x04'.encode())   # int32 size marker
        fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
        for value in v:
            fd.write('\x04'.encode())
            fd.write(struct.pack(np.dtype('int32').char, value))
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_vec_flt_scp(file_or_fd):
    """generator(key, vec) = read_vec_flt_scp(file_or_fd)
    Yield (key, float-vector) tuples according to a Kaldi scp.

    file_or_fd: scp, gzipped scp, pipe or opened file descriptor.

    Read scp to a dictionary:
        d = { key: vec for key, vec in kaldi_io.read_vec_flt_scp(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        for line in fd:
            # scp lines are '<key> <rxfilename>'
            (key, rxfile) = line.decode().split(' ')
            yield (key, read_vec_flt(rxfile))
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_vec_flt_ark(file_or_fd):
    """generator(key, vec) = read_vec_flt_ark(file_or_fd)
    Yield (key, vector<float>) tuples read from an ark file/stream.

    file_or_fd: ark, gzipped ark, pipe or opened file descriptor.

    Read ark to a dictionary:
        d = { u: d for u, d in kaldi_io.read_vec_flt_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        key = read_key(fd)
        while key:
            yield (key, read_vec_flt(fd))
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_vec_flt(file_or_fd):
    """[flt-vec] = read_vec_flt(file_or_fd)
    Read a Kaldi float vector (float32 'FV ' or float64 'DV '), ascii or binary.
    """
    fd = open_or_fd(file_or_fd)
    flag = fd.read(2).decode()
    if flag == '\x00B':
        header = fd.read(3).decode()
        if header == 'FV ':
            sample_size = 4  # float32
        elif header == 'DV ':
            sample_size = 8  # float64
        else:
            raise UnknownVectorHeader("The header contained '%s'" % header)
        assert sample_size > 0
        assert fd.read(1).decode() == '\x04'  # int32 size marker
        vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
        buf = fd.read(vec_size * sample_size)
        if sample_size == 4:
            ans = np.frombuffer(buf, dtype='float32')
        elif sample_size == 8:
            ans = np.frombuffer(buf, dtype='float64')
        else:
            raise BadSampleSize
        # NOTE: matches original behavior — the binary path returns without
        # closing a descriptor opened by open_or_fd
        return ans
    # ascii: the rest of the line, optionally wrapped in '[' ... ']'
    tokens = (flag + fd.readline().decode()).strip().split()
    try:
        tokens.remove('[')
        tokens.remove(']')
    except ValueError:
        pass
    ans = np.array(tokens, dtype=float)
    if fd is not file_or_fd:
        fd.close()
    return ans
|
def write_vec_flt(file_or_fd, v, key=''):
    """write_vec_flt(f, v, key='')
    Write a binary Kaldi vector (float32 or float64) to a filename or stream.

    file_or_fd: filename or opened binary descriptor,
    v: the vector to be stored,
    key: optional utterance id, written before the vector for ark files.
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    if sys.version_info[0] == 3:
        assert fd.mode == 'wb'
    try:
        if key != '':
            fd.write((key + ' ').encode())  # ark entries are '<key> <vector>'
        fd.write('\x00B'.encode())  # binary flag
        if v.dtype == 'float32':
            header = 'FV '
        elif v.dtype == 'float64':
            header = 'DV '
        else:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
        fd.write(header.encode())
        fd.write('\x04'.encode())  # int32 size marker
        fd.write(struct.pack(np.dtype('uint32').char, v.shape[0]))
        fd.write(v.tobytes())
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_mat_scp(file_or_fd):
    """generator(key, mat) = read_mat_scp(file_or_fd)
    Yield (key, matrix) tuples according to a Kaldi scp.

    file_or_fd: scp, gzipped scp, pipe or opened file descriptor.

    Read scp to a dictionary:
        d = { key: mat for key, mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        for line in fd:
            # scp lines are '<key> <rxfilename>'
            (key, rxfile) = line.decode().split(' ')
            yield (key, read_mat(rxfile))
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_mat_ark(file_or_fd):
    """generator(key, mat) = read_mat_ark(file_or_fd)
    Yield (key, matrix) tuples read from an ark file/stream.

    file_or_fd: ark, gzipped ark, pipe or opened file descriptor.

    Read ark to a dictionary:
        d = { key: mat for key, mat in kaldi_io.read_mat_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        key = read_key(fd)
        while key:
            yield (key, read_mat(fd))
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_mat(file_or_fd):
    """[mat] = read_mat(file_or_fd)
    Read a single Kaldi matrix, ascii or binary.

    file_or_fd: file, gzipped file, pipe or opened file descriptor.
    """
    fd = open_or_fd(file_or_fd)
    try:
        flag = fd.read(2).decode()
        if flag == '\x00B':
            mat = _read_mat_binary(fd)
        else:
            # ascii matrices start with ' ['
            assert flag == ' ['
            mat = _read_mat_ascii(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
    return mat
|
def _read_mat_binary(fd):
header = fd.read(3).decode()
if header.startswith('CM'):
return _read_compressed_mat(fd, header)
elif (header == 'FM '):
sample_size = 4
elif (header == 'DM '):
sample_size = 8
else:
raise UnknownMatrixHeader(("The header contained '%s'" % header))
assert (sample_size > 0)
(s1, rows, s2, cols) = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
buf = fd.read(((rows * cols) * sample_size))
if (sample_size == 4):
vec = np.frombuffer(buf, dtype='float32')
elif (sample_size == 8):
vec = np.frombuffer(buf, dtype='float64')
else:
raise BadSampleSize
mat = np.reshape(vec, (rows, cols))
return mat
|
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0):
raise BadInputFormat
if (len(line.strip()) == 0):
continue
arr = line.strip().split()
if (arr[(- 1)] != ']'):
rows.append(np.array(arr, dtype='float32'))
else:
rows.append(np.array(arr[:(- 1)], dtype='float32'))
mat = np.vstack(rows)
return mat
|
def _read_compressed_mat(fd, format):
    """Read a Kaldi compressed matrix ('CM ' format) and return it as float32.

    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...)

    Layout: a 16-byte global header (min value, range, rows, cols), then one
    8-byte per-column header of four uint16 percentiles, then the data as one
    uint8 per element, stored column-major.
    """
    assert (format == 'CM ')
    global_header = np.dtype([('minvalue', 'float32'), ('range', 'float32'), ('num_rows', 'int32'), ('num_cols', 'int32')])
    per_col_header = np.dtype([('percentile_0', 'uint16'), ('percentile_25', 'uint16'), ('percentile_75', 'uint16'), ('percentile_100', 'uint16')])
    def uint16_to_float(value, min, range):
        # 1.52590218966964e-05 == 1/65535: maps uint16 back into [min, min+range]
        return np.float32((min + ((range * 1.52590218966964e-05) * value)))
    def uint8_to_float_v2(vec, p0, p25, p75, p100):
        # piecewise-linear dequantization: uint8 ranges [0,64], (64,192], (192,255]
        # map onto the percentile intervals [p0,p25], (p25,p75], (p75,p100]
        mask_0_64 = (vec <= 64)
        mask_65_192 = np.all([(vec > 64), (vec <= 192)], axis=0)
        mask_193_255 = (vec > 192)
        ans = np.empty(len(vec), dtype='float32')
        ans[mask_0_64] = (p0 + (((p25 - p0) / 64.0) * vec[mask_0_64]))
        ans[mask_65_192] = (p25 + (((p75 - p25) / 128.0) * (vec[mask_65_192] - 64)))
        ans[mask_193_255] = (p75 + (((p100 - p75) / 63.0) * (vec[mask_193_255] - 192)))
        return ans
    (globmin, globrange, rows, cols) = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
    col_headers = np.frombuffer(fd.read((cols * 8)), dtype=per_col_header, count=cols)
    # data is stored column-major: one row of `data` per matrix column
    data = np.reshape(np.frombuffer(fd.read((cols * rows)), dtype='uint8', count=(cols * rows)), newshape=(cols, rows))
    mat = np.empty((cols, rows), dtype='float32')
    for (i, col_header) in enumerate(col_headers):
        # dequantize the four percentile anchors, then the column's uint8 values
        col_header_flt = [uint16_to_float(percentile, globmin, globrange) for percentile in col_header]
        mat[i] = uint8_to_float_v2(data[i], *col_header_flt)
    # transpose back to (rows, cols)
    return mat.T
|
def write_mat(file_or_fd, m, key=''):
    """write_mat(f, m, key='')
    Write a binary Kaldi matrix (float32 or float64) to a filename or stream.

    file_or_fd: filename or opened binary descriptor,
    m: the 2-D matrix to be stored,
    key: optional utterance id, written before the matrix for ark files.
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    if sys.version_info[0] == 3:
        assert fd.mode == 'wb'
    try:
        if key != '':
            fd.write((key + ' ').encode())  # ark entries are '<key> <matrix>'
        fd.write('\x00B'.encode())  # binary flag
        if m.dtype == 'float32':
            header = 'FM '
        elif m.dtype == 'float64':
            header = 'DM '
        else:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
        fd.write(header.encode())
        # each dimension: int8 size marker (4) followed by the int32 value
        fd.write('\x04'.encode())
        fd.write(struct.pack(np.dtype('uint32').char, m.shape[0]))  # rows
        fd.write('\x04'.encode())
        fd.write(struct.pack(np.dtype('uint32').char, m.shape[1]))  # cols
        fd.write(m.tobytes())
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_cnet_ark(file_or_fd):
    """Alias of 'read_post_ark()'; a confusion network ('cnet') is stored as a Posterior."""
    return read_post_ark(file_or_fd)
|
def read_post_ark(file_or_fd):
    """generator(key, vec<vec<int,float>>) = read_post_ark(file)
    Yield (key, posterior) tuples read from an ark file.

    file_or_fd: ark, gzipped ark, pipe or opened file descriptor.

    Read ark to a dictionary:
        d = { key: post for key, post in kaldi_io.read_post_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        key = read_key(fd)
        while key:
            yield (key, read_post(fd))
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_post(file_or_fd):
    """[post] = read_post(file_or_fd)
    Read a single binary Kaldi 'Posterior'.

    A Posterior is C++ vector<vector<tuple<int,float>>>: the outer vector is
    usually the time axis, each inner vector holds (index, value) records for
    that frame; the float can be a probability or any numeric value.

    Returns a list of lists of (int, float) tuples.
    """
    fd = open_or_fd(file_or_fd)
    ans = []
    flag = fd.read(2).decode()
    assert flag == '\x00B'
    assert fd.read(1).decode() == '\x04'  # int32 size marker
    outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
    for frame in range(outer_vec_size):
        assert fd.read(1).decode() == '\x04'
        inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
        # each record is 10 bytes: int8 marker, int32 index, int8 marker, float32 value
        records = np.frombuffer(fd.read(inner_vec_size * 10), dtype=[('size_idx', 'int8'), ('idx', 'int32'), ('size_post', 'int8'), ('post', 'float32')], count=inner_vec_size)
        assert records[0]['size_idx'] == 4
        assert records[0]['size_post'] == 4
        ans.append(records[['idx', 'post']].tolist())
    if fd is not file_or_fd:
        fd.close()
    return ans
|
def read_cntime_ark(file_or_fd):
    """generator(key, vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
    Yield (key, cntime) tuples read from an ark file.

    file_or_fd: file, gzipped file, pipe or opened file descriptor.

    Read ark to a dictionary:
        d = { key: time for key, time in kaldi_io.read_cntime_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        key = read_key(fd)
        while key:
            yield (key, read_cntime(fd))
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
|
def read_cntime(file_or_fd):
    """[cntime] = read_cntime(file_or_fd)
    Read a single binary Kaldi 'Confusion Network time info' record:
    C++ vector<tuple<float,float>> of (begin, end) times per confusion bin.

    Binary layout: '<num-bins> <beg1> <end1> <beg2> <end2> ...'

    Returns a list of (float, float) tuples.
    """
    fd = open_or_fd(file_or_fd)
    flag = fd.read(2).decode()
    assert flag == '\x00B'
    assert fd.read(1).decode() == '\x04'  # int32 size marker
    vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
    # each record is 10 bytes: int8 marker, float32 begin, int8 marker, float32 end
    records = np.frombuffer(fd.read(vec_size * 10), dtype=[('size_beg', 'int8'), ('t_beg', 'float32'), ('size_end', 'int8'), ('t_end', 'float32')], count=vec_size)
    assert records[0]['size_beg'] == 4
    assert records[0]['size_end'] == 4
    ans = records[['t_beg', 't_end']].tolist()
    if fd is not file_or_fd:
        fd.close()
    return ans
|
def read_segments_as_bool_vec(segments_file):
    """[ bool_vec ] = read_segments_as_bool_vec(segments_file)
    Convert a Kaldi 'segments' file for ONE recording into a per-frame bool mask.

    File format: '<utt> <rec> <t-beg> <t-end>' per line, times in seconds,
    assumed 100 frames/second. The returned vector has one entry per frame up
    to the last segment end; True inside segments, False between them.
    Segments are expected to be sorted and non-overlapping.
    """
    segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
    assert (len(segs) > 0)
    # all lines must refer to the same recording
    assert (len(np.unique([rec[1] for rec in segs])) == 1)
    # seconds -> frame indices at 100 fps
    start = np.rint([(100 * rec[2]) for rec in segs]).astype(int)
    end = np.rint([(100 * rec[3]) for rec in segs]).astype(int)
    # build alternating False/True runs: gap-before-segment, segment, gap, segment, ...
    # run lengths are (start_i - previous_end, end_i - start_i) pairs, flattened
    frms = np.repeat(np.r_[(np.tile([False, True], len(end)), False)], np.r_[(np.c_[((start - np.r_[(0, end[:(- 1)])]), (end - start))].flat, 0)])
    # sanity: total True frames must equal total segment duration
    assert (np.sum((end - start)) == np.sum(frms))
    return frms
|
def setup_logs(save_dir, run_name):
    """Configure and return the 'cdc' logger, logging to console and a file.

    save_dir: directory the log file is written into.
    run_name: basename of the log file; output goes to <save_dir>/<run_name>.log.

    Returns the configured logging.Logger.
    """
    logger = logging.getLogger('cdc')
    logger.setLevel(logging.INFO)
    # drop previously-attached handlers so repeated calls (e.g. multiple runs
    # in one process) don't duplicate every log line
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    log_file = os.path.join(save_dir, run_name + '.log')
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    fh = logging.FileHandler(log_file)
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    # FIX: the console handler previously had no formatter, so console output
    # lacked the timestamp the file output had
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
|
class ScheduledOptim(object):
    """A simple optimizer wrapper implementing Noam-style warmup LR scheduling.

    The learning rate follows lr = d_model^-0.5 * min(step^-0.5,
    warmup^-1.5 * step): linear warmup for `n_warmup_steps`, then inverse
    square-root decay.
    """

    def __init__(self, optimizer, n_warmup_steps):
        self.optimizer = optimizer
        self.d_model = 128          # model dimensionality used by the schedule
        self.n_warmup_steps = n_warmup_steps
        self.n_current_steps = 0
        self.delta = 1              # per-call step increment (see increase_delta)

    def state_dict(self):
        """Return the inner optimizer's state dict (for checkpointing)."""
        # BUG FIX: the original was missing the return and always yielded None
        return self.optimizer.state_dict()

    def step(self):
        """Step by the inner optimizer."""
        self.optimizer.step()

    def zero_grad(self):
        """Zero out the gradients by the inner optimizer."""
        self.optimizer.zero_grad()

    def increase_delta(self):
        """Double the step increment applied by update_learning_rate()."""
        self.delta *= 2

    def update_learning_rate(self):
        """Advance the schedule by `delta` steps, apply and return the new LR."""
        self.n_current_steps += self.delta
        new_lr = np.power(self.d_model, -0.5) * np.min([
            np.power(self.n_current_steps, -0.5),
            np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = new_lr
        return new_lr
|
def prediction_spk(args, cdc_model, spk_model, device, data_loader, batch_size, frame_window):
    """Evaluate the speaker classifier stacked on frozen CDC features.

    Runs data_loader through cdc_model then spk_model, accumulating summed NLL
    loss and frame-level accuracy, normalized by total frame count and logged.

    args and batch_size are unused here; kept for interface compatibility.
    """
    logger.info('Starting Evaluation')
    cdc_model.eval()
    spk_model.eval()
    total_loss = 0
    total_acc = 0
    with torch.no_grad():
        for [data, target] in data_loader:
            data = data.float().unsqueeze(1).to(device)
            target = target.to(device)
            hidden = cdc_model.init_hidden(len(data))
            (output, hidden) = cdc_model.predict(data, hidden)
            data = output.contiguous().view((-1, 256))  # flatten frames; CDC feature dim is 256
            target = target.view((-1,))
            output = spk_model.forward(data)
            # FIX: size_average=False is deprecated; reduction='sum' is equivalent.
            # Sum (not mean) so we can normalize by the total frame count below.
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            total_acc += pred.eq(target.view_as(pred)).sum().item()
    total_loss /= (len(data_loader.dataset) * frame_window)
    total_acc /= (1.0 * len(data_loader.dataset) * frame_window)
    logger.info('===> Final predictions done. Here is a snippet')
    logger.info('===> Evaluation set: Average loss: {:.4f}\tAccuracy: {:.4f}\n'.format(total_loss, total_acc))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.