# NOTE: extraction artifact removed here ("code stringlengths 17 6.64M | |---|");
# this file is a concatenation of several evaluation / model source chunks.
class CustomDataset(Dataset):
    """Dataset serving waveforms listed in a custom evaluation list file.

    Each line of ``eval_list_file`` is a path to a wav file. ``__getitem__``
    returns the waveform twice: resampled to the global ``FS`` and at its
    native sampling rate, plus the path itself.
    """

    def __init__(self, eval_list_file, **kwargs):
        super(CustomDataset, self).__init__()
        if not os.path.isfile(eval_list_file):
            raise ValueError('[Dataset] eval list file does not exist: {}'.format(eval_list_file))
        print('[Dataset] Reading custom eval list file: {}'.format(eval_list_file))
        # BUGFIX: use a context manager so the file handle is closed
        # deterministically (was `open(...).read()` with no close).
        with open(eval_list_file, 'r') as f:
            X = f.read().splitlines()
        print(('[Dataset] - number of data for custom test: ' + str(len(X))))
        self.X = X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate (librosa semantics).
        (wav, sr) = librosa.load(wav_path, sr=fs)
        return (wav, sr)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        wav_path = self.X[index]
        (wav_original, fs_original) = self._load_wav(wav_path, fs=None)
        (wav_resample, fs_resample) = self._load_wav(wav_path, fs=FS)
        return (wav_resample, wav_original, wav_path)

    def collate_fn(self, batch):
        # Sort by original-waveform length, longest first.
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        wav_paths = [sorted_batch[i][2] for i in range(bs)]
        return (wavs, wavs_2, None, None, None, wav_paths)
|
def get_basename(path):
    """Return the filename of *path* without its directory or extension."""
    filename = os.path.split(path)[-1]
    stem, _ext = os.path.splitext(filename)
    return stem
|
def get_number(basename):
    """Return the token after the first '_' in *basename*, or *basename*
    itself when it contains no underscore."""
    parts = basename.split('_')
    return parts[1] if len(parts) > 1 else basename
|
def _calculate_asv_score(model, file_list, gt_root, trgspk, threshold):
    """Run ASV accept/reject for every converted wav against its ground truth.

    Returns a dict mapping basename -> accept decision, plus the overall
    accept rate in percent.
    """
    results = {}
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        results[basename] = calculate_accept(cvt_wav_path, gt_wav_path, model, threshold)
    accept_rate = 100.0 * float(np.mean(np.array(list(results.values()))))
    return (results, accept_rate)
|
def _calculate_asr_score(model, device, file_list, groundtruths):
    """Transcribe every converted wav and accumulate CER/WER statistics.

    Returns per-utterance error details plus corpus-level CER and WER
    (both in percent).
    """
    keys = ['hits', 'substitutions', 'deletions', 'insertions']
    ers = {}
    c_results = dict.fromkeys(keys, 0)
    w_results = dict.fromkeys(keys, 0)
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        # NOTE(review): the first character of `number` is stripped here,
        # unlike the ASV path -- presumably to match transcript keys; confirm.
        groundtruth = groundtruths[number[1:]]
        (wav, _) = librosa.load(cvt_wav_path, sr=16000)
        transcription = transcribe(model, device, wav)
        (c_result, w_result, norm_gt, norm_trans) = calculate_measures(groundtruth, transcription)
        ers[basename] = [c_result['cer'] * 100.0, w_result['wer'] * 100.0, norm_trans, norm_gt]
        for k in keys:
            c_results[k] += c_result[k]
            w_results[k] += w_result[k]

    def error_rate(r):
        # (S + D + I) / (S + D + H), in percent.
        edits = r['substitutions'] + r['deletions'] + r['insertions']
        total = r['substitutions'] + r['deletions'] + r['hits']
        return float(edits) / float(total) * 100.0

    return (ers, error_rate(c_results), error_rate(w_results))
|
def _calculate_mcd_f0(file_list, gt_root, trgspk, f0min, f0max, results):
    """Worker: append ``[basename, mcd, f0rmse, f0corr, ddur]`` per file.

    *results* is typically a multiprocessing manager list shared across
    worker processes.
    """
    for cvt_wav_path in file_list:
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        (cvt_wav, cvt_fs) = librosa.load(cvt_wav_path, sr=None)
        (gt_wav, gt_fs) = librosa.load(gt_wav_path, sr=None)
        assert cvt_fs == gt_fs
        (mcd, f0rmse, f0corr, ddur) = calculate_mcd_f0(cvt_wav, gt_wav, gt_fs, f0min, f0max)
        results.append([basename, mcd, f0rmse, f0corr, ddur])
|
def get_parser():
    """Build the argument parser for the objective evaluation script."""
    parser = argparse.ArgumentParser(description='objective evaluation script.')
    parser.add_argument('--wavdir', required=True, type=str,
                        help='directory for converted waveforms')
    parser.add_argument('--trgspk', required=True, type=str,
                        help='target speaker')
    parser.add_argument('--data_root', type=str, default='./data',
                        help='directory of data')
    parser.add_argument('--log_path', type=str, default=None,
                        help='path of output log. If not specified, output to <wavdir>/obj.log')
    parser.add_argument('--n_jobs', default=10, type=int,
                        help='number of parallel jobs')
    return parser
|
def main():
    """Entry point: compute ASV / ASR (and, for task1, MCD/f0) metrics."""
    args = get_parser().parse_args()
    trgspk = args.trgspk
    # Speaker IDs with 'E' as the second character are English targets (task1).
    task = 'task1' if trgspk[1] == 'E' else 'task2'
    gt_root = os.path.join(args.data_root, 'vcc2020')
    f0_path = os.path.join(args.data_root, 'f0.yaml')
    threshold_path = os.path.join(args.data_root, 'thresholds.yaml')
    transcription_path = os.path.join(args.data_root, 'vcc2020', 'prompts', 'Eng_transcriptions.txt')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # f0 search range for the target speaker.
    with open(f0_path, 'r') as f:
        f0_all = yaml.load(f, Loader=yaml.FullLoader)
    f0min = f0_all[trgspk]['f0min']
    f0max = f0_all[trgspk]['f0max']

    # Ground-truth transcriptions, keyed by utterance id (first token per line).
    with open(transcription_path, 'r') as f:
        lines = f.read().splitlines()
    groundtruths = {line.split(' ')[0]: ' '.join(line.split(' ')[1:]) for line in lines}

    converted_files = sorted(find_files(args.wavdir, query='*300??*.wav'))
    print('number of utterances = {}'.format(len(converted_files)))

    # Load a cached ASV threshold if available; otherwise compute and cache it.
    threshold = None
    threshold_all = {}
    if os.path.exists(threshold_path):
        with open(threshold_path, 'r') as f:
            threshold_all = yaml.load(f, Loader=yaml.FullLoader)
        if threshold_all and (task in threshold_all):
            (equal_error_rate, threshold) = threshold_all[task]
    if not threshold:
        (equal_error_rate, threshold) = calculate_threshold(gt_root, task, device)
        if threshold_all:
            threshold_all[task] = [equal_error_rate, threshold]
        else:
            threshold_all = {task: [equal_error_rate, threshold]}
        with open(threshold_path, 'w') as f:
            yaml.safe_dump(threshold_all, f)
    print(f'[INFO]: Equal error rate: {equal_error_rate}')
    print(f'[INFO]: Threshold: {threshold}')

    print('Calculating ASV-based score...')
    asv_model = load_asv_model(device)
    (accept_results, accept_rate) = _calculate_asv_score(asv_model, converted_files, gt_root, trgspk, threshold)

    print('Calculating ASR-based score...')
    asr_model = load_asr_model(device)
    (ers, cer, wer) = _calculate_asr_score(asr_model, device, converted_files, groundtruths)

    if task == 'task1':
        print('Calculating MCD and f0-related scores...')
        # Split the file list across n_jobs worker processes; per-file rows
        # are gathered in a manager list shared between them.
        file_lists = np.array_split(converted_files, args.n_jobs)
        file_lists = [f_list.tolist() for f_list in file_lists]
        with mp.Manager() as manager:
            results = manager.list()
            processes = []
            for f in file_lists:
                p = mp.Process(target=_calculate_mcd_f0, args=(f, gt_root, trgspk, f0min, f0max, results))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
            results = sorted(results, key=(lambda x: x[0]))
            results = [(result + ers[result[0]]) + [accept_results[result[0]]] for result in results]
    else:
        results = []
        for f in converted_files:
            basename = get_basename(f)
            results.append(([basename] + ers[basename]) + [accept_results[basename]])

    log_path = args.log_path if args.log_path else os.path.join(args.wavdir, 'obj.log')
    # BUGFIX: mCER/mWER/mACCEPT were previously assigned only inside the
    # task1 branch, raising NameError when summarizing task2 results.
    mCER = cer
    mWER = wer
    mACCEPT = accept_rate
    with open(log_path, 'w') as f:
        if task == 'task1':
            mMCD = np.mean(np.array([result[1] for result in results]))
            mf0RMSE = np.mean(np.array([result[2] for result in results]))
            mf0CORR = np.mean(np.array([result[3] for result in results]))
            mDDUR = np.mean(np.array([result[4] for result in results]))
        for result in results:
            if task == 'task1':
                f.write('{} {:.2f} {:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
            elif task == 'task2':
                f.write('{} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
        if task == 'task1':
            print('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
            f.write('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
        elif task == 'task2':
            print('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
            f.write('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
|
def get_parser():
    """Build the argument parser for the result-extraction script."""
    parser = argparse.ArgumentParser(
        description='Extract results.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--upstream', type=str, required=True, help='upstream')
    parser.add_argument('--task', type=str, required=True,
                        choices=['task1', 'task2'], help='task')
    parser.add_argument('--tag', type=str, required=True, help='tag')
    parser.add_argument('--vocoder', type=str, required=True, help='vocoder name')
    parser.add_argument('--expdir', type=str, default='result/downstream', help='expdir')
    parser.add_argument('--start_epoch', default=4000, type=int)
    parser.add_argument('--end_epoch', default=10000, type=int)
    parser.add_argument('--step_epoch', default=1000, type=int)
    parser.add_argument('--out', '-O', type=str,
                        help='The output filename. If omitted, then output to sys.stdout')
    return parser
|
def grep(filepath, query):
    """Return all lines of *filepath* containing *query*, with trailing
    whitespace stripped."""
    with open(filepath, 'r') as f:
        return [line.rstrip() for line in f if query in line]
|
def encoder_init(m):
    """Initialize encoder parameters: Xavier-uniform on Conv1d weights,
    scaled by the ReLU gain; other module types are left untouched."""
    if isinstance(m, torch.nn.Conv1d):
        gain = torch.nn.init.calculate_gain('relu')
        torch.nn.init.xavier_uniform_(m.weight, gain)
|
class Taco2Encoder(torch.nn.Module):
    """Encoder module of the Tacotron2 TTS model.

    Reference:
    _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_
    https://arxiv.org/abs/1712.05884
    """

    def __init__(self, idim, elayers=1, eunits=512, econv_layers=3, econv_chans=512,
                 econv_filts=5, use_batch_norm=True, use_residual=False, dropout_rate=0.5):
        """Initialize Tacotron2 encoder module.

        Args:
            idim (int): Dimension of the inputs.
            elayers (int, optional): Number of encoder BLSTM layers.
            eunits (int, optional): Number of encoder BLSTM units
                (total over both directions).
            econv_layers (int, optional): Number of encoder conv layers.
            econv_filts (int, optional): Encoder conv filter size.
            econv_chans (int, optional): Number of encoder conv channels.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_residual (bool, optional): Whether to use residual connection.
            dropout_rate (float, optional): Dropout rate.
        """
        super(Taco2Encoder, self).__init__()
        self.idim = idim
        self.use_residual = use_residual
        # Project inputs to the conv channel dimension up front.
        self.input_layer = torch.nn.Linear(idim, econv_chans)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                ichans = econv_chans
                modules = [torch.nn.Conv1d(ichans, econv_chans, econv_filts, stride=1,
                                           padding=(econv_filts - 1) // 2, bias=False)]
                if use_batch_norm:
                    modules.append(torch.nn.BatchNorm1d(econv_chans))
                modules += [torch.nn.ReLU(), torch.nn.Dropout(dropout_rate)]
                self.convs += [torch.nn.Sequential(*modules)]
        else:
            self.convs = None
        if elayers > 0:
            # BUGFIX: this previously read `econv_chans if econv_layers != 0
            # else embed_dim`, but `embed_dim` is undefined in this scope
            # (NameError when econv_layers == 0). The BLSTM input width is
            # always econv_chans because self.input_layer projects to it.
            iunits = econv_chans
            self.blstm = torch.nn.LSTM(iunits, eunits // 2, elayers,
                                       batch_first=True, bidirectional=True)
        else:
            self.blstm = None
        self.apply(encoder_init)

    def forward(self, xs, ilens=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Padded acoustic feature sequences (B, Lmax, idim).
            ilens (LongTensor or list): Length of each sequence; required
                when the BLSTM is present (used for sequence packing).

        Returns:
            Tensor (B, Lmax, C) when no BLSTM is configured; otherwise a
            tuple of (padded BLSTM outputs, output lengths).
        """
        xs = self.input_layer(xs).transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    xs += self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs = pack_padded_sequence(xs.transpose(1, 2), ilens.cpu(), batch_first=True)
        self.blstm.flatten_parameters()
        (xs, _) = self.blstm(xs)
        (xs, hlens) = pad_packed_sequence(xs, batch_first=True)
        return (xs, hlens)
|
class Taco2Prenet(torch.nn.Module):
    """Prenet module for the decoder of Tacotron2.

    A stack of Linear+ReLU layers whose dropout is applied unconditionally —
    even in evaluation mode — which helps alleviate the exposure-bias problem
    of autoregressive decoding. See `Natural TTS Synthesis by Conditioning
    WaveNet on Mel Spectrogram Predictions` (https://arxiv.org/abs/1712.05884).
    """

    def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
        super(Taco2Prenet, self).__init__()
        self.dropout_rate = dropout_rate
        self.prenet = torch.nn.ModuleList()
        for layer_idx in range(n_layers):
            in_dim = idim if layer_idx == 0 else n_units
            self.prenet.append(
                torch.nn.Sequential(torch.nn.Linear(in_dim, n_units), torch.nn.ReLU()))

    def forward(self, x):
        # Dropout is applied even in eval mode (deliberate, see class doc).
        if len(self.prenet) == 0:
            return F.dropout(x, self.dropout_rate)
        for layer in self.prenet:
            x = F.dropout(layer(x), self.dropout_rate)
        return x
|
class RNNLayer(nn.Module):
    """RNN wrapper with optional layer norm, dropout, tanh projection and
    time-downsampling of the padded output sequence."""

    def __init__(self, input_dim, module, bidirection, dim, dropout, layer_norm, sample_rate, proj):
        super(RNNLayer, self).__init__()
        self.out_dim = 2 * dim if bidirection else dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.sample_rate = sample_rate
        self.proj = proj
        # e.g. module='LSTM' -> nn.LSTM
        rnn_cls = getattr(nn, module.upper())
        self.layer = rnn_cls(input_dim, dim, bidirectional=bidirection,
                             num_layers=1, batch_first=True)
        if self.layer_norm:
            self.ln = nn.LayerNorm(self.out_dim)
        if self.dropout > 0:
            self.dp = nn.Dropout(p=dropout)
        if self.proj:
            self.pj = nn.Linear(self.out_dim, self.out_dim)

    def forward(self, input_x, x_len):
        if not self.training:
            self.layer.flatten_parameters()
        packed = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False)
        (output, _) = self.layer(packed)
        (output, x_len) = pad_packed_sequence(output, batch_first=True)
        if self.layer_norm:
            output = self.ln(output)
        if self.dropout > 0:
            output = self.dp(output)
        if self.sample_rate > 1:
            # Time-downsample by dropping frames.
            (output, x_len) = downsample(output, x_len, self.sample_rate, 'drop')
        if self.proj:
            output = torch.tanh(self.pj(output))
        return (output, x_len)
|
class RNNCell(nn.Module):
    """Single-step RNN cell wrapper with optional layer norm, dropout and
    tanh projection on the hidden state."""

    def __init__(self, input_dim, module, dim, dropout, layer_norm, proj):
        super(RNNCell, self).__init__()
        self.out_dim = dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.proj = proj
        # e.g. module='LSTM' -> nn.LSTMCell
        self.cell = getattr(nn, module.upper() + 'Cell')(input_dim, dim)
        if self.layer_norm:
            self.ln = nn.LayerNorm(dim)
        if self.dropout > 0:
            self.dp = nn.Dropout(p=dropout)
        if self.proj:
            self.pj = nn.Linear(dim, dim)

    def forward(self, input_x, z, c):
        (new_z, new_c) = self.cell(input_x, (z, c))
        if self.layer_norm:
            new_z = self.ln(new_z)
        if self.dropout > 0:
            new_z = self.dp(new_z)
        if self.proj:
            new_z = torch.tanh(self.pj(new_z))
        return (new_z, new_c)
|
class Model(nn.Module):
    """Acoustic model: upstream features -> target acoustic features, with an
    optional autoregressive (AR) decoder loop.

    NOTE(review): `stats` must expose `mean_` / `scale_` numpy arrays —
    presumably a fitted sklearn StandardScaler; confirm at the call site.
    """

    def __init__(self, input_dim, output_dim, resample_ratio, stats, ar, encoder_type, hidden_dim, lstmp_layers, lstmp_dropout_rate, lstmp_proj_dim, lstmp_layernorm, prenet_layers=2, prenet_dim=256, prenet_dropout_rate=0.5, **kwargs):
        super(Model, self).__init__()
        self.ar = ar
        self.encoder_type = encoder_type
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.resample_ratio = resample_ratio
        # Target-feature normalization statistics; buffers so they follow the
        # module across devices but are not trained.
        self.register_buffer('target_mean', torch.from_numpy(stats.mean_).float())
        self.register_buffer('target_scale', torch.from_numpy(stats.scale_).float())
        if (encoder_type == 'taco2'):
            self.encoder = Taco2Encoder(input_dim, eunits=hidden_dim)
        elif (encoder_type == 'ffn'):
            self.encoder = torch.nn.Sequential(torch.nn.Linear(input_dim, hidden_dim), torch.nn.ReLU())
        else:
            raise ValueError('Encoder type not supported.')
        self.prenet = Taco2Prenet(idim=output_dim, n_layers=prenet_layers, n_units=prenet_dim, dropout_rate=prenet_dropout_rate)
        # Stack of projected LSTM layers: step cells for the AR path, whole
        # sequence layers otherwise.
        self.lstmps = nn.ModuleList()
        for i in range(lstmp_layers):
            if ar:
                # The first AR layer consumes the encoder state concatenated
                # with the prenet-processed previous output frame.
                prev_dim = (output_dim if (prenet_layers == 0) else prenet_dim)
                rnn_input_dim = ((hidden_dim + prev_dim) if (i == 0) else hidden_dim)
                rnn_layer = RNNCell(rnn_input_dim, 'LSTM', hidden_dim, lstmp_dropout_rate, lstmp_layernorm, proj=True)
            else:
                rnn_input_dim = hidden_dim
                rnn_layer = RNNLayer(rnn_input_dim, 'LSTM', False, hidden_dim, lstmp_dropout_rate, lstmp_layernorm, sample_rate=1, proj=True)
            self.lstmps.append(rnn_layer)
        # Final linear projection to the target feature dimension.
        self.proj = torch.nn.Linear(hidden_dim, output_dim)

    def normalize(self, x):
        # Standardize a frame with the target-domain statistics.
        return ((x - self.target_mean) / self.target_scale)

    def forward(self, features, lens, targets=None):
        """Calculate forward propagation.

        Args:
            features: Batch of input feature sequences (B, Lmax, idim).
            lens: Lengths of each input sequence (scaled by resample_ratio).
            targets: Padded target feature sequences (B, Lmax, odim); when
                given in AR mode, used for teacher forcing.
        """
        B = features.shape[0]
        # Resample upstream features in time to the target frame rate.
        features = features.permute(0, 2, 1)
        resampled_features = F.interpolate(features, scale_factor=self.resample_ratio)
        resampled_features = resampled_features.permute(0, 2, 1)
        lens = (lens * self.resample_ratio)
        if (self.encoder_type == 'taco2'):
            (encoder_states, lens) = self.encoder(resampled_features, lens)
        elif (self.encoder_type == 'ffn'):
            encoder_states = self.encoder(resampled_features)
        if self.ar:
            # Autoregressive decoding: one frame at a time, feeding the
            # (normalized) previous frame back through the prenet.
            if (targets is not None):
                # Time-major view for teacher forcing.
                targets = targets.transpose(0, 1)
            predicted_list = []
            # Per-layer hidden (z) and cell (c) states, zero-initialized.
            c_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            z_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            for _ in range(1, len(self.lstmps)):
                c_list += [encoder_states.new_zeros(B, self.hidden_dim)]
                z_list += [encoder_states.new_zeros(B, self.hidden_dim)]
            prev_out = encoder_states.new_zeros(B, self.output_dim)
            for (t, encoder_state) in enumerate(encoder_states.transpose(0, 1)):
                concat = torch.cat([encoder_state, self.prenet(prev_out)], dim=1)
                for (i, lstmp) in enumerate(self.lstmps):
                    lstmp_input = (concat if (i == 0) else z_list[(i - 1)])
                    (z_list[i], c_list[i]) = lstmp(lstmp_input, z_list[i], c_list[i])
                predicted_list += [self.proj(z_list[(- 1)]).view(B, self.output_dim, (- 1))]
                # Teacher forcing when targets are given, otherwise feed back
                # the model's own prediction.
                prev_out = (targets[t] if (targets is not None) else predicted_list[(- 1)].squeeze((- 1)))
                prev_out = self.normalize(prev_out)
            predicted = torch.cat(predicted_list, dim=2)
            predicted = predicted.transpose(1, 2)
        else:
            # Non-AR path: run the whole sequence through the stacked layers.
            predicted = encoder_states
            for (i, lstmp) in enumerate(self.lstmps):
                (predicted, lens) = lstmp(predicted, lens)
            predicted = self.proj(predicted)
        return (predicted, lens)
|
def low_cut_filter(x, fs, cutoff=70):
    """Apply a low-cut (high-pass) FIR filter to a waveform.

    Args:
        x (ndarray): Waveform sequence.
        fs (int): Sampling frequency.
        cutoff (float): Cutoff frequency of the low-cut filter.

    Returns:
        ndarray: Low-cut filtered waveform sequence.
    """
    nyquist = fs // 2
    norm_cutoff = cutoff / nyquist
    # 255-tap high-pass FIR; pass_zero=False rejects DC / low frequencies.
    taps = firwin(255, norm_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
|
def spc2npow(spectrogram):
    """Calculate a normalized power sequence (dB) from a spectrogram.

    Args:
        spectrogram: array, shape (T, fftlen / 2 + 1); spectrum envelopes.

    Returns:
        array, shape (T,): per-frame power normalized by the mean power, in dB.
    """
    frame_pow = np.apply_along_axis(_spvec2pow, 1, spectrogram)
    mean_pow = np.mean(frame_pow)
    return 10.0 * np.log10(frame_pow / mean_pow)
|
def _spvec2pow(specvec):
'Convert a spectrum envelope into a power\n\n Parameters\n ----------\n specvec : vector, shape (`fftlen / 2 + 1`)\n Vector of specturm envelope |H(w)|^2\n\n Return\n ------\n power : scala,\n Power of a frame\n\n '
fftl2 = (len(specvec) - 1)
fftl = (fftl2 * 2)
power = (specvec[0] + specvec[fftl2])
for k in range(1, fftl2):
power += (2.0 * specvec[k])
power /= fftl
return power
|
def extfrm(data, npow, power_threshold=(- 20)):
    """Extract frames whose normalized power exceeds a threshold.

    Args:
        data: array, shape (T, dim); input data.
        npow: array, shape (T,); normalized power sequence.
        power_threshold (float, optional): power threshold [dB]; default -20.

    Returns:
        array, shape (T_ext, dim) with T_ext <= T: frames above the threshold.

    Raises:
        ValueError: if data and npow have different lengths.
    """
    T = data.shape[0]
    if T != len(npow):
        # BUGFIX: the original raised a plain string, which is itself a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError('Length of two vectors is different.')
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T
    return extdata
|
def world_extract(x, fs, f0min, f0max):
    """Extract WORLD features (spectrum, mcep, aperiodicity, f0, power).

    The waveform *x* is rescaled to int16 range and low-cut filtered before
    analysis; f0min/f0max bound the f0 search.
    """
    # Rescale to int16 amplitude and remove DC / rumble.
    scaled = np.array(x * np.iinfo(np.int16).max, dtype=np.float64)
    scaled = low_cut_filter(scaled, fs)
    # WORLD analysis: f0 (harvest), envelope (cheaptrick), aperiodicity (d4c).
    (f0, time_axis) = pw.harvest(scaled, fs, f0_floor=f0min, f0_ceil=f0max, frame_period=MCEP_SHIFT)
    sp = pw.cheaptrick(scaled, f0, time_axis, fs, fft_size=MCEP_FFTL)
    ap = pw.d4c(scaled, f0, time_axis, fs, fft_size=MCEP_FFTL)
    # Mel-cepstrum and normalized power, used later for DTW / silence removal.
    mcep = pysptk.sp2mc(sp, MCEP_DIM, MCEP_ALPHA)
    npow = spc2npow(sp)
    return {'sp': sp, 'mcep': mcep, 'ap': ap, 'f0': f0, 'npow': npow}
|
def calculate_mcd_f0(x, y, fs, f0min, f0max):
    """Compute MCD, f0 RMSE, f0 correlation and duration difference between
    two waveforms.

    x and y must be in range [-1, 1].

    NOTE(review): callers pass (converted, ground_truth), yet the locals
    derived from x are named `gt_*` and those from y `cvt_*` — a naming
    mismatch to verify; the metrics are computed on DTW-aligned frames.

    Returns:
        (mcd [dB], f0rmse, f0corr, ddur [s]); the f0 metrics are NaN when no
        voiced frames are found.
    """
    gt_feats = world_extract(x, fs, f0min, f0max)
    cvt_feats = world_extract(y, fs, f0min, f0max)
    # --- MCD: DTW-align the non-silent mcep frames (power-based selection) ---
    gt_mcep_nonsil_pow = extfrm(gt_feats['mcep'], gt_feats['npow'])
    cvt_mcep_nonsil_pow = extfrm(cvt_feats['mcep'], cvt_feats['npow'])
    (_, path) = fastdtw(cvt_mcep_nonsil_pow, gt_mcep_nonsil_pow, dist=scipy.spatial.distance.euclidean)
    twf_pow = np.array(path).T
    cvt_mcep_dtw_pow = cvt_mcep_nonsil_pow[twf_pow[0]]
    gt_mcep_dtw_pow = gt_mcep_nonsil_pow[twf_pow[1]]
    # Standard mel-cepstral distortion formula (in dB).
    diff2sum = np.sum(((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2), 1)
    mcd = np.mean(((10.0 / np.log(10.0)) * np.sqrt((2 * diff2sum))), 0)
    # --- f0 metrics: DTW-align voiced frames (f0 > 0) via their mceps ---
    gt_nonsil_f0_idx = np.where((gt_feats['f0'] > 0))[0]
    cvt_nonsil_f0_idx = np.where((cvt_feats['f0'] > 0))[0]
    try:
        gt_mcep_nonsil_f0 = gt_feats['mcep'][gt_nonsil_f0_idx]
        cvt_mcep_nonsil_f0 = cvt_feats['mcep'][cvt_nonsil_f0_idx]
        (_, path) = fastdtw(cvt_mcep_nonsil_f0, gt_mcep_nonsil_f0, dist=scipy.spatial.distance.euclidean)
        twf_f0 = np.array(path).T
        cvt_f0_dtw = cvt_feats['f0'][cvt_nonsil_f0_idx][twf_f0[0]]
        gt_f0_dtw = gt_feats['f0'][gt_nonsil_f0_idx][twf_f0[1]]
        f0rmse = np.sqrt(np.mean(((cvt_f0_dtw - gt_f0_dtw) ** 2)))
        f0corr = scipy.stats.pearsonr(cvt_f0_dtw, gt_f0_dtw)[0]
    except ValueError:
        # No voiced frames in one of the signals -> f0 metrics undefined.
        logging.warning('No nonzero f0 is found. Skip f0rmse f0corr computation and set them to NaN. This might due to unconverge training. Please tune the training time and hypers.')
        f0rmse = np.nan
        f0corr = np.nan
    # --- DDUR: absolute difference of silence-trimmed durations, in seconds ---
    (x_trim, _) = librosa.effects.trim(y=x)
    (y_trim, _) = librosa.effects.trim(y=y)
    ddur = float((abs((len(x_trim) - len(y_trim))) / fs))
    return (mcd, f0rmse, f0corr, ddur)
|
def load_asr_model(device):
    """Load the pre-trained wav2vec2 ASR model and its tokenizer onto *device*."""
    print(f'[INFO]: Load the pre-trained ASR by {ASR_PRETRAINED_MODEL}.')
    return {
        'model': Wav2Vec2ForCTC.from_pretrained(ASR_PRETRAINED_MODEL).to(device),
        'tokenizer': Wav2Vec2Tokenizer.from_pretrained(ASR_PRETRAINED_MODEL),
    }
|
def normalize_sentence(sentence):
    """Normalize a sentence for scoring: uppercase, strip punctuation,
    collapse whitespace and trim."""
    sentence = sentence.upper()
    sentence = jiwer.RemovePunctuation()(sentence)
    sentence = jiwer.RemoveWhiteSpace(replace_by_space=True)(sentence)
    sentence = jiwer.RemoveMultipleSpaces()(sentence)
    sentence = jiwer.Strip()(sentence)
    # NOTE: the original called .upper() a second time here; none of the
    # intermediate transforms changes letter case, so the duplicate call
    # was removed.
    return sentence
|
def calculate_measures(groundtruth, transcription):
    """Calculate character/word measures (hits, subs, inserts, deletes) for
    one sentence pair.

    Returns the character-level result dict, the word-level result dict, and
    both normalized sentences.
    """
    norm_gt = normalize_sentence(groundtruth)
    norm_tr = normalize_sentence(transcription)
    c_result = jiwer.cer(norm_gt, norm_tr, return_dict=True)
    w_result = jiwer.compute_measures(norm_gt, norm_tr)
    return (c_result, w_result, norm_gt, norm_tr)
|
def transcribe(model, device, wav):
    """Greedy-decode one 16 kHz waveform with the wav2vec2 ASR model."""
    tokenizer = model['tokenizer']
    inputs = tokenizer(wav, sampling_rate=16000, return_tensors='pt', padding='longest')
    logits = model['model'](inputs.input_values.to(device),
                            attention_mask=inputs.attention_mask.to(device)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return tokenizer.batch_decode(predicted_ids)[0]
|
def load_asv_model(device):
    """Load the VoiceEncoder speaker-embedding model onto *device*."""
    return VoiceEncoder().to(device)
|
def get_embedding(wav_path, encoder):
    """Compute a speaker embedding for the waveform at *wav_path*."""
    processed_wav = preprocess_wav(wav_path)
    return encoder.embed_utterance(processed_wav)
|
def get_cosine_similarity(x_emb, y_emb):
    """Cosine similarity between two embedding vectors."""
    denom = np.linalg.norm(x_emb) * np.linalg.norm(y_emb)
    return np.inner(x_emb, y_emb) / denom
|
def generate_sample(embeddings, this_spk, other_spks, label):
    """Pair every embedding of *this_spk* with every embedding of *other_spks*.

    Returns a list of (cosine_similarity, label) tuples; the caller uses
    label 1 for positive (same speaker) and 0 for negative samples.
    """
    this_spk_embs = embeddings[this_spk]
    other_spk_embs = list(chain(*[embeddings[spk] for spk in other_spks]))
    return [
        (get_cosine_similarity(emb_a, emb_b), label)
        for emb_a in this_spk_embs
        for emb_b in other_spk_embs
    ]
|
def calculate_equal_error_rate(labels, scores):
    """Compute the equal error rate and its decision threshold from an ROC curve.

    Args:
        labels: (N,) binary labels (0/1).
        scores: (N,) similarity scores in [-1, 1].
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)

    def fpr_gap(x):
        # Zero at the EER point, where FPR == 1 - TPR.
        return (1.0 - x) - interp1d(fpr, tpr)(x)

    equal_error_rate = brentq(fpr_gap, 0.0, 1.0)
    threshold = interp1d(fpr, thresholds)(equal_error_rate)
    return (equal_error_rate, threshold)
|
def calculate_threshold(data_root, task, device, query='E3*.wav'):
    """Estimate the ASV accept threshold (and EER) from ground-truth speakers.

    Builds speaker embeddings for all source+target speakers of *task*, forms
    positive (same-speaker) and negative (cross-speaker) similarity samples,
    and derives the equal-error-rate threshold from them.

    Raises:
        NotImplementedError: if *task* is neither 'task1' nor 'task2'.
    """
    if task == 'task1':
        spks = SRCSPKS + TRGSPKS_TASK1
    # BUGFIX: this was a separate `if`, so 'task1' always fell through to the
    # `else` below and raised NotImplementedError.
    elif task == 'task2':
        spks = SRCSPKS + TRGSPKS_TASK2
    else:
        raise NotImplementedError
    encoder = load_asv_model(device)
    embeddings = defaultdict(list)
    for spk in spks:
        wav_list = find_files(os.path.join(data_root, spk), query)
        # (also fixed a "spekaer" typo in this log message)
        print(f'Extracting speaker embedding for {len(wav_list)} files of {spk}')
        for wav_path in wav_list:
            embeddings[spk].append(get_embedding(wav_path, encoder))
    samples = []
    for spk in spks:
        negative_spks = [other for other in spks if other != spk]
        # Positive pairs within the speaker, negative pairs across speakers.
        samples += generate_sample(embeddings, spk, [spk], 1)
        samples += generate_sample(embeddings, spk, negative_spks, 0)
    print(f'[INFO]: Number of samples: {len(samples)}')
    scores = [x[0] for x in samples]
    labels = [x[1] for x in samples]
    (equal_error_rate, threshold) = calculate_equal_error_rate(labels, scores)
    return (float(equal_error_rate), float(threshold))
|
def calculate_accept(x_path, y_path, encoder, threshold):
    """Return True when the speaker similarity of the two wavs exceeds *threshold*."""
    similarity = get_cosine_similarity(get_embedding(x_path, encoder),
                                       get_embedding(y_path, encoder))
    return similarity > threshold
|
class SequenceDataset(Dataset):
    """Bucketed Librispeech dataset for ASR.

    Utterances are sorted longest-first and grouped into fixed-size buckets
    at construction time, so each ``__getitem__`` returns one whole batch.
    """

    def __init__(self, split, bucket_size, dictionary, libri_root, bucket_file, **kwargs):
        super(SequenceDataset, self).__init__()
        self.dictionary = dictionary
        self.libri_root = libri_root
        self.sample_rate = SAMPLE_RATE
        # kwargs[split] lists the Librispeech subsets (e.g. train-clean-100).
        self.split_sets = kwargs[split]
        assert os.path.isdir(bucket_file), 'Please first run `python3 preprocess/generate_len_for_bucket.py -h` to get bucket file.'
        # Gather (file_path, length) tables for all requested subsets.
        table_list = []
        for item in self.split_sets:
            file_path = os.path.join(bucket_file, (item + '.csv'))
            if os.path.exists(file_path):
                table_list.append(pd.read_csv(file_path))
            else:
                logging.warning(f'{item} is not found in bucket_file: {bucket_file}, skipping it.')
        table_list = pd.concat(table_list)
        # Longest-first keeps similar lengths in the same bucket.
        table_list = table_list.sort_values(by=['length'], ascending=False)
        X = table_list['file_path'].tolist()
        X_lens = table_list['length'].tolist()
        assert (len(X) != 0), f'0 data found for {split}'
        # Keep only utterances that have a transcript; encode labels once.
        Y = self._load_transcript(X)
        x_names = set([self._parse_x_name(x) for x in X])
        y_names = set(Y.keys())
        usage_list = list((x_names & y_names))
        Y = {key: Y[key] for key in usage_list}
        self.Y = {k: self.dictionary.encode_line(v, line_tokenizer=(lambda x: x.split())).long() for (k, v) in Y.items()}
        # Group utterances into buckets of `bucket_size`; buckets containing
        # very long utterances are split in half to bound padded batch memory.
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in tqdm(zip(X, X_lens), total=len(X), desc=f'ASR dataset {split}', dynamic_ncols=True):
            if (self._parse_x_name(x) in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # Flush the final partial bucket.
        # NOTE(review): the inner check re-tests only the *last* `x` seen by
        # the loop, and a single leftover utterance is dropped entirely —
        # verify this is intentional upstream behavior.
        if (len(batch_x) > 1):
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)

    def _parse_x_name(self, x):
        # 'path/to/1234-5678-0000.flac' -> '1234-5678-0000'
        return x.split('/')[(- 1)].split('.')[0]

    def _load_wav(self, wav_path):
        # Load one utterance relative to libri_root and flatten to mono 1-D.
        (wav, sr) = torchaudio.load(os.path.join(self.libri_root, wav_path))
        assert (sr == self.sample_rate), f'Sample rate mismatch: real {sr}, config {self.sample_rate}'
        return wav.view((- 1))

    def _load_transcript(self, x_list):
        """Load the transcripts for Librispeech."""
        def process_trans(transcript):
            # Character-level targets: spaces become '|' word separators and
            # characters are space-delimited, with a trailing separator.
            transcript = transcript.upper()
            return (' '.join(list(transcript.replace(' ', '|'))) + ' |')
        trsp_sequences = {}
        # One <speaker>-<chapter>.trans.txt file per utterance directory.
        split_spkr_chap_list = list(set(('/'.join(x.split('/')[:(- 1)]) for x in x_list)))
        for dir in split_spkr_chap_list:
            parts = dir.split('/')
            trans_path = f'{parts[(- 2)]}-{parts[(- 1)]}.trans.txt'
            path = os.path.join(self.libri_root, dir, trans_path)
            assert os.path.exists(path)
            with open(path, 'r') as trans_f:
                for line in trans_f:
                    lst = line.strip().split()
                    trsp_sequences[lst[0]] = process_trans(' '.join(lst[1:]))
        return trsp_sequences

    def _build_dictionary(self, transcripts, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8):
        # Build a fairseq Dictionary over all transcript symbols.
        d = Dictionary()
        transcript_list = list(transcripts.values())
        Dictionary.add_transcripts_to_dictionary(transcript_list, d, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        # Each item is one pre-built batch of (wav, label, filename) lists.
        wav_batch = [self._load_wav(x_file).numpy() for x_file in self.X[index]]
        label_batch = [self.Y[self._parse_x_name(x_file)].numpy() for x_file in self.X[index]]
        filename_batch = [Path(x_file).stem for x_file in self.X[index]]
        return (wav_batch, label_batch, filename_batch)

    def collate_fn(self, items):
        # Batching is done in __getitem__, so the DataLoader batch size is 1.
        assert (len(items) == 1)
        return (items[0][0], items[0][1], items[0][2])
|
class Dictionary(fairseq_Dictionary):
    """Dictionary inherited from FairSeq, extended with helpers that build the
    symbol table from raw transcripts (optionally with multiple processes)."""

    @staticmethod
    def _add_transcripts_to_dictionary_single_worker(transcripts, eos_word, worker_id=0, num_workers=1):
        """Count symbol occurrences in this worker's slice of *transcripts*.

        Each line contributes its whitespace-separated tokens plus one EOS.
        """
        counter = Counter()
        size = len(transcripts)
        chunk_size = size // num_workers
        offset = worker_id * chunk_size
        # BUGFIX: the last worker must extend to the end of the list.
        # Previously `end = min(size + 1, offset + chunk_size)` left the final
        # `size % num_workers` transcripts uncounted by every worker.
        if worker_id == num_workers - 1:
            end = size
        else:
            end = offset + chunk_size
        for line in transcripts[offset:end]:
            for word in line.split():
                counter.update([word])
            counter.update([eos_word])
        return counter

    @staticmethod
    def add_transcripts_to_dictionary(transcripts, dict, num_workers):
        """Populate *dict* with symbols from *transcripts*, fanning out to
        *num_workers* processes when more than one is requested."""
        def merge_result(counter):
            # Sorted iteration gives a deterministic insertion order.
            for (w, c) in sorted(counter.items()):
                dict.add_symbol(w, c)

        if num_workers > 1:
            pool = get_context('spawn').Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(
                    Dictionary._add_transcripts_to_dictionary_single_worker,
                    (transcripts, dict.eos_word, worker_id, num_workers)))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_transcripts_to_dictionary_single_worker(transcripts, dict.eos_word))
|
def token_to_word(text):
    """Convert a space-separated character sequence back into words:
    characters are joined and '|' marks word boundaries."""
    no_spaces = text.replace(' ', '')
    with_word_gaps = no_spaces.replace('|', ' ')
    return with_word_gaps.strip()
|
def get_decoder(decoder_args_dict, dictionary):
    # Build an external-LM beam-search decoder from the config dict; returns
    # None when no 'kenlm' decoder is requested (caller then decodes greedily).
    decoder_args = Namespace(**decoder_args_dict)
    if (decoder_args.decoder_type == 'kenlm'):
        from .w2l_decoder import W2lKenLMDecoder
        decoder_args.beam_size_token = len(dictionary)
        if isinstance(decoder_args.unk_weight, str):
            # SECURITY(review): eval() of a config string — acceptable only
            # for trusted configs (e.g. 'float("-inf")'); never feed this
            # untrusted input.
            decoder_args.unk_weight = eval(decoder_args.unk_weight)
        return W2lKenLMDecoder(decoder_args, dictionary)
    return None
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        """
        Args:
            upstream_dim: int
                Different upstream will give different representation dimension;
                they are projected to `project_dim` first.

            upstream_rate: int
                160: for upstream with 10 ms per frame
                320: for upstream with 20 ms per frame

            downstream_expert: dict
                The 'downstream_expert' field specified in the downstream config file.

            expdir: string
                Directory into which all results/logs are saved.

            **kwargs: dict
                All other command-line arguments and config fields.
        """
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.upstream_rate = upstream_rate
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = expdir
        # Character dictionary; falls back to the bundled char.dict next to this file.
        self.dictionary = Dictionary.load(self.datarc.get('dict_path', str((Path(__file__).parent / 'char.dict'))))
        self.projector = nn.Linear(upstream_dim, self.modelrc['project_dim'])
        # NOTE(review): eval() resolves the model class from a config string;
        # unsafe if the config is untrusted.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc[self.modelrc['select']]
        self.model = model_cls(self.modelrc['project_dim'], len(self.dictionary.symbols), upstream_rate, **model_conf)
        # bos is reused as the CTC blank symbol.
        self.blank = self.dictionary.bos()
        self.objective = nn.CTCLoss(blank=self.blank, zero_infinity=self.datarc['zero_infinity'])
        decoder_args = self.datarc.get('decoder_args')
        self.decoder = get_decoder(decoder_args, self.dictionary)
        # Best (lowest) dev WER seen so far; a buffer so it is checkpointed.
        self.register_buffer('best_score', (torch.ones(1) * 100))

    def get_dataloader(self, split):
        """Return a DataLoader for `split` (train/dev/test-clean/...).

        Datasets are created lazily and cached as `<split>_dataset` attributes.
        Each batch is ([wav1, wav2, ...], labels, filenames) with variable-length
        16 kHz waveforms.
        """
        if (not hasattr(self, f'{split}_dataset')):
            batch_size = (self.datarc['batch_size'] if (split == 'train') else self.datarc['eval_batch_size'])
            setattr(self, f'{split}_dataset', SequenceDataset(split, batch_size, self.dictionary, **self.datarc))
        if (split == 'train'):
            return self._get_train_dataloader(self.train_dataset)
        else:
            return self._get_eval_dataloader(getattr(self, f'{split}_dataset'))

    def _get_train_dataloader(self, dataset):
        # batch_size=1 because the dataset itself yields whole batches;
        # use a DistributedSampler when running under torch.distributed.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=1, shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=1, shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _compute_metrics(self, pred_tokens_all, pred_words_all, target_tokens_all, target_words_all):
        'Computes WER and UER given the prediction and true transcriptions'
        unit_error_sum = 0.0
        word_error_sum = 0.0
        unit_length_sum = 0
        word_length_sum = 0
        for (pred_tokens, pred_words, target_tokens, target_words) in zip(pred_tokens_all, pred_words_all, target_tokens_all, target_words_all):
            pred_tokens = pred_tokens.split()
            target_tokens = target_tokens.split()
            unit_error_sum += editdistance.eval(pred_tokens, target_tokens)
            unit_length_sum += len(target_tokens)
            word_error_sum += editdistance.eval(pred_words, target_words)
            word_length_sum += len(target_words)
        # Default to 100% error when there is nothing to score.
        (uer, wer) = (100.0, 100.0)
        if (unit_length_sum > 0):
            uer = ((100.0 * unit_error_sum) / unit_length_sum)
        if (word_length_sum > 0):
            wer = ((100.0 * word_error_sum) / word_length_sum)
        return (uer, wer)

    def _decode(self, log_probs, input_lens):
        'Decoder that take log probabilities as input and outputs decoded seq'
        pred_tokens_batch = []
        pred_words_batch = []
        for (log_prob, in_len) in zip(log_probs, input_lens):
            log_prob = log_prob[:in_len].unsqueeze(0)
            decoded = None
            # Beam-search (LM) decoding only at eval time; training stays greedy.
            if ((self.decoder is not None) and (not self.training)):
                decoded = self.decoder.decode(log_prob)
                if (len(decoded) >= 1):
                    decoded = decoded[0]
                decoded = (None if (len(decoded) < 1) else decoded[0])
            # Greedy CTC: argmax, collapse repeats, drop blanks.
            pred_token_ids = log_prob.argmax(dim=(- 1)).unique_consecutive()
            pred_token_ids = pred_token_ids[(pred_token_ids != self.blank)].tolist()
            pred_tokens = self.dictionary.string(pred_token_ids)
            if ((decoded is not None) and ('words' in decoded)):
                pred_words = decoded['words']
            else:
                pred_words = token_to_word(pred_tokens).split()
            pred_tokens_batch.append(pred_tokens)
            pred_words_batch.append(pred_words)
        return (pred_tokens_batch, pred_words_batch)

    def _get_log_probs(self, features):
        # Pad variable-length features, project, run the model, log-softmax.
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features])
        features = pad_sequence(features, batch_first=True).to(device=device)
        features = self.projector(features)
        (logits, log_probs_len) = self.model(features, features_len)
        log_probs = nn.functional.log_softmax(logits, dim=(- 1))
        return (log_probs, log_probs_len)

    def inference(self, features, filenames):
        """Decode `features` to word hypotheses; also dump them to inference.ark
        as 'uttid hypothesis' lines when filenames are provided."""
        (log_probs, log_probs_len) = self._get_log_probs(features)
        (_, pred_words_batch) = self._decode(log_probs.float().contiguous().cpu(), log_probs_len)
        hyps = [' '.join(hyp) for hyp in pred_words_batch]
        if (filenames != []):
            with open((Path(self.expdir) / 'inference.ark'), 'w') as file:
                for (hyp, filename) in zip(hyps, filenames):
                    # Bug fix: the utterance id was hard-coded as '(unknown)',
                    # leaving `filename` unused.
                    file.write(f'{filename} {hyp}\n')
        return hyps

    def forward(self, split, features, labels, filenames, records, **kwargs):
        """Compute the CTC loss for one batch and stash predictions/targets.

        Args:
            split: dataloader name (train/dev/test-clean/...).
            features: list of unpadded feature tensors already on the target device.
            labels: list of token-id sequences.
            filenames: list of utterance ids.
            records: defaultdict(list) collected for log_records.
        Returns:
            the CTC loss (not detached).
        """
        (log_probs, log_probs_len) = self._get_log_probs(features)
        device = features[0].device
        labels = [torch.IntTensor(l) for l in labels]
        labels_len = torch.IntTensor([len(label) for label in labels]).to(device=device)
        labels = pad_sequence(labels, batch_first=True, padding_value=self.dictionary.pad()).to(device=device)
        # nn.CTCLoss expects (T, B, C) log-probabilities.
        loss = self.objective(log_probs.transpose(0, 1), labels, log_probs_len, labels_len)
        records['loss'].append(loss.item())
        target_tokens_batch = []
        target_words_batch = []
        for label in labels:
            # Strip padding and eos before converting back to text.
            label_idx = ((label != self.dictionary.pad()) & (label != self.dictionary.eos()))
            target_token_ids = label[label_idx].tolist()
            target_tokens = self.dictionary.string(target_token_ids)
            target_words = token_to_word(target_tokens).split()
            target_tokens_batch.append(target_tokens)
            target_words_batch.append(target_words)
        with torch.no_grad():
            (pred_tokens_batch, pred_words_batch) = self._decode(log_probs.float().contiguous().cpu(), log_probs_len)
        records['target_tokens'] += target_tokens_batch
        records['target_words'] += target_words_batch
        records['pred_tokens'] += pred_tokens_batch
        records['pred_words'] += pred_words_batch
        records['filenames'] += filenames
        return loss

    def log_records(self, split, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """Log loss/UER/WER to Tensorboard, dump hyp/ref ark files on eval
        splits, and return checkpoint names when the dev-clean WER improves."""
        loss = torch.FloatTensor(records['loss']).mean().item()
        print(f'{split} loss: {loss}')
        (uer, wer) = self._compute_metrics(records['pred_tokens'], records['pred_words'], records['target_tokens'], records['target_words'])
        logger.add_scalar(f'asr/{split}-loss', loss, global_step=global_step)
        logger.add_scalar(f'asr/{split}-uer', uer, global_step=global_step)
        logger.add_scalar(f'asr/{split}-wer', wer, global_step=global_step)
        print(f'{split} uer: {uer}')
        print(f'{split} wer: {wer}')
        save_names = []
        if ((split == 'dev-clean') and (wer < self.best_score)):
            self.best_score = (torch.ones(1) * wer)
            save_names.append(f'{split}-best.ckpt')
        if (('test' in split) or ('dev' in split)):
            lm = ('noLM' if (self.decoder is None) else 'LM')
            hyp_ark = open(os.path.join(self.expdir, f'{split}-{lm}-hyp.ark'), 'w')
            ref_ark = open(os.path.join(self.expdir, f'{split}-{lm}-ref.ark'), 'w')
            for (filename, hyp, ref) in zip(records['filenames'], records['pred_words'], records['target_words']):
                hyp = ' '.join(hyp)
                ref = ' '.join(ref)
                # Bug fix: utterance ids were hard-coded as '(unknown)'.
                hyp_ark.write(f'{filename} {hyp}\n')
                ref_ark.write(f'{filename} {ref}\n')
            hyp_ark.close()
            ref_ark.close()
        return save_names
|
class W2lDecoder(object):
    # Base wav2letter-style CTC decoder over a fairseq target dictionary.
    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        # Number of hypotheses to keep per utterance.
        self.nbest = args.nbest
        self.criterion_type = CriterionType.CTC
        # Blank token: a dedicated '<ctc_blank>' if present, else reuse bos.
        self.blank = (tgt_dict.index('<ctc_blank>') if ('<ctc_blank>' in tgt_dict.indices) else tgt_dict.bos())
        # Silence / word-boundary token: '<sep>' or '|' if present, else eos.
        if ('<sep>' in tgt_dict.indices):
            self.silence = tgt_dict.index('<sep>')
        elif ('|' in tgt_dict.indices):
            self.silence = tgt_dict.index('|')
        else:
            self.silence = tgt_dict.eos()
        self.asg_transitions = None
    def generate(self, models, sample, **unused):
        'Generate a batch of inferences.'
        # Only the encoder is run; drop decoder-side inputs.
        encoder_input = {k: v for (k, v) in sample['net_input'].items() if (k != 'prev_output_tokens')}
        emissions = self.get_emissions(models, encoder_input)
        return self.decode(emissions)
    def get_emissions(self, models, encoder_input):
        'Run encoder and normalize emissions'
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, 'get_logits'):
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        # (T, B, N) -> (B, T, N), contiguous float32 on CPU for the C++ decoder.
        return emissions.transpose(0, 1).float().cpu().contiguous()
    def get_tokens(self, idxs):
        'Normalize tokens by handling CTC blank, ASG replabels, etc.'
        # Standard CTC post-processing: collapse repeats, then drop blanks.
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter((lambda x: (x != self.blank)), idxs)
        return torch.LongTensor(list(idxs))
|
class W2lKenLMDecoder(W2lDecoder):
    """Lexicon-based (or lexicon-free) beam-search CTC decoder with a KenLM
    language model, backed by the flashlight text decoder bindings."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.unit_lm = getattr(args, 'unit_lm', False)
        if args.lexicon:
            # Word-level LM over a lexicon: build a trie of spellings scored
            # by the LM so the decoder can constrain token sequences to words.
            self.lexicon = load_words(args.lexicon)
            self.word_dict = create_word_dict(self.lexicon)
            self.unk_word = self.word_dict.get_index('<unk>')
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.trie = Trie(self.vocab_size, self.silence)
            start_state = self.lm.start(False)
            for (i, (word, spellings)) in enumerate(self.lexicon.items()):
                word_idx = self.word_dict.get_index(word)
                (_, score) = self.lm.score(start_state, word_idx)
                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    assert (tgt_dict.unk() not in spelling_idxs), f'{spelling} {spelling_idxs}'
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)
            self.decoder_opts = LexiconDecoderOptions(beam_size=args.beam, beam_size_token=int(getattr(args, 'beam_size_token', len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type)
            if (self.asg_transitions is None):
                # CTC: no ASG transition scores.
                self.asg_transitions = []
            self.decoder = LexiconDecoder(self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, self.unit_lm)
        else:
            assert args.unit_lm, 'lexicon free decoding can only be done with a unit language model'
            from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
            # Each token is its own "word" for the unit LM.
            d = {w: [[w]] for w in tgt_dict.symbols}
            self.word_dict = create_word_dict(d)
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.decoder_opts = LexiconFreeDecoderOptions(beam_size=args.beam, beam_size_token=int(getattr(args, 'beam_size_token', len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type)
            self.decoder = LexiconFreeDecoder(self.decoder_opts, self.lm, self.silence, self.blank, [])

    def get_timesteps(self, token_idxs: List[int]) -> List[int]:
        """Return frame numbers corresponding to every non-blank token.

        Parameters
        ----------
        token_idxs : List[int]
            IDs of decoded tokens.

        Returns
        -------
        List[int]
            Frame numbers corresponding to every non-blank token.
        """
        timesteps = []
        for (i, token_idx) in enumerate(token_idxs):
            if (token_idx == self.blank):
                continue
            # Record a frame only at the start of a run of identical tokens.
            if ((i == 0) or (token_idx != token_idxs[(i - 1)])):
                timesteps.append(i)
        return timesteps

    def decode(self, emissions):
        """Run the flashlight decoder on (B, T, N) emissions and return, per
        utterance, the n-best hypotheses with tokens/score/timesteps/words."""
        (B, T, N) = emissions.size()
        hypos = []
        for b in range(B):
            # Byte offset of batch element b in the underlying storage.
            # Previously hard-coded `4 *` (float32 element size); use
            # element_size() so the pointer math stays correct for any dtype.
            emissions_ptr = (emissions.data_ptr() + ((b * emissions.stride(0)) * emissions.element_size()))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            hypos.append([{'tokens': self.get_tokens(result.tokens), 'score': result.score, 'timesteps': self.get_timesteps(result.tokens), 'words': [self.word_dict.get_entry(x) for x in result.words if (x >= 0)]} for result in nbest_results])
        return hypos
|
class AtisDataset(Dataset):
    """ATIS intent-classification dataset: one wav + intent label per dataframe row."""

    def __init__(self, df, base_path, Sy_intent, type):
        self.df = df
        self.base_path = base_path
        # Cap (in samples) on example length.
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        self.Sy_intent = Sy_intent
        self.type = type

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, self.type, row['id'] + '.wav')
        wav, sr = torchaudio.load(wav_path)
        wav = wav.squeeze(0)
        # Map each slot value to its integer id (only 'intent' is used here).
        label = [self.Sy_intent[slot][row[slot]] for slot in ['intent']]
        return wav, torch.tensor(label).long()

    def collate_fn(self, samples):
        """Unzip (wav, label) pairs into parallel lists."""
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        return wavs, labels
|
class Identity(nn.Module):
    """Pass-through module: returns the input feature wrapped in a list."""

    def __init__(self, config):
        super(Identity, self).__init__()

    def forward(self, feature, att_mask, head_mask):
        # Masks are accepted for interface compatibility only.
        return [feature]
|
class Mean(nn.Module):
    """Masked mean pooling over the time axis."""

    def __init__(self, out_dim):
        super(Mean, self).__init__()

    def forward(self, feature, att_mask):
        """
        Arguments
            feature - [BxTxD] Acoustic feature with shape
            att_mask - [BxTx1] Attention Mask logits
        """
        pooled = []
        for feat, mask in zip(feature, att_mask):
            # Valid length = index of the first negative mask logit, inclusive.
            valid_len = torch.nonzero(mask < 0, as_tuple=False)[0][0] + 1
            pooled.append(feat[:valid_len].mean(dim=0))
        return torch.stack(pooled)
|
class SAP(nn.Module):
    """Self-attention pooling incorporating an attention mask."""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        # Tanh squashing applied before the attention pooling layer.
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """Pool [BxTxD] features into [BxD] using masked self-attention.

        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - [BxT]   attention mask logits
        """
        activated = self.act_fn(feature)
        return self.sap_layer(activated, att_mask)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # Single learned projection scores each frame.
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H)
            att_mask  : size (N, T) additive mask logits
        return:
            utter_rep : size (N, H)
        """
        # Frame scores plus additive mask, softmaxed over time.
        scores = self.W(batch_rep).squeeze(-1) + att_mask
        weights = nn.functional.softmax(scores, dim=-1).unsqueeze(-1)
        return (batch_rep * weights).sum(dim=1)
|
class Model(nn.Module):
    # Utterance classifier: encoder module -> pooling (agg) -> linear head.
    def __init__(self, input_dim, agg_module, output_dim, config):
        super(Model, self).__init__()
        # NOTE(review): eval() resolves class names ('Mean', 'SAP', ...) from
        # config strings; unsafe if the config is untrusted.
        self.agg_method = eval(agg_module)(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)
        self.model = eval(config['module'])(Namespace(**config['hparams']))
        # One (None) head mask per hidden layer, i.e. no attention heads masked.
        self.head_mask = ([None] * config['hparams']['num_hidden_layers'])
    def forward(self, features, att_mask):
        features = self.model(features, att_mask.unsqueeze((- 1)), head_mask=self.head_mask, output_all_encoded_layers=False)
        # The encoder returns a list; features[0] is the last encoded layer.
        utterance_vector = self.agg_method(features[0], att_mask)
        predicted = self.linear(utterance_vector)
        return predicted
|
class AudioSLUDataset(Dataset):
    """SLU dataset over per-speaker snips mp3 clips, resampled to SAMPLE_RATE."""

    def __init__(self, df, base_path, Sy_intent, speaker_name):
        self.df = df
        self.base_path = base_path
        # Cap (in samples) on example length.
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        self.Sy_intent = Sy_intent
        self.speaker_name = speaker_name
        self.resampler = torchaudio.transforms.Resample(ORIGINAL_SAMPLE_RATE, SAMPLE_RATE)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, 'audio_' + self.speaker_name, 'snips', row['u_id'] + '.mp3')
        wav, sr = torchaudio.load(wav_path)
        assert sr == ORIGINAL_SAMPLE_RATE
        wav = self.resampler(wav).squeeze(0)
        # Map each slot value to its integer id (only 'intent' is used here).
        label = [self.Sy_intent[slot][row[slot]] for slot in ['intent']]
        return wav, torch.tensor(label).long()

    def collate_fn(self, samples):
        """Unzip (wav, label) pairs into parallel lists."""
        wavs = [w for w, _ in samples]
        labels = [lab for _, lab in samples]
        return wavs, labels
|
class Identity(nn.Module):
    """No-op module; forwards the feature untouched, wrapped in a list."""

    def __init__(self, config, **kwargs):
        super(Identity, self).__init__()

    def forward(self, feature, att_mask, head_mask, **kwargs):
        # Masks and extra kwargs exist only for interface compatibility.
        return [feature]
|
class Mean(nn.Module):
    """Average-pool each sequence over its valid (unmasked) prefix."""

    def __init__(self, out_dim):
        super(Mean, self).__init__()

    def forward(self, feature, att_mask):
        """
        Arguments
            feature - [BxTxD] Acoustic feature with shape
            att_mask - [BxTx1] Attention Mask logits
        """
        vecs = []
        for idx in range(len(feature)):
            # First negative mask logit marks the end of the valid span (inclusive).
            end = torch.nonzero(att_mask[idx] < 0, as_tuple=False)[0][0] + 1
            vecs.append(torch.mean(feature[idx][:end], dim=0))
        return torch.stack(vecs)
|
class SAP(nn.Module):
    """Self-attention pooling with attention-mask support."""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """Squash features through tanh, then attention-pool [BxTxD] -> [BxD].

        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - [BxT]   attention mask logits
        """
        return self.sap_layer(self.act_fn(feature), att_mask)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H)
            att_mask  : size (N, T) additive mask logits
        return:
            utter_rep : size (N, H)
        """
        # Per-frame attention logits, masked additively before softmax.
        logits = att_mask + self.W(batch_rep).squeeze(-1)
        att_w = nn.functional.softmax(logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep
|
class Model(nn.Module):
    # Utterance classifier: encoder -> pooling -> linear head -> log-probs.
    def __init__(self, input_dim, agg_module, output_dim, config):
        super(Model, self).__init__()
        # NOTE(review): eval() resolves class names from config strings;
        # unsafe if the config is untrusted.
        self.agg_method = eval(agg_module)(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)
        self.model = eval(config['module'])(Namespace(**config['hparams']))
        # One (None) head mask per hidden layer, i.e. no attention heads masked.
        self.head_mask = ([None] * config['hparams']['num_hidden_layers'])
    def forward(self, features, att_mask):
        features = self.model(features, att_mask.unsqueeze((- 1)), head_mask=self.head_mask, output_all_encoded_layers=False)
        # The encoder returns a list; features[0] is the last encoded layer.
        utterance_vector = self.agg_method(features[0], att_mask)
        predicted = self.linear(utterance_vector)
        # Unlike the sibling Model above, this variant returns log-probabilities.
        return F.log_softmax(predicted, dim=(- 1))
|
class CommonVoiceDataset(Dataset):
    # Bucketed dataset over preprocessed Common Voice tsv files
    # (header row, columns: path, sentence).
    def __init__(self, split, tokenizer, bucket_size, path, ascending=False, ratio=1.0, offset=0, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        for s in split:
            with open(s, 'r') as fp:
                rows = csv.reader(fp, delimiter='\t')
                # NOTE(review): file_list/text are re-initialized on every split,
                # so with len(split) > 1 only the LAST split's data survives
                # (contrast LibriDataset, which accumulates) — confirm intended.
                (file_list, text) = ([], [])
                for (i, row) in enumerate(rows):
                    if (i == 0):
                        # Header row.
                        continue
                    file_list.append(join(path, row[0]))
                    text.append(tokenizer.encode(row[1]))
            print(f'Found {len(file_list)} samples.')
            if (ratio < 1.0):
                # Subsample: keep every (1/ratio)-th item starting at `offset`.
                print(f'Ratio = {ratio}, offset = {offset}')
                skip = int((1.0 / ratio))
                (file_list, text) = (file_list[offset::skip], text[offset::skip])
            total_len = 0.0
            for f in file_list:
                # Estimate duration from file size; assumes ~32000 bytes/sec
                # (16 kHz 16-bit mono wav) — TODO confirm.
                total_len += (getsize(f) / 32000.0)
            print('Total audio len = {:.2f} mins = {:.2f} hours'.format((total_len / 60.0), (total_len / 3600.0)))
        (self.file_list, self.text) = (file_list, text)
    def __getitem__(self, index):
        # Bucketing: return `bucket_size` consecutive (path, text) pairs.
        if (self.bucket_size > 1):
            # Clamp so a full bucket always fits.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])
    def __len__(self):
        return len(self.file_list)
|
def normalize(sent, language):
    # Normalize one transcript: NFKC + uppercase, translate via the module-level
    # `translator` table (presumably punctuation removal — confirm), squeeze
    # spaces, then apply per-language filters. Returns '' to signal "drop line".
    sent = unicodedata.normalize('NFKC', sent).upper()
    sent = sent.translate(translator)
    sent = re.sub(' +', ' ', sent)
    if (language in ['zh-TW', 'zh-CN', 'ja']):
        # These scripts do not use word spacing.
        sent = sent.replace(' ', '')
    if (language in ['zh-TW', 'zh-CN', 'ja', 'ar', 'ru']):
        # Drop sentences containing ASCII-alphabetic characters or apostrophes
        # (bytes.isalpha() is true only for ASCII letters).
        if any([(c.encode('UTF-8').isalpha() or (c == "'")) for c in list(sent)]):
            return ''
    if (language == 'zh-CN'):
        # `zhcn_exception` / `spanish_exception` are module-level sets of
        # disallowed characters.
        if (len(zhcn_exception.intersection(set(list(sent)))) > 0):
            return ''
    if (language == 'es'):
        if (len(spanish_exception.intersection(set(list(sent)))) > 0):
            return ''
    if (language == 'en'):
        # English: allow only A-Z, space and apostrophe.
        if any([(not (((ord(c) >= ord('A')) and (ord(c) <= ord('Z'))) or (c == ' ') or (c == "'"))) for c in list(sent)]):
            return ''
    return sent.strip()
|
def read_tsv(path, corpus_root, language, accent=None, hours=(- 1)):
    # Read a Common Voice .tsv, filter rows by language/accent, normalize the
    # transcripts, and stop once `hours` of audio have been collected
    # (hours <= 0 disables the limit). Column 1 = clip path, 2 = sentence,
    # 7 = accent.
    with open(path, 'r') as fp:
        rows = csv.reader(fp, delimiter='\t')
        data_list = []
        total_len = 0
        iterator = tqdm(enumerate(rows))
        for (i, row) in iterator:
            if (i == 0):
                # Header row.
                continue
            if ((language == 'es') and (row[7] != 'mexicano')):
                # Spanish: keep only the Mexican accent.
                continue
            if ((language == 'en') and (row[7] != accent)):
                continue
            # Clip duration in seconds, read from the mp3 header
            # (MP3 presumably comes from mutagen — confirm import).
            audio = MP3(join(corpus_root, row[1]))
            secs = audio.info.length
            sent_normed = normalize(row[2], language)
            if (sent_normed == ''):
                # normalize() returns '' for rejected sentences.
                continue
            data_list.append({'path': row[1], 'sentence': sent_normed, 'accent': (row[7] if (row[7] != '') else 'unk'), 'len': secs})
            total_len += secs
            if ((hours > 0) and ((total_len / 3600.0) > hours)):
                iterator.close()
                break
    print(f'Read {len(data_list)} files')
    print('Total {:.2f} hours'.format((total_len / 3600.0)))
    return data_list
|
def write_tsv(data, out_path):
    """Write a two-column (path, sentence) TSV, rewriting each entry's
    audio extension to .wav."""
    with open(out_path, 'w') as fp:
        writer = csv.writer(fp, delimiter='\t')
        writer.writerow(['path', 'sentence'])
        for entry in data:
            # Swap the 3-char extension (e.g. mp3 -> wav).
            wav_path = entry['path'][:-3] + 'wav'
            writer.writerow([wav_path, entry['sentence']])
|
def write_txt(data, out_path):
    """Dump each entry's sentence to `out_path`, one sentence per line."""
    with open(out_path, 'w') as fp:
        fp.writelines(entry['sentence'] + '\n' for entry in data)
|
def main():
    # CLI: preprocess Common Voice splits (train/dev/test) for one language:
    # filter + normalize via read_tsv, sort by clip duration (longest first),
    # write processed .tsv files, plus a .txt transcript dump for train.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, help='Root of Common Voice 7.0 directory.')
    parser.add_argument('--lang', type=str, help='Language abbreviation.')
    parser.add_argument('--out', type=str, help='Path to output directory.')
    parser.add_argument('--accent', type=str, default='none', help='English accent')
    parser.add_argument('--hours', type=float, default=(- 1), help='Maximum hours used.')
    args = parser.parse_args()
    os.makedirs(args.out, exist_ok=True)
    os.makedirs(join(args.out, args.lang), exist_ok=True)
    for s in ['train', 'dev', 'test']:
        data_list = read_tsv(join(args.root, args.lang, (s + '.tsv')), join(args.root, args.lang, 'clips'), args.lang, accent=args.accent, hours=args.hours)
        if (data_list[0].get('len', (- 1)) > 0):
            # Sort by clip duration, longest first.
            data_list = sorted(data_list, reverse=True, key=(lambda x: x['len']))
        write_tsv(data_list, join(args.out, args.lang, (s + '.tsv')))
        if (s == 'train'):
            write_txt(data_list, join(args.out, args.lang, (s + '.txt')))
|
def read_processed_tsv(path):
    """Read a processed TSV (header + rows) and return the audio paths from
    column 0 with their extension swapped back to .mp3."""
    with open(path, 'r') as fp:
        rows = list(csv.reader(fp, delimiter='\t'))
    # rows[0] is the header; column 0 of each data row holds the wav path.
    return [row[0][:-3] + 'mp3' for row in rows[1:]]
|
def main():
    # CLI: convert every clip listed in a processed tsv from mp3 to 16 kHz wav.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, help='Directory of the dataset.')
    parser.add_argument('--tsv', type=str, help='Path to processed tsv file.')
    args = parser.parse_args()
    file_list = read_processed_tsv(args.tsv)
    for file in tqdm(file_list):
        file = str(file)
        file = join(args.root, file)
        (wav, sample_rate) = torchaudio.load(file)
        # Resample to 16 kHz; `resample` takes res_type='kaiser_best', so it is
        # presumably librosa/resampy — confirm against the module imports.
        wav = resample(wav.squeeze(0).numpy(), sample_rate, 16000, res_type='kaiser_best')
        wav = torch.FloatTensor(wav).unsqueeze(0)
        # Same basename, .wav extension, written next to the source mp3.
        new_file = (file[:(- 3)] + 'wav')
        torchaudio.save(new_file, wav, 16000)
|
def parse_lexicon(line, tokenizer):
    """Parse one lexicon line ('WORD PH1 PH2 ...') into (word, [phonemes]).

    Raises AssertionError if any phoneme is missing from the tokenizer vocab.
    """
    # Bug fix: the original called line.replace('\t', ' ') and discarded the
    # result (str is immutable). split() handles tabs anyway, but make the
    # normalization effective instead of dead code.
    line = line.replace('\t', ' ')
    (word, *phonemes) = line.split()
    for p in phonemes:
        assert (p in tokenizer._vocab2idx.keys())
    return (word, phonemes)
|
def read_text(file, word2phonemes, tokenizer):
    """Look up the transcription of `file` in its sibling .trans.txt, expand
    each word to phonemes via `word2phonemes`, and return the encoded result.

    Somewhat redundant (each .trans.txt may be opened multiple times) but
    works fine multi-threaded. Returns None if the utterance id is absent.
    """
    # e.g. .../123-456-0001.flac -> .../123-456.trans.txt, utt id 123-456-0001
    trans_path = '-'.join(file.split('-')[:-1]) + '.trans.txt'
    utt_id = file.split('/')[-1].split('.')[0]
    with open(trans_path, 'r') as fp:
        for line in fp:
            if line.split(' ')[0] == utt_id:
                # Strip the trailing newline and drop the id column.
                transcription = line[:-1].split(' ', 1)[1]
                phonemes = []
                for word in transcription.split():
                    phonemes += word2phonemes[word]
                return tokenizer.encode(' '.join(phonemes))
|
class LibriPhoneDataset(Dataset):
    # LibriSpeech dataset whose transcripts are expanded word -> phonemes
    # using one or more lexicon files, with length-bucketing.
    def __init__(self, split, tokenizer, bucket_size, path, lexicon, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Collect every pronunciation listed for each word across all lexicons.
        word2phonemes_all = defaultdict(list)
        for lexicon_file in lexicon:
            with open(lexicon_file, 'r') as file:
                lines = [line.strip() for line in file.readlines()]
                for line in lines:
                    (word, phonemes) = parse_lexicon(line, tokenizer)
                    word2phonemes_all[word].append(phonemes)
        word2phonemes = {}
        for (word, phonemes_all) in word2phonemes_all.items():
            if (len(phonemes_all) > 1):
                # Ambiguous word: report all pronunciation variants.
                print(f'[LibriPhone] - {len(phonemes_all)} of phoneme sequences found for {word}.')
                for (idx, phonemes) in enumerate(phonemes_all):
                    print(f'{idx}. {phonemes}')
            # Always keep the first pronunciation for determinism.
            word2phonemes[word] = phonemes_all[0]
        print(f'[LibriPhone] - Taking the first phoneme sequences for a deterministic behavior.')
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.flac'))
            assert (len(split_list) > 0), 'No data found @ {}'.format(join(path, s))
            file_list += split_list
        text = []
        for f in tqdm(file_list, desc='word -> phonemes'):
            text.append(read_text(str(f), word2phonemes, tokenizer))
        # Sort by encoded-transcript length (descending unless `ascending`).
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])
    def __getitem__(self, index):
        # Bucketing: return `bucket_size` consecutive (path, text) pairs.
        if (self.bucket_size > 1):
            # Clamp so a full bucket always fits.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])
    def __len__(self):
        return len(self.file_list)
|
def read_text(file):
    """Return the transcription for `file` from its sibling .trans.txt.

    Somewhat redundant (each .trans.txt may be opened multiple times) but
    works fine multi-threaded. Returns None if the utterance id is absent.
    """
    # e.g. .../123-456-0001.flac -> .../123-456.trans.txt, utt id 123-456-0001
    trans_path = '-'.join(file.split('-')[:-1]) + '.trans.txt'
    utt_id = file.split('/')[-1].split('.')[0]
    with open(trans_path, 'r') as fp:
        for line in fp:
            if line.split(' ')[0] == utt_id:
                # Strip the trailing newline, drop the id column.
                return line[:-1].split(' ', 1)[1]
|
class LibriDataset(Dataset):
    # LibriSpeech dataset with length-bucketing: items are sorted by encoded
    # transcript length, and each index returns a bucket of consecutive items.
    def __init__(self, split, tokenizer, bucket_size, path, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.flac'))
            assert (len(split_list) > 0), 'No data found @ {}'.format(join(path, s))
            file_list += split_list
        text = []
        for f in tqdm(file_list, desc='Read text'):
            transcription = read_text(str(f))
            text.append(tokenizer.encode(transcription))
        # Sort by encoded-transcript length (descending unless `ascending`).
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])
    def __getitem__(self, index):
        if (self.bucket_size > 1):
            # Clamp so a full bucket always fits.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])
    def __len__(self):
        return len(self.file_list)
|
class SnipsDataset(Dataset):
    # SNIPS SLU dataset filtered to a speaker whitelist, with length-bucketing.
    def __init__(self, split, tokenizer, bucket_size, path, num_workers=12, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Speaker whitelist comes from kwargs keyed by the split name.
        self.speaker_list = (kwargs[f'{split}_speakers'] if (type(split) == str) else kwargs[f'{split[0]}_speakers'])
        # Slot-filling tokenizers ('-slot' in token_type) read the IOB-tagged
        # transcripts; others read the plain ones.
        transcripts_file = open(join(self.path, ('all.iob.snips.txt' if ('-slot' in tokenizer.token_type) else 'all-trans.txt'))).readlines()
        transcripts = {}
        for line in transcripts_file:
            # Each line: '<utt_id> <word> <word> ...'
            line = line.strip().split(' ')
            index = line[0]
            sent = ' '.join(line[1:])
            transcripts[index] = sent
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.wav'))
            new_list = []
            uf = 0
            for i in trange(len(split_list), desc='checking files'):
                # Utterance id = wav basename; ids are prefixed by speaker name.
                uid = str(split_list[i]).split('/')[(- 1)].split('.wav', 1)[0].split('/')[(- 1)]
                if (uid in transcripts):
                    for spk in self.speaker_list:
                        if (uid[:len(spk)] == spk):
                            new_list.append(split_list[i])
                            break
                else:
                    print(split_list[i], 'Not Found')
                    uf += 1
            print(('%d wav file with label not found in text file!' % uf))
            split_list = new_list
            print(f'loaded audio from {len(self.speaker_list)} speakers {str(self.speaker_list)} with {len(split_list)} examples.')
            assert (len(split_list) > 0), 'No data found @ {}'.format(join(path, s))
            file_list += split_list
        text = [transcripts[str(f).split('.wav', 1)[0].split('/')[(- 1)]] for f in file_list]
        text = [tokenizer.encode(txt) for txt in tqdm(text, desc='tokenizing')]
        # Sort by encoded-transcript length (descending unless `ascending`).
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])
    def __getitem__(self, index):
        if (self.bucket_size > 1):
            # Clamp so a full bucket always fits.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])
    def __len__(self):
        return len(self.file_list)
|
def collect_audio_batch(batch, split, half_batch_size_wav_len=300000):
    """Collect a batch of (audio_path <str>, token list <list>) tuples,
    e.g. [(file1, txt1), (file2, txt2), ...].

    Returns (audio_feat, text, file): raw waveforms, label id arrays and
    file stems, all sorted together by descending waveform length.
    """
    def audio_reader(filepath):
        # Load a wav and flatten it to a 1-D waveform tensor.
        (wav, sample_rate) = torchaudio.load(filepath)
        return wav.reshape((- 1))
    # Bucketing datasets yield a single-element batch whose first item is
    # already a list of tuples; unwrap it.
    if (type(batch[0]) is not tuple):
        batch = batch[0]
    first_len = audio_reader(str(batch[0][0])).size(0)
    # Halve overly long training batches to bound memory usage (the batch is
    # sorted longest-first upstream, so the first item is the longest).
    if (split == 'train'):
        if ((first_len > half_batch_size_wav_len) and (len(batch) > 1)):
            batch = batch[:(len(batch) // 2)]
    (file, audio_feat, audio_len, text) = ([], [], [], [])
    with torch.no_grad():
        for b in batch:
            # File stem (basename up to the first dot).
            file.append(str(b[0]).split('/')[(- 1)].split('.')[0])
            feat = audio_reader(str(b[0])).numpy()
            audio_feat.append(feat)
            audio_len.append(len(feat))
            text.append(torch.LongTensor(b[1]).numpy())
    # Sort every field together by descending audio length.
    (audio_len, file, audio_feat, text) = zip(*[(feat_len, f_name, feat, txt) for (feat_len, f_name, feat, txt) in sorted(zip(audio_len, file, audio_feat, text), reverse=True, key=(lambda x: x[0]))])
    return (audio_feat, text, file)
|
def create_dataset(split, tokenizer, name, bucketing, batch_size, **kwargs):
    """Interface for creating all kinds of dataset.

    Returns (dataset, loader_bs) where loader_bs is the DataLoader batch
    size: 1 when bucketing (each dataset item is already a bucket of
    `batch_size` examples), otherwise `batch_size`.
    """
    if (name.lower() == 'librispeech'):
        from .corpus.librispeech import LibriDataset as Dataset
    elif (name.lower() == 'snips'):
        from .corpus.snips import SnipsDataset as Dataset
    elif (name.lower() == 'libriphone'):
        from .corpus.libriphone import LibriPhoneDataset as Dataset
    elif (name.lower() in {'common_voice', 'sbcsae'}):
        from .corpus.common_voice import CommonVoiceDataset as Dataset
    else:
        raise NotImplementedError
    if (split == 'train'):
        # Training always uses the full corpus from the start.
        kwargs['ratio'] = 1.0
        kwargs['offset'] = 0
        loader_bs = (1 if bucketing else batch_size)
        bucket_size = (batch_size if bucketing else 1)
        dataset = Dataset(kwargs['train'], tokenizer, bucket_size, **kwargs)
    else:
        # Evaluation uses the module-level EVAL_BATCH_SIZE and no bucketing.
        loader_bs = EVAL_BATCH_SIZE
        dataset = Dataset(kwargs[split], tokenizer, 1, **kwargs)
    return (dataset, loader_bs)
|
def load_dataset(split, tokenizer, corpus):
    """Prepare dataloader for training/validation."""
    num_workers = corpus.pop('num_workers', 12)
    (dataset, loader_bs) = create_dataset(split, tokenizer, num_workers=num_workers, **corpus)
    collate_fn = partial(collect_audio_batch, split=split)
    if (split == 'train'):
        # Shard across processes when torch.distributed is initialized;
        # the sampler then owns shuffling.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        dataloader = DataLoader(dataset, batch_size=loader_bs, shuffle=(sampler is None), sampler=sampler, collate_fn=collate_fn, num_workers=num_workers)
    else:
        dataloader = DataLoader(dataset, batch_size=loader_bs, shuffle=False, collate_fn=collate_fn, num_workers=num_workers)
    return dataloader
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.expdir = expdir
        self.upstream_dim = upstream_dim
        self.corpus = downstream_expert['corpus']
        self.tokenizer = load_text_encoder(**downstream_expert['text'])
        modelrc = downstream_expert['model']
        self.projector = nn.Linear(upstream_dim, modelrc['project_dim'])
        model_select = downstream_expert['model']['select']
        # NOTE(review): eval() resolves a class name straight from config —
        # assumes the config is trusted; a whitelist lookup would be safer.
        self.model = eval(model_select)(modelrc['project_dim'], self.tokenizer.vocab_size, upstream_rate=upstream_rate, **modelrc.get(model_select, {}))
        # CTC blank shares the tokenizer's pad id.
        self.objective = nn.CTCLoss(blank=self.tokenizer.pad_idx, zero_infinity=modelrc['zero_infinity'])
        self.save_best_on = downstream_expert.get('save_best_on', 'dev')
        self.metrics = downstream_expert['metric']
        self.metric_higher_better = downstream_expert['metric_higher_better']
        # Best-so-far score, initialized to the worst possible value.
        self.register_buffer('best_score', (torch.ones(1) * (0 if self.metric_higher_better else (1 << 31))))

    def _get_task_name(self):
        return f"ctc-{self.corpus['name'].lower()}"

    def get_dataloader(self, split):
        return load_dataset(split, self.tokenizer, self.corpus)

    def forward(self, split, features, labels, filenames, records, **kwargs):
        """Compute CTC loss and collect greedy-decoded hypotheses into `records`."""
        device = features[0].device
        labels = [torch.LongTensor(label) for label in labels]
        features_len = torch.IntTensor([len(feat) for feat in features])
        labels_len = torch.IntTensor([len(label) for label in labels])
        features = pad_sequence(features, batch_first=True)
        labels = pad_sequence(labels, batch_first=True, padding_value=self.tokenizer.pad_idx).to(device=device)
        features = self.projector(features)
        (logits, log_probs_len) = self.model(features, features_len)
        log_probs = nn.functional.log_softmax(logits, dim=(- 1))
        # CTCLoss expects (T, N, C).
        loss = self.objective(log_probs.transpose(0, 1), labels, log_probs_len, labels_len)
        records['loss'].append(loss.item())
        # Greedy CTC decoding: argmax, collapse repeats, drop blank(pad)/eos.
        pred_tokens = log_probs.argmax(dim=(- 1))
        filtered_tokens = []
        for pred_token in pred_tokens:
            pred_token = pred_token.unique_consecutive()
            filtered_token = [token for token in pred_token.tolist() if ((token != self.tokenizer.pad_idx) and (token != self.tokenizer.eos_idx))]
            filtered_tokens.append(filtered_token)
        hypothesis = [self.tokenizer.decode(h) for h in filtered_tokens]
        groundtruth = [self.tokenizer.decode(g.tolist()) for g in labels]
        records['hypothesis'] += hypothesis
        records['groundtruth'] += groundtruth
        records['filename'] += filenames
        return loss

    def log_records(self, split, records, logger, global_step, **kwargs):
        """Log metrics; return checkpoint names to save on a new best score."""
        loss = torch.FloatTensor(records['loss']).mean().item()
        results = {'loss': loss}
        for metric in self.metrics:
            # NOTE(review): eval() resolves metric function names (e.g. 'wer',
            # 'cer') from config — assumes trusted config.
            results[metric] = eval(metric)(hypothesis=records['hypothesis'], groundtruth=records['groundtruth'])
        save_names = []
        for (key, value) in results.items():
            print(f'{split} {key}: {value}')
            logger.add_scalar(f'{self._get_task_name()}/{split}-{key}', value, global_step=global_step)
            # Only the first configured metric drives checkpoint selection.
            if (key == self.metrics[0]):
                save_criterion = ((value > self.best_score) if self.metric_higher_better else (value < self.best_score))
                if ((split in self.save_best_on) and save_criterion):
                    self.best_score = (torch.ones(1) * value)
                    save_names.append(f'{split}-best.ckpt')
        if (('test' in split) or ('dev' in split)):
            # Dump hypothesis/reference pairs as Kaldi-style text ark files.
            # NOTE(review): the '(unknown)' literal below looks like a lost
            # filename field from the original source — confirm before relying
            # on these files for scoring.
            hyp_ark = open(os.path.join(self.expdir, f'{split}-hyp.ark'), 'w')
            ref_ark = open(os.path.join(self.expdir, f'{split}-ref.ark'), 'w')
            for (filename, hyp, ref) in zip(records['filename'], records['hypothesis'], records['groundtruth']):
                hyp_ark.write(f'''(unknown) {hyp}
''')
                ref_ark.write(f'''(unknown) {ref}
''')
            hyp_ark.close()
            ref_ark.close()
        return save_names
|
def cer(hypothesis, groundtruth, **kwargs):
    """Character error rate: total edit distance over total reference length."""
    pairs = list(zip(hypothesis, groundtruth))
    dist = sum(float(ed.eval(hyp, ref)) for hyp, ref in pairs)
    ref_len = sum(len(ref) for _, ref in pairs)
    return dist / ref_len
|
def per(*args, **kwargs):
    """Phone error rate: the same computation as wer() applied to phone tokens."""
    return wer(*args, **kwargs)
|
def wer(hypothesis, groundtruth, **kwargs):
    """Word error rate: total word-level edit distance over total reference words."""
    pairs = list(zip(hypothesis, groundtruth))
    dist = sum(float(ed.eval(hyp.split(' '), ref.split(' '))) for hyp, ref in pairs)
    ref_len = sum(len(ref.split(' ')) for _, ref in pairs)
    return dist / ref_len
|
def clean(ref):
    """Strip IOB slot boundary markers (``B-<slot> `` / `` E-<slot>``) from a transcript."""
    without_begin = re.sub('B\\-(\\S+) ', '', ref)
    return re.sub(' E\\-(\\S+)', '', without_begin)
|
def parse(hyp, ref):
    """Normalize whitespace, extract (value, slot) pairs, and strip IOB markers.

    Returns (ref_text, hyp_text, ref_slots, hyp_slots); the slot strings are
    ';'-joined 'value:slot' pairs ('' when no slots were found).
    """
    slot_pattern = re.compile('B\\-(\\S+) (.+?) E\\-\\1')
    hyp = re.sub(' +', ' ', hyp)
    ref = re.sub(' +', ' ', ref)
    hyp_pairs = slot_pattern.findall(hyp)
    ref_pairs = slot_pattern.findall(ref)
    ref_slots = ';'.join('{}:{}'.format(value, slot) for (slot, value) in ref_pairs)
    if hyp_pairs:
        # Hypothesis slot values may contain nested markers; strip them too.
        hyp_slots = ';'.join('{}:{}'.format(clean(value), slot) for (slot, value) in hyp_pairs)
    else:
        hyp_slots = ''
    return (clean(ref), clean(hyp), ref_slots, hyp_slots)
|
def slot_type_f1(hypothesis, groundtruth, **kwargs):
    """Mean per-utterance F1 computed over slot *types* only (slot values ignored)."""
    scores = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slot_str, hyp_slot_str = parse(hyp, ref)
        ref_types = set()
        if ref_slot_str != '':
            for pair in ref_slot_str.split(';'):
                value, slot = pair.split(':')
                ref_types.add(slot)
        hyp_types = set()
        if hyp_slot_str != '':
            for pair in hyp_slot_str.split(';'):
                value, slot = pair.split(':')
                hyp_types.add(slot)
        if not ref_types and not hyp_types:
            # No slots expected and none predicted: perfect.
            f1 = 1.0
        elif not ref_types or not hyp_types:
            # Slots on one side only: total miss.
            f1 = 0.0
        else:
            recall = sum(1.0 for t in ref_types if t in hyp_types) / len(ref_types)
            precision = sum(1.0 for t in hyp_types if t in ref_types) / len(hyp_types)
            f1 = (2 * precision * recall / (precision + recall)) if (precision + recall) > 0 else 0.0
        scores.append(f1)
    return sum(scores) / len(scores)
|
def slot_value_cer(hypothesis, groundtruth, **kwargs):
    """CER over slot values, pairing each reference value with the
    lowest-CER hypothesis value of the same slot type ('' when the type
    is missing from the hypothesis)."""
    aligned_hyps = []
    aligned_refs = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slot_str, hyp_slot_str = parse(hyp, ref)
        ref_dict = {}
        if ref_slot_str != '':
            for pair in ref_slot_str.split(';'):
                value, slot = pair.split(':')
                ref_dict.setdefault(slot, []).append(value)
        hyp_dict = {}
        if hyp_slot_str != '':
            for pair in hyp_slot_str.split(';'):
                value, slot = pair.split(':')
                hyp_dict.setdefault(slot, []).append(value)
        for slot, ref_values in ref_dict.items():
            for ref_value in ref_values:
                if slot not in hyp_dict:
                    aligned_refs.append(ref_value)
                    aligned_hyps.append('')
                else:
                    # Greedy alignment: keep the first candidate with minimal CER.
                    best_value = ''
                    best_cer = 100
                    for hyp_value in hyp_dict[slot]:
                        candidate = cer([hyp_value], [ref_value])
                        if candidate < best_cer:
                            best_cer = candidate
                            best_value = hyp_value
                    aligned_refs.append(ref_value)
                    aligned_hyps.append(best_value)
    return cer(aligned_hyps, aligned_refs)
|
def slot_value_wer(hypothesis, groundtruth, **kwargs):
    """WER over slot values, pairing each reference value with the
    lowest-WER hypothesis value of the same slot type ('' when the type
    is missing from the hypothesis)."""
    aligned_hyps = []
    aligned_refs = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slot_str, hyp_slot_str = parse(hyp, ref)
        ref_dict = {}
        if ref_slot_str != '':
            for pair in ref_slot_str.split(';'):
                value, slot = pair.split(':')
                ref_dict.setdefault(slot, []).append(value)
        hyp_dict = {}
        if hyp_slot_str != '':
            for pair in hyp_slot_str.split(';'):
                value, slot = pair.split(':')
                hyp_dict.setdefault(slot, []).append(value)
        for slot, ref_values in ref_dict.items():
            for ref_value in ref_values:
                if slot not in hyp_dict:
                    aligned_refs.append(ref_value)
                    aligned_hyps.append('')
                else:
                    # Greedy alignment: keep the first candidate with minimal WER.
                    best_value = ''
                    best_wer = 100
                    for hyp_value in hyp_dict[slot]:
                        candidate = wer([hyp_value], [ref_value])
                        if candidate < best_wer:
                            best_wer = candidate
                            best_value = hyp_value
                    aligned_refs.append(ref_value)
                    aligned_hyps.append(best_value)
    return wer(aligned_hyps, aligned_refs)
|
def slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot, **kwargs):
    """Micro-averaged slot F1 based on exact value matches.

    Args:
        hypothesis, groundtruth: parallel lists of IOB-tagged transcripts.
        loop_over_all_slot: when True, hypothesis-only slot types also count
            (their values are false positives — the "full" variant); when
            False they are ignored (the "part" variant).

    Returns:
        F1 in percent (0-100); 0.0 when no slots occur anywhere.

    Changes vs. original: removed unused accumulators
    (`test_case`, `TPs`, `FNs`, `FPs`), replaced the manual match loop with a
    membership test, and guarded the final division so an input with no
    slots at all returns 0.0 instead of raising ZeroDivisionError.
    """
    slot2counts = {}  # slot type -> [TP, FN, FP]
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slot_str, hyp_slot_str = parse(hyp, ref)
        ref_dict = {}
        if ref_slot_str != '':
            for pair in ref_slot_str.split(';'):
                value, slot = pair.split(':')
                ref_dict.setdefault(slot, []).append(value)
        hyp_dict = {}
        if hyp_slot_str != '':
            for pair in hyp_slot_str.split(';'):
                value, slot = pair.split(':')
                hyp_dict.setdefault(slot, []).append(value)
        slots = list(ref_dict.keys())
        if loop_over_all_slot:
            slots += [s for s in hyp_dict if s not in ref_dict]
        for slot in slots:
            tp = fn = fp = 0
            if slot not in ref_dict:
                # Hypothesis-only slot type: every value is a false positive.
                fp += len(hyp_dict[slot])
            else:
                for ref_value in ref_dict[slot]:
                    if slot not in hyp_dict:
                        fn += 1
                    elif ref_value in hyp_dict[slot]:
                        tp += 1
                    else:
                        # Wrong value: counts as both a miss and a spurious slot.
                        fn += 1
                        fp += 1
            counts = slot2counts.setdefault(slot, [0, 0, 0])
            counts[0] += tp
            counts[1] += fn
            counts[2] += fp
    all_tp = sum(c[0] for c in slot2counts.values())
    all_fn = sum(c[1] for c in slot2counts.values())
    all_fp = sum(c[2] for c in slot2counts.values())
    denominator = (2 * all_tp) + all_fp + all_fn
    if denominator == 0:
        # No slots anywhere: define F1 as 0 instead of dividing by zero.
        return 0.0
    return (100.0 * 2 * all_tp) / denominator
|
def slot_edit_f1_full(hypothesis, groundtruth, **kwargs):
    """Slot edit F1 counting hypothesis-only slot types as false positives."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=True, **kwargs)
|
def slot_edit_f1_part(hypothesis, groundtruth, **kwargs):
    """Slot edit F1 restricted to slot types present in the reference."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=False, **kwargs)
|
class _BaseTextEncoder(abc.ABC):
    """Abstract interface shared by all text encoders.

    Index convention for every implementation: 0 = <pad>, 1 = <eos>,
    2 = <unk>.

    Fixed: `abc.abstractproperty` and `abc.abstractclassmethod` are
    deprecated (since Python 3.3); the equivalent `@property` / `@classmethod`
    stacked on `@abc.abstractmethod` is used instead.
    """

    @abc.abstractmethod
    def encode(self, s):
        """Convert a string into a list of token ids."""
        raise NotImplementedError

    @abc.abstractmethod
    def decode(self, ids, ignore_repeat=False):
        """Convert token ids back into a string."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def vocab_size(self):
        """Total number of ids this encoder can emit."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def token_type(self):
        """Identifier of the tokenization scheme (e.g. 'character', 'subword')."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def load_from_file(cls, vocab_file):
        """Construct an encoder from vocabulary file(s)."""
        raise NotImplementedError

    @property
    def pad_idx(self):
        return 0

    @property
    def eos_idx(self):
        return 1

    @property
    def unk_idx(self):
        return 2

    def __repr__(self):
        return '<{} vocab_size={}>'.format(type(self).__name__, self.vocab_size)
|
class CharacterTextEncoder(_BaseTextEncoder):
    """Encode text as per-character ids over a fixed vocabulary."""

    def __init__(self, vocab_list):
        # Special symbols occupy ids 0-2; characters follow.
        self._vocab_list = ['<pad>', '<eos>', '<unk>'] + vocab_list
        self._vocab2idx = {}
        for idx, symbol in enumerate(self._vocab_list):
            self._vocab2idx[symbol] = idx

    def encode(self, s):
        """Map each character to its id and append <eos>."""
        stripped = s.strip('\r\n ')
        ids = [self.vocab_to_idx(ch) for ch in stripped]
        ids.append(self.eos_idx)
        return ids

    def decode(self, idxs, ignore_repeat=False):
        """Rebuild the string, skipping <pad> (and repeats when requested) and stopping at <eos>."""
        chars = []
        for position, idx in enumerate(idxs):
            symbol = self.idx_to_vocab(idx)
            is_repeat = ignore_repeat and position > 0 and idx == idxs[position - 1]
            if idx == self.pad_idx or is_repeat:
                continue
            if idx == self.eos_idx:
                break
            chars.append(symbol)
        return ''.join(chars)

    @classmethod
    def load_from_file(cls, vocab_file):
        """Build an encoder from a file containing one vocabulary entry per line."""
        with open(vocab_file, 'r') as f:
            entries = [line.strip('\r\n') for line in f]
        return cls(entries)

    @property
    def vocab_size(self):
        return len(self._vocab_list)

    @property
    def token_type(self):
        return 'character'

    def vocab_to_idx(self, vocab):
        # Unknown characters map to <unk>.
        return self._vocab2idx.get(vocab, self.unk_idx)

    def idx_to_vocab(self, idx):
        return self._vocab_list[idx]
|
class CharacterTextSlotEncoder(_BaseTextEncoder):
    """Character-level tokenizer that interleaves IOB slot boundary tokens.

    Ids in [0, len(vocab_list)+3) are specials/characters; slot markers
    ('B-<slot>', 'E-<slot>') occupy the ids after the character vocabulary.
    """

    def __init__(self, vocab_list, slots):
        self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
        self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
        self.slots = slots
        # Slot-marker ids start right after the character vocabulary.
        self.slot2id = {self.slots[i]: (i + len(self._vocab_list)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self._vocab_list)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, s):
        """Encode a line "<words>\\t<IOB tags>" into character ids plus slot markers."""
        (sent, iobs) = s.strip('\r\n ').split('\t')
        # Drop the leading BOS and trailing EOS word/tag.
        sent = sent.split(' ')[1:(- 1)]
        iobs = iobs.split(' ')[1:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            if (wrd in '?!.,;-'):
                # Skip punctuation tokens (substring membership test).
                continue
            if (wrd == '&'):
                wrd = 'AND'
            # Emit B-<slot> at the start of a slot span ...
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            tokens += [self.vocab_to_idx(v) for v in wrd]
            # ... and E-<slot> at its end.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
            # Words are separated by the space character; the last word ends with <eos>.
            if (i == (len(sent) - 1)):
                tokens.append(self.eos_idx)
            else:
                tokens.append(self.vocab_to_idx(' '))
        return tokens

    def decode(self, idxs, ignore_repeat=False):
        """Decode ids, skipping <pad> (and repeats when requested), stopping at <eos>."""
        vocabs = []
        for (t, idx) in enumerate(idxs):
            v = self.idx_to_vocab(idx)
            if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            elif (idx == self.eos_idx):
                break
            else:
                vocabs.append(v)
        return ''.join(vocabs)

    @classmethod
    def load_from_file(cls, vocab_file, slots_file):
        """Build from a character vocab file and a slots file (first line skipped)."""
        with open(vocab_file, 'r') as f:
            vocab_list = [line.strip('\r\n') for line in f]
        # NOTE(review): slots file handle is never closed.
        org_slots = open(slots_file).read().split('\n')
        slots = []
        for slot in org_slots[1:]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(vocab_list, slots)

    @property
    def vocab_size(self):
        # Characters plus slot markers.
        return (len(self._vocab_list) + len(self.slots))

    @property
    def token_type(self):
        return 'character-slot'

    def vocab_to_idx(self, vocab):
        # Unknown characters map to <unk>.
        return self._vocab2idx.get(vocab, self.unk_idx)

    def idx_to_vocab(self, idx):
        idx = int(idx)
        if (idx < len(self._vocab_list)):
            return self._vocab_list[idx]
        else:
            # Pad slot markers with a space so decoded text keeps word boundaries.
            token = self.id2slot[idx]
            if (token[0] == 'B'):
                return (token + ' ')
            elif (token[0] == 'E'):
                return (' ' + token)
            else:
                raise ValueError('id2slot get:', token)
|
class SubwordTextEncoder(_BaseTextEncoder):
    """Wrap a sentencepiece processor behind the common text-encoder interface."""

    def __init__(self, spm):
        # The shared index convention (pad=0, eos=1, unk=2) must match the spm model.
        if spm.pad_id() != 0 or spm.eos_id() != 1 or spm.unk_id() != 2:
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm

    def encode(self, s):
        """Encode a sentence into subword ids (spm appends <eos> via its extra options)."""
        return self.spm.encode_as_ids(s)

    def decode(self, idxs, ignore_repeat=False):
        """Decode ids, stopping at <eos> and dropping <pad> (and repeats on request)."""
        kept = []
        for position, token_id in enumerate(idxs):
            if token_id == self.eos_idx:
                break
            is_repeat = ignore_repeat and position > 0 and token_id == idxs[position - 1]
            if token_id == self.pad_idx or is_repeat:
                continue
            kept.append(token_id)
        return self.spm.decode_ids(kept)

    @classmethod
    def load_from_file(cls, filepath):
        import sentencepiece as splib
        processor = splib.SentencePieceProcessor()
        processor.load(filepath)
        # Always append <eos> when encoding.
        processor.set_encode_extra_options(':eos')
        return cls(processor)

    @property
    def vocab_size(self):
        return len(self.spm)

    @property
    def token_type(self):
        return 'subword'
|
class SubwordTextSlotEncoder(_BaseTextEncoder):
    """Sentencepiece subword tokenizer extended with IOB slot boundary tokens.

    Ids in [0, len(spm)) are subwords; slot markers ('B-<slot>',
    'E-<slot>') occupy the ids after the subword vocabulary.
    """

    def __init__(self, spm, slots):
        # The shared index convention (pad=0, eos=1, unk=2) must match the spm model.
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm
        self.slots = slots
        # Slot-marker ids start right after the subword vocabulary.
        self.slot2id = {self.slots[i]: (i + len(self.spm)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self.spm)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, s):
        """Encode a line "<words>\\t<IOB tags>" into subword ids with slot markers."""
        (sent, iobs) = s.strip().split('\t')
        # Drop the leading BOS and trailing EOS word/tag.
        sent = sent.split(' ')[1:(- 1)]
        iobs = iobs.split(' ')[1:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            if (wrd in '?!.,;-'):
                # Skip punctuation tokens (substring membership test).
                continue
            if (wrd == '&'):
                wrd = 'AND'
            # Emit B-<slot> at the start of a slot span ...
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            # [:-1] drops the per-word <eos> that spm's ':eos' option appends.
            tokens += self.spm.encode_as_ids(wrd)[:(- 1)]
            # ... and E-<slot> at its end.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
        # Terminate with <eos> (id 1) if not already present.
        # NOTE(review): raises IndexError when every word was skipped (tokens
        # empty) — assumed not to happen for SNIPS data; confirm.
        if (tokens[(- 1)] != 1):
            tokens.append(1)
        return tokens

    def decode(self, idxs, ignore_repeat=False):
        """Decode ids into a list of decoded-text + slot-marker chunks."""
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        sent = []
        ret = []
        for (i, x) in enumerate(crop_idx):
            if (x >= len(self.spm)):
                # Flush pending subwords followed by the slot marker.
                # NOTE(review): sentencepiece's decode_ids usually returns a
                # str, which cannot be '+'-concatenated with a list — confirm
                # this branch is ever exercised / what decode_ids returns here.
                ret.append((self.spm.decode_ids(sent) + [self.id2slot[x]]))
            else:
                sent.append(x)
        return ret

    @classmethod
    def load_from_file(cls, filepath, slots_file):
        """Build from a sentencepiece model and a slots file (first line skipped)."""
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        # Always append <eos> when encoding.
        spm.set_encode_extra_options(':eos')
        # NOTE(review): slots file handle is never closed.
        org_slots = open(slots_file).read().split('\n')
        slots = []
        for slot in org_slots[1:]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(spm, slots)

    @property
    def vocab_size(self):
        # Subwords plus slot markers.
        return (len(self.spm) + len(self.slots))

    @property
    def token_type(self):
        return 'subword-slot'
|
class WordTextEncoder(CharacterTextEncoder):
    """Word-level variant of CharacterTextEncoder: tokens are space-separated words."""

    def encode(self, s):
        """Map each word to its id and append <eos>."""
        tokens = s.strip('\r\n ').split(' ')
        ids = [self.vocab_to_idx(tok) for tok in tokens]
        ids.append(self.eos_idx)
        return ids

    def decode(self, idxs, ignore_repeat=False):
        """Join decoded words with spaces, stopping at <eos> and skipping <pad>/repeats."""
        words = []
        for position, idx in enumerate(idxs):
            word = self.idx_to_vocab(idx)
            if idx == self.eos_idx:
                break
            is_repeat = ignore_repeat and position > 0 and idx == idxs[position - 1]
            if idx == self.pad_idx or is_repeat:
                continue
            words.append(word)
        return ' '.join(words)

    @property
    def token_type(self):
        return 'word'
|
class BertTextEncoder(_BaseTextEncoder):
    """Bert Tokenizer.

    https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/tokenization_bert.py

    Ids are shifted down by the module-level BERT_FIRST_IDX so that 0/1/2
    can serve as pad/eos/unk in the shared text-encoder convention.
    """

    def __init__(self, tokenizer):
        self._tokenizer = tokenizer
        self._tokenizer.pad_token = '<pad>'
        self._tokenizer.eos_token = '<eos>'
        self._tokenizer.unk_token = '<unk>'

    def encode(self, s):
        """Encode `s`, remapping ids into the reduced range; out-of-range ids become <unk>.

        Fixed: the original range check was an `assert` inside a bare
        `except:`, which both swallowed unrelated errors and disappears under
        `python -O`; an explicit comparison is equivalent and safe.
        """
        reduced_idx = []
        for idx in self._tokenizer.encode(s):
            r_idx = (idx - BERT_FIRST_IDX)
            if (r_idx > 0):
                reduced_idx.append(r_idx)
            else:
                # Ids at/below BERT_FIRST_IDX cannot be represented: map to <unk>.
                reduced_idx.append(self.unk_idx)
        reduced_idx.append(self.eos_idx)
        return reduced_idx

    def decode(self, idxs, ignore_repeat=False):
        """Decode, stopping at <eos> and skipping <pad> (and repeats on request)."""
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                # Shift back into the tokenizer's original id space.
                crop_idx.append((idx + BERT_FIRST_IDX))
        return self._tokenizer.decode(crop_idx)

    @property
    def vocab_size(self):
        return ((BERT_LAST_IDX - BERT_FIRST_IDX) + 1)

    @property
    def token_type(self):
        return 'bert'

    @classmethod
    def load_from_file(cls, vocab_file):
        from pytorch_transformers import BertTokenizer
        return cls(BertTokenizer.from_pretrained(vocab_file))

    @property
    def pad_idx(self):
        return 0

    @property
    def eos_idx(self):
        return 1

    @property
    def unk_idx(self):
        return 2
|
def load_text_encoder(mode, vocab_file, slots_file=None):
    """Instantiate the text encoder matching `mode`.

    'bert-*' modes pass the mode string itself as the pretrained model name.
    """
    if mode == 'character':
        return CharacterTextEncoder.load_from_file(vocab_file)
    if mode == 'character-slot':
        return CharacterTextSlotEncoder.load_from_file(vocab_file, slots_file)
    if mode == 'subword':
        return SubwordTextEncoder.load_from_file(vocab_file)
    if mode == 'subword-slot':
        return SubwordTextSlotEncoder.load_from_file(vocab_file, slots_file)
    if mode == 'word':
        return WordTextEncoder.load_from_file(vocab_file)
    if mode.startswith('bert-'):
        return BertTextEncoder.load_from_file(mode)
    raise NotImplementedError('`{}` is not yet supported.'.format(mode))
|
class Model(nn.Module):
    """Frame-wise classifier: per-frame linear projection to class logits,
    optionally preceded by a multi-layer LSTM."""

    def __init__(self, input_dim, output_class_num, rnn_layers, hidden_size, **kwargs):
        super(Model, self).__init__()
        # rnn_layers == 0 disables the recurrent front-end entirely.
        self.use_rnn = rnn_layers > 0
        if not self.use_rnn:
            self.linear = nn.Linear(input_dim, output_class_num)
        else:
            self.rnn = nn.LSTM(input_dim, hidden_size, num_layers=rnn_layers, batch_first=True)
            self.linear = nn.Linear(hidden_size, output_class_num)

    def forward(self, features):
        """features: (N, T, input_dim) -> logits: (N, T, output_class_num)."""
        features = features.float()
        if not self.use_rnn:
            return self.linear(features)
        hidden, _ = self.rnn(features)
        return self.linear(hidden)
|
def get_wav_paths(data_dirs):
    """Map wav basenames (without extension) to paths relative to the 'Session' root."""
    wav_paths = find_files(data_dirs)
    wav_dict = {}
    for wav_path in wav_paths:
        wav_name = splitext(basename(wav_path))[0]
        # Keep only the part starting at 'Session...'.
        # NOTE(review): if 'Session' is absent, find() returns -1 and the path
        # is truncated to its last character — assumed not to happen for the
        # IEMOCAP directory layout.
        start = wav_path.find('Session')
        wav_path = wav_path[start:]
        wav_dict[wav_name] = wav_path
    return wav_dict
|
def preprocess(data_dirs, paths, out_path):
    """Build an IEMOCAP meta-data json from the sessions' evaluation label files.

    Keeps only the four-class subset (neu/hap/ang/sad), merging 'exc'
    (excited) into 'hap', and writes {'labels': ..., 'meta_data': ...} to
    `out_path`.
    """
    meta_data = []
    for path in paths:
        wav_paths = get_wav_paths(path_join(data_dirs, path, WAV_DIR_PATH))
        label_dir = path_join(data_dirs, path, LABEL_DIR_PATH)
        label_paths = list(os.listdir(label_dir))
        label_paths = [label_path for label_path in label_paths if (splitext(label_path)[1] == '.txt')]
        for label_path in label_paths:
            with open(path_join(label_dir, label_path)) as f:
                for line in f:
                    # Annotation lines start with '[' (the time span field).
                    if (line[0] != '['):
                        continue
                    line = re.split('[\t\n]', line)
                    line = list(filter(None, line))
                    # line[1] = utterance id, line[2] = emotion label.
                    if (line[2] not in ['neu', 'hap', 'ang', 'sad', 'exc']):
                        continue
                    if (line[1] not in wav_paths):
                        continue
                    # speaker = first '_'-delimited field of the wav basename.
                    meta_data.append({'path': wav_paths[line[1]], 'label': line[2].replace('exc', 'hap'), 'speaker': re.split('_', basename(wav_paths[line[1]]))[0]})
    data = {'labels': {'neu': 0, 'hap': 1, 'ang': 2, 'sad': 3}, 'meta_data': meta_data}
    with open(out_path, 'w') as f:
        json.dump(data, f)
|
def main(data_dir):
    """Main function.

    Generates leave-one-session-out meta-data: for each SessionX under
    `data_dir`, train meta-data from all other sessions and test meta-data
    from SessionX, written under `data_dir`/meta_data/SessionX/.
    """
    paths = list(os.listdir(data_dir))
    paths = [path for path in paths if (path[:7] == 'Session')]
    paths.sort()
    out_dir = os.path.join(data_dir, 'meta_data')
    os.makedirs(out_dir, exist_ok=True)
    for (i, path) in enumerate(paths):
        os.makedirs(f'{out_dir}/{path}', exist_ok=True)
        # Train on every session except the held-out one.
        preprocess(data_dir, (paths[:i] + paths[(i + 1):]), path_join(f'{out_dir}/{path}', 'train_meta_data.json'))
        preprocess(data_dir, [path], path_join(f'{out_dir}/{path}', 'test_meta_data.json'))
|
class IEMOCAPDataset(Dataset):
    """IEMOCAP emotion dataset: wavs resampled to SAMPLE_RATE plus class labels.

    Items are (waveform ndarray, class id, utterance name).
    """

    def __init__(self, data_dir, meta_path, pre_load=True):
        self.data_dir = data_dir
        self.pre_load = pre_load
        with open(meta_path, 'r') as f:
            self.data = json.load(f)
        self.class_dict = self.data['labels']
        self.idx2emotion = {value: key for (key, value) in self.class_dict.items()}
        self.class_num = len(self.class_dict)
        self.meta_data = self.data['meta_data']
        # Infer the corpus sample rate from the first file and build one
        # shared resampler (assumes every wav shares that rate — TODO confirm).
        (_, origin_sr) = torchaudio.load(path_join(self.data_dir, self.meta_data[0]['path']))
        self.resampler = Resample(origin_sr, SAMPLE_RATE)
        if self.pre_load:
            # Cache all waveforms in memory up front.
            self.wavs = self._load_all()

    def _load_wav(self, path):
        # Load, resample, and drop the channel dimension.
        (wav, _) = torchaudio.load(path_join(self.data_dir, path))
        wav = self.resampler(wav).squeeze(0)
        return wav

    def _load_all(self):
        wavforms = []
        for info in self.meta_data:
            wav = self._load_wav(info['path'])
            wavforms.append(wav)
        return wavforms

    def __getitem__(self, idx):
        label = self.meta_data[idx]['label']
        label = self.class_dict[label]
        if self.pre_load:
            wav = self.wavs[idx]
        else:
            wav = self._load_wav(self.meta_data[idx]['path'])
        return (wav.numpy(), label, Path(self.meta_data[idx]['path']).stem)

    def __len__(self):
        return len(self.meta_data)
|
def collate_fn(samples):
    # Transpose [(wav, label, name), ...] into (wavs, labels, names).
    # Returns a zip iterator; consume it only once.
    return zip(*samples)
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        DATA_ROOT = self.datarc['root']
        meta_data = self.datarc['meta_data']
        # Leave-one-session-out fold; taken from config or the CLI variant.
        self.fold = (self.datarc.get('test_fold') or kwargs.get('downstream_variant'))
        if (self.fold is None):
            self.fold = 'fold1'
        print(f'[Expert] - using the testing fold: "{self.fold}". Ps. Use -o config.downstream_expert.datarc.test_fold=fold2 to change test_fold in config.')
        train_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'train_meta_data.json')
        print(f'[Expert] - Training path: {train_path}')
        test_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'test_meta_data.json')
        print(f'[Expert] - Testing path: {test_path}')
        dataset = IEMOCAPDataset(DATA_ROOT, train_path, self.datarc['pre_load'])
        trainlen = int(((1 - self.datarc['valid_ratio']) * len(dataset)))
        lengths = [trainlen, (len(dataset) - trainlen)]
        # Fixed seed so the train/dev split is reproducible across runs.
        torch.manual_seed(0)
        (self.train_dataset, self.dev_dataset) = random_split(dataset, lengths)
        self.test_dataset = IEMOCAPDataset(DATA_ROOT, test_path, self.datarc['pre_load'])
        # NOTE(review): eval() on a config-provided model name — assumes trusted config.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=dataset.class_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        # Best dev accuracy so far.
        self.register_buffer('best_score', torch.zeros(1))

    def get_downstream_name(self):
        return self.fold.replace('fold', 'emotion')

    def _get_train_dataloader(self, dataset):
        # Distributed runs shard via the sampler, which then owns shuffling.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)

    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode):
        # Dispatch to get_{train,dev,test}_dataloader by name.
        return eval(f'self.get_{mode}_dataloader')()

    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Cross-entropy over utterance-level predictions; logs acc/loss/predictions."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['loss'].append(loss.item())
        records['filename'] += filenames
        records['predict'] += [self.test_dataset.idx2emotion[idx] for idx in predicted_classid.cpu().tolist()]
        records['truth'] += [self.test_dataset.idx2emotion[idx] for idx in labels.cpu().tolist()]
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log averaged acc/loss; save a checkpoint on a new dev-accuracy best; dump predictions."""
        save_names = []
        for key in ['acc', 'loss']:
            values = records[key]
            average = torch.FloatTensor(values).mean().item()
            logger.add_scalar(f'emotion-{self.fold}/{mode}-{key}', average, global_step=global_step)
            with open((Path(self.expdir) / 'log.log'), 'a') as f:
                # Only accuracy is printed/logged to file and drives best-checkpointing.
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    if ((mode == 'dev') and (average > self.best_score)):
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        if (mode in ['dev', 'test']):
            # Dump "filename emotion" lines for predictions and references.
            with open((Path(self.expdir) / f'{mode}_{self.fold}_predict.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['predict'])]
                file.writelines(line)
            with open((Path(self.expdir) / f'{mode}_{self.fold}_truth.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['truth'])]
                file.writelines(line)
        return save_names
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf

    Collapses a (N, T, H) sequence to (N, H) via learned per-frame weights.
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # One scalar attention logit per frame.
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask=None):
        """
        N: batch size, T: sequence length, H: Hidden dimension

        batch_rep: (N, T, H); att_mask: optional (N, T) additive logit mask.
        Returns the attention-weighted sum over T, shape (N, H).
        """
        scores = self.W(batch_rep).squeeze(-1)
        if att_mask is not None:
            scores = scores + att_mask
        weights = self.softmax(scores, dim=-1).unsqueeze(-1)
        pooled = (batch_rep * weights).sum(dim=1)
        return pooled
|
class CNNSelfAttention(nn.Module):
    """1-D CNN front-end followed by self-attention pooling and an MLP classifier."""

    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(CNNSelfAttention, self).__init__()
        # Average-pool downsampling, then three conv blocks.
        # NOTE(review): AvgPool1d(kernel_size, pooling, padding) passes
        # `pooling` as the stride argument — confirm that is the intended
        # meaning of the `pooling` hyperparameter.
        self.model_seq = nn.Sequential(nn.AvgPool1d(kernel_size, pooling, padding), nn.Dropout(p=dropout), nn.Conv1d(input_dim, hidden_dim, kernel_size, padding=padding), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(hidden_dim, hidden_dim, kernel_size, padding=padding), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(hidden_dim, hidden_dim, kernel_size, padding=padding))
        self.pooling = SelfAttentionPooling(hidden_dim)
        self.out_layer = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, output_class_num))

    def forward(self, features, att_mask):
        # (N, T, C) -> (N, C, T) for Conv1d, back to (N, T', C) for pooling.
        features = features.transpose(1, 2)
        features = self.model_seq(features)
        out = features.transpose(1, 2)
        # NOTE(review): att_mask has the pre-downsampling length while `out`
        # is shorter after the AvgPool stride — verify mask alignment.
        out = self.pooling(out, att_mask).squeeze((- 1))
        predicted = self.out_layer(out)
        return predicted
|
class FCN(nn.Module):
    """AlexNet-style 1-D fully-convolutional front-end with self-attention pooling."""

    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(FCN, self).__init__()
        # hidden_dim/kernel_size/padding/pooling are accepted for interface
        # parity with the other models; the channel sizes below are fixed.
        self.model_seq = nn.Sequential(nn.Conv1d(input_dim, 96, 11, stride=4, padding=5), nn.LocalResponseNorm(96), nn.ReLU(), nn.MaxPool1d(3, 2), nn.Dropout(p=dropout), nn.Conv1d(96, 256, 5, padding=2), nn.LocalResponseNorm(256), nn.ReLU(), nn.MaxPool1d(3, 2), nn.Dropout(p=dropout), nn.Conv1d(256, 384, 3, padding=1), nn.LocalResponseNorm(384), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(384, 384, 3, padding=1), nn.LocalResponseNorm(384), nn.ReLU(), nn.Conv1d(384, 256, 3, padding=1), nn.LocalResponseNorm(256), nn.MaxPool1d(3, 2))
        self.pooling = SelfAttentionPooling(256)
        self.out_layer = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, output_class_num))

    def forward(self, features, att_mask):
        # (N, T, C) -> (N, C, T) for Conv1d.
        features = features.transpose(1, 2)
        features = self.model_seq(features)
        out = features.transpose(1, 2)
        # att_mask is accepted for interface parity but not passed to pooling.
        out = self.pooling(out).squeeze((- 1))
        predicted = self.out_layer(out)
        return predicted
|
class DeepNet(nn.Module):
    """Deep narrow 1-D CNN with BatchNorm and self-attention pooling."""

    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(DeepNet, self).__init__()
        # hidden_dim/kernel_size/padding/pooling are accepted for interface
        # parity with the other models; the layer sizes below are fixed.
        self.model_seq = nn.Sequential(nn.Conv1d(input_dim, 10, 9), nn.ReLU(), nn.Conv1d(10, 10, 5), nn.ReLU(), nn.Conv1d(10, 10, 3), nn.MaxPool1d(3, 1), nn.BatchNorm1d(10, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(10, 40, 3), nn.ReLU(), nn.Conv1d(40, 40, 3), nn.MaxPool1d(2, 1), nn.BatchNorm1d(40, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(40, 80, 10), nn.ReLU(), nn.Conv1d(80, 80, 1), nn.MaxPool1d(2, 1), nn.BatchNorm1d(80, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(80, 80, 1))
        self.pooling = SelfAttentionPooling(80)
        self.out_layer = nn.Sequential(nn.Linear(80, 30), nn.ReLU(), nn.Linear(30, output_class_num))

    def forward(self, features, att_mask):
        # (N, T, C) -> (N, C, T) for Conv1d.
        features = features.transpose(1, 2)
        features = self.model_seq(features)
        out = features.transpose(1, 2)
        # att_mask is accepted for interface parity but not passed to pooling.
        out = self.pooling(out).squeeze((- 1))
        predicted = self.out_layer(out)
        return predicted
|
# NOTE(review): the following lines are dataset-viewer boilerplate that was
# accidentally captured with the source; commented out so they cannot be
# mistaken for code.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.