code stringlengths 17 6.64M |
|---|
class DeepModel(nn.Module):
    """Wrap a dynamically selected downstream model behind a common interface.

    NOTE(review): ``model_type`` is resolved with ``eval`` — safe only when the
    config is trusted; a registry lookup would be more robust.
    """

    def __init__(self, input_dim, output_dim, model_type, pooling, **kwargs):
        super().__init__()
        self.pooling = pooling
        model_cls = eval(model_type)
        self.model = model_cls(
            input_dim=input_dim,
            output_class_num=output_dim,
            pooling=pooling,
            **kwargs,
        )

    def forward(self, features, features_len):
        """Run the wrapped model with an additive attention mask.

        Valid (unpadded) positions get 0, padded positions get -100000 so they
        vanish after a softmax inside the wrapped model.
        """
        masks = [torch.ones(math.ceil(length / self.pooling)) for length in features_len]
        padded = pad_sequence(masks, batch_first=True)
        additive_mask = (1.0 - padded) * -100000.0
        additive_mask = additive_mask.to(features.device)
        return self.model(features, additive_mask), None
|
class SeparationDataset(Dataset):
    """LibriMix source-separation dataset yielding waveforms and STFT features.

    Args:
        data_dir (str): prepared data directory; each condition has a
            sub-directory containing a Kaldi-style ``wav.scp``.
        rate (int): audio sample rate.
        src, tgt (list(str) or None): the input and desired output conditions.
            LibriMix offers several options; for clean source separation use
            src=['mix_clean'] and tgt=['s1', 's2'].
            See https://github.com/JorisCos/LibriMix for details.
            None selects the defaults above.
        n_fft (int): length of the windowed signal after zero-padding.
        hop_length (int): number of audio samples between adjacent STFT columns.
        win_length (int): length of the window for each frame.
        window (str): window function type; only the Hann window is supported.
        center (bool): whether to pad the input on both sides so that the
            t-th frame is centered at time t * hop_length.

    The STFT-related parameters follow librosa's conventions.
    """

    def __init__(self, data_dir, rate=16000, src=None, tgt=None, n_fft=512,
                 hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        # Use None sentinels instead of mutable list defaults (the previous
        # defaults were shared list objects); effective values are unchanged.
        if src is None:
            src = ['mix_clean']
        if tgt is None:
            tgt = ['s1', 's2']
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        # NOTE(review): this assert rejects the documented default
        # tgt=['s1', 's2'] (two targets) — confirm whether multi-target
        # separation is meant to be supported here.
        assert len(self.src) == 1 and len(self.tgt) == 1
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single', 'noisy', 'clean']
        # Map utterance name -> {condition: wav path} from each wav.scp file.
        reco2path = {}
        for cond in src + tgt:
            assert cond in cond_list
            scp = '{}/{}/wav.scp'.format(self.data_dir, cond)
            assert os.path.exists(scp)
            with open(scp, 'r') as fh:
                for line in fh.readlines():
                    uttname, path = line.strip('\n').split()
                    if uttname not in reco2path:
                        reco2path[uttname] = {}
                    reco2path[uttname][cond] = path
        self.reco2path = reco2path
        self.recolist = list(self.reco2path.keys())
        self.recolist.sort()

    def __len__(self):
        return len(self.recolist)

    def __getitem__(self, i):
        """Load one utterance.

        Returns:
            reco (str): utterance name.
            src_samp (ndarray): audio samples for the source, shape [T].
            src_feat (ndarray): STFT feature map for the source, shape [T1, D].
            tgt_samp_list (list(ndarray)): audio samples for the targets.
            tgt_feat_list (list(ndarray)): STFT feature maps for the targets.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        src_samp, rate = librosa.load(src_path, sr=SAMPLE_RATE)
        assert rate == self.rate
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        tgt_samp_list, tgt_feat_list = [], []
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            tgt_samp, rate = librosa.load(tgt_path, sr=SAMPLE_RATE)
            assert rate == self.rate
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        return reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list

    def collate_fn(self, batch):
        """Pad a batch of __getitem__ outputs (sorted by descending length).

        Returns:
            source_wav_list (list(tensor)): unpadded source waveforms.
            uttname_list (list(str)): utterance names.
            source_attr (dict): padded 'magnitude'/'phase'/'stft' for the source.
            source_wav (tensor): padded waveforms, shape [bs, max_T].
            target_attr (dict): per-target padded 'magnitude'/'phase' lists.
            target_wav_list (list(tensor)): per-target padded waveforms.
            feat_length (tensor): STFT frame count for each utterance.
            wav_length (tensor): number of samples in each utterance.
        """
        sorted_batch = sorted(batch, key=lambda x: -x[1].shape[0])
        uttname_list = [sample[0] for sample in sorted_batch]
        mix_magnitude_list = [torch.from_numpy(np.abs(sample[2])) for sample in sorted_batch]
        mix_phase_list = [torch.from_numpy(np.angle(sample[2])) for sample in sorted_batch]
        mix_stft_list = [torch.from_numpy(sample[2]) for sample in sorted_batch]
        source_attr = {
            'magnitude': pad_sequence(mix_magnitude_list, batch_first=True),
            'phase': pad_sequence(mix_phase_list, batch_first=True),
            'stft': pad_sequence(mix_stft_list, batch_first=True),
        }
        target_attr = {'magnitude': [], 'phase': []}
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sample[4][j])) for sample in sorted_batch]
            tgt_phase_list = [torch.from_numpy(np.angle(sample[4][j])) for sample in sorted_batch]
            target_attr['magnitude'].append(pad_sequence(tgt_magnitude_list, batch_first=True))
            target_attr['phase'].append(pad_sequence(tgt_phase_list, batch_first=True))
        wav_length = torch.from_numpy(np.array([len(sample[1]) for sample in sorted_batch]))
        source_wav_list = [torch.from_numpy(sample[1]) for sample in sorted_batch]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sample[3][j]) for sample in sorted_batch], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for speech separation.

    Args:
        input_dim (int): feature dimension of the input.
        num_bins (int): number of frequency bins in each speaker mask.
        rnn (str): one of 'rnn', 'lstm', 'gru' (case-insensitive).
        num_spks (int): number of speakers, i.e. output heads.
        num_layers (int), hidden_size (int), dropout (float),
        bidirectional (bool): standard torch RNN options.
        non_linear (str): output activation, 'relu' | 'sigmoid' | 'tanh'.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3,
                 hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if non_linear not in ['relu', 'sigmoid', 'tanh']:
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if rnn not in ['RNN', 'LSTM', 'GRU']:
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers,
                                          batch_first=True, dropout=dropout,
                                          bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # One linear head per speaker on top of the (possibly bidirectional) RNN.
        self.linear = torch.nn.ModuleList([
            torch.nn.Linear(hidden_size * 2 if bidirectional else hidden_size, num_bins)
            for _ in range(self.num_spks)
        ])
        # torch.nn.functional.sigmoid/tanh are deprecated; torch.sigmoid and
        # torch.tanh are the supported, numerically identical replacements
        # (this also matches the other SepRNN copy in this file).
        self.non_linear = {
            'relu': torch.nn.functional.relu,
            'sigmoid': torch.sigmoid,
            'tanh': torch.tanh,
        }[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Estimate one mask per speaker.

        Args:
            x (PackedSequence): packed input features.
            train (bool): when False, masks are flattened to (-1, num_bins).

        Returns:
            list of per-speaker mask tensors.
        """
        assert isinstance(x, PackedSequence)
        x, _ = self.rnn(x)
        x, _ = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        masks = []
        for linear in self.linear:
            y = self.non_linear(linear(x))
            if not train:
                y = y.view(-1, self.num_bins)
            masks.append(y)
        return masks
|
def main():
    """Prepare Kaldi-style data files (wav.scp / utt2spk / spk2utt) for LibriMix."""
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        # Bug fix: the directory name was previously never substituted into
        # the message ('{}' was left unformatted).
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        filelist = sorted(f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # 'with' guarantees the files are closed even if a write fails.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
             open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Bug fix: str.strip('.wav') removes *any* leading/trailing
                # '.', 'w', 'a', 'v' characters and can corrupt utterance
                # names; cut off the exact '.wav' suffix instead.
                uttname = f[:-len('.wav')]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def main():
    """Prepare Kaldi-style data files for the VoiceBank-DEMAND (28spk) corpus."""
    output_dir = '{}/wav{}/{}'.format(args.tgt_dir, args.sample_rate, args.part)
    if os.path.exists(output_dir):
        # Bug fix: the directory name was previously never substituted into
        # the message ('{}' was left unformatted).
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    if args.part in ('train', 'dev'):
        dset = 'trainset_28spk_wav_16k'
    elif args.part == 'test':
        dset = 'testset_wav_16k'
    else:
        # Bug fix: an unknown part previously fell through and crashed later
        # with a NameError on 'dset'; fail early with a clear message.
        raise ValueError('Unknown part: {}'.format(args.part))
    for cond in ['clean', 'noisy']:
        wav_dir = '{}/{}_{}'.format(args.src_dir, cond, dset)
        filelist = sorted(f for f in os.listdir(wav_dir) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # 'with' guarantees the files are closed even if a write fails.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
             open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Bug fix: str.strip('.wav') removes *any* leading/trailing
                # '.', 'w', 'a', 'v' characters; cut the exact suffix instead.
                uttname = f[:-len('.wav')]
                # Speakers p226/p287 are excluded from the train/dev splits.
                if uttname.startswith(('p226', 'p287')) and args.part in ('train', 'dev'):
                    continue
                wav_scp_file.write('{} {}/{}\n'.format(uttname, wav_dir, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
class SeparationDataset(Dataset):
    """Speech-enhancement dataset (noisy -> clean) yielding waveforms and STFTs.

    Args:
        data_dir (str): prepared data directory; each condition has a
            sub-directory containing a Kaldi-style ``wav.scp``.
        rate (int): audio sample rate.
        src, tgt (list(str) or None): the input and desired output conditions.
            None selects the defaults ['noisy'] and ['clean'].
        n_fft (int): length of the windowed signal after zero-padding.
        hop_length (int): number of audio samples between adjacent STFT columns.
        win_length (int): length of the window for each frame.
        window (str): window function type; only the Hann window is supported.
        center (bool): whether to pad the input on both sides so that the
            t-th frame is centered at time t * hop_length.

    The STFT-related parameters follow librosa's conventions.
    """

    def __init__(self, data_dir, rate=16000, src=None, tgt=None, n_fft=512,
                 hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        # Use None sentinels instead of mutable list defaults (the previous
        # defaults were shared list objects); effective values are unchanged.
        if src is None:
            src = ['noisy']
        if tgt is None:
            tgt = ['clean']
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        assert len(self.src) == 1 and len(self.tgt) == 1
        cond_list = ['noisy', 'clean']
        # Map utterance name -> {condition: wav path} from each wav.scp file.
        reco2path = {}
        for cond in src + tgt:
            assert cond in cond_list
            scp = '{}/{}/wav.scp'.format(self.data_dir, cond)
            assert os.path.exists(scp)
            with open(scp, 'r') as fh:
                for line in fh.readlines():
                    uttname, path = line.strip('\n').split()
                    if uttname not in reco2path:
                        reco2path[uttname] = {}
                    reco2path[uttname][cond] = path
        self.reco2path = reco2path
        self.recolist = list(self.reco2path.keys())
        self.recolist.sort()

    def __len__(self):
        return len(self.recolist)

    def __getitem__(self, i):
        """Load one utterance.

        Returns:
            reco (str): utterance name.
            src_samp (ndarray): audio samples for the source, shape [T].
            src_feat (ndarray): STFT feature map for the source, shape [T1, D].
            tgt_samp_list (list(ndarray)): audio samples for the targets.
            tgt_feat_list (list(ndarray)): STFT feature maps for the targets.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        src_samp, rate = librosa.load(src_path, sr=SAMPLE_RATE)
        assert rate == self.rate
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        tgt_samp_list, tgt_feat_list = [], []
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            tgt_samp, rate = librosa.load(tgt_path, sr=SAMPLE_RATE)
            assert rate == self.rate
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        return reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list

    def collate_fn(self, batch):
        """Pad a batch of __getitem__ outputs (sorted by descending length).

        Returns:
            source_wav_list (list(tensor)): unpadded source waveforms.
            uttname_list (list(str)): utterance names.
            source_attr (dict): padded 'magnitude'/'phase'/'stft' for the source.
            source_wav (tensor): padded waveforms, shape [bs, max_T].
            target_attr (dict): per-target padded 'magnitude'/'phase' lists.
            target_wav_list (list(tensor)): per-target padded waveforms.
            feat_length (tensor): STFT frame count for each utterance.
            wav_length (tensor): number of samples in each utterance.
        """
        sorted_batch = sorted(batch, key=lambda x: -x[1].shape[0])
        uttname_list = [sample[0] for sample in sorted_batch]
        mix_magnitude_list = [torch.from_numpy(np.abs(sample[2])) for sample in sorted_batch]
        mix_phase_list = [torch.from_numpy(np.angle(sample[2])) for sample in sorted_batch]
        mix_stft_list = [torch.from_numpy(sample[2]) for sample in sorted_batch]
        source_attr = {
            'magnitude': pad_sequence(mix_magnitude_list, batch_first=True),
            'phase': pad_sequence(mix_phase_list, batch_first=True),
            'stft': pad_sequence(mix_stft_list, batch_first=True),
        }
        target_attr = {'magnitude': [], 'phase': []}
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sample[4][j])) for sample in sorted_batch]
            tgt_phase_list = [torch.from_numpy(np.angle(sample[4][j])) for sample in sorted_batch]
            target_attr['magnitude'].append(pad_sequence(tgt_magnitude_list, batch_first=True))
            target_attr['phase'].append(pad_sequence(tgt_phase_list, batch_first=True))
        wav_length = torch.from_numpy(np.array([len(sample[1]) for sample in sorted_batch]))
        source_wav_list = [torch.from_numpy(sample[1]) for sample in sorted_batch]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sample[3][j]) for sample in sorted_batch], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for speech enhancement/separation.

    Args:
        input_dim (int): feature dimension of the input.
        num_bins (int): number of frequency bins in each speaker mask.
        rnn (str): one of 'rnn', 'lstm', 'gru' (case-insensitive).
        num_spks (int): number of speakers, i.e. output heads.
        num_layers (int), hidden_size (int), dropout (float),
        bidirectional (bool): standard torch RNN options.
        non_linear (str): output activation, 'relu' | 'sigmoid' | 'tanh' | 'none'.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3,
                 hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if non_linear not in ['relu', 'sigmoid', 'tanh', 'none']:
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if rnn not in ['RNN', 'LSTM', 'GRU']:
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers,
                                          batch_first=True, dropout=dropout,
                                          bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # One linear head per speaker on top of the (possibly bidirectional) RNN.
        self.linear = torch.nn.ModuleList([
            torch.nn.Linear(hidden_size * 2 if bidirectional else hidden_size, num_bins)
            for _ in range(self.num_spks)
        ])
        # torch.nn.functional.tanh is deprecated; torch.tanh is the supported,
        # numerically identical replacement (sigmoid was already updated).
        self.non_linear = {
            'relu': torch.nn.functional.relu,
            'sigmoid': torch.sigmoid,
            'tanh': torch.tanh,
            'none': torch.nn.Identity(),
        }[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Estimate one mask per speaker.

        Args:
            x (PackedSequence): packed input features.
            train (bool): when False, masks are flattened to (-1, num_bins).

        Returns:
            list of per-speaker mask tensors.
        """
        assert isinstance(x, PackedSequence)
        x, _ = self.rnn(x)
        x, _ = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        masks = []
        for linear in self.linear:
            y = self.non_linear(linear(x))
            if not train:
                y = y.view(-1, self.num_bins)
            masks.append(y)
        return masks
|
def main():
    """Prepare Kaldi-style data files (wav.scp / utt2spk / spk2utt) for LibriMix."""
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        # Bug fix: the directory name was previously never substituted into
        # the message ('{}' was left unformatted).
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        filelist = sorted(f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # 'with' guarantees the files are closed even if a write fails.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
             open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Bug fix: str.strip('.wav') removes *any* leading/trailing
                # '.', 'w', 'a', 'v' characters and can corrupt utterance
                # names; cut off the exact '.wav' suffix instead.
                uttname = f[:-len('.wav')]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def main():
    """Prepare Kaldi-style data files for the VoiceBank-DEMAND (28spk) corpus."""
    output_dir = '{}/wav{}/{}'.format(args.tgt_dir, args.sample_rate, args.part)
    if os.path.exists(output_dir):
        # Bug fix: the directory name was previously never substituted into
        # the message ('{}' was left unformatted).
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    if args.part in ('train', 'dev'):
        dset = 'trainset_28spk_wav_16k'
    elif args.part == 'test':
        dset = 'testset_wav_16k'
    else:
        # Bug fix: an unknown part previously fell through and crashed later
        # with a NameError on 'dset'; fail early with a clear message.
        raise ValueError('Unknown part: {}'.format(args.part))
    for cond in ['clean', 'noisy']:
        wav_dir = '{}/{}_{}'.format(args.src_dir, cond, dset)
        filelist = sorted(f for f in os.listdir(wav_dir) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # 'with' guarantees the files are closed even if a write fails.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
             open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Bug fix: str.strip('.wav') removes *any* leading/trailing
                # '.', 'w', 'a', 'v' characters; cut the exact suffix instead.
                uttname = f[:-len('.wav')]
                # Speakers p226/p287 are excluded from the train/dev splits.
                if uttname.startswith(('p226', 'p287')) and args.part in ('train', 'dev'):
                    continue
                wav_scp_file.write('{} {}/{}\n'.format(uttname, wav_dir, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        wav_scp_file.close
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
class RandomDataset(Dataset):
    """Toy dataset for the example task: random waveforms with random labels."""

    def __init__(self, **kwargs):
        # Fixed number of target classes for the example classification task.
        self.class_num = 48

    def __getitem__(self, idx):
        # Draw a random waveform length (in samples) and a random class label.
        num_samples = random.randint(EXAMPLE_WAV_MIN_SEC * SAMPLE_RATE,
                                     EXAMPLE_WAV_MAX_SEC * SAMPLE_RATE)
        return torch.randn(num_samples), random.randint(0, self.class_num - 1)

    def __len__(self):
        return EXAMPLE_DATASET_SIZE

    def collate_fn(self, samples):
        # Split (wav, label) pairs into parallel lists.
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        return wavs, labels
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        """
        Args:
            upstream_dim: int
                Different upstreams give different representation dimensions;
                features are projected to a common dimension by self.connector.

            upstream_rate: int
                160: for upstream with 10 ms per frame
                320: for upstream with 20 ms per frame

            downstream_expert: dict
                The 'downstream_expert' field specified in the downstream config
                file, eg. downstream/example/config.yaml

            expdir: string
                The expdir from the command-line argument; all results (eg.
                logging files) should be saved into this directory.

            **kwargs: dict
                All arguments specified by the argparser in run_downstream.py
                and all other fields in config.yaml, in case they are needed.
        """
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        # The example task uses the same random-dataset class for every split.
        self.train_dataset = RandomDataset(**self.datarc)
        self.dev_dataset = RandomDataset(**self.datarc)
        self.test_dataset = RandomDataset(**self.datarc)
        # Project upstream features to the model's expected input dimension.
        self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
        self.model = Model(output_class_num=self.train_dataset.class_num, **self.modelrc)
        self.objective = nn.CrossEntropyLoss()
        # Registered as a buffer so the best dev score travels with checkpoints.
        self.register_buffer('best_score', torch.zeros(1))

    def get_dataloader(self, split, epoch: int=0):
        """
        Return a DataLoader for the given split.

        Args:
            split: string
                'train': always called before the training loop.
                'dev'/'test' (or more, per the 'eval_dataloaders' config field):
                called before the evaluation loops during training.

        Return:
            a torch.utils.data.DataLoader yielding batches of the form
            [wav1, wav2, ...], other_contents..., where each wav is a
            variable-length 1-D torch.FloatTensor on CPU at 16 kHz.
        """
        if (split == 'train'):
            return self._get_train_dataloader(self.train_dataset, epoch)
        elif (split == 'dev'):
            return self._get_eval_dataloader(self.dev_dataset)
        elif (split == 'test'):
            return self._get_eval_dataloader(self.test_dataset)

    def _get_train_dataloader(self, dataset, epoch: int):
        # DDP-aware sampler; when it is None (single process), shuffle locally.
        from s3prl.utility.data import get_ddp_sampler
        sampler = get_ddp_sampler(dataset, epoch)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def forward(self, split, features, your_other_contents1, records, **kwargs):
        """
        Compute the loss for one batch and record metrics.

        Args:
            split: string
                'train' inside the training loop; 'dev'/'test' (or more)
                inside the evaluation loops.

            features:
                list of unpadded feature tensors [feat1, feat2, ...], each a
                torch.FloatTensor already on the device assigned by the
                command-line args.

            your_other_contents1:
                batch contents in the order defined by the dataloader
                (utterance labels here); on CPU, moved to features' device below.

            records:
                defaultdict(list); appended contents are averaged and logged
                to Tensorboard later by self.log_records (called every
                `log_step` during training and once per full dev/test pass).

        Return:
            loss: a single scalar torch.FloatTensor to be optimized
                (must not be detached).
        """
        features = pad_sequence(features, batch_first=True)
        features = self.connector(features)
        predicted = self.model(features)
        utterance_labels = your_other_contents1
        labels = torch.LongTensor(utterance_labels).to(features.device)
        loss = self.objective(predicted, labels)
        # argmax over the class dimension for accuracy bookkeeping.
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['loss'].append(loss.item())
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        return loss

    def log_records(self, split, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """
        Log averaged record values to Tensorboard and pick checkpoint names.

        Args:
            split: 'train' (records cover `log_step` batches) or
                'dev'/'test'/... (records cover the whole evaluation set).
            records: defaultdict(list), contents prepared by self.forward.
            logger: Tensorboard SummaryWriter; keys are namespaced as
                f'example/{split}-{key}' to avoid clashes with other tasks.
            global_step: global training step, for Tensorboard logging.
            batch_ids: the batches contained in records.
            total_batch_num: total number of batches in the dataloader.

        Return:
            a list of filenames to save the current model under; empty when no
            checkpoint is needed, ['dev-best.ckpt'] when dev accuracy improves.
        """
        save_names = []
        for (key, values) in records.items():
            average = torch.FloatTensor(values).mean().item()
            logger.add_scalar(f'example/{split}-{key}', average, global_step=global_step)
            if ((split == 'dev') and (key == 'acc') and (average > self.best_score)):
                # Re-assigning the attribute updates the registered buffer.
                self.best_score = (torch.ones(1) * average)
                save_names.append(f'{split}-best.ckpt')
        return save_names
|
class Model(nn.Module):
    """Mean-pool frame features over time, then classify with a single linear layer."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        # features: (batch, time, dim) -> temporal average -> class logits.
        utterance_vec = features.mean(dim=1)
        return self.linear(utterance_vec)
|
class FluentCommandsDataset(Dataset):
    """Fluent Speech Commands dataset: waveform + (action, object, location) labels."""

    def __init__(self, df, base_path, Sy_intent):
        self.df = df
        self.base_path = base_path
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        # Sy_intent maps each slot name to a {value: integer id} dictionary.
        self.Sy_intent = Sy_intent

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, row.path)
        wav, sr = torchaudio.load(wav_path)
        wav = wav.squeeze(0)
        # Encode the three intent slots as integer ids.
        label = [self.Sy_intent[slot][row[slot]] for slot in ['action', 'object', 'location']]
        return wav.numpy(), np.array(label), Path(wav_path).stem

    def collate_fn(self, samples):
        # Transpose a list of (wav, label, name) triples into parallel tuples.
        return zip(*samples)
|
def get_downstream_model(input_dim, output_dim, config):
    """Instantiate the downstream model class named by config['select'].

    Extra keyword arguments are taken from config[config['select']] when present.

    NOTE(review): the class name is resolved via ``eval`` — safe only for
    trusted configuration files.
    """
    selected = config['select']
    model_cls = eval(selected)
    conf = config.get(selected, {})
    return model_cls(input_dim, output_dim, **conf)
|
class FrameLevel(nn.Module):
    """Frame-wise head: optional hidden MLP followed by a linear output layer."""

    def __init__(self, input_dim, output_dim, hiddens=None, activation='ReLU', **kwargs):
        super().__init__()
        act_cls = getattr(nn, activation)
        layers = []
        prev_dim = input_dim
        for dim in (hiddens or []):
            layers.append(nn.Linear(prev_dim, dim))
            layers.append(act_cls())
            prev_dim = dim
        self.hiddens = nn.Sequential(*layers)
        self.linear = nn.Linear(prev_dim, output_dim)

    def forward(self, hidden_state, features_len=None):
        # Lengths pass through untouched; no pooling happens at frame level.
        hidden_state = self.hiddens(hidden_state)
        return self.linear(hidden_state), features_len
|
class UtteranceLevel(nn.Module):
    """Utterance-level head: optional pre-net, pooling over time, then a post-net."""

    def __init__(self, input_dim, output_dim, pooling='MeanPooling', activation='ReLU',
                 pre_net=None, post_net={'select': 'FrameLevel'}, **kwargs):
        super().__init__()
        latest_dim = input_dim
        # Optional frame-level pre-processing network (same input/output dim).
        if isinstance(pre_net, dict):
            self.pre_net = get_downstream_model(latest_dim, latest_dim, pre_net)
        else:
            self.pre_net = None
        # NOTE(review): the pooling class is resolved via eval; trusted-config only.
        self.pooling = eval(pooling)(input_dim=latest_dim, activation=activation)
        self.post_net = get_downstream_model(latest_dim, output_dim, post_net)

    def forward(self, hidden_state, features_len=None):
        if self.pre_net is not None:
            hidden_state, features_len = self.pre_net(hidden_state, features_len)
        pooled, features_len = self.pooling(hidden_state, features_len)
        return self.post_net(pooled, features_len)
|
class MeanPooling(nn.Module):
    """Average the valid (unpadded) frames of each utterance into one vector."""

    def __init__(self, **kwargs):
        super(MeanPooling, self).__init__()

    def forward(self, feature_BxTxH, features_len, **kwargs):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            features_len  - [B] valid length of each sequence
        """
        pooled = [feats[:valid_len].mean(dim=0)
                  for feats, valid_len in zip(feature_BxTxH, features_len)]
        # Each utterance is reduced to a single vector, so the new length is 1.
        return torch.stack(pooled), torch.ones(len(feature_BxTxH)).long()
|
class AttentivePooling(nn.Module):
    """Attentive pooling that builds a per-utterance valid-length mask."""

    def __init__(self, input_dim, activation, **kwargs):
        super(AttentivePooling, self).__init__()
        self.sap_layer = AttentivePoolingModule(input_dim, activation)

    def forward(self, feature_BxTxH, features_len):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            features_len  - [B] valid length of each sequence
        """
        device = feature_BxTxH.device
        positions = torch.arange(features_len.max()).unsqueeze(0).to(device)
        # Boolean mask: True where the frame index is within the valid length.
        # NOTE(review): the module *adds* this mask to the logits, giving
        # +1 for valid frames and +0 for padding rather than a large negative
        # bias on padded frames — confirm this is the intended masking scheme.
        len_masks = torch.lt(positions, features_len.unsqueeze(1))
        sap_vec, _ = self.sap_layer(feature_BxTxH, len_masks)
        return sap_vec, torch.ones(len(feature_BxTxH)).long()
|
class AttentivePoolingModule(nn.Module):
    """
    Implementation of Attentive Pooling
    """

    def __init__(self, input_dim, activation='ReLU', **kwargs):
        super(AttentivePoolingModule, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = getattr(nn, activation)()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H); B batch size, T sequence length, H hidden dim
            att_mask  : additive mask applied to the attention logits

        attention_weight:
            att_w : size (B, T, 1)

        return:
            utter_rep: size (B, H)
        """
        projected = self.act_fn(self.W_a(batch_rep))
        att_logits = self.W(projected).squeeze(-1)
        att_w = self.softmax(att_mask + att_logits, dim=-1).unsqueeze(-1)
        utter_rep = (batch_rep * att_w).sum(dim=1)
        return utter_rep, att_w
|
class VCC18SegmentalDataset(Dataset):
    """VCC2018 MOS dataset that splits each waveform into fixed-length segments.

    Each item is one converted-speech utterance together with its mean score,
    per-judge MOS and an integer judge id taken from self.idtable.
    """

    def __init__(self, dataframe, base_path, idtable='', valid=False):
        # dataframe columns used below: wav name, mean score, MOS, 'JUDGE'.
        self.base_path = Path(base_path)
        self.dataframe = dataframe
        # Segment duration in seconds, passed to unfold_segments().
        self.segments_durations = 1
        # idtable maps judge identifiers to dense integer ids. Load it when the
        # file already exists; otherwise generate it (training split only).
        # NOTE(review): Path.is_file(idtable) is called unbound and assumes
        # idtable is a Path — the default '' (str) would fail here; confirm
        # callers always pass a Path.
        if Path.is_file(idtable):
            self.idtable = torch.load(idtable)
            for (i, judge_i) in enumerate(self.dataframe['JUDGE']):
                # NOTE(review): chained indexing df['JUDGE'][i] can trigger
                # pandas SettingWithCopyWarning; .loc[i, 'JUDGE'] is the safe form.
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
        elif (not valid):
            self.gen_idtable(idtable)

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        # Row layout: (wav name, mean score, MOS, judge id).
        (wav_name, mean, mos, judge_id) = self.dataframe.loc[idx]
        wav_path = ((self.base_path / 'Converted_speech_of_submitted_systems') / wav_name)
        # Normalize to mono 16 kHz with peak normalization via sox effects.
        (wav, _) = apply_effects_file(str(wav_path), [['channels', '1'], ['rate', '16000'], ['norm']])
        wav = wav.view((- 1))
        wav_segments = unfold_segments(wav, self.segments_durations)
        # System name: first 3 chars of the file name plus chars [-8:-4]
        # (the part just before the '.wav' suffix).
        system_name = (wav_name[:3] + wav_name[(- 8):(- 4)])
        return (wav_segments, mean, system_name, mos, judge_id)

    def collate_fn(self, samples):
        """Flatten per-utterance segment lists into one batch tensor.

        Returns (stacked segments, prefix sums of segment counts, mean scores,
        system names, MOS scores, per-segment judge ids). prefix_sums lets the
        caller recover which segments belong to which utterance.
        """
        (wavs_segments, means, system_names, moss, judge_ids) = zip(*samples)
        flattened_wavs_segments = [wav_segment for wav_segments in wavs_segments for wav_segment in wav_segments]
        wav_segments_lengths = [len(wav_segments) for wav_segments in wavs_segments]
        prefix_sums = list(accumulate(wav_segments_lengths, initial=0))
        # Repeat each utterance's judge id once per segment of that utterance.
        segment_judge_ids = []
        for i in range((len(prefix_sums) - 1)):
            segment_judge_ids.extend(([judge_ids[i]] * (prefix_sums[(i + 1)] - prefix_sums[i])))
        return (torch.stack(flattened_wavs_segments), prefix_sums, torch.FloatTensor(means), system_names, torch.FloatTensor(moss), torch.LongTensor(segment_judge_ids))

    def gen_idtable(self, idtable_path):
        """Build the judge -> integer id table and persist it with torch.save."""
        if (idtable_path == ''):
            idtable_path = './idtable.pkl'
        self.idtable = {}
        count = 0
        for (i, judge_i) in enumerate(self.dataframe['JUDGE']):
            if (judge_i not in self.idtable.keys()):
                self.idtable[judge_i] = count
                count += 1
                # NOTE(review): chained indexing as above; both branches
                # perform the same assignment once the id is known.
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
            else:
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
        torch.save(self.idtable, idtable_path)
|
class VCC16SegmentalDataset(Dataset):
    """VCC2016 evaluation dataset: segments each converted waveform for MOS prediction."""

    def __init__(self, wav_list, base_path):
        self.wav_dir = Path(base_path)
        self.wav_list = wav_list
        # Segment duration in seconds, passed to unfold_segments().
        self.segments_durations = 1

    def __len__(self):
        return len(self.wav_list)

    def __getitem__(self, idx):
        wav_name = self.wav_list[idx]
        full_path = self.wav_dir / wav_name
        # Normalize to mono 16 kHz with peak normalization via sox effects.
        wav, _ = apply_effects_file(str(full_path), [['channels', '1'], ['rate', '16000'], ['norm']])
        segments = unfold_segments(wav.view(-1), self.segments_durations)
        # The system name is the file-name prefix before the first '_'.
        return segments, wav_name.name.split('_')[0]

    def collate_fn(self, samples):
        """Flatten per-utterance segment lists into one batch tensor.

        Keeps the tuple layout of the VCC2018 collate_fn; the score slots that
        do not exist for VCC2016 are filled with None.
        """
        wavs_segments, system_names = zip(*samples)
        flattened = [segment for segments in wavs_segments for segment in segments]
        segment_counts = [len(segments) for segments in wavs_segments]
        prefix_sums = list(accumulate(segment_counts, initial=0))
        return torch.stack(flattened), prefix_sums, None, system_names, None, None
|
def unfold_segments(tensor, tgt_duration, sample_rate=16000):
    """Split a 1-D waveform into windows of tgt_duration seconds with 50%
    overlap, zero-padding the tail so every sample is covered.

    Returns a tuple of 1-D tensors, each of length tgt_duration*sample_rate.
    """
    win = int(tgt_duration * sample_rate)
    hop = win // 2
    n_src = len(tensor)
    # Pad up to one full window, or to the next hop boundary past the end.
    n_tgt = win if n_src <= win else (n_src // hop + 1) * hop
    padded = torch.cat([tensor, torch.zeros(n_tgt - n_src)])
    return padded.unfold(0, win, hop).unbind(0)
|
class DownstreamExpert(nn.Module):
    """MBNet-style MOS predictor trained on VCC2018 listener judgements.

    Scores are predicted per segment; utterance scores are segment averages
    and system scores are utterance averages.  Evaluation reports
    utterance/system-level MSE, LCC and SRCC on VCC2018 and VCC2016.
    """

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        # One judge-id table shared by all splits so the same listener maps
        # to the same embedding index everywhere.
        idtable = Path(kwargs['expdir']) / 'idtable.pkl'
        self.train_dataset = VCC18SegmentalDataset(
            preprocess(self.datarc['vcc2018_file_path'], 'train_judge.csv'),
            self.datarc['vcc2018_file_path'],
            idtable=idtable,
        )
        # NOTE(review): valid=False on the dev/test splits looks inverted —
        # confirm the flag's meaning against VCC18SegmentalDataset.
        self.dev_dataset = VCC18SegmentalDataset(
            preprocess(self.datarc['vcc2018_file_path'], 'valid_judge.csv'),
            self.datarc['vcc2018_file_path'],
            idtable=idtable,
            valid=False,
        )
        self.vcc2018_test_dataset = VCC18SegmentalDataset(
            preprocess(self.datarc['vcc2018_file_path'], 'test_judge.csv'),
            self.datarc['vcc2018_file_path'],
            idtable=idtable,
            valid=False,
        )
        self.vcc2018_system_mos = pd.read_csv(
            Path(self.datarc['vcc2018_file_path'], 'VCC2018_Results/system_mos_all_trackwise.csv'))
        self.vcc2016_test_dataset = VCC16SegmentalDataset(
            list(Path.iterdir(Path(self.datarc['vcc2016_file_path'], 'unified_speech'))),
            Path(self.datarc['vcc2016_file_path'], 'unified_speech'),
        )
        self.vcc2016_system_mos = pd.read_csv(
            Path(self.datarc['vcc2016_file_path'], 'system_mos.csv'), index_col=False)
        self.connector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = Model(
            input_dim=self.modelrc['projector_dim'],
            clipping=self.modelrc.get('clipping', False),
            attention_pooling=self.modelrc.get('attention_pooling', False),
            num_judges=5000,
        )
        self.objective = nn.MSELoss()
        self.segment_weight = self.modelrc['segment_weight']
        self.bias_weight = self.modelrc['bias_weight']
        # Best-so-far metrics used to decide when to save checkpoints.
        self.best_scores = {
            'dev_loss': np.inf,
            'dev_LCC': -np.inf,
            'dev_SRCC': -np.inf,
            'vcc2016_test_LCC': -np.inf,
            'vcc2016_test_SRCC': -np.inf,
        }

    def get_dataloader(self, mode):
        """Return the dataloader for 'train', 'dev', 'vcc2018_test' or 'vcc2016_test'."""
        if mode == 'train':
            return self._get_train_dataloader(self.train_dataset)
        elif mode == 'dev':
            return self._get_eval_dataloader(self.dev_dataset)
        elif mode == 'vcc2018_test':
            return self._get_eval_dataloader(self.vcc2018_test_dataset)
        elif mode == 'vcc2016_test':
            return self._get_eval_dataloader(self.vcc2016_test_dataset)

    def _get_train_dataloader(self, dataset):
        # Use a DistributedSampler only when torch.distributed is initialized.
        sampler = DistributedSampler(dataset) if is_initialized() else None
        return DataLoader(
            dataset,
            batch_size=self.datarc['train_batch_size'],
            shuffle=(sampler is None),
            sampler=sampler,
            num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn,
        )

    def _get_eval_dataloader(self, dataset):
        return DataLoader(
            dataset,
            batch_size=self.datarc['eval_batch_size'],
            shuffle=False,
            num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn,
        )

    def forward(self, mode, features, prefix_sums, means, system_names, moses, judge_ids, records, **kwargs):
        """Score a batch of flattened segments and accumulate losses/records.

        prefix_sums[i]:prefix_sums[i + 1] delimits the segments belonging to
        utterance i.  Returns the training loss, or 0 for evaluation modes.
        """
        features = torch.stack(features)
        features = self.connector(features)
        uttr_scores = []
        bias_scores = []
        if mode == 'train':
            means = means.to(features.device)
            judge_ids = judge_ids.to(features.device)
            moses = moses.to(features.device)
            # Training uses the judge-bias branch (MBNet).
            segments_scores, segments_bias_scores = self.model(features, judge_ids=judge_ids)
            segments_loss = 0
            uttr_loss = 0
            bias_loss = 0
            for i in range(len(prefix_sums) - 1):
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[i + 1]]
                current_bias_scores = segments_bias_scores[prefix_sums[i]:prefix_sums[i + 1]]
                uttr_score = current_segment_scores.mean(dim=-1)
                uttr_scores.append(uttr_score.detach().cpu())
                bias_score = current_bias_scores.mean(dim=-1)
                bias_scores.append(bias_score.detach().cpu())
                segments_loss += self.objective(current_segment_scores, means[i])
                uttr_loss += self.objective(uttr_score, means[i])
                bias_loss += self.objective(bias_score, moses[i])
            segments_loss /= len(prefix_sums) - 1
            uttr_loss /= len(prefix_sums) - 1
            bias_loss /= len(prefix_sums) - 1
            loss = self.segment_weight * segments_loss + self.bias_weight * bias_loss + uttr_loss
            records['segment loss'].append(segments_loss.item())
            records['utterance loss'].append(uttr_loss.item())
            records['bias loss'].append(bias_loss.item())
            records['total loss'].append(loss.item())
            records['pred_scores'] += uttr_scores
            records['true_scores'] += means.detach().cpu().tolist()
        if mode == 'dev' or mode == 'vcc2018_test':
            means = means.to(features.device)
            segments_scores = self.model(features)
            segments_loss = 0
            uttr_loss = 0
            for i in range(len(prefix_sums) - 1):
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[i + 1]]
                uttr_score = current_segment_scores.mean(dim=-1)
                uttr_scores.append(uttr_score.detach().cpu())
                segments_loss += self.objective(current_segment_scores, means[i])
                uttr_loss += self.objective(uttr_score, means[i])
            segments_loss /= len(prefix_sums) - 1
            uttr_loss /= len(prefix_sums) - 1
            loss = segments_loss + uttr_loss
            records['total loss'].append(loss.item())
            records['pred_scores'] += uttr_scores
            records['true_scores'] += means.detach().cpu().tolist()
        if mode == 'vcc2016_test':
            segments_scores = self.model(features)
            for i in range(len(prefix_sums) - 1):
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[i + 1]]
                uttr_score = current_segment_scores.mean(dim=-1)
                uttr_scores.append(uttr_score.detach().cpu())
        # Group utterance scores by system for the system-level metrics
        # (log_records reads records['system'][0] for every evaluation mode).
        if len(records['system']) == 0:
            records['system'].append(defaultdict(list))
        for i in range(len(system_names)):
            records['system'][0][system_names[i]].append(uttr_scores[i].tolist())
        if mode == 'train':
            return loss
        return 0

    def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """Log aggregated metrics and return the checkpoint names to save."""
        save_names = []
        if mode == 'train':
            avg_uttr_loss = torch.FloatTensor(records['utterance loss']).mean().item()
            avg_frame_loss = torch.FloatTensor(records['segment loss']).mean().item()
            avg_bias_loss = torch.FloatTensor(records['bias loss']).mean().item()
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-utterance loss', avg_uttr_loss, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-segment loss', avg_frame_loss, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-bias loss', avg_bias_loss, global_step=global_step)
        if mode == 'train' or mode == 'dev':
            avg_total_loss = torch.FloatTensor(records['total loss']).mean().item()
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-total loss', avg_total_loss, global_step=global_step)
        if mode == 'dev' or mode == 'vcc2018_test':
            # Utterance-level metrics against the mean listener score.
            all_pred_scores = np.array(records['pred_scores'])
            all_true_scores = np.array(records['true_scores'])
            MSE = np.mean((all_true_scores - all_pred_scores) ** 2)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level MSE', MSE, global_step=global_step)
            pearson_rho, _ = pearsonr(all_true_scores, all_pred_scores)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level LCC', pearson_rho, global_step=global_step)
            spearman_rho, _ = spearmanr(all_true_scores.T, all_pred_scores.T)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level SRCC', spearman_rho, global_step=global_step)
            tqdm.write(f'[{mode}] Utterance-level MSE = {MSE:.4f}')
            tqdm.write(f'[{mode}] Utterance-level LCC = {pearson_rho:.4f}')
            tqdm.write(f'[{mode}] Utterance-level SRCC = {spearman_rho:.4f}')
        if mode == 'dev' or mode == 'vcc2018_test':
            system_level_mos = self.vcc2018_system_mos
        if mode == 'vcc2016_test':
            system_level_mos = self.vcc2016_system_mos
        if mode == 'dev' or mode == 'vcc2018_test' or mode == 'vcc2016_test':
            # System-level metrics: average the utterance predictions per system.
            all_system_pred_scores = []
            all_system_true_scores = []
            for key, values in records['system'][0].items():
                all_system_pred_scores.append(np.mean(values))
                all_system_true_scores.append(system_level_mos[key].iloc[0])
            all_system_pred_scores = np.array(all_system_pred_scores)
            all_system_true_scores = np.array(all_system_true_scores)
            MSE = np.mean((all_system_true_scores - all_system_pred_scores) ** 2)
            pearson_rho, _ = pearsonr(all_system_true_scores, all_system_pred_scores)
            spearman_rho, _ = spearmanr(all_system_true_scores, all_system_pred_scores)
            tqdm.write(f'[{mode}] System-level MSE = {MSE:.4f}')
            tqdm.write(f'[{mode}] System-level LCC = {pearson_rho:.4f}')
            tqdm.write(f'[{mode}] System-level SRCC = {spearman_rho:.4f}')
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level MSE', MSE, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level LCC', pearson_rho, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level SRCC', spearman_rho, global_step=global_step)
        if mode == 'dev':
            if avg_total_loss < self.best_scores['dev_loss']:
                # BUG FIX: this used to write self.best_scores[mode] (key 'dev'),
                # so 'dev_loss' stayed at inf and a "best" checkpoint was saved
                # on every single evaluation.
                self.best_scores['dev_loss'] = avg_total_loss
                save_names.append(f'{mode}-best.ckpt')
            if pearson_rho > self.best_scores['dev_LCC']:
                self.best_scores['dev_LCC'] = pearson_rho
                save_names.append(f'{mode}-LCC-best.ckpt')
            if spearman_rho > self.best_scores['dev_SRCC']:
                self.best_scores['dev_SRCC'] = spearman_rho
                save_names.append(f'{mode}-SRCC-best.ckpt')
        if mode == 'vcc2016_test':
            if pearson_rho > self.best_scores['vcc2016_test_LCC']:
                self.best_scores['vcc2016_test_LCC'] = pearson_rho
                save_names.append(f'{mode}-LCC-best.ckpt')
            if spearman_rho > self.best_scores['vcc2016_test_SRCC']:
                self.best_scores['vcc2016_test_SRCC'] = spearman_rho
                save_names.append(f'{mode}-SRCC-best.ckpt')
        return save_names
|
def preprocess(base_path, txt_file):
    """Load a CSV metadata file located under base_path into a DataFrame."""
    return pd.read_csv(Path(base_path, txt_file), index_col=False)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # One scalar score per frame; softmax over time yields the weights.
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension

        return:
            utter_rep: size (N, H) — attention-weighted sum over time
        """
        # Explicit dim=-1: relying on softmax's implicit dimension is
        # deprecated; for the (N, T) scores here the implicit choice was
        # dim 1 == -1, so behavior is unchanged.
        att_w = nn.functional.softmax(self.W(batch_rep).squeeze(-1), dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep
|
class Model(nn.Module):
    """MBNet-style scorer: a mean net predicts a listener-independent score,
    and a bias net adds a judge-specific correction via a judge embedding."""

    def __init__(self, input_dim, clipping=False, attention_pooling=False, num_judges=5000, **kwargs):
        super(Model, self).__init__()
        self.mean_net_linear = nn.Linear(input_dim, 1)
        self.mean_net_clipping = clipping
        self.mean_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.bias_net_linear = nn.Linear(input_dim, 1)
        self.bias_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.judge_embbeding = nn.Embedding(num_embeddings=num_judges, embedding_dim=input_dim)

    def forward(self, features, judge_ids=None):
        """Return the mean score, and additionally the judge-biased score
        when judge_ids is given (training)."""
        if self.mean_net_pooling is None:
            # Score each frame, then average over time.
            frame_scores = self.mean_net_linear(features).squeeze(-1)
            segment_score = frame_scores.mean(dim=-1)
        else:
            pooled = self.mean_net_pooling(features)
            segment_score = self.mean_net_linear(pooled)
        if self.mean_net_clipping:
            # Squash into the MOS range [1, 5].
            segment_score = 2 * torch.tanh(segment_score) + 3
        if judge_ids is None:
            return segment_score.squeeze(-1)
        n_frames = features.shape[1]
        # Broadcast each judge embedding across every frame of its utterance.
        judge_emb = self.judge_embbeding(judge_ids).unsqueeze(1).expand(-1, n_frames, -1)
        biased = features + judge_emb
        if self.bias_net_pooling is None:
            bias_score = self.bias_net_linear(biased).squeeze(-1).mean(dim=-1)
        else:
            bias_score = self.bias_net_linear(self.bias_net_pooling(biased))
        bias_score = segment_score + bias_score
        return segment_score.squeeze(-1), bias_score.squeeze(-1)
|
class MOSEIDataset(Dataset):
    """CMU-MOSEI segmented-audio dataset yielding (waveform, class-id) pairs."""

    def __init__(self, split, data, path):
        self.split = split
        # data: list of (filename, label) tuples.
        self.data = data
        self.path = path

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        filename, label = self.data[idx]
        wav_path = os.path.join(self.path, 'Segmented_Audio', self.split, filename)
        wav, sr = torchaudio.load(wav_path)
        return wav.view(-1), torch.tensor(label).long()

    def collate_fn(self, samples):
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        return wavs, labels
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        """
        Args:
            upstream_dim: int
                Different upstreams give different representation dimensions;
                features are first projected to self.modelrc['input_dim'].

            downstream_expert: dict
                The 'downstream_expert' field specified in the downstream
                config file.

            expdir: str
                Experiment directory for logs and prediction dumps.
        """
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.train_data, self.dev_data, self.test_data = [], [], []
        df = pd.read_csv(self.datarc['label_path'], encoding='latin-1')
        for row in df.itertuples():
            # NOTE(review): 'row.index' is assumed to be a segment-index
            # column of the label CSV — verify it is not shadowed by the
            # namedtuple 'index' method.
            filename = row.file + '_' + str(row.index) + '.wav'
            # Shift signed sentiment labels so class ids start at 0.
            if self.datarc['num_class'] == 2:
                label = row.label2a
            elif self.datarc['num_class'] == 3:
                label = row.label2b + 1
            elif self.datarc['num_class'] == 6:
                label = row.label6
            elif self.datarc['num_class'] == 7:
                label = row.label7 + 3
            else:
                raise ValueError('Unsupported num_class')
            # split: 0 = train, 1 = dev, 2 = test.
            if row.split == 0:
                self.train_data.append((filename, label))
            elif row.split == 1:
                self.dev_data.append((filename, label))
            elif row.split == 2:
                self.test_data.append((filename, label))
        self.train_dataset = MOSEIDataset('train', self.train_data, self.datarc['data_dir'])
        self.dev_dataset = MOSEIDataset('dev', self.dev_data, self.datarc['data_dir'])
        self.test_dataset = MOSEIDataset('test', self.test_data, self.datarc['data_dir'])
        self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
        self.model = Model(output_class_num=self.datarc['num_class'], **self.modelrc)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        self.logging = os.path.join(self.expdir, 'log.log')
        # Best accuracy seen so far, keyed by tensorboard prefix.
        self.best = defaultdict(lambda: 0)
        self.answer = []

    def _get_train_dataloader(self, dataset, epoch: int):
        from s3prl.utility.data import get_ddp_sampler
        sampler = get_ddp_sampler(dataset, epoch)
        return DataLoader(
            dataset,
            batch_size=self.datarc['train_batch_size'],
            shuffle=(sampler is None),
            sampler=sampler,
            num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn,
        )

    def _get_eval_dataloader(self, dataset):
        return DataLoader(
            dataset,
            batch_size=self.datarc['eval_batch_size'],
            shuffle=False,
            num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn,
        )

    # Dataloader spec: each dataloader outputs
    # [[wav1, wav2, ...], other_contents1, ...] where each wav is a 1-D cpu
    # torch.FloatTensor at 16 kHz, loaded by torchaudio without preprocessing.

    def get_train_dataloader(self, epoch: int):
        return self._get_train_dataloader(self.train_dataset, epoch)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode, epoch: int = 0):
        # getattr dispatch instead of eval(): same lookup, no code execution
        # from a formatted string.
        loader_fn = getattr(self, f'get_{mode}_dataloader')
        if mode == 'train':
            return loader_fn(epoch)
        return loader_fn()

    def forward(self, mode, features, labels, records, **kwargs):
        """Project the padded features, classify, and record accuracy and
        predictions into `records`.

        Returns the cross-entropy loss (not detached).
        """
        features = pad_sequence(features, batch_first=True)
        features = self.connector(features)
        predicted = self.model(features)
        utterance_labels = labels
        labels = torch.LongTensor(utterance_labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=-1).indices
        records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
        records['predicted'] += predicted_classid.cpu().float().tolist()
        records['original'] += labels.cpu().float().tolist()
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log accuracy/F1, dump predictions for dev/test, and return the
        checkpoint names to save when a new best accuracy is reached."""
        prefix = f'mosei/{mode}-'
        average = torch.FloatTensor(records['acc']).mean().item()
        f1 = sklearn.metrics.f1_score(records['original'], records['predicted'], average='macro')
        logger.add_scalar(f'{prefix}acc', average, global_step=global_step)
        if mode in ['dev', 'test']:
            print(f'{prefix}acc: {average}')
        message = f'{mode} at step {global_step}: {average} (acc), {f1} (f1)\n'
        save_ckpt = []
        if average > self.best[prefix]:
            self.best[prefix] = average
            message = f'New best on {message}'
            name = prefix.split('/')[-1].split('-')[0]
            save_ckpt.append(f'{name}-best.ckpt')
        if mode in ['dev', 'test']:
            with open(Path(self.expdir) / f'{mode}_predict.txt', 'w') as file:
                line = [f'{f}\n' for f in records['predicted']]
                file.writelines(line)
        if mode in ['dev', 'test']:
            with open(Path(self.expdir) / f'{mode}_truth.txt', 'w') as file:
                line = [f'{f}\n' for f in records['original']]
                file.writelines(line)
        with open(self.logging, 'a') as f:
            f.write(message)
        return save_ckpt
|
class Model(nn.Module):
    """Mean-pool over time followed by a linear classifier."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        # (N, T, H) -> (N, H) -> (N, C)
        return self.linear(features.mean(dim=1))
|
def label2a(a):
    """Binarize a sentiment score: negative -> 0, zero or positive -> 1."""
    return 0 if a < 0 else 1
|
def label2b(a):
    """Map a sentiment score to its sign: -1 negative, 1 positive, 0 neutral."""
    if a > 0:
        return 1
    if a < 0:
        return -1
    return 0
|
def label7(a):
    """Bucket a continuous sentiment score in [-3, 3] into 7 integer classes
    (-3 .. 3): negatives use strict upper bounds, positives inclusive ones."""
    for bound, cls in ((-2, -3), (-1, -2), (0, -1)):
        if a < bound:
            return cls
    if a == 0:
        return 0
    for bound, cls in ((1, 1), (2, 2)):
        if a <= bound:
            return cls
    return 3
|
class DownstreamExpert(PhoneExpert):
    """Same pipeline as the phone linear expert, but using this file's Model."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super().__init__(upstream_dim, downstream_expert, **kwargs)
        # Swap the parent's classifier for the local Model definition.
        delattr(self, 'model')
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class Model(nn.Module):
    """One hidden layer (linear -> dropout -> ReLU) then a linear classifier."""

    def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
        super(Model, self).__init__()
        self.in_linear = nn.Linear(input_dim, hidden_size)
        self.out_linear = nn.Linear(hidden_size, output_class_num)
        self.drop = nn.Dropout(dropout)
        self.act_fn = nn.functional.relu

    def forward(self, features):
        # Same order as before: projection, dropout, then activation.
        hidden = self.act_fn(self.drop(self.in_linear(features)))
        return self.out_linear(hidden)
|
class PhoneDataset(Dataset):
    """Frame-aligned phone classification dataset over LibriSpeech.

    Utterances are pre-sorted by length and grouped into buckets of
    `bucket_size`, so each __getitem__ returns a whole batch.
    """

    def __init__(self, split, bucket_size, libri_root, phone_path, bucket_file,
                 sample_rate=16000, train_dev_seed=1337, **kwargs):
        super(PhoneDataset, self).__init__()
        self.libri_root = libri_root
        self.phone_path = phone_path
        self.sample_rate = sample_rate
        # 41 phone classes in the converted alignments.
        self.class_num = 41
        # Frame-level phone labels keyed by utterance id.
        self.Y = {}
        with open(os.path.join(phone_path, 'converted_aligned_phones.txt')) as phone_file:
            for line in phone_file:
                line = line.strip('\n').split(' ')
                self.Y[line[0]] = [int(p) for p in line[1:]]
        if split == 'train' or split == 'dev':
            with open(os.path.join(phone_path, 'train_split.txt')) as f:
                usage_list = f.readlines()
            # Deterministic 90/10 train/dev split of the training list.
            random.seed(train_dev_seed)
            random.shuffle(usage_list)
            percent = int(len(usage_list) * 0.9)
            usage_list = usage_list[:percent] if split == 'train' else usage_list[percent:]
        elif split == 'test':
            with open(os.path.join(phone_path, 'test_split.txt')) as f:
                usage_list = f.readlines()
        else:
            raise ValueError("Invalid 'split' argument for dataset: PhoneDataset!")
        usage_list = {line.strip('\n'): None for line in usage_list}
        print('[Dataset] - # phone classes: ' + str(self.class_num) +
              ', number of data for ' + split + ': ' + str(len(usage_list)))
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # NOTE(review): train-clean-100.csv is read for every split — confirm
        # dev/test utterances are also listed in this bucket file.
        table = pd.read_csv(os.path.join(bucket_file, 'train-clean-100.csv')).sort_values(by=['length'], ascending=False)
        X = table['file_path'].tolist()
        X_lens = table['length'].tolist()
        self.X = []
        batch_x, batch_len = [], []
        for x, x_len in zip(X, X_lens):
            if self._parse_x_name(x) in usage_list:
                batch_x.append(x)
                batch_len.append(x_len)
                if len(batch_x) == bucket_size:
                    # Halve buckets that contain very long utterances.
                    if bucket_size >= 2 and max(batch_len) > HALF_BATCHSIZE_TIME:
                        self.X.append(batch_x[:bucket_size // 2])
                        self.X.append(batch_x[bucket_size // 2:])
                    else:
                        self.X.append(batch_x)
                    batch_x, batch_len = [], []
        # Keep the leftover bucket (its members were already filtered above).
        # BUG FIX: the original re-tested the *last iterated* csv row against
        # usage_list here, dropping a valid leftover bucket whenever that row
        # happened to belong to another split.
        if len(batch_x) > 1:
            self.X.append(batch_x)

    def _parse_x_name(self, x):
        # 'path/to/uttid.flac' -> 'uttid'
        return x.split('/')[-1].split('.')[0]

    def _load_wav(self, wav_path):
        wav, sr = torchaudio.load(os.path.join(self.libri_root, wav_path))
        return wav.view(-1)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return a full bucket: the wavs and their frame-level labels."""
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = [torch.LongTensor(self.Y[self._parse_x_name(x_file)]) for x_file in self.X[index]]
        return wav_batch, label_batch

    def collate_fn(self, items):
        # The dataset already batches; the dataloader wraps it in one more list.
        return items[0][0], items[0][1]
|
class Model(nn.Module):
    """Frame-wise linear classifier (no pooling over time)."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        return self.linear(features)
|
class DownstreamExpert(PhoneExpert):
    """Phone linear expert pipeline, but classifying with this file's Model."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super().__init__(upstream_dim, downstream_expert, **kwargs)
        # Drop the parent's classifier and install the local one.
        delattr(self, 'model')
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class QUESST14Dataset(Dataset):
    """QUESST 2014 dataset (English-only): queries first, then documents."""

    def __init__(self, split, **kwargs):
        root = Path(kwargs['dataset_root'])
        docs = english_audio_paths(root, 'language_key_utterances.lst')
        queries = english_audio_paths(root, f'language_key_{split}.lst')
        self.dataset_root = root
        self.n_queries = len(queries)
        self.n_docs = len(docs)
        self.data = queries + docs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        path = self.data[idx]
        wav, _ = apply_effects_file(
            str(path),
            [['channels', '1'], ['rate', '16000'], ['gain', '-3.0']],
        )
        return wav.squeeze(0).numpy(), path.with_suffix('').name

    def collate_fn(self, samples):
        """Collate a mini-batch of data."""
        wavs, audio_names = zip(*samples)
        return wavs, audio_names
|
def english_audio_paths(dataset_root_path, lst_name):
    """Extract English ('nnenglish') audio paths from a scoring list file,
    re-rooting each entry under dataset_root_path."""
    paths = []
    with open(dataset_root_path / 'scoring' / lst_name) as f:
        for line in f:
            audio_path, lang = line.strip().split()
            if lang == 'nnenglish':
                # Strip the leading top-level directory component.
                paths.append(dataset_root_path / re.sub('^.*?\\/', '', audio_path))
    return paths
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = Path(expdir)
        # Query-by-example training: learn on the 'dev' pairs, validate on 'eval'.
        self.train_dataset = QUESST14Trainset('dev', **self.datarc)
        self.valid_dataset = QUESST14Trainset('eval', **self.datarc)
        # Built lazily in get_dataloader when a test mode is requested.
        self.test_dataset = None
        self.model = Model(input_dim=upstream_dim, **self.modelrc)

    def get_dataloader(self, mode):
        # Weighted sampling balances positive/negative query-document pairs.
        if (mode == 'train'):
            return DataLoader(self.train_dataset, sampler=WeightedRandomSampler(weights=self.train_dataset.sample_weights, num_samples=len(self.train_dataset.sample_weights), replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.train_dataset.collate_fn)
        if (mode == 'valid'):
            return DataLoader(self.valid_dataset, sampler=WeightedRandomSampler(weights=self.valid_dataset.sample_weights, num_samples=self.datarc['valid_size'], replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.valid_dataset.collate_fn)
        if (mode in ['dev', 'eval']):
            self.test_dataset = QUESST14Testset(mode, **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        raise NotImplementedError

    def forward(self, mode, features, infos, records, **kwargs):
        # Contrastive training on query/document pairs, or embedding
        # extraction for dev/eval scoring.
        if (mode in ['train', 'valid']):
            features = torch.stack(features)
            (prefix_sums, labels) = infos
            labels = torch.cat(labels).to(features.device)
            embs = self.model(features)
            # Batch layout: first batch_size rows are the query segments,
            # the rest are the candidate-document segments.
            query_embs = embs[:self.datarc['batch_size']]
            audio_embs = embs[self.datarc['batch_size']:]
            # assumes len(labels) == batch_size (one label per query) —
            # TODO(review): confirm against the Trainset collate_fn.
            max_similarities = torch.empty(len(labels)).to(labels.device)
            for i in range(self.datarc['batch_size']):
                similarities = F.cosine_similarity(query_embs[i:(i + 1)], audio_embs[prefix_sums[i]:prefix_sums[(i + 1)]])
                max_similarities[i] = similarities.max()
            # Hinge-like objective: pull positive pairs toward similarity 1,
            # push negative pairs to non-positive similarity.
            pos_similarities = max_similarities[(labels > 0)]
            neg_similarities = max_similarities[(labels < 0)]
            pos_loss = (1 - pos_similarities).sum()
            neg_loss = neg_similarities.clamp(0).sum()
            loss = ((pos_loss + neg_loss) / self.datarc['batch_size'])
            records['loss'].append(loss.item())
            records['similarity-positive'] += pos_similarities.tolist()
            records['similarity-negative'] += neg_similarities.tolist()
            return loss
        elif (mode in ['dev', 'eval']):
            audio_tensors = torch.stack(features)
            (prefix_sums, audio_names) = infos
            embs = self.model(audio_tensors)
            embs = embs.detach().cpu()
            # Regroup flattened segment embeddings per source audio file.
            for i in range(len(audio_names)):
                records['embs'].append(embs[prefix_sums[i]:prefix_sums[(i + 1)]])
                records['audio_names'].append(audio_names[i])
        else:
            raise NotImplementedError

    def log_records(self, mode, records, logger, global_step, **kwargs):
        'Log training, validation information or test on a dataset.'
        if (mode in ['train', 'valid']):
            prefix = f'quesst14_embedding/{mode}'
            for (key, val) in records.items():
                average = (sum(val) / len(val))
                logger.add_scalar(f'{prefix}-{key}', average, global_step=global_step)
        elif (mode in ['dev', 'eval']):
            # Test-set layout: queries first, then documents (see the Testset).
            query_embs = records['embs'][:self.test_dataset.n_queries]
            doc_embs = records['embs'][self.test_dataset.n_queries:]
            query_names = records['audio_names'][:self.test_dataset.n_queries]
            doc_names = records['audio_names'][self.test_dataset.n_queries:]
            results = {}
            for (query_emb, query_name) in zip(tqdm(query_embs, desc='Query', ncols=0), query_names):
                # Only the first segment of each query is used as its embedding.
                query_emb = query_emb[0:1].cuda()
                scores = []
                for (doc_emb, doc_name) in zip(tqdm(doc_embs, desc='Doc', ncols=0, leave=False), doc_names):
                    with torch.no_grad():
                        doc_emb = doc_emb.cuda()
                        similarities = F.cosine_similarity(query_emb, doc_emb)
                        # A document's score is its best-matching segment.
                        score = similarities.max().detach().cpu()
                        scores.append(score)
                scores = torch.stack(scores)
                # Per-query normalisation; near-constant score vectors are zeroed.
                if (scores.std() < 0.1):
                    scores = torch.zeros_like(scores)
                else:
                    scores = (((scores - scores.mean()) / (scores.std() + 1e-06)) + 0.5)
                results[query_name] = list(zip(doc_names, scores.tolist()))
            # Emit results in the NIST STD stdlist XML format for the
            # official QUESST scoring scripts.
            score_thresh = 0.5
            root = etree.Element('stdlist', termlist_filename='benchmark.stdlist.xml', indexing_time='1.00', language='english', index_size='1', system_id='benchmark')
            for (query_name, doc_scores) in results.items():
                term_list = etree.SubElement(root, 'detected_termlist', termid=query_name, term_search_time='1.0', oov_term_count='1')
                for (doc_name, score) in doc_scores:
                    etree.SubElement(term_list, 'term', file=doc_name, channel='1', tbeg='0.000', dur='0.00', score=f'{score:.4f}', decision=('YES' if (score > score_thresh) else 'NO'))
            etree.ElementTree(root).write(str((self.expdir / 'benchmark.stdlist.xml')), encoding='UTF-8', pretty_print=True)
        else:
            raise NotImplementedError
|
class Model(nn.Module):
    """Bottleneck -> LSTM -> tanh -> attention-weighted sum embedding."""

    def __init__(self, input_dim, bottleneck_dim, hidden_dim, num_layers, **kwargs):
        super(Model, self).__init__()
        self.connector = nn.Linear(input_dim, bottleneck_dim)
        self.rnn = nn.LSTM(
            input_size=bottleneck_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
        )
        self.attention_linear = nn.Linear(hidden_dim, 1)

    def forward(self, features):
        bottleneck = F.relu(self.connector(features))
        rnn_out, _ = self.rnn(bottleneck)
        activated = torch.tanh(rnn_out)
        # Softmax over time gives per-frame attention weights.
        weights = F.softmax(self.attention_linear(activated), dim=1)
        return torch.sum(activated * weights, dim=1)
|
class QUESST14Testset(Dataset):
    """QUESST 2014 testing dataset (English-only); audio is served as 3-second
    windows with a 0.75-second hop (48000/12000 samples at 16 kHz)."""

    def __init__(self, split, **kwargs):
        assert split in ['dev', 'eval']
        root = Path(kwargs['quesst2014_root'])
        docs = get_audio_paths(root, 'language_key_utterances.lst')
        queries = get_audio_paths(root, f'language_key_{split}.lst')
        self.dataset_root = root
        self.n_queries = len(queries)
        self.n_docs = len(docs)
        # Queries first, then documents.
        self.data = queries + docs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        path = self.data[idx]
        wav, _ = apply_effects_file(
            str(path),
            [['channels', '1'], ['rate', '16000'], ['norm']],
        )
        wav = wav.squeeze(0)
        # Zero-pad so unfolding covers the whole signal.
        n = len(wav)
        total = 48000 if n <= 48000 else (n // 12000 + 1) * 12000
        wav = torch.cat([wav, torch.zeros(total - n)])
        segments = wav.unfold(0, 48000, 12000).unbind(0)
        return segments, path.with_suffix('').name

    def collate_fn(self, samples):
        """Collate a mini-batch of data."""
        segment_groups, audio_names = zip(*samples)
        counts = [len(group) for group in segment_groups]
        boundaries = list(accumulate(counts, initial=0))
        flat = [seg for group in segment_groups for seg in group]
        return flat, (boundaries, audio_names)
|
def get_audio_paths(dataset_root_path, lst_name):
    """Collect paths of the English ('nnenglish') audios listed in *lst_name*."""
    lst_file = dataset_root_path / 'scoring' / lst_name
    results = []
    with open(lst_file) as handle:
        for raw in handle:
            path_str, language = raw.strip().split()
            if language == 'nnenglish':
                # Drop the leading directory component recorded in the list file.
                relative = re.sub('^.*?\\/', '', path_str)
                results.append(dataset_root_path / relative)
    return results
|
class QUESST14Trainset(Dataset):
    'QUESST 2014 training dataset.'

    def __init__(self, split, **kwargs):
        # kwargs must provide `quesst2014_root`, the dataset root directory.
        dataset_root = Path(kwargs['quesst2014_root'])
        scoring_root = (dataset_root / 'scoring')
        split_root = (scoring_root / f'groundtruth_quesst14_{split}')
        # query name -> list of audio names that contain the query (ground truth)
        query2positives = parse_rttm((split_root / f'quesst14_{split}.rttm'))
        audio_names = parse_lst((scoring_root / 'language_key_utterances.lst'))
        query_names = parse_lst((scoring_root / f'language_key_{split}.lst'))
        print(f'[QUESST2014] # of audios: {len(audio_names)}')
        print(f'[QUESST2014] # of queries: {len(query_names)}')
        audio_set = set(audio_names)
        # Negatives for a query: every known audio that is not a ground-truth match.
        query2negatives = {query_name: list((audio_set - set((query2positives[query_name] if (query_name in query2positives) else [])))) for query_name in query_names}
        positive_pairs = [(query_name, audio_name) for query_name in query_names for audio_name in (set(query2positives[query_name]) & audio_set)]
        # One negative entry per query; the actual audio is sampled in __getitem__.
        negative_pairs = [(query_name, list(negative_audio_set)) for (query_name, negative_audio_set) in query2negatives.items()]
        print(f'[QUESST2014] # of positive pairs: {len(positive_pairs)}')
        print(f'[QUESST2014] # of negative pairs: {len(negative_pairs)}')
        self.audio_root = (dataset_root / 'Audio')
        self.query_root = (dataset_root / f'{split}_queries')
        # Maximum segment duration in seconds used for cropping/unfolding.
        self.max_dur = 3.0
        self.positive_pairs = positive_pairs
        self.negative_pairs = negative_pairs

    def __len__(self):
        # Positive pairs are indexed first, then one entry per query's negatives.
        return (len(self.positive_pairs) + len(self.negative_pairs))

    def __getitem__(self, idx):
        if (idx < len(self.positive_pairs)):
            (query_name, audio_name) = self.positive_pairs[idx]
        else:
            # For a negative entry, randomly pick one of the query's negative audios.
            (query_name, audio_names) = self.negative_pairs[(idx - len(self.positive_pairs))]
            audio_name = random.sample(audio_names, 1)[0]
        query_path = (self.query_root / query_name).with_suffix('.wav')
        audio_path = (self.audio_root / audio_name).with_suffix('.wav')
        query_tensor = path2tensor(query_path)
        audio_tensor = path2tensor(audio_path)
        query_segment = crop_segment(query_tensor, self.max_dur)
        audio_segments = unfold_segments(audio_tensor, self.max_dur)
        # +1 for a matching pair, -1 otherwise — presumably the
        # CosineEmbeddingLoss convention; confirm with the downstream objective.
        label = torch.LongTensor([(1 if (idx < len(self.positive_pairs)) else (- 1))])
        return (query_segment, audio_segments, label)

    def collate_fn(self, samples):
        'Collate a mini-batch of data.'
        (query_segments, segments_list, labels) = zip(*samples)
        flattened = [segment for segments in segments_list for segment in segments]
        lengths = [len(segments) for segments in segments_list]
        # Prefix sums delimit each utterance's segments within `flattened`.
        prefix_sums = list(accumulate(lengths, initial=0))
        # Query segments come first in the batch, then all document segments.
        return ((list(query_segments) + flattened), (prefix_sums, labels))

    @property
    def sample_weights(self):
        'Sample weights to balance positive and negative data.'
        n_pos = len(self.positive_pairs)
        n_neg = len(self.negative_pairs)
        return (([(1 / n_pos)] * n_pos) + ([(1 / n_neg)] * n_neg))
|
def parse_rttm(rttm_path):
    """Map each query name to the audio names it occurs in, from a *.rttm file."""
    lexeme_re = re.compile('LEXEME\\s+(quesst14_[0-9]+).*?(quesst14_(dev|eval)_[0-9]+)')
    query2audios = defaultdict(list)
    with open(rttm_path) as handle:
        for raw in handle:
            hit = lexeme_re.match(raw)
            # Non-LEXEME lines (and non-matching lexemes) are ignored.
            if hit is not None:
                query2audios[hit.group(2)].append(hit.group(1))
    return query2audios
|
def parse_lst(lst_path):
    """Extract the suffix-less names of all 'nnenglish' audios in *lst_path*."""
    names = []
    with open(lst_path) as handle:
        for raw in handle:
            path_str, language = raw.strip().split()
            if language == 'nnenglish':
                names.append(Path(path_str).with_suffix('').name)
    return names
|
def path2tensor(filepath):
    # Load an audio file as a mono, 16 kHz, peak-normalized 1-D tensor
    # (via torchaudio sox effects).
    (tensor, _) = apply_effects_file(str(filepath), [['channels', '1'], ['rate', '16000'], ['norm']])
    return tensor.squeeze(0)
|
def crop_segment(tensor, tgt_dur, sample_rate=16000):
    # Randomly crop a `tgt_dur`-second window out of a 1-D waveform tensor.
    # The waveform is first padded with `tgt_dur` seconds of silence on both
    # sides (sox `pad`), then trimmed starting at `tgt_dur + random_shift`
    # for `tgt_dur` seconds, so the crop can reach either edge of the audio.
    src_dur = (len(tensor) / sample_rate)
    # NOTE(review): when src_dur < tgt_dur the shift is negative — presumably
    # tolerated thanks to the front padding; confirm with callers.
    random_shift = random.uniform(0, (src_dur - tgt_dur))
    (audio_tensor, _) = apply_effects_tensor(tensor.unsqueeze(0), sample_rate, [['pad', f'{tgt_dur}', f'{tgt_dur}'], ['trim', f'{(tgt_dur + random_shift)}', f'{tgt_dur}']])
    return audio_tensor.squeeze(0)
|
def unfold_segments(tensor, tgt_dur, sample_rate=16000):
    """Split a 1-D waveform into fixed-length windows with 75% overlap.

    The waveform is zero-padded (with a random front/tail split) up to the
    next hop boundary so every window is exactly ``tgt_dur`` seconds long.
    """
    seg_len = int(tgt_dur * sample_rate)
    hop_len = seg_len // 4
    src_len = len(tensor)
    if src_len <= seg_len:
        padded_len = seg_len
    else:
        padded_len = (src_len // hop_len + 1) * hop_len
    total_pad = padded_len - src_len
    # Randomly distribute the padding between the front and the tail.
    head_pad = random.randint(0, total_pad)
    tail_pad = total_pad - head_pad
    padded = torch.cat([torch.zeros(head_pad), tensor, torch.zeros(tail_pad)])
    return padded.unfold(0, seg_len, hop_len).unbind(0)
|
class ModelEntry():
    """Bundle of a model plus the bookkeeping metadata used by the Runner.

    Attributes:
        model: the wrapped torch module (possibly DDP-wrapped).
        name: checkpoint key for this model (e.g. 'Upstream', 'Featurizer').
        trainable: whether the model's parameters are optimized.
        interfaces: attribute names the Runner accesses on the model.
    """

    def __init__(self, model, name, trainable, interfaces):
        self.model = model
        self.name = name
        self.trainable = trainable
        self.interfaces = interfaces

    def __repr__(self):
        # Backward-compatible addition: aid debugging/logging; the (possibly
        # huge) model repr is intentionally omitted to keep output short.
        return (f'{type(self).__name__}(name={self.name!r}, '
                f'trainable={self.trainable!r}, interfaces={self.interfaces!r})')
|
class Runner():
    """
    Used to handle high-level concepts of a ML experiment
    eg. training loop, evaluation loop, upstream propagation, optimization, logging, checkpoint saving
    """

    def __init__(self, args, config):
        self.args = args
        self.config = config
        # Resume state (weights, optimizer, step/epoch counters) if a
        # checkpoint path was given; otherwise an empty dict.
        self.init_ckpt = (torch.load(self.args.init_ckpt, map_location='cpu') if self.args.init_ckpt else {})
        self.upstream = self._get_upstream()
        self.featurizer = self._get_featurizer()
        self.downstream = self._get_downstream()
        self.all_entries = [self.upstream, self.featurizer, self.downstream]

    def _load_weight(self, model, name):
        # Restore `name`'s state dict from the resume checkpoint, if present.
        init_weight = self.init_ckpt.get(name)
        if init_weight:
            show(f'[Runner] - Loading {name} weights from the previous experiment')
            model.load_state_dict(init_weight)

    def _init_model(self, model, name, trainable, interfaces=None):
        # Wrap a model into a ModelEntry: verify its declared interfaces,
        # restore weights, and apply DDP when running distributed training
        # with trainable parameters.
        for interface in (interfaces or []):
            assert hasattr(model, interface), interface
        self._load_weight(model, name)
        if (is_initialized() and trainable and any((p.requires_grad for p in model.parameters()))):
            model = DDP(model, device_ids=[self.args.local_rank], find_unused_parameters=True)
            # Re-expose the declared interfaces on the DDP wrapper, since DDP
            # only proxies forward().
            for interface in (interfaces or []):
                setattr(model, interface, getattr(model.module, interface))
        return ModelEntry(model, name, trainable, interfaces)

    def _get_upstream(self):
        # Build the upstream (feature extractor) either from the Hugging Face
        # Hub or from the local hub registry.
        if (('from_hf_hub' in self.args) and (self.args.from_hf_hub == True)):
            from huggingface_hub import snapshot_download
            print(f'[Runner] - Downloading upstream model {self.args.upstream} from the Hugging Face Hub')
            filepath = snapshot_download(self.args.upstream, self.args.upstream_revision, use_auth_token=True)
            sys.path.append(filepath)
            dependencies = (Path(filepath) / 'requirements.txt').resolve()
            print('[Dependency] - The downloaded upstream model requires the following dependencies. Please make sure they are installed:')
            for (idx, line) in enumerate((Path(filepath) / 'requirements.txt').open().readlines()):
                print(f'{idx}. {line.strip()}')
            print(f'You can install them by:')
            print()
            print(f'pip install -r {dependencies}')
            print()
            # `expert` is importable because `filepath` was appended to sys.path.
            from expert import UpstreamExpert
            Upstream = UpstreamExpert
            ckpt_path = os.path.join(filepath, self.args.upstream_model_name)
        else:
            Upstream = getattr(hub, self.args.upstream)
            ckpt_path = self.args.upstream_ckpt
        upstream_refresh = self.args.upstream_refresh
        # In distributed mode only rank 0 downloads/refreshes the checkpoint;
        # other ranks wait at the barrier and reuse the cached copy.
        if (is_initialized() and (get_rank() > 0)):
            torch.distributed.barrier()
            upstream_refresh = False
        model = Upstream(ckpt=ckpt_path, model_config=self.args.upstream_model_config, refresh=upstream_refresh).to(self.args.device)
        if (is_initialized() and (get_rank() == 0)):
            torch.distributed.barrier()
        return self._init_model(model=model, name='Upstream', trainable=self.args.upstream_trainable, interfaces=['get_downsample_rates'])

    def _get_featurizer(self):
        # Featurizer selects/weights upstream layers into a single feature map.
        model = Featurizer(upstream=self.upstream.model, feature_selection=self.args.upstream_feature_selection, layer_selection=self.args.upstream_layer_selection, upstream_device=self.args.device, normalize=self.args.upstream_feature_normalize).to(self.args.device)
        return self._init_model(model=model, name='Featurizer', trainable=True, interfaces=['output_dim', 'downsample_rate'])

    def _get_downstream(self):
        # The downstream expert module is resolved dynamically from the
        # `--downstream` task name.
        expert = importlib.import_module(f's3prl.downstream.{self.args.downstream}.expert')
        Downstream = getattr(expert, 'DownstreamExpert')
        model = Downstream(upstream_dim=self.featurizer.model.output_dim, upstream_rate=self.featurizer.model.downsample_rate, **self.config, **vars(self.args)).to(self.args.device)
        return self._init_model(model=model, name='Downstream', trainable=True, interfaces=['get_dataloader', 'log_records'])

    def _get_optimizer(self, model_params):
        optimizer = get_optimizer(model_params, self.config['runner']['total_steps'], self.config['optimizer'])
        self._load_weight(optimizer, 'Optimizer')
        return optimizer

    def _get_scheduler(self, optimizer):
        scheduler = get_scheduler(optimizer, self.config['runner']['total_steps'], self.config['scheduler'])
        self._load_weight(scheduler, 'Scheduler')
        return scheduler

    def _create_model_card(self, path):
        # Write the Hugging Face model-card README for the pushed repository.
        model_card = MODEL_CARD_MARKDOWN.format(upstream_model=self.args.upstream)
        with open(os.path.join(path, 'README.md'), 'w') as f:
            f.write(model_card)

    def train(self):
        """Main training loop: forward upstream/featurizer/downstream, backprop
        with optional AMP and gradient accumulation, log, evaluate and save
        checkpoints at the configured step intervals."""
        # Split entries into trainable (train mode, optimized) and frozen (eval mode).
        trainable_models = []
        trainable_paras = []
        for entry in self.all_entries:
            if entry.trainable:
                entry.model.train()
                trainable_models.append(entry.model)
                trainable_paras += list(entry.model.parameters())
            else:
                entry.model.eval()
        amp = self.config['runner'].get('fp16', False)
        if amp:
            print('[Runner] - Enabled fp16 training')
            scaler = torch.cuda.amp.GradScaler()
        optimizer = self._get_optimizer(trainable_models)
        scheduler = None
        if self.config.get('scheduler'):
            scheduler = self._get_scheduler(optimizer)
        specaug = None
        if self.config.get('specaug'):
            from .specaug import SpecAug
            specaug = SpecAug(**self.config['specaug'])
        # Only the leader process writes progress bars and tensorboard logs.
        tqdm_file = (sys.stderr if is_leader_process() else open(os.devnull, 'w'))
        pbar = tqdm(total=self.config['runner']['total_steps'], dynamic_ncols=True, desc='overall', file=tqdm_file)
        init_step = self.init_ckpt.get('Step')
        if init_step:
            pbar.n = init_step
        if is_leader_process():
            logger = SummaryWriter(self.args.expdir)
        batch_ids = []
        backward_steps = 0
        records = defaultdict(list)
        epoch = self.init_ckpt.get('Epoch', 0)
        train_split = self.config['runner'].get('train_dataloader', 'train')
        while (pbar.n < pbar.total):
            try:
                dataloader = self.downstream.model.get_dataloader(train_split, epoch=epoch)
            except TypeError as e:
                # Older downstream experts do not accept `epoch`; fall back and
                # set the epoch on the distributed sampler manually.
                if ("unexpected keyword argument 'epoch'" in str(e)):
                    dataloader = self.downstream.model.get_dataloader(train_split)
                    if (hasattr(dataloader, 'sampler') and isinstance(dataloader.sampler, DistributedSampler)):
                        dataloader.sampler.set_epoch(epoch)
                else:
                    raise
            for (batch_id, (wavs, *others)) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc='train', file=tqdm_file)):
                try:
                    if (pbar.n >= pbar.total):
                        break
                    global_step = (pbar.n + 1)
                    wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
                    with torch.cuda.amp.autocast(enabled=amp):
                        # Freeze the upstream forward when it is not trainable.
                        if self.upstream.trainable:
                            features = self.upstream.model(wavs)
                        else:
                            with torch.no_grad():
                                features = self.upstream.model(wavs)
                        features = self.featurizer.model(wavs, features)
                        if specaug:
                            (features, _) = specaug(features)
                        loss = self.downstream.model(train_split, features, *others, records=records)
                        batch_ids.append(batch_id)
                        # NOTE(review): no default here — assumes the runner
                        # config always defines `gradient_accumulate_steps`.
                        gradient_accumulate_steps = self.config['runner'].get('gradient_accumulate_steps')
                        loss = (loss / gradient_accumulate_steps)
                    if amp:
                        scaler.scale(loss).backward()
                    else:
                        loss.backward()
                    del loss
                except RuntimeError as e:
                    # Recover from OOM in single-process mode by skipping the
                    # batch; in distributed mode re-raise (ranks must stay in sync).
                    if ('CUDA out of memory' in str(e)):
                        print(f'[Runner] - CUDA out of memory at step {global_step}')
                        if is_initialized():
                            raise
                        with torch.cuda.device(self.args.device):
                            torch.cuda.empty_cache()
                        optimizer.zero_grad()
                        continue
                    else:
                        raise
                # Gradient accumulation: only step the optimizer every
                # `gradient_accumulate_steps` backward passes.
                backward_steps += 1
                if ((backward_steps % gradient_accumulate_steps) > 0):
                    continue
                if amp:
                    scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(trainable_paras, self.config['runner']['gradient_clipping'])
                if amp:
                    scaler.step(optimizer)
                    scaler.update()
                elif math.isnan(grad_norm):
                    # Skip the update when gradients blew up.
                    print(f'[Runner] - grad norm is NaN at step {global_step}')
                else:
                    optimizer.step()
                optimizer.zero_grad()
                if scheduler:
                    scheduler.step()
                # Non-leader processes only train; logging/eval/saving below is
                # leader-only.
                if (not is_leader_process()):
                    batch_ids = []
                    records = defaultdict(list)
                    continue
                if ((global_step % self.config['runner']['log_step']) == 0):
                    self.downstream.model.log_records(train_split, records=records, logger=logger, global_step=global_step, batch_ids=batch_ids, total_batch_num=len(dataloader))
                    batch_ids = []
                    records = defaultdict(list)
                save_names = []
                if ((global_step % self.config['runner']['eval_step']) == 0):
                    for split in self.config['runner']['eval_dataloaders']:
                        save_names += self.evaluate(split, logger, global_step)
                if ((global_step % self.config['runner']['save_step']) == 0):
                    def check_ckpt_num(directory):
                        # Keep at most `max_keep` periodic checkpoints, removing
                        # the oldest (by step number embedded in the filename).
                        max_keep = self.config['runner']['max_keep']
                        ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
                        if (len(ckpt_pths) >= max_keep):
                            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
                            for ckpt_pth in ckpt_pths[:((len(ckpt_pths) - max_keep) + 1)]:
                                os.remove(ckpt_pth)
                    check_ckpt_num(self.args.expdir)
                    save_names.append(f'states-{global_step}.ckpt')
                if (len(save_names) > 0):
                    all_states = {'Optimizer': optimizer.state_dict(), 'Step': global_step, 'Epoch': epoch, 'Args': self.args, 'Config': self.config}
                    for entry in self.all_entries:
                        if entry.trainable:
                            all_states[entry.name] = get_model_state(entry.model)
                    if scheduler:
                        all_states['Scheduler'] = scheduler.state_dict()
                    if is_initialized():
                        all_states['WorldSize'] = get_world_size()
                    save_paths = [os.path.join(self.args.expdir, name) for name in save_names]
                    tqdm.write(f'[Runner] - Save the checkpoint to:')
                    for (i, path) in enumerate(save_paths):
                        tqdm.write(f'{(i + 1)}. {path}')
                        torch.save(all_states, path)
                pbar.update(1)
            epoch += 1
        pbar.close()
        if self.args.push_to_hf_hub:
            self.push_to_huggingface_hub()
        if is_leader_process():
            logger.close()

    def evaluate(self, split=None, logger=None, global_step=0):
        'evaluate function will always be called on a single process even during distributed training'
        # With no arguments, this is a standalone evaluation run (writes to a
        # temporary tensorboard dir); otherwise it is called from train().
        not_during_training = ((split is None) and (logger is None) and (global_step == 0))
        if not_during_training:
            split = self.args.evaluate_split
            tempdir = tempfile.mkdtemp()
            logger = SummaryWriter(tempdir)
        # Fix seeds so evaluation is reproducible.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(self.args.seed)
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()
        # Remember each model's mode so it can be restored afterwards.
        trainings = []
        for entry in self.all_entries:
            trainings.append(entry.model.training)
            entry.model.eval()
        dataloader = self.downstream.model.get_dataloader(split)
        # Optionally evaluate only a fraction of the split.
        evaluate_ratio = float(self.config['runner'].get('evaluate_ratio', 1))
        evaluate_steps = round((len(dataloader) * evaluate_ratio))
        batch_ids = []
        records = defaultdict(list)
        for (batch_id, (wavs, *others)) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split, total=evaluate_steps)):
            if (batch_id > evaluate_steps):
                break
            wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
            with torch.no_grad():
                features = self.upstream.model(wavs)
                features = self.featurizer.model(wavs, features)
                self.downstream.model(split, features, *others, records=records, batch_id=batch_id)
                batch_ids.append(batch_id)
        save_names = self.downstream.model.log_records(split, records=records, logger=logger, global_step=global_step, batch_ids=batch_ids, total_batch_num=len(dataloader))
        batch_ids = []
        records = defaultdict(list)
        if torch.cuda.is_available():
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()
        # Restore the previous train/eval modes.
        for (entry, training) in zip(self.all_entries, trainings):
            if training:
                entry.model.train()
        if not_during_training:
            logger.close()
            shutil.rmtree(tempdir)
        return ([] if (type(save_names) is not list) else save_names)

    def inference(self):
        # Run the full pipeline on a single audio file; `evaluate_split` is
        # repurposed here as the audio file path.
        filepath = Path(self.args.evaluate_split)
        assert filepath.is_file(), filepath
        filename = filepath.stem
        if hasattr(self.downstream.model, 'load_audio'):
            wav = self.downstream.model.load_audio(filepath)
        else:
            (wav, sr) = torchaudio.load(str(filepath))
            assert (sr == SAMPLE_RATE), sr
        wavs = [wav.view((- 1)).to(self.args.device)]
        for entry in self.all_entries:
            entry.model.eval()
        with torch.no_grad():
            features = self.upstream.model(wavs)
            features = self.featurizer.model(wavs, features)
            self.downstream.model.inference(features, [filename])

    def push_to_huggingface_hub(self):
        'Creates a downstream repository on the Hub and pushes training artifacts to it.'
        if (self.args.hf_hub_org.lower() != 'none'):
            organization = self.args.hf_hub_org
        else:
            organization = os.environ.get('HF_USERNAME')
        huggingface_token = HfFolder.get_token()
        print(f'[Runner] - Organisation to push fine-tuned model to: {organization}')
        # Derive a repo name that ties the downstream run to the upstream model
        # (and its exact revision when it came from the Hub).
        if (self.args.hub == 'huggingface'):
            model_info = HfApi().model_info(self.args.upstream, token=huggingface_token)
            downstream_model_id = model_info.sha
            upstream_model_id = model_info.modelId.replace('/', '__')
        else:
            upstream_model_id = self.args.upstream.replace('/', '__')
            downstream_model_id = str(uuid.uuid4())[:8]
        repo_name = f'{upstream_model_id}__{downstream_model_id}'
        repo_url = HfApi().create_repo(token=huggingface_token, name=repo_name, organization=organization, exist_ok=True, private=False)
        print(f'[Runner] - Created Hub repo: {repo_url}')
        HF_HUB_DIR = 'hf_hub'
        REPO_ROOT_DIR = os.path.join(self.args.expdir, HF_HUB_DIR, repo_name)
        REPO_TASK_DIR = os.path.join(REPO_ROOT_DIR, self.args.downstream, self.args.expname)
        print(f'[Runner] - Cloning Hub repo to {REPO_ROOT_DIR}')
        model_repo = Repository(local_dir=REPO_ROOT_DIR, clone_from=repo_url, use_auth_token=huggingface_token)
        model_repo.git_pull()
        # Copy all experiment artifacts into the repo (excluding the repo dir itself).
        shutil.copytree(self.args.expdir, REPO_TASK_DIR, dirs_exist_ok=True, ignore=shutil.ignore_patterns(HF_HUB_DIR))
        checkpoints = list(Path(REPO_TASK_DIR).glob('*best*.ckpt'))
        if (len(checkpoints) == 0):
            print('[Runner] - Did not find a best checkpoint! Using the final checkpoint instead ...')
            CKPT_PATH = os.path.join(REPO_TASK_DIR, f"states-{self.config['runner']['total_steps']}.ckpt")
        elif (len(checkpoints) > 1):
            print(f'[Runner] - More than one best checkpoint found! Using {checkpoints[0]} as default ...')
            CKPT_PATH = checkpoints[0]
        else:
            print(f'[Runner] - Found best checkpoint {checkpoints[0]}!')
            CKPT_PATH = checkpoints[0]
        shutil.move(CKPT_PATH, os.path.join(REPO_TASK_DIR, 'model.ckpt'))
        model_repo.lfs_track('*.ckpt')
        self._create_model_card(REPO_ROOT_DIR)
        print('[Runner] - Pushing model files to the Hub ...')
        model_repo.push_to_hub()
        print('[Runner] - Training run complete!')
|
class SeparationDataset(Dataset):
    # LibriMix source-separation dataset: yields the mixture waveform plus the
    # per-speaker target waveforms, together with their (transposed) STFT maps.
    def __init__(self, data_dir, rate=16000, src=['mix_clean'], tgt=['s1', 's2'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        "\n Args:\n data_dir (str):\n prepared data directory\n\n rate (int):\n audio sample rate\n\n src and tgt (list(str)):\n the input and desired output.\n LibriMix offeres different options for the users. For\n clean source separation, src=['mix_clean'] and tgt=['s1', 's2'].\n Please see https://github.com/JorisCos/LibriMix for details\n\n n_fft (int):\n length of the windowed signal after padding with zeros.\n\n hop_length (int):\n number of audio samples between adjacent STFT columns.\n\n win_length (int):\n length of window for each frame\n\n window (str):\n type of window function, only support Hann window now\n\n center (bool):\n whether to pad input on both sides so that the\n t-th frame is centered at time t * hop_length\n\n The STFT related parameters are the same as librosa.\n "
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        # number of target sources (speakers)
        self.n_srcs = len(self.tgt)
        # this variant supports exactly one mixture and two target sources
        assert ((len(self.src) == 1) and (len(self.tgt) == 2))
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single']
        # recording name -> {condition: wav path}, read from Kaldi-style wav.scp
        reco2path = {}
        for cond in (src + tgt):
            assert (cond in cond_list)
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                content = fh.readlines()
            for line in content:
                line = line.strip('\n')
                (uttname, path) = line.split()
                if (uttname not in reco2path):
                    reco2path[uttname] = {}
                reco2path[uttname][cond] = path
        self.reco2path = reco2path
        # deterministic utterance ordering
        self.recolist = list(self.reco2path.keys())
        self.recolist.sort()

    def __len__(self):
        return len(self.recolist)

    def __getitem__(self, i):
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=None)
        assert (rate == self.rate)
        # STFT transposed to [frames, freq-bins]
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=None)
            assert (rate == self.rate)
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        '\n reco (str):\n name of the utterance\n\n src_sample (ndarray):\n audio samples for the source [T, ]\n\n src_feat (ndarray):\n the STFT feature map for the source with shape [T1, D]\n\n tgt_samp_list (list(ndarray)):\n list of audio samples for the targets\n\n tgt_feat_list (list(ndarray)):\n list of STFT feature map for the targets\n '
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        # Sort by decreasing mixture length so padding is tightest.
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        # mixture STFT split into magnitude / phase / raw complex, zero-padded
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        mix_magnitude = pad_sequence(mix_magnitude_list, batch_first=True)
        mix_phase = pad_sequence(mix_phase_list, batch_first=True)
        mix_stft = pad_sequence(mix_stft_list, batch_first=True)
        source_attr['magnitude'] = mix_magnitude
        source_attr['phase'] = mix_phase
        source_attr['stft'] = mix_stft
        target_attr = {}
        target_attr['magnitude'] = []
        target_attr['phase'] = []
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_magnitude = pad_sequence(tgt_magnitude_list, batch_first=True)
            tgt_phase = pad_sequence(tgt_phase_list, batch_first=True)
            target_attr['magnitude'].append(tgt_magnitude)
            target_attr['phase'].append(tgt_phase)
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        '\n source_wav_list (list(tensor)):\n list of audio samples for the source\n\n uttname_list (list(str)):\n list of utterance names\n\n source_attr (dict):\n dictionary containing magnitude and phase information for the sources\n\n source_wav (tensor):\n padded version of source_wav_list, with size [bs, max_T]\n\n target_attr (dict):\n dictionary containing magnitude and phase information for the targets\n\n feat_length (tensor):\n length of the STFT feature for each utterance\n\n wav_length (tensor):\n number of samples in each utterance\n '
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Mask-estimation RNN for separation: (bi)RNN stack followed by one
    linear head per speaker and a pointwise non-linearity."""

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if non_linear not in ['relu', 'sigmoid', 'tanh']:
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn_kind = rnn.upper()
        if rnn_kind not in ['RNN', 'LSTM', 'GRU']:
            raise ValueError('Unsupported rnn type: {}'.format(rnn_kind))
        rnn_cls = getattr(torch.nn, rnn_kind)
        self.rnn = rnn_cls(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # Bidirectional RNNs double the feature dimension seen by the heads.
        head_in = hidden_size * 2 if bidirectional else hidden_size
        self.linear = torch.nn.ModuleList(
            [torch.nn.Linear(head_in, num_bins) for _ in range(self.num_spks)]
        )
        self.non_linear = {
            'relu': torch.nn.functional.relu,
            'sigmoid': torch.nn.functional.sigmoid,
            'tanh': torch.nn.functional.tanh,
        }[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Return one mask tensor per speaker for packed input *x*."""
        assert isinstance(x, PackedSequence)
        rnn_out, _ = self.rnn(x)
        rnn_out, _seq_lens = pad_packed_sequence(rnn_out, batch_first=True)
        rnn_out = self.drops(rnn_out)
        masks = []
        for head in self.linear:
            mask = self.non_linear(head(rnn_out))
            if not train:
                # Flatten the batch/time axes at inference time.
                mask = mask.view(-1, self.num_bins)
            masks.append(mask)
        return masks
|
def main():
    """Create Kaldi-style data directories (wav.scp / utt2spk / spk2utt) for
    every LibriMix condition found under the source wav directory.

    Relies on the module-level `args` namespace (tgt_dir, src_dir,
    sample_rate, mode, part).

    Returns:
        0 on success.

    Raises:
        ValueError: if the output directory already exists (safety guard).
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    else:
        os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        # Some conditions are absent depending on the LibriMix variant.
        if not os.path.exists('{}/{}'.format(wav_dir, cond)):
            continue
        filelist = [f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav')]
        filelist.sort()
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # `with` guarantees the files are closed even on error.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
                open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # BUG FIX: the original used f.strip('.wav'), which removes any
                # of the characters '.', 'w', 'a', 'v' from BOTH ends (e.g.
                # 'wav1.wav' -> '1'), mangling utterance names. Slice off the
                # extension instead.
                uttname = f[:-len('.wav')]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        # Each utterance is its own speaker, so spk2utt mirrors utt2spk.
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def get_utt2path(wav_scp_file):
    """Parse a Kaldi-style wav.scp into an {utterance: wav path} dict."""
    mapping = {}
    with open(wav_scp_file, 'r') as handle:
        for raw in handle.readlines():
            utt_id, wav_path = raw.strip('\n').split()
            mapping[utt_id] = wav_path
    return mapping
|
def main():
    # Randomly subsample `args.sample` utterances from a prepared LibriMix data
    # directory and write matching Kaldi-style lists into args.tgt_dir.
    # Relies on the module-level `args` namespace (seed, sample, src_dir, tgt_dir).
    random.seed(args.seed)
    # utterance names are taken from the s1 condition (shared across conditions)
    with open('{}/s1/utt2spk'.format(args.src_dir), 'r') as fh:
        content = fh.readlines()
    uttlist = []
    for line in content:
        line = line.strip('\n')
        utt = line.split()[0]
        uttlist.append(utt)
    uttlist.sort()
    num_utt_ori = len(uttlist)
    # Shuffle, keep the first `sample` utterances, then restore sorted order.
    random.shuffle(uttlist)
    uttlist = uttlist[:args.sample]
    uttlist.sort()
    print('Selecting {} utts from {} utts'.format(len(uttlist), num_utt_ori))
    for dset in ['mix_both', 'mix_clean', 'mix_single', 'noise', 's1', 's2']:
        (src_dset, tgt_dset) = ('{}/{}'.format(args.src_dir, dset), '{}/{}'.format(args.tgt_dir, dset))
        # intentionally fails if the target directory already exists
        os.makedirs(tgt_dset)
        with open('{}/utt2spk'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt))
        # each utterance is its own speaker, so spk2utt mirrors utt2spk
        with open('{}/spk2utt'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt))
        utt2path = get_utt2path('{}/wav.scp'.format(src_dset))
        with open('{}/wav.scp'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt2path[utt]))
    return 0
|
class SeparationDataset(Dataset):
    # LibriMix source-separation dataset (duplicate variant of the class
    # defined earlier in this file): yields mixture and per-speaker target
    # waveforms with their (transposed) STFT feature maps.
    def __init__(self, data_dir, rate=16000, src=['mix_clean'], tgt=['s1', 's2'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        "\n Args:\n data_dir (str):\n prepared data directory\n\n rate (int):\n audio sample rate\n\n src and tgt (list(str)):\n the input and desired output.\n LibriMix offeres different options for the users. For\n clean source separation, src=['mix_clean'] and tgt=['s1', 's2'].\n Please see https://github.com/JorisCos/LibriMix for details\n\n n_fft (int):\n length of the windowed signal after padding with zeros.\n\n hop_length (int):\n number of audio samples between adjacent STFT columns.\n\n win_length (int):\n length of window for each frame\n\n window (str):\n type of window function, only support Hann window now\n\n center (bool):\n whether to pad input on both sides so that the\n t-th frame is centered at time t * hop_length\n\n The STFT related parameters are the same as librosa.\n "
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        # number of target sources (speakers)
        self.n_srcs = len(self.tgt)
        # this variant supports exactly one mixture and two target sources
        assert ((len(self.src) == 1) and (len(self.tgt) == 2))
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single']
        # recording name -> {condition: wav path}, read from Kaldi-style wav.scp
        reco2path = {}
        for cond in (src + tgt):
            assert (cond in cond_list)
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                content = fh.readlines()
            for line in content:
                line = line.strip('\n')
                (uttname, path) = line.split()
                if (uttname not in reco2path):
                    reco2path[uttname] = {}
                reco2path[uttname][cond] = path
        self.reco2path = reco2path
        # deterministic utterance ordering
        self.recolist = list(self.reco2path.keys())
        self.recolist.sort()

    def __len__(self):
        return len(self.recolist)

    def __getitem__(self, i):
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=None)
        assert (rate == self.rate)
        # STFT transposed to [frames, freq-bins]
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=None)
            assert (rate == self.rate)
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        '\n reco (str):\n name of the utterance\n\n src_sample (ndarray):\n audio samples for the source [T, ]\n\n src_feat (ndarray):\n the STFT feature map for the source with shape [T1, D]\n\n tgt_samp_list (list(ndarray)):\n list of audio samples for the targets\n\n tgt_feat_list (list(ndarray)):\n list of STFT feature map for the targets\n '
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        # Sort by decreasing mixture length so padding is tightest.
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        # mixture STFT split into magnitude / phase / raw complex, zero-padded
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        mix_magnitude = pad_sequence(mix_magnitude_list, batch_first=True)
        mix_phase = pad_sequence(mix_phase_list, batch_first=True)
        mix_stft = pad_sequence(mix_stft_list, batch_first=True)
        source_attr['magnitude'] = mix_magnitude
        source_attr['phase'] = mix_phase
        source_attr['stft'] = mix_stft
        target_attr = {}
        target_attr['magnitude'] = []
        target_attr['phase'] = []
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_magnitude = pad_sequence(tgt_magnitude_list, batch_first=True)
            tgt_phase = pad_sequence(tgt_phase_list, batch_first=True)
            target_attr['magnitude'].append(tgt_magnitude)
            target_attr['phase'].append(tgt_phase)
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        '\n source_wav_list (list(tensor)):\n list of audio samples for the source\n\n uttname_list (list(str)):\n list of utterance names\n\n source_attr (dict):\n dictionary containing magnitude and phase information for the sources\n\n source_wav (tensor):\n padded version of source_wav_list, with size [bs, max_T]\n\n target_attr (dict):\n dictionary containing magnitude and phase information for the targets\n\n feat_length (tensor):\n length of the STFT feature for each utterance\n\n wav_length (tensor):\n number of samples in each utterance\n '
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Multi-speaker mask estimation RNN for source separation.

    A (bi)directional RNN followed by one linear head per speaker. Each head
    predicts a time-frequency mask of size `num_bins` per frame.

    Args:
        input_dim: feature dimension of each input frame.
        num_bins: number of frequency bins in each predicted mask.
        rnn: one of 'rnn', 'lstm', 'gru' (case-insensitive).
        num_spks: number of speakers, i.e. number of output heads.
        num_layers: stacked RNN layers.
        hidden_size: RNN hidden units per direction.
        dropout: dropout between RNN layers and after the RNN output.
        non_linear: activation applied to each head ('relu', 'sigmoid',
            'tanh' or 'none').
        bidirectional: whether the RNN is bidirectional.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if non_linear not in ['relu', 'sigmoid', 'tanh', 'none']:
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if rnn not in ['RNN', 'LSTM', 'GRU']:
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # One mask head per speaker; double width when bidirectional.
        self.linear = torch.nn.ModuleList([torch.nn.Linear((hidden_size * 2) if bidirectional else hidden_size, num_bins) for _ in range(self.num_spks)])
        # FIX: torch.nn.functional.tanh is deprecated — use torch.tanh instead
        # (identical behavior, no deprecation warning).
        self.non_linear = {'relu': torch.nn.functional.relu, 'sigmoid': torch.sigmoid, 'tanh': torch.tanh, 'none': torch.nn.Identity()}[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Predict one mask per speaker.

        Args:
            x: a PackedSequence of input features.
            train: when False, each mask is flattened to (-1, num_bins)
                for frame-level evaluation.

        Returns:
            list of `num_spks` tensors, each (batch, T, num_bins) when
            `train` else (batch * T, num_bins).
        """
        assert isinstance(x, PackedSequence)
        (x, _) = self.rnn(x)
        (x, len_x) = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        m = []
        for linear in self.linear:
            y = linear(x)
            y = self.non_linear(y)
            if not train:
                y = y.view(-1, self.num_bins)
            m.append(y)
        return m
|
def main():
    """Export a LibriMix wav directory as Kaldi-style data directories.

    For every available condition (s1, s2, mix_clean, mix_both, mix_single,
    noise) this writes wav.scp, utt2spk and spk2utt under the target
    directory. Each utterance is its own "speaker" (utt2spk maps utt -> utt),
    so spk2utt is just a copy of utt2spk.

    Relies on the module-level `args` namespace (src_dir, tgt_dir,
    sample_rate, mode, part).

    Returns:
        0 on success.

    Raises:
        ValueError: if the target directory already exists.
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    else:
        os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        if not os.path.exists('{}/{}'.format(wav_dir, cond)):
            continue
        filelist = sorted(f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # Context managers guarantee the files are closed even on error.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, \
                open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # BUG FIX: the original used f.strip('.wav'), which removes
                # any of the characters '.', 'w', 'a', 'v' from BOTH ends of
                # the filename (e.g. 'war1.wav' -> 'r1'), not the extension.
                uttname = os.path.splitext(f)[0]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def get_utt2path(wav_scp_file):
    """Parse a Kaldi-style wav.scp file into an {utterance: path} dict.

    Each line must contain exactly two whitespace-separated fields:
    the utterance id and the audio path.
    """
    utt2path = {}
    with open(wav_scp_file, 'r') as fh:
        for raw_line in fh:
            # Exactly-two-field unpacking: malformed lines raise ValueError.
            utt, path = raw_line.strip('\n').split()
            utt2path[utt] = path
    return utt2path
|
def main():
    """Subsample a LibriMix Kaldi-style data directory.

    Seeded by `args.seed`, keeps `args.sample` utterances chosen from the
    source s1/utt2spk list and writes matching utt2spk, spk2utt and wav.scp
    files for every condition under the target directory.

    Returns:
        0 on success.
    """
    random.seed(args.seed)
    with open('{}/s1/utt2spk'.format(args.src_dir), 'r') as fh:
        uttlist = [line.strip('\n').split()[0] for line in fh.readlines()]
    uttlist.sort()
    num_utt_ori = len(uttlist)
    # Shuffle, keep the first `sample` utts, then restore sorted order.
    random.shuffle(uttlist)
    uttlist = uttlist[:args.sample]
    uttlist.sort()
    print('Selecting {} utts from {} utts'.format(len(uttlist), num_utt_ori))
    for dset in ['mix_both', 'mix_clean', 'mix_single', 'noise', 's1', 's2']:
        src_dset = '{}/{}'.format(args.src_dir, dset)
        tgt_dset = '{}/{}'.format(args.tgt_dir, dset)
        # Intentionally raises if the target condition dir already exists.
        os.makedirs(tgt_dset)
        with open('{}/utt2spk'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt))
        with open('{}/spk2utt'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt))
        utt2path = get_utt2path('{}/wav.scp'.format(src_dset))
        with open('{}/wav.scp'.format(tgt_dset), 'w') as fh:
            for utt in uttlist:
                fh.write('{} {}\n'.format(utt, utt2path[utt]))
    return 0
|
class DownstreamExpert(SpeakerExpert):
    """Frame-wise speaker classification expert.

    Basically the same as the utterance-level speaker expert, except the
    utterance label is broadcast to every frame before the loss is computed.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, expdir, **kwargs)

    def forward(self, mode, features, labels, records, **kwargs):
        """Compute frame-wise cross-entropy loss and accumulate accuracy.

        Args:
            features: list of unpadded per-utterance feature tensors, already
                on the target device.
            labels: utterance-level speaker labels, broadcast per frame.
            records: defaultdict(list); per-frame hits are appended to
                records['acc'] for later averaging/logging.

        Returns:
            The (attached) scalar loss to be optimized.
        """
        frame_counts = torch.LongTensor([len(feat) for feat in features])
        padded = pad_sequence(features, batch_first=True)
        # Broadcast each utterance label across its (padded) time axis.
        frame_labels = labels.unsqueeze(-1).expand(padded.size(0), padded.size(1)).to(padded.device)
        logits = self.model(padded)
        num_classes = logits.size(-1)
        loss = self.objective(logits.reshape(-1, num_classes), frame_labels.reshape(-1))
        hits = logits.max(dim=-1).indices == frame_labels
        # Count accuracy over real (unpadded) frames only.
        for hit_row, count in zip(hits, frame_counts):
            records['acc'] += hit_row[:count].tolist()
        return loss
|
class SpeakerDataset(Dataset):
    """Bucketed LibriSpeech dataset for utterance-level speaker classification.

    Each __getitem__ returns one pre-built "bucket" (batch) of waveforms plus
    their speaker-class labels, so the DataLoader uses batch_size=1 and
    collate_fn simply unwraps the singleton batch.
    """

    def __init__(self, split, bucket_size, libri_root, split_file, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
        self.libri_root = libri_root
        self.split_file = split_file
        self.sample_rate = sample_rate
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # Sort by length (descending) so each bucket holds similar-length
        # utterances, minimizing padding.
        self.table = pd.read_csv(os.path.join(bucket_file, 'train-clean-100.csv')).sort_values(by=['length'], ascending=False)
        X = self.table['file_path'].tolist()
        X_lens = self.table['length'].tolist()
        # Pick the utterance list for this split; train/dev share one file via
        # a seeded 90/10 shuffle-split.
        if (((split == 'train') or (split == 'dev')) and os.path.isfile(os.path.join(split_file, 'train_split.txt'))):
            usage_list = open(os.path.join(split_file, 'train_split.txt')).readlines()
            random.seed(train_dev_seed)
            random.shuffle(usage_list)
            percent = int((len(usage_list) * 0.9))
            usage_list = (usage_list[:percent] if (split == 'train') else usage_list[percent:])
        elif ((split == 'test') and os.path.isfile(os.path.join(split_file, 'test_split.txt'))):
            usage_list = open(os.path.join(split_file, 'test_split.txt')).readlines()
        else:
            raise NotImplementedError('Invalid `split` argument!')
        # Dict with None values: used purely for O(1) membership tests below.
        usage_list = {line.strip('\n'): None for line in usage_list}
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            if (self._parse_x_name(x) in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    # Halve buckets containing very long utterances to bound
                    # per-batch memory.
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # Keep the trailing partial bucket when it has more than one utterance.
        if (len(batch_x) > 1):
            # NOTE(review): this re-checks only the *last* x seen in the loop,
            # not the batch contents — looks like copied boilerplate; confirm.
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)
        print('[Dataset] - Computing speaker class...')
        speakers = self._get_all_speakers(X)
        self.speaker2idx = self._compute_speaker2idx(speakers)
        self.class_num = len(self.speaker2idx)
        print(((((('[Dataset] - # possible speaker classes: ' + str(self.class_num)) + ', number of data for ') + split) + ': ') + str(len(usage_list))))

    def _parse_x_name(self, x):
        # 'path/to/1234-5678-0000.flac' -> '1234-5678-0000'
        return x.split('/')[(- 1)].split('.')[0]

    def _load_wav(self, wav_path):
        # NOTE(review): the sample rate returned by torchaudio.load is
        # discarded — presumably the files are already at self.sample_rate;
        # confirm upstream preprocessing.
        (wav, sr) = torchaudio.load(os.path.join(self.libri_root, wav_path))
        return wav.view((- 1))

    def _get_speaker_from_path(self, x):
        # LibriSpeech file ids are '<speaker>-<chapter>-<utterance>'.
        return x.split('/')[(- 1)].split('.')[0].split('-')[0]

    def _get_all_speakers(self, X):
        """Count utterances per speaker over the whole table."""
        speaker_set = {}
        for x in X:
            speaker = self._get_speaker_from_path(x)
            if (speaker not in speaker_set):
                # NOTE(review): the first occurrence is recorded as 0, so each
                # count is one less than the true utterance count; the
                # SPEAKER_THRESHOLD comparison below inherits this off-by-one.
                speaker_set[speaker] = 0
            else:
                speaker_set[speaker] += 1
        return speaker_set

    def _compute_speaker2idx(self, speakers):
        """Assign contiguous class indices to speakers above SPEAKER_THRESHOLD."""
        idx = 0
        speaker2idx = {}
        for speaker in sorted(speakers):
            if ((speaker not in speaker2idx) and (speakers[speaker] > SPEAKER_THRESHOLD)):
                speaker2idx[speaker] = idx
                idx += 1
        return speaker2idx

    def __len__(self):
        # Number of buckets (pre-built batches), not utterances.
        return len(self.X)

    def __getitem__(self, index):
        # Load the whole bucket of waveforms plus their speaker labels.
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = torch.LongTensor([self.speaker2idx[self._get_speaker_from_path(x_file)] for x_file in self.X[index]])
        return (wav_batch, label_batch)

    def collate_fn(self, items):
        # The dataset already batches; unwrap the singleton from DataLoader.
        return (items[0][0], items[0][1])
|
class DownstreamExpert(nn.Module):
    """Utterance-level speaker classification downstream expert.

    Used to handle downstream-specific operations, e.g. downstream forward,
    metric computation, and contents to log.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.train_dataset = SpeakerDataset('train', self.datarc['train_batch_size'], **self.datarc)
        self.dev_dataset = SpeakerDataset('dev', self.datarc['eval_batch_size'], **self.datarc)
        self.test_dataset = SpeakerDataset('test', self.datarc['eval_batch_size'], **self.datarc)
        self.model = Model(input_dim=self.upstream_dim, output_class_num=self.train_dataset.class_num, **self.modelrc)
        self.objective = nn.CrossEntropyLoss()
        self.logging = os.path.join(expdir, 'log.log')
        # Tracks the best accuracy seen per logging prefix.
        self.best = defaultdict(lambda: 0)

    # Dataloader specs: each dataloader yields
    # [[wav1, wav2, ...], your_other_contents1, ...] where each wav is a
    # variable-length torch.FloatTensor in cpu with dim()==1, 16 kHz.
    # batch_size=1 because the dataset pre-batches into buckets.
    def _get_train_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=1, shuffle=False, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode):
        """Dispatch to get_{train,dev,test}_dataloader by mode name.

        FIX: replaces eval(f'self.get_{mode}_dataloader') with getattr —
        identical behavior for valid modes, without evaluating the `mode`
        string as code.
        """
        return getattr(self, f'get_{mode}_dataloader')()

    def forward(self, mode, features, labels, records, **kwargs):
        """Mean-pool each utterance's features, classify, record accuracy.

        Args:
            features: list of unpadded per-utterance feature tensors, already
                on the target device.
            labels: utterance-wise speaker labels.
            records: defaultdict(list); per-utterance hits are appended to
                records['acc'] for later averaging/logging.

        Returns:
            The (attached) scalar loss to be optimized.
        """
        # Temporal mean pooling collapses each (T, D) utterance to (D,).
        features = torch.stack([f.mean(dim=0) for f in features], dim=0)
        labels = labels.to(features.device)
        predicted = self.model(features)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=-1).indices
        records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Average and log accuracy; return ckpt names on a new best score."""
        prefix = f'libri_speaker/{mode}-'
        average = torch.FloatTensor(records['acc']).mean().item()
        logger.add_scalar(f'{prefix}acc', average, global_step=global_step)
        message = f'{prefix}|step:{global_step}|acc:{average}\n'
        save_ckpt = []
        if average > self.best[prefix]:
            self.best[prefix] = average
            message = f'best|{message}'
            name = prefix.split('/')[-1].split('-')[0]
            save_ckpt.append(f'best-states-{name}.ckpt')
        with open(self.logging, 'a') as f:
            f.write(message)
        print(message)
        return save_ckpt
|
class Model(nn.Module):
    """Single linear classification head."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        # One affine projection straight from features to class logits.
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        """Project features (..., input_dim) to (..., output_class_num) logits."""
        return self.linear(features)
|
class SpeechCommandsBaseDataset(Dataset):
    """12-class Speech Commands base dataset.

    Subclasses populate `self.data` with (class_name, audio_path) pairs.
    """

    def __init__(self):
        # Idiomatic enumerate instead of indexing CLASSES by range(len(...)).
        self.class2index = {class_name: index for index, class_name in enumerate(CLASSES)}
        self.class_num = 12  # NOTE(review): assumes len(CLASSES) == 12 — confirm
        self.data = []

    def __getitem__(self, idx):
        """Load one utterance: (waveform ndarray, class index, file id)."""
        (class_name, audio_path) = self.data[idx]
        # apply_effects_file processes the audio per the module-level EFFECTS.
        (wav, _) = apply_effects_file(str(audio_path), EFFECTS)
        wav = wav.squeeze(0).numpy()
        # e.g. 'yes/0a2b_nohash_0.wav' -> 'yes-0a2b_nohash_0.wav'
        fileid = '-'.join(Path(audio_path).parts[-2:])
        return (wav, self.class2index[class_name], fileid)

    def __len__(self):
        return len(self.data)

    def collate_fn(self, samples):
        """Collate a mini-batch of data."""
        # Transpose [(wav, label, id), ...] into (wavs, labels, ids).
        return zip(*samples)
|
class SpeechCommandsDataset(SpeechCommandsBaseDataset):
    """Training and validation dataset.

    Out-of-vocabulary classes are folded into '_unknown_', and the
    '_background_noise_' recordings are added as '_silence_' samples.
    Per-sample inverse-frequency weights are exposed for balanced sampling.
    """

    def __init__(self, data_list, **kwargs):
        super().__init__()
        relabeled = []
        for class_name, audio_path in data_list:
            if class_name not in self.class2index:
                class_name = '_unknown_'
            relabeled.append((class_name, audio_path))
        # Every background-noise track becomes one '_silence_' sample.
        noise_dir = Path(kwargs['speech_commands_root'], '_background_noise_')
        for audio_path in noise_dir.glob('*.wav'):
            relabeled.append(('_silence_', audio_path))
        counts = {class_name: 0 for class_name in CLASSES}
        for class_name, _ in relabeled:
            counts[class_name] += 1
        self.data = relabeled
        # Inverse class frequency, for use with a WeightedRandomSampler.
        self.sample_weights = [len(relabeled) / counts[class_name] for class_name, _ in relabeled]

    def __getitem__(self, idx):
        wav, label, stem = super().__getitem__(idx)
        if label == self.class2index['_silence_']:
            # Background-noise tracks are long: crop a random 1-second window.
            start = randint(0, len(wav) - 16000)
            wav = wav[start:start + 16000]
        return wav, label, stem
|
class SpeechCommandsTestingDataset(SpeechCommandsBaseDataset):
    """Testing dataset: one (class, path) pair per wav under each class dir."""

    def __init__(self, **kwargs):
        super().__init__()
        entries = []
        for class_dir in Path(kwargs['speech_commands_test_root']).iterdir():
            if not class_dir.is_dir():
                continue
            for audio_path in class_dir.glob('*.wav'):
                entries.append((class_dir.name, audio_path))
        self.data = entries
|
class DownstreamExpert(nn.Module):
    """Speech Commands (keyword spotting) downstream expert.

    Used to handle downstream-specific operations, e.g. downstream forward,
    metric computation, and contents to log.
    """

    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        # Deterministic filename-hash split of the raw data into train/valid.
        (train_list, valid_list) = split_dataset(self.datarc['speech_commands_root'])
        self.train_dataset = SpeechCommandsDataset(train_list, **self.datarc)
        self.dev_dataset = SpeechCommandsDataset(valid_list, **self.datarc)
        self.test_dataset = SpeechCommandsTestingDataset(**self.datarc)
        # NOTE: eval() resolves a class name taken from the config file;
        # acceptable only because the config is trusted local input.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=self.train_dataset.class_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        # Buffer so the best dev score is saved/restored with the checkpoint.
        self.register_buffer('best_score', torch.zeros(1))

    def _get_balanced_train_dataloader(self, dataset, drop_last=False):
        # Inverse-frequency weights balance the classes within each batch.
        sampler = WeightedRandomSampler(dataset.sample_weights, len(dataset.sample_weights))
        if is_initialized():
            # Under distributed training, shard the weighted sampler.
            sampler = DistributedSamplerWrapper(sampler)
        return DataLoader(dataset, sampler=sampler, batch_size=self.datarc['batch_size'], drop_last=drop_last, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_balanced_dev_dataloader(self, dataset, drop_last=False):
        # Same class-balanced sampling as training, but never distributed.
        return DataLoader(dataset, sampler=WeightedRandomSampler(dataset.sample_weights, len(dataset.sample_weights)), batch_size=self.datarc['batch_size'], drop_last=drop_last, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_dataloader(self, dataset):
        # Plain sequential loader (used for the test split).
        return DataLoader(dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def get_dataloader(self, mode):
        """Return the dataloader for 'train', 'dev' or 'test'."""
        if (mode == 'train'):
            return self._get_balanced_train_dataloader(self.train_dataset, drop_last=True)
        elif (mode == 'dev'):
            return self._get_balanced_dev_dataloader(self.dev_dataset, drop_last=False)
        elif (mode == 'test'):
            return self._get_dataloader(self.test_dataset)
        else:
            raise NotImplementedError

    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Project padded upstream features, classify, and record metrics.

        Args:
            mode: 'train', 'dev' or 'test'.
            features: list of unpadded upstream feature tensors on the
                target device.
            labels: class indices (convertible to LongTensor).
            filenames: per-utterance file ids, kept for prediction dumps.
            records: defaultdict(list) accumulating metrics across steps.

        Returns:
            The (attached) scalar cross-entropy loss.
        """
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['loss'].append(loss.item())
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['filename'] += filenames
        # Human-readable class names for the predict/truth text dumps.
        records['predict'] += [CLASSES[idx] for idx in predicted_classid.cpu().tolist()]
        records['truth'] += [CLASSES[idx] for idx in labels.cpu().tolist()]
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Average and log accumulated metrics; dump predictions to files.

        Returns the checkpoint names to save (non-empty only when a new best
        dev accuracy is reached).
        """
        save_names = []
        for key in ['loss', 'acc']:
            values = records[key]
            average = (sum(values) / len(values))
            logger.add_scalar(f'speech_commands/{mode}-{key}', average, global_step=global_step)
            with open(Path(self.expdir, 'log.log'), 'a') as f:
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    if ((mode == 'dev') and (average > self.best_score)):
                        # Keep best_score a 1-element tensor so the registered
                        # buffer's dtype/shape stays stable across saves.
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        with open((Path(self.expdir) / f'{mode}_predict.txt'), 'w') as file:
            lines = [f'''{f} {i}
''' for (f, i) in zip(records['filename'], records['predict'])]
            file.writelines(lines)
        with open((Path(self.expdir) / f'{mode}_truth.txt'), 'w') as file:
            lines = [f'''{f} {i}
''' for (f, i) in zip(records['filename'], records['truth'])]
            file.writelines(lines)
        return save_names
|
def split_dataset(root_dir: Union[str, Path], max_uttr_per_class=2 ** 27 - 1) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
    """Split Speech Commands into train/valid lists.

    Each utterance is bucketed by a deterministic SHA1 hash of its filename
    with the `_nohash_` suffix removed, so re-recordings of the same speaker
    always land in the same split. Roughly 10% goes to validation, 10% is
    held out (the official test bucket, dropped here), the rest is training.

    Args:
        root_dir: speech commands dataset root dir
        max_uttr_per_class: predefined value in the original paper

    Returns:
        (train_list, valid_list), each a list of (class_name, audio_path).
    """
    train_list: List[Tuple[str, str]] = []
    valid_list: List[Tuple[str, str]] = []
    for class_dir in Path(root_dir).iterdir():
        if not class_dir.is_dir() or class_dir.name == '_background_noise_':
            continue
        for audio_path in class_dir.glob('*.wav'):
            speaker_id = re.sub('_nohash_.*$', '', audio_path.name)
            digest = hashlib.sha1(speaker_id.encode('utf-8')).hexdigest()
            bucket = (int(digest, 16) % (max_uttr_per_class + 1)) * (100.0 / max_uttr_per_class)
            if bucket < 10:
                valid_list.append((class_dir.name, audio_path))
            elif bucket < 20:
                # Official testing bucket: served by the separate test set.
                continue
            else:
                train_list.append((class_dir.name, audio_path))
    return train_list, valid_list
|
class Model(nn.Module):
    """Two-layer MLP classifier with temporal mean pooling.

    Not used in SUPERB Benchmark.
    """

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        width = kwargs['hidden_dim']
        self.connector = nn.Linear(input_dim, width)
        self.fc1 = nn.Linear(width, width)
        self.fc2 = nn.Linear(width, output_class_num)

    def forward(self, features):
        """Map (batch, T, input_dim) features to (batch, output_class_num) logits."""
        hidden = F.relu(self.connector(features))
        hidden = self.fc1(hidden)
        # Average across the time dimension -> one vector per utterance.
        return self.fc2(hidden.mean(dim=1))
|
class AdditionalDataset():
    """Holds target-side text and turns it into fairseq training tensors.

    Text is tokenized on demand (optional pre-tokenizer, then optional BPE)
    and encoded with a fairseq Dictionary.
    """

    @classmethod
    def from_tsv(cls, file, key, bpe_tokenizer=None, pre_tokenizer=None, dictionary=None):
        """Build a dataset from column `key` of a tab-separated file.

        BUG FIX: the old implementation called cls(data, bpe_tokenizer,
        pre_tokenizer), silently binding bpe_tokenizer to the `dictionary`
        parameter and pre_tokenizer to `bpe_tokenizer`. The new optional
        `dictionary` argument is forwarded explicitly (backward-compatible
        signature extension).
        """
        data = []
        # `as fh` — the old code shadowed the `file` parameter here.
        with open(file, 'r') as fh:
            reader = csv.DictReader(fh, delimiter='\t', quotechar=None, doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
            for line in reader:
                data.append(line[key])
        return cls(data, dictionary, bpe_tokenizer=bpe_tokenizer, pre_tokenizer=pre_tokenizer)

    def __init__(self, data, dictionary, bpe_tokenizer=None, pre_tokenizer=None):
        self.data = data
        self.bpe_tokenizer = bpe_tokenizer
        self.pre_tokenizer = pre_tokenizer
        self.dictionary = dictionary

    def _create_target(self, index):
        """Encode sample `index` into a LongTensor of token ids (with EOS)."""
        tokenized = self._tokenize_text(self.data[index])
        target = self.dictionary.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()
        return target

    def get_addtional_input(self, id_list):
        """Collate targets for `id_list` into a fairseq-style batch dict.

        (Method name kept as-is — sic — because external callers use it.)
        """
        target = [self._create_target(id) for id in id_list]
        batched_target = fairseq.data.data_utils.collate_tokens(target, self.dictionary.pad(), self.dictionary.eos(), left_pad=False, move_eos_to_beginning=False)
        target_lengths = torch.tensor([t.size(0) for t in target], dtype=torch.long)
        # Teacher-forcing inputs: the same tokens with EOS moved to the front.
        prev_output_tokens = fairseq.data.data_utils.collate_tokens(target, self.dictionary.pad(), self.dictionary.eos(), left_pad=False, move_eos_to_beginning=True)
        ntokens = sum(t.size(0) for t in target)
        return {'target': batched_target, 'prev_output_tokens': prev_output_tokens, 'target_lengths': target_lengths, 'ntokens': ntokens}

    def _tokenize_text(self, text):
        """Apply the pre-tokenizer and then the BPE tokenizer, when configured."""
        if self.pre_tokenizer is not None:
            text = self.pre_tokenizer.encode(text)
        if self.bpe_tokenizer is not None:
            text = self.bpe_tokenizer.encode(text)
        return text
|
class S3prl_SpeechToTextTask(SpeechToTextTask):
    """SpeechToTextTask variant whose datasets feed raw waveforms/features
    to an s3prl upstream (via S3prl_SpeechToTextDatasetCreator)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def load_dataset(self, split, max_feature_len=-1, epoch=1, combine=False, **kwargs):
        """Load `split` from TSV; max_feature_len>0 truncates long utterances."""
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = S3prl_SpeechToTextDatasetCreator.from_tsv(
            self.args.data,
            self.data_cfg,
            split,
            self.tgt_dict,
            pre_tokenizer,
            bpe_tokenizer,
            is_train_split=split.startswith('train'),
            epoch=epoch,
            seed=self.args.seed,
            upstream_rate=self.upstream_rate,
            max_feature_len=max_feature_len,
        )

    def build_model(self, args, input_dim):
        """Build the fairseq model with the upstream feature dim injected."""
        args.input_feat_per_channel = input_dim
        args.input_channels = self.data_cfg.input_channels
        # Deliberately skip SpeechToTextTask.build_model and call the
        # grandparent implementation (super of SpeechToTextTask).
        return super(SpeechToTextTask, self).build_model(args)
|
class S3prl_SpeechToTextDatasetCreator(SpeechToTextDatasetCreator):
    """Dataset creator that also reads a per-utterance sample-rate column."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    # Optional TSV column carrying each utterance's native sample rate.
    KEY_SAMPLE_RATE = 'sr'
    DEFAULT_SAMPLE_RATE = 16000

    @classmethod
    def _from_list(cls, split_name: str, is_train_split, samples: List[List[Dict]], data_cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, upstream_rate, max_feature_len) -> SpeechToTextDataset:
        """Flatten the per-file sample lists and build one dataset."""
        rows = [ss for s in samples for ss in s]
        ids = [row[cls.KEY_ID] for row in rows]
        audio_paths = [op.join(data_cfg.audio_root, row[cls.KEY_AUDIO]) for row in rows]
        n_frames = [int(row[cls.KEY_N_FRAMES]) for row in rows]
        tgt_texts = [row[cls.KEY_TGT_TEXT] for row in rows]
        src_texts = [row.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for row in rows]
        speakers = [row.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for row in rows]
        src_langs = [row.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for row in rows]
        tgt_langs = [row.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for row in rows]
        srs = [int(row.get(cls.KEY_SAMPLE_RATE, cls.DEFAULT_SAMPLE_RATE)) for row in rows]
        return S3prl_SpeechToTextDataset(split_name, is_train_split, data_cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, srs=srs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, upstream_rate=upstream_rate, max_feature_len=max_feature_len)

    @classmethod
    def from_tsv(cls, root: str, data_cfg: S2TDataConfig, splits: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, upstream_rate: int, max_feature_len: int) -> SpeechToTextDataset:
        """Read a single `<split>.tsv` under `root` and build the dataset."""
        _splits = splits.split(',')
        assert len(_splits) == 1, 'do not support multiple files training'
        split = _splits[0]
        tsv_path = op.join(root, f'{split}.tsv')
        if not op.isfile(tsv_path):
            raise FileNotFoundError(f'Dataset not found: {tsv_path}')
        with open(tsv_path) as f:
            reader = csv.DictReader(f, delimiter='\t', quotechar=None, doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
            samples = [dict(e) for e in reader]
        assert len(samples) > 0
        return cls._from_list(split, is_train_split, [samples], data_cfg, tgt_dict, pre_tokenizer, bpe_tokenizer, upstream_rate, max_feature_len)
|
class S3prl_SpeechToTextDataset(SpeechToTextDataset):
    """SpeechToTextDataset that yields raw 16 kHz waveforms for s3prl upstreams."""

    # Every waveform is resampled to this rate before being returned.
    TARGET_RATE = 16000

    def __init__(self, *args, srs: Optional[List[int]] = None, upstream_rate=160, max_feature_len=-1, **kwargs):
        """
        Args:
            srs: per-utterance native sample rates (required).
                BUG FIX: the old signature was `srs=Optional[List[int]]`,
                which made the *typing object* the default VALUE rather than
                an annotation; any call omitting `srs` crashed later in
                `set(srs)`. It is now a proper annotation with default None
                plus an explicit assertion.
            upstream_rate: waveform samples per upstream feature frame.
            max_feature_len: if > 0, truncate each utterance to this many
                upstream frames (max_feature_len * upstream_rate samples).
        """
        super().__init__(*args, **kwargs)
        assert srs is not None, 'srs (per-utterance sample rates) is required'
        self.srs = srs
        self.max_feature_len = max_feature_len
        self.max_wav_len = max_feature_len * upstream_rate
        # One cached resampler per distinct native rate.
        self.resamplers = {}
        for sr in set(srs):
            self.resamplers[sr] = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.TARGET_RATE)
        for i in range(len(self.n_frames)):
            # Convert the native-rate frame count into upstream feature frames.
            new_n_frames = ((self.n_frames[i] * self.TARGET_RATE) / self.srs[i]) / upstream_rate
            if self.max_feature_len > 0 and new_n_frames > max_feature_len:
                new_n_frames = max_feature_len
            self.n_frames[i] = int(new_n_frames)

    def __getitem__(self, index: int) -> Tuple[str, int, torch.Tensor, Optional[torch.Tensor]]:
        """Load one utterance: (utt_id, index, waveform, target or None).

        The audio is resampled to 16 kHz, downmixed to mono, and truncated to
        `max_wav_len` samples when a maximum feature length is configured.
        """
        (source, sr) = torchaudio.load(self.audio_paths[index])
        assert self.srs[index] == sr
        source = self.resamplers[sr](source)
        # Downmix multi-channel audio to mono and flatten.
        source = torch.mean(source, dim=0)
        source = source.view(-1)
        if self.feature_transforms is not None:
            assert not self.data_cfg.use_audio_input
            source = self.feature_transforms(source)
        source = source.float()
        if self.max_feature_len > 0:
            if source.size(0) > self.max_wav_len:
                print(f'wav too long({source.size(0)}), truncate to {self.max_wav_len} (id={index})')
                source = source[:self.max_wav_len]
        target = None
        if self.tgt_texts is not None:
            tokenized = self.tokenize_text(self.tgt_texts[index])
            target = self.tgt_dict.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()
            if self.data_cfg.prepend_tgt_lang_tag:
                # Prepend the language-tag token for multilingual models.
                lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])
                lang_tag_idx = self.tgt_dict.index(lang_tag)
                target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
        return (self.ids[index], index, source, target)

    def collater(self, samples: List[Tuple[(str, int, torch.Tensor, torch.Tensor)]]):
        """Collate via the parent class, then also return raw numpy waveforms.

        Returns:
            (wavs, output_dict) where `wavs` is a list of unpadded numpy
            arrays and `output_dict` is the fairseq batch with 'utt_id' added.
        """
        ids = [sample[0] for sample in samples]
        samples = [sample[1:] for sample in samples]
        output_dict = super().collater(samples)
        output_dict['utt_id'] = ids
        wavs = []
        for i in range(output_dict['nsentences']):
            # Strip padding from each row back to its true length.
            wav = output_dict['net_input']['src_tokens'][i]
            length = output_dict['net_input']['src_lengths'][i].item()
            wavs.append(wav[:length].numpy())
        return (wavs, output_dict)
|
class DownstreamExpert(nn.Module):
'\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n '
def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
"\n Args:\n upstream_dim: int\n Different upstream will give different representation dimension\n You might want to first project them to the same dimension\n\n upstream_rate: int\n 160: for upstream with 10 ms per frame\n 320: for upstream with 20 ms per frame\n \n downstream_expert: dict\n The 'downstream_expert' field specified in your downstream config file\n eg. downstream/example/config.yaml\n\n expdir: string\n The expdir from command-line argument, you should save all results into\n this directory, like some logging files.\n\n **kwargs: dict\n All the arguments specified by the argparser in run_downstream.py\n and all the other fields in config.yaml, in case you need it.\n \n Note1. Feel free to add new argument for __init__ as long as it is\n a command-line argument or a config field. You can check the constructor\n code in downstream/runner.py\n "
super(DownstreamExpert, self).__init__()
print(downstream_expert)
self.expdir = expdir
self.src_lang = downstream_expert['src_lang']
self.tgt_lang = downstream_expert['tgt_lang']
self.post_process = downstream_expert['post_process']
self.output_prefix = downstream_expert['output_prefix']
self.upstream_rate = upstream_rate
self.datarc = downstream_expert['datarc']
self.max_positions = downstream_expert['modelrc']['max_source_positions']
self.upstream_rate = downstream_expert.get('upstream_rate', upstream_rate)
if (self.upstream_rate < 0):
self.upstream_rate = upstream_rate
assert ((self.upstream_rate % upstream_rate) == 0)
self.downsample_ratio = int((self.upstream_rate / upstream_rate))
self.downsample_method = downstream_expert.get('downsample_method', 'drop')
if (self.downsample_method == 'concat'):
upstream_dim *= self.downsample_ratio
self.task = S3prl_SpeechToTextTask.setup_task(Namespace(**downstream_expert['taskrc']))
self.task.upstream_rate = self.upstream_rate
self.data_dir = downstream_expert['taskrc']['data']
self.criterion = self.task.build_criterion(Namespace(**downstream_expert['criterionrc']))
modelrc = Namespace(**downstream_expert['modelrc'])
assert (modelrc.arch in fairseq.models.ARCH_CONFIG_REGISTRY)
fairseq.models.ARCH_CONFIG_REGISTRY[modelrc.arch](modelrc)
self.model = self.task.build_model(modelrc, upstream_dim)
self.generator = self.task.build_generator([self.model], Namespace(**downstream_expert['generatorrc']))
self.batch_itr = {}
self.use_asr = downstream_expert['taskrc']['use_asr']
if self.use_asr:
rc = downstream_expert['asrrc']
self.asr_datarc = rc['datarc']
self.asr_weight = rc['weight']
self.asr_dict = Dictionary.load(f"{self.data_dir}/{rc['vocab_file']}")
asr_bperc = rc['bpe_tokenizer'].copy()
asr_bperc['sentencepiece_model'] = f"{self.data_dir}/{asr_bperc['sentencepiece_model']}"
self.asr_bpe = encoders.build_bpe(Namespace(**asr_bperc))
self.asr_task = S3prl_SpeechToTextTask.setup_task(Namespace(**downstream_expert['taskrc']))
self.asr_dict.add_symbol('<blank>')
self.asr_task.tgt_dict = self.asr_dict
self.asr_head = nn.Linear(modelrc.encoder_embed_dim, len(self.asr_dict))
self.additional_dataset = {}
self.register_buffer('best_score', torch.zeros(1))
def get_dataloader(self, split, epoch: int=0):
"\n Args:\n mode: string\n 'train', 'dev' or 'test'\n\n Return:\n a torch.utils.data.DataLoader returning each batch in the format of:\n\n [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. directly loaded by torchaudio\n "
data_split = self.datarc[split]
if (data_split not in self.batch_itr):
self.task.load_dataset(split=data_split, max_feature_len=self.max_positions)
self.batch_itr[data_split] = self.task.get_batch_iterator(self.task.dataset(data_split), max_tokens=self.datarc['max_tokens'], max_positions=self.max_positions, num_workers=self.datarc['num_workers'], ignore_invalid_inputs=False, epoch=(epoch + 1))
return self.batch_itr[data_split].next_epoch_itr()
def forward(self, mode, features, input_dict, records, **kwargs):
"\n Args:\n mode: string\n 'train', 'dev' or 'test' for this forward step\n\n features:\n list of unpadded features [feat1, feat2, ...]\n each feat is in torch.FloatTensor and already\n put in the device assigned by command-line args\n\n your_other_contents1, ... :\n in the order defined by your dataloader (dataset + collate_fn)\n these are all in cpu, and you can move them to the same device\n as features\n\n records:\n defaultdict(list), by appending contents into records,\n these contents can be averaged and logged on Tensorboard\n later by self.log_records (also customized by you)\n\n Note1. downstream/runner.py will call self.log_records\n 1. every `log_step` during training\n 2. once after evalute the whole dev/test dataloader\n\n Note2. `log_step` is defined in your downstream config\n eg. downstream/example/config.yaml\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n a single scalar in torch.FloatTensor\n "
device = features[0].device
features = self.downsample(features)
features_length = torch.LongTensor([len(feature) for feature in features])
features = pad_sequence(features, batch_first=True, padding_value=0.0)
input_dict['net_input']['src_tokens'] = features
input_dict['net_input']['src_lengths'] = features_length
input_dict = fairseq.utils.move_to_cuda(input_dict, device=device)
if self.use_asr:
asr_input_dict = self._create_asr_input_dict(input_dict, mode)
asr_input_dict = fairseq.utils.move_to_cuda(asr_input_dict, device=device)
loss = torch.FloatTensor(0)
if (mode in ['train', 'dev']):
encoder_out = self.model.encoder(src_tokens=input_dict['net_input']['src_tokens'], src_lengths=input_dict['net_input']['src_lengths'])
st_decoder_out = self.model.decoder(prev_output_tokens=input_dict['net_input']['prev_output_tokens'], encoder_out=encoder_out)
(st_loss, _) = self.criterion.compute_loss(self.model, st_decoder_out, input_dict)
loss = st_loss
if self.use_asr:
asr_loss = self.count_asr_loss(encoder_out, asr_input_dict)
loss = (((1 - self.asr_weight) * st_loss) + (self.asr_weight * asr_loss))
loss /= input_dict['nsentences']
records['loss'].append(loss.item())
if self.use_asr:
records['st_loss'].append(st_loss.item())
records['asr_loss'].append(asr_loss.item())
if (mode in ['dev', 'test']):
records['ids'] += input_dict['id'].cpu().tolist()
records['utt_ids'] += input_dict['utt_id']
(hyps, refs) = self._inference_step(input_dict)
records['hyps'] += hyps
records['refs'] += refs
if self.use_asr:
(asr_hyps, asr_refs) = self._inference_step_asr(asr_input_dict)
records['asr_hyps'] += asr_hyps
records['asr_refs'] += asr_refs
return loss
def downsample(self, features):
if (self.downsample_ratio == 1):
return features
new_features = []
for feature in features:
if (self.downsample_method == 'drop'):
feature = feature[::self.downsample_ratio]
elif (self.downsample_method == 'concat'):
N = (feature.size(0) % self.downsample_ratio)
if (N != 0):
feature = F.pad(feature, (0, 0, 0, (self.downsample_ratio - N)))
feature = feature.view((feature.size(0) // self.downsample_ratio), (feature.size(1) * self.downsample_ratio))
elif (self.downsample_method == 'average'):
N = (feature.size(0) % self.downsample_ratio)
if (N != 0):
feature = F.pad(feature, (0, 0, 0, (self.downsample_ratio - N)))
feature = feature.view((feature.size(0) // self.downsample_ratio), self.downsample_ratio, feature.size(1)).mean(dim=1)
else:
raise NotImplementedError
new_features.append(feature)
return new_features
def _create_asr_input_dict(self, input_dict, mode):
if (mode not in self.additional_dataset):
dataset = AdditionalDataset.from_tsv(f'{self.data_dir}/{self.datarc[mode]}.tsv', self.asr_datarc['key'], self.asr_dict, self.asr_bpe)
self.additional_dataset[mode] = dataset
additional_data = self.additional_dataset[mode].get_addtional_input(input_dict['id'])
asr_input_dict = input_dict.copy()
asr_input_dict['net_input'] = input_dict['net_input'].copy()
asr_input_dict['net_input']['prev_output_tokens'] = additional_data['prev_output_tokens']
asr_input_dict['target'] = additional_data['target']
asr_input_dict['target_lengths'] = additional_data['target_lengths']
asr_input_dict['ntokens'] = additional_data['ntokens']
return asr_input_dict
def count_asr_loss(self, encoder_out, input_dict):
hidden = encoder_out['encoder_out'][0]
log_prob = self.asr_head(hidden).log_softmax(2)
hidden_length = self.model.encoder.subsample.get_out_seq_lens_tensor(input_dict['net_input']['src_lengths'])
targets = input_dict['target']
target_lengths = input_dict['target_lengths']
loss = nn.functional.ctc_loss(log_prob, targets, hidden_length, target_lengths, blank=self.asr_dict.index('<blank>'), reduction='sum', zero_infinity=True)
return loss
def _decode(self, toks, dictionary):
toks = toks[(toks != dictionary.pad())]
s = dictionary.string(toks.int().cpu(), self.post_process)
return (s if s else '<unk>')
def _inference_step(self, input_dict):
output = self.generator.generate([self.model], input_dict)
hyps = []
refs = []
for i in range(len(output)):
hyps.append(self._decode(output[i][0]['tokens'], self.task.target_dictionary))
refs.append(self._decode(input_dict['target'][i], self.task.target_dictionary))
return (hyps, refs)
def _inference_step_asr(self, input_dict):
encoder_out = self.model.encoder(src_tokens=input_dict['net_input']['src_tokens'], src_lengths=input_dict['net_input']['src_lengths'])
hidden = encoder_out['encoder_out'][0]
logit = self.asr_head(hidden)
predict = logit.argmax(dim=(- 1)).transpose(0, 1)
hyps = []
refs = []
for i in range(len(predict)):
predict_ids = predict[i].unique_consecutive()
predict_ids = predict_ids[(predict_ids != self.asr_dict.index('<blank>'))]
hyps.append(self._decode(predict_ids, self.asr_dict))
refs.append(self._decode(input_dict['target'][i], self.asr_dict))
return (hyps, refs)
def _metric(self, hyps, refs):
tok = ('zh' if (self.tgt_lang == 'zh') else '13a')
bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize=tok)
return bleu
def _asr_metric(self, hyps, refs):
ce = 0
we = 0
c_total = 0
w_total = 0
for (hyp, ref) in zip(hyps, refs):
normalized_hyp = hyp.translate(str.maketrans('', '', ''.join(list((set(string.punctuation) - set("'-")))))).lower()
normalized_ref = ref.translate(str.maketrans('', '', ''.join(list((set(string.punctuation) - set("'-")))))).lower()
ce += editdistance.eval(normalized_hyp, normalized_ref)
c_total += len(normalized_ref)
hyp_w = normalized_hyp.split()
ref_w = normalized_ref.split()
we += editdistance.eval(hyp_w, ref_w)
w_total += len(ref_w)
cer = (ce / c_total)
wer = (we / w_total)
return (cer, wer)
    def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """Aggregate `records` collected by self.forward and log them.

        Logs averaged losses (train/dev), BLEU and optional ASR CER/WER
        (dev/test) to Tensorboard, dumps id-sorted hypothesis/reference
        TSVs under self.expdir, and returns the checkpoint names to save.

        Returns:
            list of checkpoint filenames (e.g. ['dev-best.ckpt'] when the
            dev BLEU improves); may be empty.
        """
        save_names = []
        if (mode in ['train', 'dev']):
            ave_loss = (sum(records['loss']) / len(records['loss']))
            logger.add_scalar(f'st/{mode}-loss', ave_loss, global_step=global_step)
            if self.use_asr:
                ave_st_loss = (sum(records['st_loss']) / len(records['st_loss']))
                logger.add_scalar(f'st/{mode}-st_loss', ave_st_loss, global_step=global_step)
                ave_asr_loss = (sum(records['asr_loss']) / len(records['asr_loss']))
                logger.add_scalar(f'st/{mode}-asr_loss', ave_asr_loss, global_step=global_step)
        if (mode in ['dev', 'test']):
            # Corpus-level BLEU over the whole split.
            bleu = self._metric(records['hyps'], records['refs'])
            logger.add_scalar(f'st/{mode}-bleu', bleu.score, global_step=global_step)
            # Also log the 1- to 4-gram precisions.
            for i in range(4):
                logger.add_scalar(f'st/{mode}-bleu{(i + 1)}', bleu.precisions[i], global_step=global_step)
            if ((bleu.score > self.best_score) and (mode == 'dev')):
                self.best_score = (torch.ones(1) * bleu.score)
                save_names.append(f'{mode}-best.ckpt')
            # Dump (utt_id, hyp, ref) rows sorted by sample id.
            with open(f'{self.expdir}/{self.output_prefix}-st-{mode}.tsv', 'w') as f:
                print('utt_id', 'hyp', 'ref', sep='\t', file=f)
                results = list(zip(records['ids'], records['hyps'], records['refs'], records['utt_ids']))
                results.sort(key=(lambda x: x[0]))
                for (idx, hyp, ref, utt_id) in results:
                    print(utt_id, hyp, ref, sep='\t', file=f)
            print(bleu)
            if self.use_asr:
                (cer, wer) = self._asr_metric(records['asr_hyps'], records['asr_refs'])
                logger.add_scalar(f'st/{mode}-asr-cer', cer, global_step=global_step)
                logger.add_scalar(f'st/{mode}-asr-wer', wer, global_step=global_step)
                with open(f'{self.expdir}/{self.output_prefix}-asr-{mode}.tsv', 'w') as f:
                    print('utt_id', 'hyp', 'ref', sep='\t', file=f)
                    results = list(zip(records['ids'], records['asr_hyps'], records['asr_refs'], records['utt_ids']))
                    results.sort(key=(lambda x: x[0]))
                    for (idx, hyp, ref, utt_id) in results:
                        print(utt_id, hyp, ref, sep='\t', file=f)
                tqdm.write(f'[cer]:{cer}, [wer]:{wer}')
        return save_names
|
def verbose(args, text):
    """Print `text` to stdout only when `args.verbose` is truthy."""
    if not args.verbose:
        return
    print(text)
|
def length(s):
    """Number of whitespace-separated tokens in `s`."""
    words = s.split()
    return len(words)
|
def verbose(args, text):
    """Emit `text` on stdout when verbose mode is enabled on `args`."""
    enabled = bool(args.verbose)
    if enabled:
        print(text)
|
def create_sentencepiece(filenames, model_type, vocab_size, output_prefix):
    """Train a SentencePiece model and dump a fairseq-style vocab file.

    Trains on the concatenated input files, verifies that the special
    tokens landed on their reserved ids, then writes every non-special
    piece as '<piece> 1' (sorted by id) to '<output_prefix>.txt'.
    """
    sp.SentencePieceTrainer.train(
        input=','.join(filenames),
        model_prefix=output_prefix,
        vocab_size=vocab_size,
        model_type=model_type,
        character_coverage=1.0,
        unk_id=UNK_TOKEN_ID,
        bos_id=BOS_TOKEN_ID,
        eos_id=EOS_TOKEN_ID,
        pad_id=PAD_TOKEN_ID,
    )
    processor = sp.SentencePieceProcessor(model_file=f'{output_prefix}.model')
    id_to_piece = {idx: processor.IdToPiece(idx) for idx in range(processor.GetPieceSize())}
    # Sanity-check the reserved special-token ids.
    assert id_to_piece.get(UNK_TOKEN_ID) == UNK_TOKEN
    assert id_to_piece.get(BOS_TOKEN_ID) == BOS_TOKEN
    assert id_to_piece.get(EOS_TOKEN_ID) == EOS_TOKEN
    assert id_to_piece.get(PAD_TOKEN_ID) == PAD_TOKEN
    specials = {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
    with open(f'{output_prefix}.txt', 'w') as f:
        for idx in sorted(id_to_piece):
            piece = id_to_piece[idx]
            if piece not in specials:
                print(f'{piece} 1', file=f)
|
def verbose(args, text):
    """Conditionally echo `text`, gated on the args.verbose flag."""
    should_print = args.verbose
    if not should_print:
        return
    print(text)
|
class SpeakerVerifi_train(Dataset):
    """Speaker-verification training set over one or more Voxceleb-style roots.

    Scans each root for wav files, caches their VAD-trimmed lengths under
    `.wav_lengths/` next to this module, keeps utterances longer than
    vad_config['min_sec'], and yields (wav, utterance_id, speaker_label).
    """
    def __init__(self, vad_config, key_list, file_path, meta_data, max_timestep=None, n_jobs=12):
        # file_path / key_list are parallel lists: one cache key per root.
        self.roots = file_path
        self.root_key = key_list
        # Max number of samples per training utterance (random crop if longer).
        self.max_timestep = max_timestep
        self.vad_c = vad_config
        self.dataset = []
        self.all_speakers = []
        for index in range(len(self.root_key)):
            cache_path = ((Path(os.path.dirname(__file__)) / '.wav_lengths') / f'{self.root_key[index]}_length.pt')
            cache_path.parent.mkdir(exist_ok=True)
            root = Path(self.roots[index])
            if (not cache_path.is_file()):
                # First run: measure the post-VAD length of every wav in
                # parallel and cache (tags, lengths) for later runs.
                def trimmed_length(path):
                    (wav_sample, _) = apply_effects_file(path, EFFECTS)
                    wav_sample = wav_sample.squeeze(0)
                    length = wav_sample.shape[0]
                    return length
                wav_paths = find_files(root)
                wav_lengths = Parallel(n_jobs=n_jobs)((delayed(trimmed_length)(path) for path in tqdm.tqdm(wav_paths, desc='Preprocessing')))
                # Keep only the last 3 path components (speaker/session/file).
                wav_tags = [Path(path).parts[(- 3):] for path in wav_paths]
                torch.save([wav_tags, wav_lengths], str(cache_path))
            else:
                (wav_tags, wav_lengths) = torch.load(str(cache_path))
                # NOTE(review): here wav_paths are Path objects, while the
                # fresh-scan branch keeps find_files' raw paths — confirm
                # downstream consumers accept both.
                wav_paths = [root.joinpath(*tag) for tag in wav_tags]
            speaker_dirs = [f.stem for f in root.iterdir() if f.is_dir()]
            self.all_speakers.extend(speaker_dirs)
            # Filter out clips shorter than the configured minimum.
            for (path, length) in zip(wav_paths, wav_lengths):
                if (length > self.vad_c['min_sec']):
                    self.dataset.append(path)
        # Sorted speaker list so labels are stable across runs.
        self.all_speakers.sort()
        self.speaker_num = len(self.all_speakers)
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        path = self.dataset[idx]
        (wav, _) = apply_effects_file(str(path), EFFECTS)
        wav = wav.squeeze(0)
        length = wav.shape[0]
        # Random crop long utterances to max_timestep samples.
        if (self.max_timestep != None):
            if (length > self.max_timestep):
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
        # utterance_id: 'speaker-session-file' (without the .wav suffix).
        tags = Path(path).parts[(- 3):]
        utterance_id = '-'.join(tags).replace('.wav', '')
        label = self.all_speakers.index(tags[0])
        return (wav.numpy(), utterance_id, label)
    def collate_fn(self, samples):
        # Transpose a list of (wav, utt_id, label) into three tuples.
        return zip(*samples)
|
class SpeakerVerifi_test(Dataset):
    """Verification-trial dataset: yields each unique utterance once.

    The meta-data file lists trials as '<label> <path1> <path2>'; this
    dataset enumerates the distinct utterance paths for embedding
    extraction, while the labelled pairs stay in self.pair_table for
    later scoring.
    """
    def __init__(self, vad_config, file_path, meta_data):
        self.root = file_path
        self.meta_data = meta_data
        self.necessary_dict = self.processing()
        self.vad_c = vad_config
        self.dataset = self.necessary_dict['spk_paths']
        self.pair_table = self.necessary_dict['pair_table']
    def processing(self):
        """Parse the trial list into unique paths and labelled pairs."""
        pair_table = []
        unique_paths = set()
        with open(self.meta_data, 'r') as meta_file:
            trial_lines = meta_file.readlines()
        for line in trial_lines:
            fields = line.split()
            first = os.path.join(self.root, fields[1])
            second = os.path.join(self.root, fields[2])
            unique_paths.add(first)
            unique_paths.add(second)
            pair_table.append([fields[0], first, second])
        return {'spk_paths': list(unique_paths), 'total_spk_num': None, 'pair_table': pair_table}
    def __len__(self):
        return len(self.necessary_dict['spk_paths'])
    def __getitem__(self, idx):
        utt_path = self.dataset[idx]
        (wav, _) = apply_effects_file(utt_path, EFFECTS)
        wav = wav.squeeze(0)
        return (wav.numpy(), utt_path)
    def collate_fn(self, data_sample):
        (wavs, names) = zip(*data_sample)
        return (wavs, names)
|
class DownstreamExpert(nn.Module):
    """Speaker-verification (Voxceleb1) downstream expert.

    Builds the train/dev/test datasets, the aggregation model and the
    training objective; scores dev/test trials with cosine similarity
    and tracks the best EER for checkpointing.

    Dataloaders output batches of ([wav1, wav2, ...], other_contents, ...)
    where each wav is a variable-length torch.FloatTensor.
    """
    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = expdir
        train_file_path = Path(self.datarc['file_path']) / 'dev' / 'wav'
        test_file_path = Path(self.datarc['file_path']) / 'test' / 'wav'
        train_config = {'vad_config': self.datarc['vad_config'], 'file_path': [train_file_path], 'key_list': ['Voxceleb1'], 'meta_data': self.datarc['train_meta_data'], 'max_timestep': self.datarc['max_timestep']}
        self.train_dataset = SpeakerVerifi_train(**train_config)
        # Dev trials are drawn from the same 'dev/wav' tree as training.
        dev_config = {'vad_config': self.datarc['vad_config'], 'file_path': train_file_path, 'meta_data': self.datarc['dev_meta_data']}
        self.dev_dataset = SpeakerVerifi_test(**dev_config)
        test_config = {'vad_config': self.datarc['vad_config'], 'file_path': test_file_path, 'meta_data': self.datarc['test_meta_data']}
        self.test_dataset = SpeakerVerifi_test(**test_config)
        # Project upstream features into the downstream model's input dim.
        self.connector = nn.Linear(self.upstream_dim, self.modelrc['input_dim'])
        agg_dim = self.modelrc['module_config'][self.modelrc['module']].get('agg_dim', self.modelrc['input_dim'])
        ModelConfig = {'input_dim': self.modelrc['input_dim'], 'agg_dim': agg_dim, 'agg_module_name': self.modelrc['agg_module'], 'module_name': self.modelrc['module'], 'hparams': self.modelrc['module_config'][self.modelrc['module']], 'utterance_module_name': self.modelrc['utter_module']}
        self.model = Model(**ModelConfig)
        objective_config = {'speaker_num': self.train_dataset.speaker_num, 'hidden_dim': self.modelrc['input_dim'], **self.modelrc['LossConfig'][self.modelrc['ObjectiveLoss']]}
        # NOTE: eval() on a config-supplied class name — config must be trusted.
        self.objective = eval(self.modelrc['ObjectiveLoss'])(**objective_config)
        self.score_fn = nn.CosineSimilarity(dim=-1)
        self.eval_metric = EER
        # Lower is better for EER; start from a sentinel of 100.
        self.register_buffer('best_score', torch.ones(1) * 100)

    def get_dataloader(self, mode):
        """Return the dataloader for 'train', 'dev' or 'test'."""
        if mode == 'train':
            return self._get_train_dataloader(self.train_dataset)
        elif mode == 'dev':
            return self._get_eval_dataloader(self.dev_dataset)
        elif mode == 'test':
            return self._get_eval_dataloader(self.test_dataset)

    def _get_train_dataloader(self, dataset):
        # Use a DistributedSampler when torch.distributed is initialized.
        sampler = DistributedSampler(dataset) if is_initialized() else None
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    # Legacy aliases kept for backward compatibility.
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def forward(self, mode, features, utter_idx, labels=None, records=None, **kwargs):
        """One SV step.

        In 'train' mode returns the classification loss; in 'dev'/'test'
        stores one normalized utterance embedding per utterance id in
        `records` (keyed by utterance name) and returns a dummy loss.
        """
        features_pad = pad_sequence(features, batch_first=True)
        if self.modelrc['module'] == 'XVector':
            # NOTE(review): XVector's TDNN stack appears to shrink the time
            # axis by 14 frames — confirm against the XVector definition.
            attention_mask = [torch.ones(feature.shape[0] - 14) for feature in features]
        else:
            attention_mask = [torch.ones(feature.shape[0]) for feature in features]
        attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
        # Additive mask: 0 on valid frames, large negative on padding.
        attention_mask_pad = (1.0 - attention_mask_pad) * (-100000.0)
        features_pad = self.connector(features_pad)
        if mode == 'train':
            agg_vec = self.model(features_pad, attention_mask_pad.cuda())
            labels = torch.LongTensor(labels).to(features_pad.device)
            loss = self.objective(agg_vec, labels)
            records['loss'].append(loss.item())
            return loss
        elif mode in ['dev', 'test']:
            agg_vec = self.model.inference(features_pad, attention_mask_pad.cuda())
            agg_vec = torch.nn.functional.normalize(agg_vec, dim=-1)
            utt_name = utter_idx
            for idx in range(len(agg_vec)):
                records[utt_name[idx]] = agg_vec[idx].cpu().detach()
            return torch.tensor(0)

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log metrics, dump prediction/truth files, and name checkpoints.

        Returns:
            list of checkpoint filenames (e.g. ['dev-best.ckpt'] when the
            dev EER improves); may be empty.
        """
        save_names = []
        if mode == 'train':
            loss = torch.FloatTensor(records['loss']).mean().item()
            logger.add_scalar(f'sv-voxceleb1/{mode}-loss', loss, global_step=global_step)
            print(f'sv-voxceleb1/{mode}-loss: {loss}')
        elif mode in ['dev', 'test']:
            # Fix: score each split against its own trial list. The dev
            # records only hold dev-utterance embeddings, so looking up the
            # test trial names there returned empty defaultdict entries.
            dataset = self.dev_dataset if mode == 'dev' else self.test_dataset
            trials = dataset.pair_table
            labels = []
            scores = []
            def names(name):
                return '-'.join(name.split('/')[-3:]).split('.')[0]
            for (label, name1, name2) in trials:
                labels.append(label)
                score = self.score_fn(records[name1], records[name2]).numpy()
                records['scores'].extend([score])
                records['pair_names'].extend([f'{names(name1)}_{names(name2)}'])
                scores.append(score)
            (eer, *others) = self.eval_metric(np.array(labels, dtype=int), np.array(scores))
            logger.add_scalar(f'sv-voxceleb1/{mode}-EER', eer, global_step=global_step)
            print(f'sv-voxceleb1/{mode}-EER: {eer}')
            if (eer < self.best_score) and (mode == 'dev'):
                self.best_score = torch.ones(1) * eer
                save_names.append(f'{mode}-best.ckpt')
            with open(Path(self.expdir) / f'{mode}_predict.txt', 'w') as file:
                file.writelines([f'{name} {score}\n' for (name, score) in zip(records['pair_names'], records['scores'])])
            # Fix: the ground-truth labels live in the local `labels` list;
            # records['labels'] was never populated, so the truth file used
            # to come out empty.
            with open(Path(self.expdir) / f'{mode}_truth.txt', 'w') as file:
                file.writelines([f'{name} {label}\n' for (name, label) in zip(records['pair_names'], labels)])
        return save_names

    def separate_data(self, agg_vec):
        """Split a stacked batch of embeddings into its two trial halves."""
        assert (len(agg_vec) % 2) == 0
        total_num = len(agg_vec) // 2
        feature1 = agg_vec[:total_num]
        feature2 = agg_vec[total_num:]
        return (feature1, feature2)
|
def collect_speaker_ids(roots, speaker_num):
    """Split the speaker directories under `roots` into train/dev sets.

    `roots` is expected to end with '/'; a speaker directory is kept only
    when its grandparent path component matches the parent component of
    `roots`. `speaker_num` speakers are sampled at random for the dev set.
    """
    speaker_dirs = [entry.path for entry in os.scandir(roots) if entry.is_dir()]
    root_tag = roots.split('/')[-2]
    vox1 = []
    for dir_path in speaker_dirs:
        parts = dir_path.split('/')
        if parts[-3] == root_tag:
            vox1.append(parts[-1])
    dev_speaker = random.sample(vox1, k=speaker_num)
    train_speaker = [spk for spk in vox1 if spk not in dev_speaker]
    return (train_speaker, dev_speaker)
|
def construct_dev_speaker_id_txt(dev_speakers, dev_txt_name):
    """Write one dev speaker id per line to `dev_txt_name`.

    Fix: use a context manager so the file handle is always closed, even
    if a write fails (the original leaked the handle on error).
    """
    with open(dev_txt_name, 'w') as f:
        for dev in dev_speakers:
            f.write(dev)
            f.write('\n')
    return
|
def sample_wavs_and_dump_txt(root, dev_ids, numbers, meta_data_name):
    """Sample `numbers` verification trials and dump them as a meta file.

    Each line is '<label> <path1> <path2>': with probability 0.5 a
    negative pair (two different speakers, label 0), otherwise a positive
    pair (two utterances of one speaker, label 1). Paths keep only their
    last three components.
    """
    wav_list = []
    count_positive = 0
    print(f'generate {numbers} sample pairs')
    for _ in trange(numbers):
        prob = random.random()
        if prob > 0.5:
            # Negative trial: two distinct speakers.
            spk_a, spk_b = random.sample(dev_ids, 2)
            sample1 = '/'.join(random.choice(find_files(os.path.join(root, spk_a))).split('/')[-3:])
            sample2 = '/'.join(random.choice(find_files(os.path.join(root, spk_b))).split('/')[-3:])
            wav_list.append(' '.join(['0', sample1, sample2]))
        else:
            # Positive trial: two utterances of the same speaker.
            (spk,) = random.sample(dev_ids, 1)
            sample1 = '/'.join(random.choice(find_files(os.path.join(root, spk))).split('/')[-3:])
            sample2 = '/'.join(random.choice(find_files(os.path.join(root, spk))).split('/')[-3:])
            count_positive += 1
            wav_list.append(' '.join(['1', sample1, sample2]))
    print('finish, then dump file ..')
    with open(meta_data_name, 'w') as f:
        for data in wav_list:
            f.write(data + '\n')
    return wav_list
|
def EER(labels, scores):
    """Compute the equal error rate and its score threshold.

    Args:
        labels: (N,) array-like of 0/1 ground-truth labels.
        scores: (N,) array-like of similarity scores in [-1, 1].

    Returns:
        (eer, thresh): EER (where FPR == 1 - TPR on the ROC curve) and the
        score threshold at that operating point.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)
    # Build the TPR interpolator once and reuse it; the original created a
    # fresh interp1d on every brentq evaluation (and left `s` unused).
    tpr_at = interp1d(fpr, tpr)
    eer = brentq(lambda x: (1.0 - x) - tpr_at(x), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """
    Compute the equal error rate by binary-searching the score threshold
    where the false-positive and false-negative counts cross.

    Args:
        labels: (N,1) with value being 0 or 1
        scores: (N,1) within [-1, 1]

    Returns:
        equal_error_rates
        threshold
    """
    # Sort trials by score so prefix sums give per-threshold error counts.
    joints = sorted(zip(scores, labels), key=(lambda x: x[0]))
    (sorted_scores, sorted_labels) = zip(*joints)
    total_ones = sum(sorted_labels)
    total_zeros = (len(sorted_labels) - total_ones)
    # prefsum_ones[i] / prefsum_zeros[i]: number of 1s / 0s among the i
    # lowest-scoring trials (those rejected by a threshold at index i).
    prefsum_ones = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=1), initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=0), initial=0))
    # Pad the score axis so a threshold can sit below or above every score.
    ext_scores = [(- 1.0), *sorted_scores, 1.0]
    (thresh_left, thresh_right) = (0, len(ext_scores))
    # Binary search: raising the threshold lowers false positives and
    # raises false negatives, so look for the crossing index.
    while True:
        if (thresh_left == thresh_right):
            break
        thresh_idx = ((thresh_left + thresh_right) // 2)
        nb_false_positives = (total_zeros - prefsum_zeros[thresh_idx])
        nb_false_negatives = prefsum_ones[thresh_idx]
        if (nb_false_positives > nb_false_negatives):
            # NOTE(review): moving the left bound to thresh_idx (not
            # thresh_idx + 1) can loop forever when right - left == 1 and
            # the midpoint equals the left bound — verify termination.
            thresh_left = thresh_idx
        elif (nb_false_positives < nb_false_negatives):
            thresh_right = thresh_idx
        else:
            break
    # Place the threshold halfway between the scores around the crossing.
    thresh = ((ext_scores[thresh_idx] + ext_scores[(thresh_idx + 1)]) / 2)
    false_negative_ratio = (nb_false_negatives / len(labels))
    false_positive_ratio = (nb_false_positives / len(labels))
    equal_error_rate = ((false_positive_ratio + false_negative_ratio) / 2)
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel):
    """Pair up speaker embeddings and score them with cosine similarity.

    The first len(ylabel) entries of `input_x_speaker` are the first
    elements of each trial pair, the remaining len(ylabel) entries the
    second elements.

    Args:
        input_x_speaker: sequence of 2 * len(ylabel) embedding tensors.
        ylabel: list of 0/1 label tensors, one per pair.

    Returns:
        (scores, ylabel): cosine scores and integer labels as lists.
    """
    n_pairs = len(ylabel)
    wav1 = torch.stack([input_x_speaker[i].unsqueeze(0) for i in range(n_pairs)])
    wav2 = torch.stack([input_x_speaker[n_pairs + i].unsqueeze(0) for i in range(n_pairs)])
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    # Fix: this is a module-level function, so the original `self.score_fn`
    # raised NameError; use the equivalent functional cosine similarity.
    scores = F.cosine_similarity(wav1, wav2, dim=-1).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
class DownstreamExpert(nn.Module):
    """
    Query-by-example spoken-term-detection expert (SWS2013 / QUESST14).

    Trains a cosine-embedding scorer on SWS2013 query/document pairs and
    evaluates by writing an NIST-STD style benchmark.stdlist.xml file.
    """
    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.lossrc = downstream_expert['lossrc']
        self.expdir = Path(expdir)
        # Datasets are created lazily inside get_dataloader.
        self.train_dataset = None
        self.valid_dataset = None
        self.test_dataset = None
        self.model = Model(input_dim=upstream_dim, **self.modelrc)
        self.objective = nn.CosineEmbeddingLoss(**self.lossrc)
    def get_dataloader(self, mode):
        """Build (lazily) and return the dataloader for `mode`."""
        # 'train' also constructs the validation set so 'valid' can reuse it.
        if (mode == 'train'):
            self.train_dataset = SWS2013Dataset('dev', **self.datarc)
            self.valid_dataset = SWS2013Dataset('eval', **self.datarc)
            return DataLoader(self.train_dataset, sampler=WeightedRandomSampler(weights=self.train_dataset.sample_weights, num_samples=len(self.train_dataset.sample_weights), replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.train_dataset.collate_fn)
        if (mode == 'valid'):
            return DataLoader(self.valid_dataset, sampler=WeightedRandomSampler(weights=self.valid_dataset.sample_weights, num_samples=self.datarc['valid_size'], replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.valid_dataset.collate_fn)
        if (mode in ['dev', 'eval']):
            self.test_dataset = QUESST14Dataset(mode, **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        if (mode == 'sws2013_eval'):
            self.test_dataset = SWS2013Testset('eval', **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        raise NotImplementedError
    def forward(self, mode, features, labels, records, **kwargs):
        """Training: cosine-embedding loss on audio/query pairs.
        Testing: store per-utterance segment embeddings in `records`."""
        if (mode in ['train', 'valid']):
            # Batch layout: first half audio segments, second half queries.
            audio_tensors = torch.stack(features[:(len(features) // 2)])
            query_tensors = torch.stack(features[(len(features) // 2):])
            labels = torch.cat(labels).to(audio_tensors.device)
            audio_embs = self.model(audio_tensors)
            query_embs = self.model(query_tensors)
            loss = self.objective(audio_embs, query_embs, labels)
            records['loss'].append(loss.item())
            with torch.no_grad():
                similarities = F.cosine_similarity(audio_embs, query_embs)
                records['similarity-positive'] += similarities[(labels > 0)].tolist()
                records['similarity-negative'] += similarities[(labels < 0)].tolist()
            return loss
        elif (mode in ['dev', 'eval', 'sws2013_eval']):
            audio_tensors = torch.stack(features)
            # `labels` carries (segment counts, utterance names) here.
            (lengths, audio_names) = labels
            embs = self.model(audio_tensors)
            embs = embs.detach().cpu()
            # Re-group the flat segment embeddings per utterance.
            offset = 0
            for (length, audio_name) in zip(lengths, audio_names):
                records['embs'].append(embs[offset:(offset + length)])
                records['audio_names'].append(audio_name)
                offset += length
            # NOTE(review): no loss is returned in this branch (implicit
            # None) — confirm the runner ignores the eval return value.
        else:
            raise NotImplementedError
    def log_records(self, mode, records, logger, global_step, **kwargs):
        'Log training, validation information or test on a dataset.'
        if (mode in ['train', 'valid']):
            prefix = f'sws2013/{mode}'
            for (key, val) in records.items():
                average = (sum(val) / len(val))
                logger.add_scalar(f'{prefix}-{key}', average, global_step=global_step)
        elif (mode in ['dev', 'eval', 'sws2013_eval']):
            # Records were collected queries-first (see QUESST14Dataset.data).
            query_embs = records['embs'][:self.test_dataset.n_queries]
            doc_embs = records['embs'][self.test_dataset.n_queries:]
            query_names = records['audio_names'][:self.test_dataset.n_queries]
            doc_names = records['audio_names'][self.test_dataset.n_queries:]
            results = {}
            for (query_emb, query_name) in zip(tqdm(query_embs, desc='Query', ncols=0), query_names):
                # Only the query's first segment embedding is used.
                query_emb = query_emb[0:1].cuda()
                scores = []
                for (doc_emb, doc_name) in zip(tqdm(doc_embs, desc='Doc', ncols=0, leave=False), doc_names):
                    with torch.no_grad():
                        doc_emb = doc_emb.cuda()
                        similarities = F.cosine_similarity(query_emb, doc_emb)
                        # Best-matching document segment is the doc score.
                        score = similarities.max().detach().cpu()
                    scores.append(score)
                scores = torch.stack(scores)
                # Per-query z-score normalization of document scores.
                scores = ((scores - scores.mean()) / (scores.std() + 1e-06))
                results[query_name] = list(zip(doc_names, scores.tolist()))
            score_thresh = 0
            # Dump the NIST STD xml expected by the official scoring tools.
            root = etree.Element('stdlist', termlist_filename='benchmark.stdlist.xml', indexing_time='1.00', language='english', index_size='1', system_id='benchmark')
            for (query_name, doc_scores) in results.items():
                term_list = etree.SubElement(root, 'detected_termlist', termid=query_name, term_search_time='1.0', oov_term_count='1')
                for (doc_name, score) in doc_scores:
                    etree.SubElement(term_list, 'term', file=doc_name, channel='1', tbeg='0.000', dur='0.00', score=f'{score:.4f}', decision=('YES' if (score > score_thresh) else 'NO'))
            tree = etree.ElementTree(root)
            tree.write(str((self.expdir / 'benchmark.stdlist.xml')), encoding='UTF-8', pretty_print=True)
        else:
            raise NotImplementedError
|
class Model(nn.Module):
    """Frame-level encoder with softmax attention pooling.

    Projects features through a bottleneck and a tanh hidden layer, then
    collapses the time axis with learned attention weights.
    """
    def __init__(self, input_dim, bottleneck_dim, hidden_dim, **kwargs):
        super(Model, self).__init__()
        self.connector = nn.Linear(input_dim, bottleneck_dim)
        self.fc1 = nn.Linear(bottleneck_dim, hidden_dim)
        self.attention_linear = nn.Linear(hidden_dim, 1)
    def forward(self, features):
        """(B, T, input_dim) -> (B, hidden_dim) attention-pooled embedding."""
        bottleneck = F.relu(self.connector(features))
        hidden = torch.tanh(self.fc1(bottleneck))
        # Softmax over the time axis gives per-frame pooling weights.
        weights = F.softmax(self.attention_linear(hidden), dim=1)
        return (hidden * weights).sum(dim=1)
|
class QUESST14Dataset(Dataset):
    """QUESST 2014 spoken-term-detection dataset (English subset).

    Items are 3-second sliding windows (48000 samples, 12000-sample hop)
    cut from each VAD-trimmed 16 kHz mono recording. Queries come first
    in `self.data`, followed by the documents.
    """
    def __init__(self, split, **kwargs):
        assert split in ('dev', 'eval')
        root = Path(kwargs['quesst2014_root'])
        doc_paths = get_audio_paths(root, 'language_key_utterances.lst')
        query_paths = get_audio_paths(root, f'language_key_{split}.lst')
        self.dataset_root = root
        self.n_queries = len(query_paths)
        self.n_docs = len(doc_paths)
        # Queries first, then documents (log_records relies on this order).
        self.data = query_paths + doc_paths
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        audio_path = self.data[idx]
        # 16 kHz mono, normalize, VAD-trim both ends, pad 3 s of silence.
        effects = [
            ['channels', '1'],
            ['rate', '16000'],
            ['norm'],
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
            ['pad', '0', '3'],
        ]
        (wav, _) = apply_effects_file(str(audio_path), effects)
        # 3-second windows with a 0.75-second hop.
        segments = wav.squeeze(0).unfold(0, 48000, 12000).unbind(0)
        return (segments, len(segments), audio_path.with_suffix('').name)
    def collate_fn(self, samples):
        """Flatten per-utterance segment lists into one batch list."""
        (segments, lengths, audio_names) = zip(*samples)
        flat = [seg for utt_segs in segments for seg in utt_segs]
        return (flat, (lengths, audio_names))
|
def get_audio_paths(dataset_root_path, lst_name):
    """Read a scoring .lst file and return the audio paths it lists.

    Each line has the form '<audio_path> <language>'; the leading path
    component (up to and including the first '/') is stripped before
    joining with the dataset root.
    """
    lst_path = dataset_root_path / 'scoring' / lst_name
    paths = []
    with open(lst_path) as lst_file:
        for raw_line in lst_file:
            (audio_path, _lang) = tuple(raw_line.strip().split())
            relative = re.sub('^.*?\\/', '', audio_path)
            paths.append(dataset_root_path / relative)
    return paths
|
class SWS2013Dataset(Dataset):
    """SWS 2013 dataset.

    Pairs spoken queries with audio documents for query-by-example training:
    positive pairs come from the RTTM alignments, negative pairs draw a random
    audio that the query does not occur in. Items are (audio, query, label)
    with label +1 for positives and -1 for negatives.
    """

    def __init__(self, split, **kwargs):
        """Build positive/negative query-audio pairs for one split.

        Args:
            split (str): 'dev' or 'eval'.
            **kwargs: must provide 'sws2013_root' (audio + queries) and
                'sws2013_scoring_root' (ecf/rttm scoring files).
        """
        assert split in ['dev', 'eval']
        dataset_root = Path(kwargs['sws2013_root'])
        split_root = Path(kwargs['sws2013_scoring_root']) / f'sws2013_{split}'
        audio2dur = parse_ecf(split_root / 'sws2013.ecf.xml')
        query2audios = parse_rttm(split_root / f'sws2013_{split}.rttm')
        query2tensors = find_queries(dataset_root / f'{split}_queries')
        print(f'[SWS2013] # of audios: {len(audio2dur)}')
        print(f'[SWS2013] # of queries: {len(query2tensors)}')
        # For each query, the complement set = audios the query does NOT occur
        # in; these are the candidates for negative sampling.
        all_audio_set = set(audio2dur.keys())
        query2audio_set = {
            query: set(audio_info['audio'] for audio_info in audio_infos)
            for query, audio_infos in query2audios.items()
        }
        query2audio_compl_set = {
            query: all_audio_set - audio_set
            for query, audio_set in query2audio_set.items()
        }
        positive_pairs, negative_pairs = [], []
        for query, tensors in query2tensors.items():
            for query_tensor in tensors:
                # One negative pair per query tensor; queries absent from the
                # RTTM may be paired with any audio.
                negative_pairs.append({
                    'query_tensor': query_tensor,
                    'audio_set': (query2audio_compl_set[query]
                                  if query in query2audio_compl_set
                                  else all_audio_set),
                })
                if query not in query2audios:
                    continue
                # One positive pair per (query tensor, aligned occurrence).
                for audio_info in query2audios[query]:
                    positive_pairs.append({
                        'query_tensor': query_tensor,
                        'audio': audio_info['audio'],
                        'offset': audio_info['offset'],
                        'duration': audio_info['duration'],
                    })
        print(f'[SWS2013] # of positive pairs: {len(positive_pairs)}')
        self.audio_dir = dataset_root / 'Audio'
        self.audio2dur = audio2dur
        self.max_dur = 3.0  # seconds per training segment
        self.positive_pairs = positive_pairs
        self.negative_pairs = negative_pairs

    def __len__(self):
        return len(self.positive_pairs) + len(self.negative_pairs)

    def __getitem__(self, idx):
        """Return (audio_tensor, query_tensor, label); positives first."""
        if idx < len(self.positive_pairs):
            pair = self.positive_pairs[idx]
            audio_path = (self.audio_dir / pair['audio']).with_suffix('.wav')
            audio_tensor = path2segment(
                audio_path, pair['duration'], self.max_dur, pair['offset'])
        else:
            pair = self.negative_pairs[idx - len(self.positive_pairs)]
            # random.sample() on a set was deprecated in 3.9 and raises
            # TypeError since Python 3.11 — draw from a materialized sequence.
            sampled_audio = random.choice(tuple(pair['audio_set']))
            audio_dur = self.audio2dur[sampled_audio]
            audio_path = (self.audio_dir / sampled_audio).with_suffix('.wav')
            audio_tensor = path2segment(audio_path, audio_dur, self.max_dur, 0.0)
        audio_tensor = audio_tensor.squeeze(0)
        query_tensor = tensor2segment(pair['query_tensor'], self.max_dur)
        label = torch.LongTensor([1 if idx < len(self.positive_pairs) else -1])
        return audio_tensor, query_tensor, label

    def collate_fn(self, samples):
        'Collate a mini-batch of data.'
        audio_tensors, query_tensors, labels = zip(*samples)
        # Audio segments first, then query segments, in one flat tuple.
        return audio_tensors + query_tensors, labels

    @property
    def sample_weights(self):
        """Sample weights to balance positive and negative data.

        Each class contributes total weight 1; guarded so an empty class
        yields an empty contribution instead of ZeroDivisionError.
        """
        n_pos = len(self.positive_pairs)
        n_neg = len(self.negative_pairs)
        pos_weights = [1 / n_pos] * n_pos if n_pos else []
        neg_weights = [1 / n_neg] * n_neg if n_neg else []
        return pos_weights + neg_weights
|
def parse_rttm(rttm_path):
    """Parse audio and query pairs from *.rttm.

    Returns a dict mapping query name -> list of occurrences, each with the
    audio name, offset (s), and duration (s). Non-matching lines are skipped.
    """
    # NOTE: integer parts use [0-9]+ (not a single [0-9]); with the one-digit
    # form, the lazy '.*?' swallows leading digits and an offset like '12.34'
    # is silently parsed as '2.34'.
    pattern = re.compile(
        r'LEXEME\s+(sws2013_[0-9]+).*?([0-9]+\.[0-9]+)\s+([0-9]+\.[0-9]+)\s+(sws2013_(dev|eval)_[0-9]+)'
    )
    query2audios = defaultdict(list)
    with open(rttm_path) as fd:
        for line in fd:
            match = pattern.match(line)
            if match is None:
                continue
            query2audios[match.group(4)].append({
                'audio': match.group(1),
                'offset': float(match.group(2)),
                'duration': float(match.group(3)),
            })
    return query2audios
|
def parse_ecf(ecf_path):
    """Find audios from sws2013.ecf.xml.

    Returns a dict mapping audio basename (without the 'Audio/' prefix or the
    '.wav' suffix) to its duration in seconds, read from each <excerpt> node.
    """
    audio2dur = {}
    for node in ET.parse(str(ecf_path)).getroot().findall('excerpt'):
        raw_name = node.attrib['audio_filename']
        basename = raw_name.replace('Audio/', '').replace('.wav', '')
        audio2dur[basename] = float(node.attrib['dur'])
    return audio2dur
|
def find_queries(query_dir_path):
    """Find all queries under sws2013_dev & sws2013_eval.

    Groups *.wav files by query name (a trailing '_NN' take index is dropped)
    and loads each as a mono 16 kHz normalized 1-D tensor; silence at both
    ends is VAD-trimmed unless trimming leaves less than half a second.
    """
    take_suffix = re.compile(r'(_[0-9]{2})?\.wav')
    query2tensors = defaultdict(list)
    wav_paths = list(query_dir_path.glob('*.wav'))
    for wav_path in tqdm(wav_paths, ncols=0, desc='Load queries'):
        query_name = take_suffix.sub('', wav_path.name)
        wav, sample_rate = apply_effects_file(
            str(wav_path), [['channels', '1'], ['rate', '16000'], ['norm']])
        # Trim leading silence, then (via reverse) trailing silence.
        trimmed, _ = apply_effects_tensor(wav, sample_rate, [
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
        ])
        # Keep the untrimmed audio if VAD left under 0.5 s.
        if trimmed.size(1) >= sample_rate * 0.5:
            wav = trimmed
        query2tensors[query_name].append(wav.squeeze(0))
    return query2tensors
|
def path2segment(filepath, src_dur, tgt_dur, offset):
    """Load a random tgt_dur-second window around `offset` from an audio file.

    The file is loaded mono at 16 kHz, normalized, padded with tgt_dur seconds
    of silence on both sides, then trimmed to tgt_dur seconds starting at a
    uniformly shifted position — so the window may extend into the padding.
    """
    random_shift = random.uniform(0, src_dur - tgt_dur)
    start = tgt_dur + offset + random_shift
    effects = [
        ['channels', '1'],
        ['rate', '16000'],
        ['norm'],
        ['pad', f'{tgt_dur}', f'{tgt_dur}'],
        ['trim', f'{start}', f'{tgt_dur}'],
    ]
    segment, _ = apply_effects_file(str(filepath), effects)
    return segment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.