code stringlengths 17 6.64M |
|---|
def tensor2segment(tensor, tgt_dur, sample_rate=16000):
    """Cut a random fixed-duration segment out of a 1-D audio tensor via sox.

    The waveform is padded by `tgt_dur` seconds on both sides, then a
    `tgt_dur`-second window is trimmed at a uniformly random offset, so short
    sources are zero-padded and long sources yield a random crop.
    """
    src_dur = len(tensor) / sample_rate
    shift = random.uniform(0, src_dur - tgt_dur)
    effects = [
        ['pad', f'{tgt_dur}', f'{tgt_dur}'],
        ['trim', f'{tgt_dur + shift}', f'{tgt_dur}'],
    ]
    segment, _ = apply_effects_tensor(tensor.unsqueeze(0), sample_rate, effects)
    return segment.squeeze(0)
|
class SWS2013Testset(Dataset):
    """SWS 2013 testset: query utterances followed by document utterances,
    each served as overlapping fixed-length windows."""

    def __init__(self, split, **kwargs):
        assert split in ('dev', 'eval')
        scoring_root = Path(kwargs['sws2013_scoring_root'])
        split_dir = scoring_root / f'sws2013_{split}'
        audio_names = parse_ecf(split_dir / 'sws2013.ecf.xml')
        query_names = parse_tlist(split_dir / f'sws2013_{split}.tlist.xml')
        self.dataset_root = Path(kwargs['sws2013_root'])
        self.split = split
        self.n_queries = len(query_names)
        self.n_docs = len(audio_names)
        # Queries come first, so any index < n_queries identifies a query.
        self.data = query_names + audio_names

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        audio_name = self.data[idx]
        if idx < self.n_queries:
            audio_path = self.dataset_root / f'{self.split}_queries' / audio_name
        else:
            audio_path = self.dataset_root / 'Audio' / audio_name
        audio_path = audio_path.with_suffix('.wav')
        # Mono 16 kHz, normalized, VAD-trimmed at both ends (via reverse trick),
        # then padded with 3 seconds of trailing silence.
        wav, _ = apply_effects_file(str(audio_path), [
            ['channels', '1'],
            ['rate', '16000'],
            ['norm'],
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
            ['vad', '-T', '0.25', '-p', '0.1'],
            ['reverse'],
            ['pad', '0', '3'],
        ])
        # 48000-sample (3 s) windows with a 12000-sample (0.75 s) hop.
        segments = wav.squeeze(0).unfold(0, 48000, 12000).unbind(0)
        return segments, len(segments), audio_name

    def collate_fn(self, samples):
        """Collate a mini-batch of data."""
        segments, lengths, audio_names = zip(*samples)
        flat_segments = [seg for utt_segs in segments for seg in utt_segs]
        return flat_segments, (lengths, audio_names)
|
def parse_ecf(ecf_path):
    """Find audio paths from sws2013.ecf.xml."""
    root = ET.parse(str(ecf_path)).getroot()
    return [
        excerpt.attrib['audio_filename'].replace('Audio/', '').replace('.wav', '')
        for excerpt in root.findall('excerpt')
    ]
|
def parse_tlist(tlist_path):
    """Find audio paths from sws2013_eval.tlist.xml."""
    root = ET.parse(str(tlist_path)).getroot()
    return [term.attrib['termid'] for term in root.findall('term')]
|
class DownstreamExpert(PhoneExpert):
    """Phone-recognition downstream expert with a config-selected model class.

    Basically the same as the phone linear expert, except the model backbone
    is picked at runtime from the config's ``select`` entry.
    """
    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        # Drop the model the parent constructed; it is replaced below.
        delattr(self, 'model')
        # NOTE(review): eval() on a config string — safe only for trusted configs.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc[self.modelrc['select']]
        self.model = model_cls(self.upstream_dim, output_class_num=self.train_dataset.class_num, **model_conf)
|
def timit_posteriorgram_local(ckpt, *args, **kwargs):
    """The model from a local checkpoint.

    Args:
        ckpt (str): path to the checkpoint file.

    Raises:
        FileNotFoundError: if ``ckpt`` does not point to an existing file.
    """
    # Explicit raise instead of assert: asserts are stripped under `python -O`,
    # which would turn a bad path into a confusing downstream failure.
    if not os.path.isfile(ckpt):
        raise FileNotFoundError(f'Checkpoint file not found: {ckpt}')
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def timit_posteriorgram_url(ckpt, refresh=False, *args, **kwargs):
    """The model from URL.

    Args:
        ckpt (str): URL of the checkpoint.
        refresh (bool): whether to re-download the checkpoint if cached.
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return timit_posteriorgram_local(local_path, *args, **kwargs)
|
def timit_posteriorgram(refresh=False, *args, **kwargs):
    """The default model.

    Args:
        refresh (bool): whether to download ckpt/config again if existed.
    """
    # Default checkpoint hosted on Dropbox; `?dl=1` forces a direct download.
    kwargs['ckpt'] = 'https://www.dropbox.com/s/fb2hkvetp26wges/convbank.ckpt?dl=1'
    return timit_posteriorgram_url(*args, refresh=refresh, **kwargs)
|
class ConvBank(nn.Module):
    """Convolution bank: parallel 1-D convolutions with different kernel sizes.

    Input frames are projected to ``hidden_size``, run through every
    convolution in parallel, concatenated along channels, and projected to
    ``output_class_num`` logits per frame.
    """

    def __init__(self, input_dim, output_class_num, kernels, cnn_size, hidden_size, dropout, **kwargs):
        super(ConvBank, self).__init__()
        self.drop_p = dropout
        self.in_linear = nn.Linear(input_dim, hidden_size)
        latest_size = hidden_size
        self.cnns = nn.ModuleList()
        assert len(kernels) > 0
        for kernel in kernels:
            # padding=kernel//2 preserves sequence length for odd kernel sizes.
            self.cnns.append(nn.Conv1d(latest_size, cnn_size, kernel, padding=kernel // 2))
        # All branches are concatenated on the channel axis before the head.
        latest_size = cnn_size * len(kernels)
        self.out_linear = nn.Linear(latest_size, output_class_num)

    def forward(self, features):
        """Map features (batch, time, input_dim) to logits (batch, time, classes)."""
        # Bug fix: F.dropout defaults to training=True, which kept dropout
        # active during evaluation; tie it to the module's training flag.
        hidden = F.dropout(F.relu(self.in_linear(features)), p=self.drop_p, training=self.training)
        conv_feats = []
        hidden = hidden.transpose(1, 2).contiguous()
        for cnn in self.cnns:
            conv_feats.append(cnn(hidden))
        hidden = torch.cat(conv_feats, dim=1).transpose(1, 2).contiguous()
        hidden = F.dropout(F.relu(hidden), p=self.drop_p, training=self.training)
        predicted = self.out_linear(hidden)
        return predicted
|
class UpstreamExpert(nn.Module):
    """Serves a trained phone classifier's per-frame posteriorgrams as upstream
    features: an S3PRL upstream + featurizer feeds the downstream model loaded
    from the checkpoint, and softmax posteriors are returned as hidden states.
    """
    def __init__(self, ckpt, **kwargs):
        super(UpstreamExpert, self).__init__()
        # NOTE(review): torch.load unpickles arbitrary objects — load trusted ckpts only.
        ckpt = torch.load(ckpt, map_location='cpu')
        args = ckpt['Args']
        # Rebuild the same upstream the checkpoint was trained with.
        self.upstream = getattr(s3prl.hub, args.upstream)()
        self.featurizer = Featurizer(self.upstream, 'last_hidden_state', 'cpu')
        config = ckpt['Config']
        modelrc = config['downstream_expert']['modelrc']
        # NOTE(review): eval() on a config string — safe only for trusted configs.
        model_cls = eval(modelrc['select'])
        model_conf = modelrc[modelrc['select']]
        self.model = model_cls(self.featurizer.output_dim, output_class_num=TIMIT_PHONE_CLASSES, **model_conf)
        self.model.load_state_dict(UpstreamExpert._fix_state_key(ckpt['Downstream']))
    @staticmethod
    def _fix_state_key(states):
        # Strip the first dot-component (e.g. "model.") from every state-dict
        # key; mutates and returns the same dict.
        keys = list(states.keys())
        for key in keys:
            new_key = '.'.join(key.split('.')[1:])
            states[new_key] = states[key]
            states.pop(key)
        return states
    def get_downsample_rates(self, key: str) -> int:
        # Delegate: posterior frames align 1:1 with upstream frames.
        return self.upstream.get_downsample_rates(key)
    def forward(self, wavs):
        """
        Args:
            wavs:
                list of unpadded wavs [wav1, wav2, ...]
                each wav is in torch.FloatTensor with sample rate 16000
                and already put in the device assigned by command-line args

        Return:
            dict with 'last_hidden_state' and 'hidden_states', both holding the
            padded per-frame phone posteriors (softmax over the class axis).
        """
        feats = self.upstream(wavs)
        feats = self.featurizer(wavs, feats)
        feats_length = [len(f) for f in feats]
        feats = pad_sequence(feats, batch_first=True)
        posteriors = self.model(feats)
        # Softmax only over each utterance's valid frames, then re-pad.
        posteriors = [F.softmax(p[:l], dim=(- 1)) for (p, l) in zip(posteriors, feats_length)]
        posteriors = pad_sequence(posteriors, batch_first=True)
        return {'last_hidden_state': posteriors, 'hidden_states': [posteriors]}
|
class DownstreamExpert(PhoneExpert):
    """Basically the same as the phone linear expert."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        # Swap the parent's model for the one-hidden-layer Model in this file.
        delattr(self, 'model')
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class Model(nn.Module):
    """One-hidden-layer frame classifier: linear -> dropout -> ReLU -> linear."""

    def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
        super(Model, self).__init__()
        self.in_linear = nn.Linear(input_dim, hidden_size)
        self.out_linear = nn.Linear(hidden_size, output_class_num)
        self.drop = nn.Dropout(dropout)
        self.act_fn = nn.functional.relu

    def forward(self, features):
        """Map (..., input_dim) features to (..., output_class_num) logits."""
        return self.out_linear(self.act_fn(self.drop(self.in_linear(features))))
|
class PhoneDataset(Dataset):
    """TIMIT frame-aligned phone classification dataset with length bucketing.

    Each __getitem__ returns a whole pre-built bucket (a list of wavs plus a
    list of frame-label tensors), so the DataLoader is expected to use
    batch_size=1 together with this dataset's collate_fn.
    """
    def __init__(self, split, bucket_size, data_root, phone_path, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
        super(PhoneDataset, self).__init__()
        self.data_root = data_root
        self.phone_path = phone_path
        self.sample_rate = sample_rate
        self.class_num = 39
        # Map utterance id -> list of per-frame phone indices.
        self.Y = {}
        # NOTE(review): file handles below are never closed; consider `with`.
        phone_file = open(os.path.join(phone_path, 'converted_aligned_phones.txt')).readlines()
        for line in phone_file:
            line = line.strip('\n').split(' ')
            self.Y[line[0]] = [int(p) for p in line[1:]]
        if (split == 'train'):
            train_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()
            # Keep only SI/SX sentences for training.
            usage_list = [line for line in train_list if (line.split('-')[2][:2] in ('SI', 'SX'))]
        elif ((split == 'dev') or (split == 'test')):
            test_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()
            # Drop SA sentences from evaluation entirely.
            usage_list = [line for line in test_list if (line.split('-')[2][:2] != 'SA')]
            if (split == 'dev'):
                # dev = non-core speakers; test = speakers in TEST_SPEAKERS.
                usage_list = [line for line in usage_list if (not (line.split('-')[1].lower() in TEST_SPEAKERS))]
            else:
                usage_list = [line for line in usage_list if (line.split('-')[1].lower() in TEST_SPEAKERS)]
        else:
            raise ValueError("Invalid 'split' argument for dataset: PhoneDataset!")
        # Dict used as an ordered set for O(1) membership checks.
        usage_list = {line.strip('\n'): None for line in usage_list}
        print(((((('[Dataset] - # phone classes: ' + str(self.class_num)) + ', number of data for ') + split) + ': ') + str(len(usage_list))))
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # Sort by length (descending) so each bucket holds similarly-long wavs.
        table = pd.read_csv(os.path.join(bucket_file, ('TRAIN.csv' if (split == 'train') else 'TEST.csv'))).sort_values(by=['length'], ascending=False)
        X = table['file_path'].tolist()
        X_lens = table['length'].tolist()
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            if (self._parse_x_name(x).upper() in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    # Halve buckets that contain very long utterances to bound memory.
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # Flush the last partially-filled bucket.
        if (len(batch_x) > 1):
            # NOTE(review): unlike the loop above this check omits .upper() and
            # only tests the final `x` — looks inconsistent; confirm intent.
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)
    def _parse_x_name(self, x):
        # "dir/subdir/file.wav" -> "subdir-file"-style utterance id.
        return '-'.join(x.split('.')[0].split('/')[1:])
    def _load_wav(self, wav_path):
        (wav, sr) = torchaudio.load(os.path.join(self.data_root, wav_path))
        assert (sr == self.sample_rate), f'Sample rate mismatch: real {sr}, config {self.sample_rate}'
        # Flatten to a 1-D waveform tensor.
        return wav.view((- 1))
    def __len__(self):
        return len(self.X)
    def __getitem__(self, index):
        # Load the whole bucket at once: wavs and their frame-label tensors.
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = [torch.LongTensor(self.Y[self._parse_x_name(x_file).upper()]) for x_file in self.X[index]]
        return (wav_batch, label_batch)
    def collate_fn(self, items):
        # batch_size=1: unwrap the single bucket into (wavs, labels).
        return (items[0][0], items[0][1])
|
class Model(nn.Module):
    """Single linear layer mapping upstream features to class logits."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        """Project features of shape (..., input_dim) to (..., output_class_num)."""
        return self.linear(features)
|
class DownstreamExpert(PhoneExpert):
    """Basically the same as the phone linear expert."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        # Replace the parent's model with the plain linear Model in this file.
        delattr(self, 'model')
        model_kwargs = dict(self.modelrc)
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **model_kwargs,
        )
|
class SpeakerClassifiDataset(Dataset):
    """VoxCeleb1 speaker identification dataset (1251 speakers).

    The wav path list for each mode is resolved once by globbing the dataset
    root and cached as a pickle under CACHE_PATH.
    """
    def __init__(self, mode, file_path, meta_data, max_timestep=None):
        # file_path: dataset root (pathlib.Path); meta_data: lines of
        # "<split-index> <relative/wav/path>" (1=train, 2=dev, 3=test).
        self.root = file_path
        self.speaker_num = 1251
        self.meta_data = meta_data
        self.max_timestep = max_timestep
        # NOTE(review): file handle opened without close; consider `with`.
        self.usage_list = open(self.meta_data, 'r').readlines()
        cache_path = os.path.join(CACHE_PATH, f'{mode}.pkl')
        if os.path.isfile(cache_path):
            print(f'[SpeakerClassifiDataset] - Loading file paths from {cache_path}')
            # NOTE(review): pickle.load — only safe if the cache dir is trusted.
            with open(cache_path, 'rb') as cache:
                dataset = pickle.load(cache)
        else:
            # Dispatch to self.train()/self.dev()/self.test() by mode name.
            # NOTE(review): eval() on `mode` — confirm mode is validated upstream.
            dataset = eval('self.{}'.format(mode))()
            os.makedirs(os.path.dirname(cache_path), exist_ok=True)
            with open(cache_path, 'wb') as cache:
                pickle.dump(dataset, cache)
        print(f'[SpeakerClassifiDataset] - there are {len(dataset)} files found')
        self.dataset = dataset
        self.label = self.build_label(self.dataset)
    def build_label(self, train_path_list):
        # Paths look like ".../idXXXXX/wav/file.wav"; labels are 0-based
        # offsets from speaker id10001.
        y = []
        for path in train_path_list:
            id_string = path.split('/')[(- 3)]
            y.append((int(id_string[2:]) - 10001))
        return y
    @classmethod
    def label2speaker(self, labels):
        # Inverse of build_label: integer label -> "idXXXXX" string.
        # NOTE(review): classmethod whose first parameter is named `self` (acts as cls).
        return [f'id{(label + 10001)}' for label in labels]
    def train(self):
        # Collect wavs whose meta split-index is 1.
        dataset = []
        print('search specified wav name for training set')
        for string in tqdm.tqdm(self.usage_list):
            pair = string.split()
            index = pair[0]
            if (int(index) == 1):
                x = list(self.root.glob(('*/wav/' + pair[1])))
                dataset.append(str(x[0]))
        print('finish searching training set wav')
        return dataset
    def dev(self):
        # Collect wavs whose meta split-index is 2.
        dataset = []
        print('search specified wav name for dev set')
        for string in tqdm.tqdm(self.usage_list):
            pair = string.split()
            index = pair[0]
            if (int(index) == 2):
                x = list(self.root.glob(('*/wav/' + pair[1])))
                dataset.append(str(x[0]))
        print('finish searching dev set wav')
        return dataset
    def test(self):
        # Collect wavs whose meta split-index is 3.
        dataset = []
        print('search specified wav name for test set')
        for string in tqdm.tqdm(self.usage_list):
            pair = string.split()
            index = pair[0]
            if (int(index) == 3):
                x = list(self.root.glob(('*/wav/' + pair[1])))
                dataset.append(str(x[0]))
        print('finish searching test set wav')
        return dataset
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        (wav, sr) = torchaudio.load(self.dataset[idx])
        wav = wav.squeeze(0)
        length = wav.shape[0]
        # Randomly crop utterances longer than max_timestep samples.
        if (self.max_timestep != None):
            if (length > self.max_timestep):
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
                length = self.max_timestep
        def path2name(path):
            # Unique utterance name built from the last three path components.
            return Path('-'.join(Path(path).parts[(- 3):])).stem
        path = self.dataset[idx]
        return (wav.numpy(), self.label[idx], path2name(path))
    def collate_fn(self, samples):
        # Transpose samples into (wavs, labels, names) iterables.
        return zip(*samples)
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """
    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = expdir
        root_dir = Path(self.datarc['file_path'])
        # max_timestep only limits training utterances; dev/test keep full wavs.
        self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
        self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
        self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])
        # NOTE(review): eval() on a config string — safe only for trusted configs.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=self.train_dataset.speaker_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        # Buffer so the best dev score travels with checkpoints.
        self.register_buffer('best_score', torch.zeros(1))
    def _get_train_dataloader(self, dataset):
        # Use a DistributedSampler when torch.distributed is initialized.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)
    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)
    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)
    def get_dataloader(self, mode):
        # Dispatch to get_{train,dev,test}_dataloader by mode name.
        return eval(f'self.get_{mode}_dataloader')()
    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Run one mini-batch: project upstream features, classify the speaker,
        accumulate metrics/predictions into `records`, and return the loss."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['loss'].append(loss.item())
        records['filename'] += filenames
        records['predict_speaker'] += SpeakerClassifiDataset.label2speaker(predicted_classid.cpu().tolist())
        records['truth_speaker'] += SpeakerClassifiDataset.label2speaker(labels.cpu().tolist())
        return loss
    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Average accumulated metrics, log them, track the best dev accuracy,
        and dump per-utterance predictions for dev/test modes."""
        save_names = []
        for key in ['acc', 'loss']:
            average = torch.FloatTensor(records[key]).mean().item()
            logger.add_scalar(f'voxceleb1/{mode}-{key}', average, global_step=global_step)
            with open((Path(self.expdir) / 'log.log'), 'a') as f:
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    if ((mode == 'dev') and (average > self.best_score)):
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        if (mode in ['dev', 'test']):
            with open((Path(self.expdir) / f'{mode}_predict.txt'), 'w') as file:
                lines = [f'''{f} {p}
''' for (f, p) in zip(records['filename'], records['predict_speaker'])]
                file.writelines(lines)
            with open((Path(self.expdir) / f'{mode}_truth.txt'), 'w') as file:
                lines = [f'''{f} {l}
''' for (f, l) in zip(records['filename'], records['truth_speaker'])]
                file.writelines(lines)
        return save_names
|
class DownstreamExpert(SpeakerExpert):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log

    Frame-level variant: the utterance label is broadcast to every valid frame
    and the loss/accuracy are computed per frame.
    """
    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, expdir, **kwargs)
    def forward(self, mode, features, lengths, labels, records, **kwargs):
        """Per-frame speaker classification; returns the mini-batch loss."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        # CrossEntropyLoss expects (batch, classes, time) for sequence targets.
        predicted = predicted.transpose((- 1), (- 2))
        # Broadcast each utterance label across its valid frames.
        labels = [labels[index].expand(features_len[index]) for index in range(len(labels))]
        # -100 is CrossEntropyLoss's default ignore_index, masking padded frames.
        labels = pad_sequence(labels, padding_value=(- 100), batch_first=True)
        loss = self.objective(predicted, labels)
        predicted = predicted.transpose((- 1), (- 2))
        predicted_classid = predicted.max(dim=(- 1)).indices
        # Accuracy over non-padding frames only.
        records['acc'] += (predicted_classid[(labels != (- 100))] == labels[(labels != (- 100))]).view((- 1)).cpu().float().tolist()
        records['loss'].append(loss.item())
        return loss
|
class SpeakerVerifi_train(Dataset):
    """VoxCeleb2 speaker verification training set.

    Scans (or loads from a pickle cache) every speaker's wav files under each
    root directory, keeps utterances longer than the VAD threshold, and maps
    speaker directory names to contiguous integer labels.
    """

    def __init__(self, vad_config, file_path, meta_data, max_timestep=None):
        """
        Args:
            vad_config: dict with at least 'min_sec'
                (NOTE(review): compared against a sample count below — confirm units).
            file_path: dict mapping a dataset key to its root directory.
            meta_data: kept for interface parity with the dev/test datasets.
            max_timestep: optional crop length (samples) for training wavs.
        """
        self.roots = file_path
        self.root_key = list(self.roots.keys())
        self.max_timestep = max_timestep
        self.vad_c = vad_config
        self.dataset = []
        self.all_speakers = []
        for key in self.root_key:
            # NOTE(review): hard-coded relative cache path; breaks if cwd differs.
            cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_{key}.p'
            p = Path(self.roots[key])
            if os.path.isfile(cache_path):
                # Cache layout: {speaker_id: [relative wav path, ...]}
                cache_wavs_dict = pickle.load(open(cache_path, 'rb'))
                self.all_speakers.extend(list(cache_wavs_dict.keys()))
                for speaker_id in list(cache_wavs_dict.keys()):
                    for wav_name in cache_wavs_dict[speaker_id]:
                        self.dataset.append(str(p / speaker_id / wav_name))
            else:
                speaker_wav_dict = {}
                speaker_dirs = [f.path.split('/')[-1] for f in os.scandir(self.roots[key]) if f.is_dir()]
                self.all_speakers.extend(speaker_dirs)
                print('search all wavs paths')
                start = time.time()
                for speaker in tqdm.tqdm(speaker_dirs):
                    speaker_dir = p / speaker
                    wav_list = find_files(speaker_dir)
                    speaker_wav_dict[speaker] = []
                    for wav_name in wav_list:
                        # Bug fix: the original rebound the loop name `wav` to the
                        # loaded tensor, so the path appended below was built from
                        # a tensor instead of the filename. Keep them separate.
                        wav, _ = apply_effects_file(str(speaker_dir / wav_name), EFFECTS)
                        wav = wav.squeeze(0)
                        length = wav.shape[0]
                        if length > self.vad_c['min_sec']:
                            self.dataset.append(str(speaker_dir / wav_name))
                            speaker_wav_dict[speaker].append('/'.join(str(wav_name).split('/')[-2:]))
                end = time.time()
                print(f'search all wavs paths costs {end - start} seconds')
                print(f'save wav paths to {cache_path}! so we can directly load all_path in next time!')
                pickle.dump(speaker_wav_dict, open(cache_path, 'wb'))
        self.speaker_num = len(self.all_speakers)
        self.necessary_dict = self.processing()
        self.label_mapping_spk_id = {}
        self.build_label_mapping()
        self.label = self.build_label(self.dataset)

    def processing(self):
        """Summarize speaker bookkeeping used by downstream code."""
        speaker_num = len(self.all_speakers)
        return {'spk_paths': self.all_speakers, 'total_spk_num': speaker_num, 'pair_table': None}

    def build_label_mapping(self):
        """Assign each speaker id a contiguous integer label."""
        for spk_count, speaker_id in enumerate(self.all_speakers):
            self.label_mapping_spk_id[speaker_id.split('/')[-1]] = spk_count

    def build_label(self, train_path_list):
        """Map each wav path (".../<speaker>/wav/<file>") to its speaker label."""
        y = []
        for path in train_path_list:
            id_string = path.split('/')[-3]
            y.append(self.label_mapping_spk_id[id_string])
        return y

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        wav, _ = torchaudio.load(self.dataset[idx])
        wav = wav.squeeze(0)
        length = wav.shape[0]
        # Randomly crop utterances longer than max_timestep samples.
        if self.max_timestep is not None and length > self.max_timestep:
            start = random.randint(0, int(length - self.max_timestep))
            wav = wav[start:(start + self.max_timestep)]
            length = self.max_timestep
        return (wav, torch.tensor([self.label[idx]]).long())

    def collate_fn(self, samples):
        """Collate (wav, label) pairs; three None lists pad the tuple so batches
        match the evaluation datasets' 5-tuple format."""
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        n = len(samples)
        return (wavs, labels, [None] * n, [None] * n, [None] * n)
|
class SpeakerVerifi_dev(Dataset):
    """Speaker verification dev set, pre-split into fixed-length segments.

    Trial pairs from `meta_data` are expanded into per-segment entries, and
    the resulting segment index is cached to disk as a pickle.
    """
    def __init__(self, vad_config, segment_config, file_path, meta_data):
        # segment_config holds 'window' and 'stride' in samples.
        self.root = file_path
        self.meta_data = meta_data
        self.segment_config = segment_config
        self.vad_c = vad_config
        self.pair_dict = self.preprocessing()
        # NOTE(review): hard-coded relative cache path; breaks if cwd differs.
        cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_dev_segment.p'
        if os.path.isfile(cache_path):
            self.dataset = pickle.load(open(cache_path, 'rb'))
        else:
            self.dataset = self.segment_processing()
            pickle.dump(self.dataset, open(cache_path, 'wb'))
    def segment_processing(self):
        """Expand each wav into rows of
        [label, pair_id, utter_id, n_segments, start, end, path]."""
        wav_list = self.pair_dict['wav_table']
        utterance_id = 0
        segment_list = []
        print('processing test set to segments')
        for wav_info in tqdm.tqdm(wav_list):
            label_info = wav_info[0]
            pair_info = wav_info[1]
            # Dev wavs are normalized/VAD-processed via sox before measuring length.
            (wav, _) = apply_effects_file(wav_info[2], EFFECTS)
            wav = wav.squeeze(0)
            index_end = (len(wav) - self.segment_config['window'])
            segment_num = (index_end // self.segment_config['stride'])
            if (index_end < 0):
                # Utterance shorter than one window: keep it whole as one segment.
                segment_list.append([int(label_info), pair_info, str(utterance_id), segment_num, 0, len(wav), wav_info[2]])
            else:
                for index in range(0, index_end, self.segment_config['stride']):
                    segment_list.append([int(label_info), pair_info, str(utterance_id), segment_num, index, (index + self.segment_config['window']), wav_info[2]])
            utterance_id += 1
        return segment_list
    def preprocessing(self):
        """Read trial pairs: each meta line is "<label> <wav1> <wav2>"."""
        wav_table = []
        pair_id = 0
        with open(self.meta_data, 'r') as f:
            usage_list = f.readlines()
        for pair in usage_list:
            list_pair = pair.split()
            pair_1 = os.path.join(self.root, list_pair[1])
            pair_2 = os.path.join(self.root, list_pair[2])
            wav1 = (list_pair[0], str(pair_id), pair_1)
            wav2 = (list_pair[0], str(pair_id), pair_2)
            wav_table.append(wav1)
            wav_table.append(wav2)
            pair_id += 1
        return {'wav_table': wav_table}
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        (label_info, pair_id, utter_id, seg_info, start, end, path) = self.dataset[idx]
        # NOTE(review): segment offsets were computed on the sox-processed wav
        # but are applied to the raw torchaudio.load output — confirm equivalence.
        (wav, _) = torchaudio.load(path)
        wav = wav.squeeze(0)
        seg_tensor = wav[start:end]
        return (label_info, pair_id, utter_id, seg_info, seg_tensor)
    def collate_fn(self, data_sample):
        # Transpose samples into per-field lists; segment tensors first
        # because the runner feeds the first element to the upstream.
        label_list = []
        pair_list = []
        utterid_list = []
        seg_num_list = []
        seg_tensor_list = []
        for samples in data_sample:
            label_list.append(samples[0])
            pair_list.append(samples[1])
            utterid_list.append(samples[2])
            seg_num_list.append(samples[3])
            seg_tensor_list.append(samples[4])
        return (seg_tensor_list, label_list, pair_list, utterid_list, seg_num_list)
|
class SpeakerVerifi_test(Dataset):
    """Speaker verification test set, pre-split into fixed-length segments.

    NOTE(review): near-duplicate of SpeakerVerifi_dev; the only substantive
    difference is that lengths here come from torchaudio.load instead of
    sox-processed audio — consider sharing a base class.
    """
    def __init__(self, vad_config, segment_config, file_path, meta_data):
        # segment_config holds 'window' and 'stride' in samples.
        self.root = file_path
        self.meta_data = meta_data
        self.segment_config = segment_config
        self.vad_c = vad_config
        self.pair_dict = self.preprocessing()
        # NOTE(review): hard-coded relative cache path; breaks if cwd differs.
        cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_test_segment.p'
        if os.path.isfile(cache_path):
            self.dataset = pickle.load(open(cache_path, 'rb'))
        else:
            self.dataset = self.segment_processing()
            pickle.dump(self.dataset, open(cache_path, 'wb'))
    def segment_processing(self):
        """Expand each wav into rows of
        [label, pair_id, utter_id, n_segments, start, end, path]."""
        wav_list = self.pair_dict['wav_table']
        utterance_id = 0
        segment_list = []
        print('processing test set to segments')
        for wav_info in tqdm.tqdm(wav_list):
            label_info = wav_info[0]
            pair_info = wav_info[1]
            (wav, _) = torchaudio.load(wav_info[2])
            wav = wav.squeeze(0)
            index_end = (len(wav) - self.segment_config['window'])
            segment_num = (index_end // self.segment_config['stride'])
            if (index_end < 0):
                # Utterance shorter than one window: keep it whole as one segment.
                segment_list.append([int(label_info), pair_info, str(utterance_id), segment_num, 0, len(wav), wav_info[2]])
            else:
                for index in range(0, index_end, self.segment_config['stride']):
                    segment_list.append([int(label_info), pair_info, str(utterance_id), segment_num, index, (index + self.segment_config['window']), wav_info[2]])
            utterance_id += 1
        return segment_list
    def preprocessing(self):
        """Read trial pairs: each meta line is "<label> <wav1> <wav2>"."""
        wav_table = []
        pair_id = 0
        with open(self.meta_data, 'r') as f:
            usage_list = f.readlines()
        for pair in usage_list:
            list_pair = pair.split()
            pair_1 = os.path.join(self.root, list_pair[1])
            pair_2 = os.path.join(self.root, list_pair[2])
            wav1 = (list_pair[0], str(pair_id), pair_1)
            wav2 = (list_pair[0], str(pair_id), pair_2)
            wav_table.append(wav1)
            wav_table.append(wav2)
            pair_id += 1
        return {'wav_table': wav_table}
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        (label_info, pair_id, utter_id, seg_info, start, end, path) = self.dataset[idx]
        (wav, _) = torchaudio.load(path)
        wav = wav.squeeze(0)
        seg_tensor = wav[start:end]
        return (label_info, pair_id, utter_id, seg_info, seg_tensor)
    def collate_fn(self, data_sample):
        # Transpose samples into per-field lists; segment tensors first
        # because the runner feeds the first element to the upstream.
        label_list = []
        pair_list = []
        utterid_list = []
        seg_num_list = []
        seg_tensor_list = []
        for samples in data_sample:
            label_list.append(samples[0])
            pair_list.append(samples[1])
            utterid_list.append(samples[2])
            seg_num_list.append(samples[3])
            seg_tensor_list.append(samples[4])
        return (seg_tensor_list, label_list, pair_list, utterid_list, seg_num_list)
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log

    Note 1.
    dataloaders should output in the following format:

        [[wav1, wav2, ...], your_other_contents, ...]

    where wav1, wav2 ... are in variable length
    and wav1 is in torch.FloatTensor
    """
    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.train_dataset = SpeakerVerifi_train(self.datarc['vad_config'], **self.datarc['train'])
        self.dev_dataset = SpeakerVerifi_dev(self.datarc['vad_config'], self.datarc['segment_config'], **self.datarc['dev'])
        self.test_dataset = SpeakerVerifi_test(self.datarc['vad_config'], self.datarc['segment_config'], **self.datarc['test'])
        self.connector = nn.Linear(self.upstream_dim, self.modelrc['input_dim'])
        self.model = Model(input_dim=self.modelrc['input_dim'], agg_dim=self.modelrc['agg_dim'], agg_module=self.modelrc['agg_module'], config=self.modelrc)
        # Additive-margin softmax for training; cosine similarity + EER at eval.
        self.objective = AdMSoftmaxLoss(self.modelrc['input_dim'], self.train_dataset.speaker_num, s=30.0, m=0.4)
        self.score_fn = nn.CosineSimilarity(dim=(- 1))
        self.eval_metric = EER
    def get_dataloader(self, mode):
        """
        Args:
            mode: 'train', 'dev' or 'test'

        Return:
            a torch.utils.data.DataLoader returning each batch in the format of:

                [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...

            where wav1, wav2 ... are in variable length
            each wav is torch.FloatTensor in cpu with:
                1. dim() == 1
                2. sample_rate == 16000
                3. directly loaded by torchaudio
        """
        if (mode == 'train'):
            return self._get_train_dataloader(self.train_dataset)
        elif (mode == 'dev'):
            return self._get_eval_dataloader(self.dev_dataset)
        elif (mode == 'test'):
            return self._get_eval_dataloader(self.test_dataset)
    def _get_train_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=True, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)
    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)
    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)
    def forward(self, mode, features, labels, pair_list, utterid_list, seg_num_list, records, **kwargs):
        """
        Args:
            features: features extracted by the upstream, on the assigned device
            labels: speaker labels (training) / trial labels (evaluation)
            records: defaultdict(list); appended scalars are averaged and logged

        Return:
            the loss to be optimized during training; at evaluation, segment
            embeddings are stashed into `records` and a dummy zero is returned.
        """
        features_pad = pad_sequence(features, batch_first=True)
        if (self.modelrc['module'] == 'XVector'):
            # NOTE(review): mask is 14 frames shorter — presumably matches the
            # XVector module's temporal-context shrinkage; confirm against Model.
            attention_mask = [torch.ones((feature.shape[0] - 14)) for feature in features]
        else:
            attention_mask = [torch.ones(feature.shape[0]) for feature in features]
        attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
        # Padded positions become a large negative additive bias.
        attention_mask_pad = ((1.0 - attention_mask_pad) * (- 100000.0))
        features_pad = self.connector(features_pad)
        # NOTE(review): hard-coded .cuda() breaks CPU-only runs; prefer the
        # device of features_pad.
        agg_vec = self.model(features_pad, attention_mask_pad.cuda())
        if self.training:
            labels = torch.LongTensor(labels).to(features_pad.device)
            loss = self.objective(agg_vec, labels)
            return loss
        else:
            # L2-normalize embeddings, then bucket them in `records` by
            # utterance id and pair id for later scoring in declutter().
            agg_vec = (agg_vec / torch.norm(agg_vec, dim=(- 1)).unsqueeze((- 1)))
            if (len(labels) > 1):
                agg_vec_list = [vec for vec in agg_vec]  # NOTE(review): unused
                for index in range(len(agg_vec)):
                    records[f'utterid_{utterid_list[index]}'].append(agg_vec[index])
                    records[f'utterid_info'].append(f'utterid_{utterid_list[index]}')
                    records[f'pairid_info'].append(f'pairid_{pair_list[index]}')
                    records[f'pairid_{pair_list[index]}'].append(f'utterid_{utterid_list[index]}')
                    records[f'pairid_{pair_list[index]}_label'].append(labels[index])
            else:
                records[f'utterid_{utterid_list[0]}'].append(agg_vec[0])
                records[f'utterid_info'].append(f'utterid_{utterid_list[0]}')
                records[f'pairid_info'].append(f'pairid_{pair_list[0]}')
                records[f'pairid_{pair_list[0]}'].append(f'utterid_{utterid_list[0]}')
                records[f'pairid_{pair_list[0]}_label'].append(labels[0])
            return torch.tensor(0)
    def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """
        Args:
            records: defaultdict(list), contents already appended
            logger: Tensorboard SummaryWriter
            global_step: global_step in runner, helpful for Tensorboard logging
        """
        if (not self.training):
            # Collapse segment embeddings into trial-pair scores, then EER.
            records = self.declutter(records)
            EER_result = self.eval_metric(np.array(records['ylabels']), np.array(records['scores']))
            records['EER'] = EER_result[0]
            logger.add_scalar((f'{mode}-' + 'EER'), records['EER'], global_step=global_step)
    def declutter(self, records):
        """Mean-pool each utterance's segment embeddings, score every trial
        pair with cosine similarity, filling records['ylabels'/'scores']."""
        utterance_ids = set(records['utterid_info'])
        for index in utterance_ids:
            records[index] = torch.mean(torch.stack(records[index]), dim=0)
        pair_ids = set(records['pairid_info'])
        for index in pair_ids:
            wav_set = list(set(records[index]))
            if (len(wav_set) == 1):
                # Both trial sides resolved to the same utterance id.
                wavs1 = records[wav_set[0]]
                wavs2 = records[wav_set[0]]
            else:
                wavs1 = records[wav_set[0]]
                wavs2 = records[wav_set[1]]
            score = self.score_fn(wavs1, wavs2).squeeze().cpu().detach().tolist()
            # All entries of a pair share one label; take the single value.
            ylabel = list(set(records[f'{index}_label']))[0]
            records['ylabels'].append(ylabel)
            records['scores'].append(score)
        return records
|
def EER(labels, scores):
    """Compute the equal error rate and its decision threshold.

    Args:
        labels: (N,) ground-truth labels, 0 or 1.
        scores: (N,) similarity scores, roughly within [-1, 1].

    Returns:
        (eer, thresh): the EER and the score threshold at which
        false-positive and false-negative rates are equal.

    Fixes: the original built a fresh ``interp1d(fpr, tpr)`` on every
    ``brentq`` iteration (and left an unused local ``s``); the interpolator
    is now constructed once and reused.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)
    tpr_at_fpr = interp1d(fpr, tpr)
    # EER is where the ROC curve crosses the anti-diagonal: tpr = 1 - fpr.
    eer = brentq(lambda x: (1.0 - x) - tpr_at_fpr(x), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """Compute the equal error rate by binary-searching a score threshold.

    Args:
        labels: (N,) values in {0, 1}
        scores: (N,) values within [-1, 1]

    Returns:
        (equal_error_rate, threshold)
    """
    pairs = sorted(zip(scores, labels), key=lambda p: p[0])
    (sorted_scores, sorted_labels) = zip(*pairs)
    total_ones = sum(sorted_labels)
    total_zeros = len(sorted_labels) - total_ones
    # Prefix counts of positive/negative labels below each candidate cut.
    prefsum_ones = list(accumulate(sorted_labels, lambda acc, lab: acc + 1 if lab == 1 else acc, initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, lambda acc, lab: acc + 1 if lab == 0 else acc, initial=0))
    # Pad with sentinels so a threshold below/above every score is reachable.
    ext_scores = [-1.0, *sorted_scores, 1.0]
    lo, hi = 0, len(ext_scores)
    while lo != hi:
        mid = (lo + hi) // 2
        nb_false_positives = total_zeros - prefsum_zeros[mid]
        nb_false_negatives = prefsum_ones[mid]
        if nb_false_positives > nb_false_negatives:
            lo = mid
        elif nb_false_positives < nb_false_negatives:
            hi = mid
        else:
            break
    thresh = (ext_scores[mid] + ext_scores[mid + 1]) / 2
    false_negative_ratio = nb_false_negatives / len(labels)
    false_positive_ratio = nb_false_positives / len(labels)
    equal_error_rate = (false_positive_ratio + false_negative_ratio) / 2
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel, score_fn=None):
    """Pair up enrollment/test speaker embeddings and score them.

    The first ``len(ylabel)`` entries of ``input_x_speaker`` are one side of
    each trial; the following ``len(ylabel)`` entries are the other side.

    Args:
        input_x_speaker: sequence of 2*len(ylabel) embedding tensors.
        ylabel: list of 0/1 label tensors, one per trial.
        score_fn: optional similarity callable taking the two stacked
            embedding batches; defaults to cosine similarity on the last dim.

    Returns:
        (scores, ylabel): per-trial similarity scores and integer labels.

    Fixes: the original referenced ``self.score_fn`` from a module-level
    function, which always raised NameError; the scorer is now an optional
    parameter defaulting to ``torch.nn.CosineSimilarity(dim=-1)``.
    """
    if score_fn is None:
        score_fn = torch.nn.CosineSimilarity(dim=-1)
    wav1 = []
    wav2 = []
    for i in range(len(ylabel)):
        wav1.append(input_x_speaker[i].unsqueeze(0))
        wav2.append(input_x_speaker[len(ylabel) + i].unsqueeze(0))
    wav1 = torch.stack(wav1)
    wav2 = torch.stack(wav2)
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    scores = score_fn(wav1, wav2).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
class DownstreamExpert(nn.Module):
    """Speaker-verification downstream wrapper.

    Handles downstream-specific operations: the forward pass (GE2E loss in
    training, cosine-similarity trial scoring in evaluation), metric
    computation (EER) and Tensorboard logging.

    Note: dataloaders should output ``[[wav1, wav2, ...], other_contents, ...]``
    where each wav is a variable-length ``torch.FloatTensor``.
    """
    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.seed = kwargs['seed']
        # Training uses GE2E-style batches (several utterances per speaker);
        # dev/test datasets yield pairwise trial lists.
        self.train_dataset = AudioBatchData(**self.datarc['train'], batch_size=self.datarc['train_batch_size'])
        self.dev_dataset = SpeakerVerifi_dev(**self.datarc['dev'])
        self.test_dataset = SpeakerVerifi_test(**self.datarc['test'])
        # Project upstream features down to the downstream model's input dim.
        self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
        self.model = Model(input_dim=self.modelrc['input_dim'], agg_module=self.modelrc['agg_module'], config=self.modelrc)
        self.objective = GE2E()
        self.score_fn = nn.CosineSimilarity(dim=(- 1))
        self.eval_metric = EER
    def _get_train_dataloader(self, dataset):
        # NOTE(review): the `dataset` argument is ignored — the loader always
        # comes from self.train_dataset; confirm this is intentional.
        return self.train_dataset.getDataLoader(batchSize=1, numWorkers=0)
    def _get_eval_dataloader(self, dataset):
        # Evaluation loader: fixed order, dataset-provided collate function.
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)
    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)
    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)
    def forward(self, features, lengths, labels, records=None, logger=None, prefix=None, global_step=0, **kwargs):
        """Run one mini-batch.

        Args:
            features: list of upstream feature tensors (variable length),
                already on the device assigned by command-line args.
            lengths: per-utterance lengths (unused here; padding is derived
                from the feature shapes directly).
            labels: per-trial labels (evaluation only).
            records: defaultdict(list); scores/labels appended here are
                aggregated and logged later.
            logger: Tensorboard SummaryWriter for debugging convenience.
            global_step: runner's global step, for Tensorboard logging.

        Returns:
            Training: the GE2E loss to optimize (not detached).
            Evaluation: a dummy ``torch.tensor(0)``.
        """
        features_pad = pad_sequence(features, batch_first=True)
        attention_mask = [torch.ones(feature.shape[0]) for feature in features]
        attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
        # Turn the 1/0 validity mask into an additive attention bias:
        # 0 for real frames, -1e5 for padding so softmax ignores padding.
        attention_mask_pad = ((1.0 - attention_mask_pad) * (- 100000.0))
        features_pad = self.connector(features_pad)
        # NOTE(review): hard-coded .cuda() assumes GPU execution — confirm.
        agg_vec = self.model(features_pad, attention_mask_pad.cuda())
        # L2-normalize the utterance embeddings.
        agg_vec = (agg_vec / torch.norm(agg_vec, dim=(- 1)).unsqueeze((- 1)))
        if self.training:
            # Reshape to (n_speakers, utter_number, dim) for the GE2E loss.
            GE2E_matrix = agg_vec.reshape((- 1), self.train_dataset.utter_number, agg_vec.shape[(- 1)])
            loss = self.objective(GE2E_matrix)
            return loss
        else:
            (vec1, vec2) = self.separate_data(agg_vec, labels)
            scores = self.score_fn(vec1, vec2).squeeze().cpu().detach().tolist()
            ylabels = torch.stack(labels).cpu().detach().long().tolist()
            # squeeze() turns a single-trial batch into scalars, so a
            # one-element batch must be appended rather than extended.
            if (len(ylabels) > 1):
                records['scores'].extend(scores)
                records['ylabels'].extend(ylabels)
            else:
                records['scores'].append(scores)
                records['ylabels'].append(ylabels)
            # Evaluation produces no loss; return a dummy scalar.
            return torch.tensor(0)
    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """Compute the EER over the collected records and log it.

        Args:
            records: defaultdict(list) with 'scores' and 'ylabels' appended.
            logger: Tensorboard SummaryWriter.
            prefix: tag prefix indicating downstream and split,
                e.g. 'phone/train-'.
            global_step: runner's global step for Tensorboard logging.
        """
        if (not self.training):
            EER_result = self.eval_metric(np.array(records['ylabels']), np.array(records['scores']))
            records['EER'] = EER_result[0]
            logger.add_scalar((f'{prefix}' + 'EER'), records['EER'], global_step=global_step)
    def separate_data(self, agg_vec, ylabel):
        # The batch stacks both sides of every trial: the first len(ylabel)
        # embeddings are the enrollment side, the rest are the test side.
        total_num = len(ylabel)
        feature1 = agg_vec[:total_num]
        feature2 = agg_vec[total_num:]
        return (feature1, feature2)
|
def collect_speaker_ids(roots, speaker_num):
    """Split VoxCeleb speaker directories into train/dev speaker-id lists.

    Args:
        roots: dict with keys 'Voxceleb1' and 'Voxceleb2' mapping to the
            directories whose immediate subdirectories are speaker ids
            (e.g. ``<...>/Voxceleb1/dev``); the corpus name is recovered from
            the second-to-last path component of each root.
        speaker_num: number of VoxCeleb1 speakers reserved for the dev split.

    Returns:
        (train_speaker, dev_speaker): dev ids are randomly sampled from
        VoxCeleb1; train ids are the remaining VoxCeleb1 ids plus all of
        VoxCeleb2.
    """
    speaker_dirs = []
    for root in roots.values():
        speaker_dirs.extend(entry.path for entry in os.scandir(root) if entry.is_dir())
    # (corpus name, speaker id) for every speaker directory found.
    tagged = [(path.split('/')[-3], path.split('/')[-1]) for path in speaker_dirs]
    vox1 = [spk for corpus, spk in tagged if corpus == roots['Voxceleb1'].split('/')[-2]]
    vox2 = [spk for corpus, spk in tagged if corpus == roots['Voxceleb2'].split('/')[-2]]
    dev_speaker = random.sample(vox1, k=speaker_num)
    train_speaker = [spk for spk in vox1 if spk not in dev_speaker]
    train_speaker.extend(vox2)
    return (train_speaker, dev_speaker)
|
def construct_dev_speaker_id_txt(dev_speakers, dev_txt_name):
    """Write the dev speaker ids to `dev_txt_name`, one id per line."""
    with open(dev_txt_name, 'w') as out_file:
        for speaker_id in dev_speakers:
            out_file.write(f'{speaker_id}\n')
|
def sample_wavs_and_dump_txt(root, dev_ids, numbers, meta_data_name):
    """Randomly build `numbers` verification trials and dump them to a txt file.

    Each line is "<label> <wav1> <wav2>" where label '1' means both wavs come
    from the same speaker and '0' means different speakers. Wav paths keep
    their last three components (speaker/session/utterance).

    Returns the list of generated lines.
    """
    def _pick_wav(speaker_id):
        # Random utterance of the speaker, relative to the corpus root.
        return '/'.join(random.choice(find_files(os.path.join(root, speaker_id))).split('/')[-3:])
    wav_list = []
    count_positive = 0
    for _ in range(numbers):
        if random.random() > 0.5:
            spk_a, spk_b = random.sample(dev_ids, 2)
            wav_list.append(' '.join(['0', _pick_wav(spk_a), _pick_wav(spk_b)]))
        else:
            (spk,) = random.sample(dev_ids, 1)
            count_positive += 1
            wav_list.append(' '.join(['1', _pick_wav(spk), _pick_wav(spk)]))
    with open(meta_data_name, 'w') as meta_file:
        for line in wav_list:
            meta_file.write(line + '\n')
    return wav_list
|
def EER(labels, scores):
    """Compute the equal error rate and its decision threshold.

    Args:
        labels: (N,) ground-truth labels, 0 or 1.
        scores: (N,) similarity scores, roughly within [-1, 1].

    Returns:
        (eer, thresh): the EER and the score threshold at which
        false-positive and false-negative rates are equal.

    Fixes: the original built a fresh ``interp1d(fpr, tpr)`` on every
    ``brentq`` iteration (and left an unused local ``s``); the interpolator
    is now constructed once and reused.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)
    tpr_at_fpr = interp1d(fpr, tpr)
    # EER is where the ROC curve crosses the anti-diagonal: tpr = 1 - fpr.
    eer = brentq(lambda x: (1.0 - x) - tpr_at_fpr(x), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """Compute the equal error rate by binary-searching a score threshold.

    Args:
        labels: (N,) values in {0, 1}
        scores: (N,) values within [-1, 1]

    Returns:
        (equal_error_rate, threshold)
    """
    pairs = sorted(zip(scores, labels), key=lambda p: p[0])
    (sorted_scores, sorted_labels) = zip(*pairs)
    total_ones = sum(sorted_labels)
    total_zeros = len(sorted_labels) - total_ones
    # Prefix counts of positive/negative labels below each candidate cut.
    prefsum_ones = list(accumulate(sorted_labels, lambda acc, lab: acc + 1 if lab == 1 else acc, initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, lambda acc, lab: acc + 1 if lab == 0 else acc, initial=0))
    # Pad with sentinels so a threshold below/above every score is reachable.
    ext_scores = [-1.0, *sorted_scores, 1.0]
    lo, hi = 0, len(ext_scores)
    while lo != hi:
        mid = (lo + hi) // 2
        nb_false_positives = total_zeros - prefsum_zeros[mid]
        nb_false_negatives = prefsum_ones[mid]
        if nb_false_positives > nb_false_negatives:
            lo = mid
        elif nb_false_positives < nb_false_negatives:
            hi = mid
        else:
            break
    thresh = (ext_scores[mid] + ext_scores[mid + 1]) / 2
    false_negative_ratio = nb_false_negatives / len(labels)
    false_positive_ratio = nb_false_positives / len(labels)
    equal_error_rate = (false_positive_ratio + false_negative_ratio) / 2
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel, score_fn=None):
    """Pair up enrollment/test speaker embeddings and score them.

    The first ``len(ylabel)`` entries of ``input_x_speaker`` are one side of
    each trial; the following ``len(ylabel)`` entries are the other side.

    Args:
        input_x_speaker: sequence of 2*len(ylabel) embedding tensors.
        ylabel: list of 0/1 label tensors, one per trial.
        score_fn: optional similarity callable taking the two stacked
            embedding batches; defaults to cosine similarity on the last dim.

    Returns:
        (scores, ylabel): per-trial similarity scores and integer labels.

    Fixes: the original referenced ``self.score_fn`` from a module-level
    function, which always raised NameError; the scorer is now an optional
    parameter defaulting to ``torch.nn.CosineSimilarity(dim=-1)``.
    """
    if score_fn is None:
        score_fn = torch.nn.CosineSimilarity(dim=-1)
    wav1 = []
    wav2 = []
    for i in range(len(ylabel)):
        wav1.append(input_x_speaker[i].unsqueeze(0))
        wav2.append(input_x_speaker[len(ylabel) + i].unsqueeze(0))
    wav1 = torch.stack(wav1)
    wav2 = torch.stack(wav2)
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    scores = score_fn(wav1, wav2).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
def options(only_registered_ckpt: bool=False):
    """List the public callables in this module, torch.hub style.

    Args:
        only_registered_ckpt: when True, drop entries whose name ends with
            one of the checkpoint-source suffixes
            (_local/_url/_gdriveid/_custom).

    Returns:
        list of option names.
    """
    ckpt_suffixes = ('_local', '_url', '_gdriveid', '_custom')
    selected = []
    for name, obj in globals().items():
        # torch.hub convention: public callables only, excluding this helper.
        if name.startswith('_') or not callable(obj) or name == 'options':
            continue
        if only_registered_ckpt and name.endswith(ckpt_suffixes):
            continue
        selected.append(name)
    return selected
|
def main():
    """CLI dispatcher: resolve sys.argv[1] to a problem class and run it.

    On a missing or unknown problem name, prints the traceback plus a usage
    message listing the available problem classes, then exits.
    """
    try:
        cls = getattr(problem, sys.argv[1])
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only the two expected lookup failures are handled.
    except (IndexError, AttributeError):
        available_problems = [name for name in dir(problem) if ((not name.startswith('_')) and isinstance(getattr(problem, name), type))]
        print(traceback.format_exc())
        print(f'''Usage:
            1. s3prl-main [PROBLEM] -h
            2. python3 -m s3prl.main [PROBLEM] -h
            3. python3 s3prl/main.py [PROBLEM] -h
            PROBLEM should be an available class name in the s3prl.problem package.
            Available options: {', '.join(available_problems)}''')
        exit(0)
    cls().main(sys.argv[2:])
|
def accuracy(xs, ys, item_same_fn=None):
    """Fraction of matching items between two collections.

    Accepts parallel sequences, or two dicts which are aligned by their
    sorted keys. `item_same_fn` overrides the default equality comparison.

    Raises:
        ValueError: when `xs` is neither a sequence nor a dict.
    """
    if isinstance(xs, (tuple, list)):
        assert isinstance(ys, (tuple, list))
        return _accuracy_impl(xs, ys, item_same_fn)
    if isinstance(xs, dict):
        assert isinstance(ys, dict)
        ordered_keys = sorted(xs.keys())
        return _accuracy_impl([xs[k] for k in ordered_keys], [ys[k] for k in ordered_keys], item_same_fn)
    raise ValueError

def _accuracy_impl(xs, ys, item_same_fn=None):
    """Pairwise match ratio over zip(xs, ys)."""
    compare = item_same_fn or (lambda a, b: a == b)
    matches = [int(compare(a, b)) for a, b in zip(xs, ys)]
    return sum(matches) / len(matches)
|
def ter(hyps: List[Union[(str, List[str])]], refs: List[Union[(str, List[str])]]) -> float:
    """Token error rate calculator.

    Args:
        hyps: list of hypotheses (strings or token lists).
        refs: list of references (strings or token lists).

    Returns:
        Edit-distance errors divided by total reference tokens, over all
        paired utterances.
    """
    paired = list(zip(hyps, refs))
    total_errors = sum(ed.eval(hyp, ref) for hyp, ref in paired)
    total_tokens = sum(len(ref) for _, ref in paired)
    return float(total_errors) / float(total_tokens)
|
def wer(hyps: List[str], refs: List[str]) -> float:
    """Word error rate: token error rate over space-separated words."""
    return ter([hyp.split(' ') for hyp in hyps], [ref.split(' ') for ref in refs])
|
def per(hyps: List[str], refs: List[str]) -> float:
    """Phoneme error rate: identical to WER over space-separated phoneme strings."""
    return wer(hyps, refs)
|
def cer(hyps: List[str], refs: List[str]) -> float:
    """Character error rate: token error rate where each character is a token."""
    return ter(hyps, refs)
|
def compute_eer(labels: List[int], scores: List[float]):
    """Compute equal error rate.

    Args:
        labels (List[int]): ground-truth labels (1 = target trial).
        scores (List[float]): similarity scores.

    Returns:
        eer (float): Equal error rate.
        threshold (float): The threshold to accept a target trial.

    Fixes: the original rebuilt ``interp1d(fpr, tpr)`` on every ``brentq``
    iteration; the interpolator is now constructed once and reused.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores, pos_label=1)
    tpr_at_fpr = interp1d(fpr, tpr)
    # EER is where the ROC curve crosses the anti-diagonal: tpr = 1 - fpr.
    eer = brentq(lambda x: (1.0 - x) - tpr_at_fpr(x), 0.0, 1.0)
    threshold = interp1d(fpr, thresholds)(eer)
    return (eer, threshold)
|
def compute_minDCF(labels: List[int], scores: List[float], p_target: float=0.01, c_miss: int=1, c_fa: int=1):
    """Compute the minimum of the detection cost function (NIST SRE 2016, Sec. 3).

    Args:
        labels (List[int]): ground-truth labels (1 = target trial).
        scores (List[float]): similarity scores.
        p_target: prior probability of the positive class.
        c_miss: cost of a miss.
        c_fa: cost of a false alarm.

    Returns:
        (min_dcf, min_c_det_threshold): the minimum normalized detection
        cost and the threshold achieving it.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores, pos_label=1)
    fnr = 1.0 - tpr
    min_c_det = float('inf')
    min_c_det_threshold = thresholds[0]
    # Scan every operating point; keep the first one attaining the minimum.
    for miss_rate, fa_rate, threshold in zip(fnr, fpr, thresholds):
        c_det = c_miss * miss_rate * p_target + c_fa * fa_rate * (1 - p_target)
        if c_det < min_c_det:
            min_c_det = c_det
            min_c_det_threshold = threshold
    # Normalize by the best cost achievable without looking at the scores.
    c_def = min(c_miss * p_target, c_fa * (1 - p_target))
    return (min_c_det / c_def, min_c_det_threshold)
|
def clean(ref: str) -> str:
    """Strip the B-<slot> / E-<slot> boundary tokens from a transcript."""
    without_begin = re.sub('B\\-(\\S+) ', '', ref)
    return re.sub(' E\\-(\\S+)', '', without_begin)

def parse(hyp: str, ref: str) -> Tuple[(str, str, str, str)]:
    """Extract slot annotations from a hypothesis/reference pair.

    Slots are marked as ``B-<slot> value E-<slot>``. Returns the cleaned
    reference text, cleaned hypothesis text, and the reference/hypothesis
    slot strings formatted as ';'-joined ``value:slot`` entries.
    """
    slot_pattern = re.compile('B\\-(\\S+) (.+?) E\\-\\1')
    hyp = re.sub(' +', ' ', hyp)
    ref = re.sub(' +', ' ', ref)
    hyp_slots = slot_pattern.findall(hyp)
    ref_slots = slot_pattern.findall(ref)
    ref_slots = ';'.join(f'{value}:{slot}' for slot, value in ref_slots)
    if hyp_slots:
        # Hypothesis slot values may contain nested boundary tokens; clean them.
        hyp_slots = ';'.join(f'{clean(value)}:{slot}' for slot, value in hyp_slots)
    else:
        hyp_slots = ''
    return (clean(ref), clean(hyp), ref_slots, hyp_slots)
|
def get_slot_dict(hyp: str, ref: str) -> Tuple[(Dict[(str, List[str])], Dict[(str, List[str])])]:
    """Parse a hypothesis/reference pair into slot->values dictionaries."""
    _, _, ref_slots, hyp_slots = parse(hyp, ref)
    def _to_dict(joined):
        # `joined` is ';'-separated 'value:slot' entries; '' means no slots.
        slot_dict = {}
        entries = joined.split(';')
        if entries[0] != '':
            for entry in entries:
                value, slot = entry.split(':')
                slot_dict.setdefault(slot, []).append(value)
        return slot_dict
    return (_to_dict(ref_slots), _to_dict(hyp_slots))
|
def slot_type_f1(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Average per-utterance F1 over predicted vs. reference slot *types*."""
    f1_scores = []
    for hyp, ref in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp, ref)
        if not hyp_dict and not ref_dict:
            # Both empty: perfect agreement by convention.
            f1 = 1.0
        elif not hyp_dict or not ref_dict:
            f1 = 0.0
        else:
            recall = sum(1.0 for slot in ref_dict if slot in hyp_dict) / len(ref_dict)
            precision = sum(1.0 for slot in hyp_dict if slot in ref_dict) / len(hyp_dict)
            denom = precision + recall
            f1 = (2 * precision * recall / denom) if denom > 0 else 0.0
        f1_scores.append(f1)
    return sum(f1_scores) / len(f1_scores)
|
def slot_value_cer(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """CER over slot values, matching each reference value to the closest
    hypothesis value of the same slot type (empty string when the slot is
    missing from the hypothesis)."""
    value_hyps, value_refs = [], []
    for hyp, ref in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp, ref)
        for slot in list(ref_dict.keys()):
            for ref_value in ref_dict[slot]:
                if slot not in hyp_dict:
                    value_refs.append(ref_value)
                    value_hyps.append('')
                    continue
                # Keep the first hypothesis value with the lowest CER.
                best_cer = 100
                best_value = ''
                for candidate in hyp_dict[slot]:
                    candidate_cer = cer([candidate], [ref_value])
                    if candidate_cer < best_cer:
                        best_cer = candidate_cer
                        best_value = candidate
                value_refs.append(ref_value)
                value_hyps.append(best_value)
    return cer(value_hyps, value_refs)
|
def slot_value_wer(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """WER over slot values, matching each reference value to the closest
    hypothesis value of the same slot type (empty string when the slot is
    missing from the hypothesis)."""
    value_hyps, value_refs = [], []
    for hyp, ref in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp, ref)
        for slot in list(ref_dict.keys()):
            for ref_value in ref_dict[slot]:
                if slot not in hyp_dict:
                    value_refs.append(ref_value)
                    value_hyps.append('')
                    continue
                # Keep the first hypothesis value with the lowest WER.
                best_wer = 100
                best_value = ''
                for candidate in hyp_dict[slot]:
                    candidate_wer = wer([candidate], [ref_value])
                    if candidate_wer < best_wer:
                        best_wer = candidate_wer
                        best_value = candidate
                value_refs.append(ref_value)
                value_hyps.append(best_value)
    return wer(value_hyps, value_refs)
|
def slot_edit_f1(hypothesis: List[str], groundtruth: List[str], loop_over_all_slot: bool, **kwargs) -> float:
    """Micro-averaged F1 over exact slot-value matches.

    A reference value counts as TP when the hypothesis predicts it verbatim
    for the same slot, otherwise as FN (plus FP when the slot exists in the
    hypothesis with wrong values). When `loop_over_all_slot` is True, slots
    predicted but absent from the reference contribute their values as FPs.
    """
    slot_counts = {}  # slot -> [TP, FN, FP]
    for hyp, ref in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp, ref)
        slots = list(ref_dict.keys())
        if loop_over_all_slot:
            slots += [slot for slot in hyp_dict if slot not in ref_dict]
        for slot in slots:
            tp = fn = fp = 0
            if slot not in ref_dict:
                # Hypothesis-only slot: every predicted value is a false alarm.
                fp = len(hyp_dict[slot])
            else:
                for ref_value in ref_dict[slot]:
                    if slot not in hyp_dict:
                        fn += 1
                    elif ref_value in hyp_dict[slot]:
                        tp += 1
                    else:
                        fn += 1
                        fp += 1
            counts = slot_counts.setdefault(slot, [0, 0, 0])
            counts[0] += tp
            counts[1] += fn
            counts[2] += fp
    total_tp = sum(counts[0] for counts in slot_counts.values())
    total_fn = sum(counts[1] for counts in slot_counts.values())
    total_fp = sum(counts[2] for counts in slot_counts.values())
    return (2 * total_tp) / (2 * total_tp + total_fp + total_fn)
|
def slot_edit_f1_full(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Slot edit F1 that also penalizes slots appearing only in the hypothesis."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=True, **kwargs)
|
def slot_edit_f1_part(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Slot edit F1 restricted to slots present in the reference."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=False, **kwargs)
|
class BeamDecoder(object):
    """Beam decoder powered by flashlight.

    Args:
        token (str, optional): Path to dictionary file. Defaults to "".
        lexicon (str, optional): Path to lexicon file. Defaults to "".
        lm (str, optional): Path to KenLM file. Defaults to "".
        nbest (int, optional): Returns nbest hypotheses. Defaults to 1.
        beam (int, optional): Beam size. Defaults to 5.
        beam_size_token (int, optional): Token beam size. Defaults to -1.
        beam_threshold (float, optional): Beam search log prob threshold. Defaults to 25.0.
        lm_weight (float, optional): language model weight. Defaults to 2.0.
        word_score (float, optional): score for words appearance in the transcription. Defaults to -1.0.
        unk_score (float, optional): score for unknown word appearance in the transcription. Defaults to -math.inf.
        sil_score (float, optional): score for silence appearance in the transcription. Defaults to 0.0.
    """
    def __init__(self, token: str='', lexicon: str='', lm: str='', nbest: int=1, beam: int=5, beam_size_token: int=(- 1), beam_threshold: float=25.0, lm_weight: float=2.0, word_score: float=(- 1.0), unk_score: float=(- math.inf), sil_score: float=0.0):
        # Flashlight Text is an optional dependency; fail with a clear
        # installation hint instead of a bare ImportError.
        try:
            from flashlight.lib.text.decoder import CriterionType, KenLM, LexiconDecoder, LexiconDecoderOptions, SmearingMode, Trie
            from flashlight.lib.text.dictionary import Dictionary, create_word_dict, load_words
        except ImportError:
            logger.error(f'Please install Flashlight Text from https://github.com/flashlight/text to enable {__class__.__name__}')
            raise
        # Empty paths fall back to the registered default resources.
        if (token == ''):
            token = _urls_to_filepaths(TOKEN_URL)
        if (lexicon == ''):
            lexicon = _urls_to_filepaths(LEXICON_URL_1)
        if (lm == ''):
            lm = _urls_to_filepaths(LM_URL_1)
        self.nbest = nbest
        self.token_dict = Dictionary(token)
        self.lexicon = load_words(lexicon)
        self.word_dict = create_word_dict(self.lexicon)
        self.lm = KenLM(lm, self.word_dict)
        # '|' marks word boundaries (silence) in the token set.
        self.sil_idx = self.token_dict.get_index('|')
        self.unk_idx = self.word_dict.get_index('<unk>')
        # Build the lexicon trie: every spelling of every word is inserted
        # with that word's LM score from the start state.
        self.trie = Trie(self.token_dict.index_size(), self.sil_idx)
        start_state = self.lm.start(False)
        for (word, spellings) in self.lexicon.items():
            usr_idx = self.word_dict.get_index(word)
            (_, score) = self.lm.score(start_state, usr_idx)
            for spelling in spellings:
                spelling_idxs = [self.token_dict.get_index(tok) for tok in spelling]
                self.trie.insert(spelling_idxs, usr_idx, score)
        self.trie.smear(SmearingMode.MAX)
        # -1 means "consider every token" at each beam step.
        if (beam_size_token == (- 1)):
            beam_size_token = self.token_dict.index_size()
        self.options = LexiconDecoderOptions(beam_size=beam, beam_size_token=beam_size_token, beam_threshold=beam_threshold, lm_weight=lm_weight, word_score=word_score, unk_score=unk_score, sil_score=sil_score, log_add=False, criterion_type=CriterionType.CTC)
        # '#' is the CTC blank symbol in this token dictionary.
        self.blank_idx = self.token_dict.get_index('#')
        self.decoder = LexiconDecoder(self.options, self.trie, self.lm, self.sil_idx, self.blank_idx, self.unk_idx, [], False)
    def get_tokens(self, idxs: Iterable) -> torch.LongTensor:
        """Normalize tokens by handling CTC blank, ASG replabels, etc.

        Args:
            idxs (Iterable): Token ID list output by self.decoder

        Returns:
            torch.LongTensor: Token ID list after normalization.
        """
        # CTC collapse: merge repeats, then drop blanks.
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter((lambda x: (x != self.blank_idx)), idxs)
        return torch.LongTensor(list(idxs))
    def get_timesteps(self, token_idxs: List[int]) -> List[int]:
        """Returns frame numbers corresponding to every non-blank token.

        Args:
            token_idxs (List[int]): IDs of decoded tokens.

        Returns:
            List[int]: Frame numbers corresponding to every non-blank token.
        """
        timesteps = []
        for (i, token_idx) in enumerate(token_idxs):
            if (token_idx == self.blank_idx):
                continue
            # Record only the first frame of each repeated-token run.
            if ((i == 0) or (token_idx != token_idxs[(i - 1)])):
                timesteps.append(i)
        return timesteps
    def decode(self, emissions: torch.Tensor) -> List[List[dict]]:
        """Decode sequence.

        Args:
            emissions (torch.Tensor): Emission probabilities (in log scale),
                shaped (batch, time, n_tokens).

        Returns:
            List[List[dict]]: Decoded hypotheses (nbest per batch item),
            each with tokens, score, timesteps, and words.
        """
        emissions = emissions.float().contiguous().cpu()
        (B, T, N) = emissions.size()
        hyps = []
        for b in range(B):
            # Raw pointer into the float32 storage for batch item b; the
            # factor 4 is sizeof(float32) since stride() counts elements.
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            hyps.append([dict(tokens=self.get_tokens(result.tokens), score=result.score, timesteps=self.get_timesteps(result.tokens), words=[self.word_dict.get_entry(x) for x in result.words if (x >= 0)]) for result in nbest_results])
        return hyps
|
class FrameLevel(nn.Module):
    """Frame-to-frame probing head: a small MLP applied at every time step.

    Args:
        input_size (int): feature dimension of the input frames
        output_size (int): feature dimension of the output frames
        hidden_sizes (List[int]): hidden layer widths; defaults to [256] so
            any upstream dimension is projected to a common size. Pass an
            empty list for a single linear layer.
        activation_type (str): class name in :obj:`torch.nn` applied after
            each hidden linear layer; None keeps the model purely linear.
        activation_conf (dict): kwargs for constructing the activation.
    """
    def __init__(self, input_size: int, output_size: int, hidden_sizes: List[int]=None, activation_type: str=None, activation_conf: dict=None):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        widths = hidden_sizes or [256]
        stack = []
        current_size = input_size
        for width in widths:
            stack.append(nn.Linear(current_size, width))
            if activation_type is not None:
                stack.append(getattr(nn, activation_type)(**(activation_conf or {})))
            current_size = width
        self.hidden_layers = nn.Sequential(*stack)
        self.final_proj = nn.Linear(current_size, output_size)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x, x_len):
        """Apply the MLP frame-wise; lengths pass through unchanged.

        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple of (batch_size, seq_len, output_size) outputs and the
            unchanged x_len.
        """
        hidden = self.hidden_layers(x)
        return (self.final_proj(hidden), x_len)
|
class UtteranceLevel(nn.Module):
    """Utterance-level probing head: MLP, temporal pooling, linear projection.

    Args:
        input_size (int): feature dimension of the input frames
        output_size (int): dimension of the utterance-level output
        hidden_sizes (List[int]): hidden layer widths; defaults to [256] so
            any upstream dimension is projected to a common size. Pass an
            empty list for the vanilla single-layer linear model.
        activation_type (str): class name in :obj:`torch.nn` applied after
            each hidden linear layer; None keeps the model purely linear.
        activation_conf (dict): kwargs for constructing the activation.
        pooling_type (str): pooling class name in :obj:`s3prl.nn.pooling`.
            Default: MeanPooling
        pooling_conf (dict): kwargs for constructing the pooling module.
    """
    def __init__(self, input_size: int, output_size: int, hidden_sizes: List[int]=None, activation_type: str=None, activation_conf: dict=None, pooling_type: str='MeanPooling', pooling_conf: dict=None):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        widths = hidden_sizes or [256]
        stack = []
        current_size = input_size
        for width in widths:
            stack.append(nn.Linear(current_size, width))
            if activation_type is not None:
                stack.append(getattr(nn, activation_type)(**(activation_conf or {})))
            current_size = width
        self.hidden_layers = nn.Sequential(*stack)
        self.pooling = getattr(pooling, pooling_type)(current_size, **(pooling_conf or {}))
        self.final_proj = nn.Linear(self.pooling.output_size, output_size)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x, x_len):
        """Project frames, pool over time, and project to the output size.

        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            torch.FloatTensor: (batch_size, output_size)
        """
        hidden = self.hidden_layers(x)
        pooled = self.pooling(hidden, x_len)
        return self.final_proj(pooled)
|
class HearFullyConnectedPrediction(torch.nn.Module):
    """The prediction head used in the HEAR benchmark.

    Modified from: https://github.com/hearbenchmark/hear-eval-kit/blob/855964977238e89dfc76394aa11c37010edb6f20/heareval/predictions/task_predictions.py#L142

    Args:
        input_size (int): input feature dimension
        output_size (int): output dimension
        hidden_size (int): width shared by all hidden layers. Default: 1024
        hidden_layers (int): number of hidden layers. Default: 2
        norm_after_activation (bool): place the norm after ReLU instead of
            before Dropout. Default: False
        dropout (float): dropout ratio. Default: 0.1
        initialization (str): initializer name in :obj:`torch.nn.init`
        hidden_norm (str): normalization class name in :obj:`torch.nn`
        pooling_type (str): pooling class name in :obj:`s3prl.nn.pooling`;
            None disables pooling and keeps frame-level outputs.
        pooling_conf (dict): kwargs for constructing the pooling module.
    """
    def __init__(self, input_size: int, output_size: int, hidden_size: int=1024, hidden_layers: int=2, norm_after_activation: bool=False, dropout: float=0.1, initialization: str='xavier_uniform_', hidden_norm: str='BatchNorm1d', pooling_type: str=None, pooling_conf: dict=None):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size
        init_fn = getattr(torch.nn.init, initialization)
        norm_cls = getattr(torch.nn, hidden_norm)
        curdim = input_size
        if pooling_type is not None:
            self.pooling = getattr(pooling, pooling_type)(input_size, **(pooling_conf or {}))
            curdim = self.pooling.output_size
        modules: List[torch.nn.Module] = []
        # Gain for the first layer assumes a linear predecessor; subsequent
        # layers (and the projection) follow a ReLU.
        last_activation = 'linear'
        for _ in range(hidden_layers):
            linear = torch.nn.Linear(curdim, hidden_size)
            init_fn(linear.weight, gain=torch.nn.init.calculate_gain(last_activation))
            modules.append(linear)
            if not norm_after_activation:
                modules.append(norm_cls(hidden_size))
            modules.append(torch.nn.Dropout(dropout))
            modules.append(torch.nn.ReLU())
            if norm_after_activation:
                modules.append(norm_cls(hidden_size))
            curdim = hidden_size
            last_activation = 'relu'
        self.hidden = torch.nn.Sequential(*modules) if modules else torch.nn.Identity()
        self.projection = torch.nn.Linear(curdim, output_size)
        init_fn(self.projection.weight, gain=torch.nn.init.calculate_gain(last_activation))

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        return self._output_size

    def forward(self, x, x_len) -> torch.Tensor:
        """Run the MLP, optionally after temporal pooling.

        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple of (y, y_len). Without pooling, y is
            (batch_size, seq_len, output_size) and y_len is unchanged;
            with pooling, y is (batch_size, output_size) and y_len is all 1s.
        """
        if hasattr(self, 'pooling'):
            x = self.pooling(x, x_len)
            x_len = x.new_ones(len(x))
        is_sequence = (x.dim() == 3)
        if is_sequence:
            # Fold time into the batch so BatchNorm1d sees (N, features).
            bs, ts, feat_size = x.shape
            x = x.reshape(bs * ts, feat_size)
        x = self.hidden(x)
        x = self.projection(x)
        if is_sequence:
            x = x.reshape(bs, ts, -1)
        return (x, x_len)
|
class AbsUpstream(nn.Module):
    """
    Abstract interface every upstream (feature-extraction) model must follow.
    Please subclass it.
    """
    @property
    def num_layer(self) -> int:
        """
        number of hidden states
        """
        raise NotImplementedError
    @property
    def hidden_sizes(self) -> List[int]:
        """
        hidden size of each hidden state
        """
        raise NotImplementedError
    @property
    def downsample_rates(self) -> List[int]:
        """
        downsample rate from 16 KHz waveforms for each hidden state
        """
        raise NotImplementedError
    def forward(self, wavs: torch.FloatTensor, wavs_len: torch.LongTensor) -> Tuple[(List[torch.FloatTensor], List[torch.LongTensor])]:
        """
        Args:
            wavs (torch.FloatTensor): (batch_size, seq_len, 1)
            wavs_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple:

            1. all_hs (List[torch.FloatTensor]): all the hidden states
            2. all_hs_len (List[torch.LongTensor]): the lengths for all the hidden states
        """
        raise NotImplementedError
|
class AbsFeaturizer(nn.Module):
    """
    Abstract interface every featurizer must follow. Please subclass it.

    The featurizer's mission is to reduce (standardize) the multiple hidden
    states from :obj:`AbsUpstream` into a single hidden state, so that the
    downstream model can use it as a conventional representation.
    """
    @property
    def output_size(self) -> int:
        """
        The output size after hidden states reduction
        """
        raise NotImplementedError
    @property
    def downsample_rate(self) -> int:
        """
        The downsample rate from 16 KHz waveform of the reduced single hidden state
        """
        raise NotImplementedError
    def forward(self, all_hs: List[torch.FloatTensor], all_hs_len: List[torch.LongTensor]) -> Tuple[(torch.FloatTensor, torch.LongTensor)]:
        """
        Args:
            all_hs (List[torch.FloatTensor]): all the hidden states
            all_hs_len (List[torch.LongTensor]): the lengths for all the hidden states

        Returns:
            tuple:

            1. hs (torch.FloatTensor)
            2. hs_len (torch.LongTensor)
        """
        raise NotImplementedError
|
class AbsFrameModel(nn.Module):
    """Interface for frame-level downstream models. Please subclass it."""

    @property
    def input_size(self) -> int:
        raise NotImplementedError

    @property
    def output_size(self) -> int:
        raise NotImplementedError

    def forward(
        self, x: torch.FloatTensor, x_len: torch.LongTensor
    ) -> Tuple[torch.FloatTensor, torch.LongTensor]:
        raise NotImplementedError
|
class AbsUtteranceModel(nn.Module):
    """Interface for utterance-level models that pool away the temporal dimension."""

    @property
    def input_size(self) -> int:
        raise NotImplementedError

    @property
    def output_size(self) -> int:
        raise NotImplementedError

    def forward(self, x: torch.FloatTensor, x_len: torch.LongTensor) -> torch.FloatTensor:
        raise NotImplementedError
|
class FrameLevelLinear(FrameLevel):
    """The frame-level linear probing model used in the SUPERB Benchmark."""

    def __init__(self, input_size: int, output_size: int, hidden_size: int = 256):
        # A single hidden layer, delegated to the generic FrameLevel backbone.
        super().__init__(input_size, output_size, hidden_sizes=[hidden_size])
|
class MeanPoolingLinear(UtteranceLevel):
    """The utterance-level linear probing model used in the SUPERB Benchmark."""

    def __init__(self, input_size: int, output_size: int, hidden_size: int = 256):
        # A single hidden layer, delegated to the generic UtteranceLevel backbone.
        super().__init__(input_size, output_size, hidden_sizes=[hidden_size])
|
class MeanPooling(nn.Module):
    """Temporal Average Pooling: mean over the valid time steps of each sample."""

    def __init__(self, input_size: int):
        super().__init__()
        self._in_size = input_size

    @property
    def input_size(self) -> int:
        return self._in_size

    @property
    def output_size(self) -> int:
        # Averaging over time does not change the feature dimension.
        return self._in_size

    def forward(self, xs: torch.Tensor, xs_len: torch.LongTensor):
        """Average each sequence over its first ``xs_len`` frames.

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample
        Returns:
            torch.Tensor: Output tensor (#batch, input_size)
        """
        return torch.stack(
            [sample[:length].mean(dim=0) for sample, length in zip(xs, xs_len)]
        )
|
class TemporalStatisticsPooling(nn.Module):
    """
    TemporalStatisticsPooling
    Paper: X-vectors: Robust DNN Embeddings for Speaker Recognition
    Link: http://www.danielpovey.com/files/2018_icassp_xvectors.pdf
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._input_size = input_size

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        # Mean and std are concatenated, doubling the feature dimension.
        return self._input_size * 2

    def forward(self, xs, xs_len):
        """Concatenate the per-sample temporal mean and std over valid frames.

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, output_size)
        """
        outputs = []
        for sample, length in zip(xs, xs_len):
            valid = sample[:length]
            outputs.append(torch.cat((valid.mean(dim=0), valid.std(dim=0)), dim=-1))
        return torch.stack(outputs)
|
class SelfAttentivePooling(nn.Module):
    """
    SelfAttentivePooling
    Paper: Self-Attentive Speaker Embeddings for Text-Independent Speaker Verification
    Link: https://danielpovey.com/files/2018_interspeech_xvector_attention.pdf

    Args:
        input_size (int): The input feature size
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._indim = input_size
        self.sap_linear = nn.Linear(input_size, input_size)
        self.attention = nn.Parameter(torch.FloatTensor(input_size, 1))
        # FIX: torch.FloatTensor(...) allocates *uninitialized* memory, so the
        # attention weights could start as arbitrary garbage (even NaN/Inf).
        # Initialize explicitly, consistent with amsoftmax in this file.
        nn.init.xavier_normal_(self.attention, gain=1)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        # Attention-weighted averaging keeps the feature dimension.
        return self._indim

    def forward(self, xs, xs_len):
        """
        Computes Self-Attentive Pooling Module

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, input_size)
        """
        pooled_list = []
        for (x, x_len) in zip(xs, xs_len):
            # Only the valid frames of each sample participate.
            x = x[:x_len].unsqueeze(0)
            h = torch.tanh(self.sap_linear(x))
            # Per-frame attention scores, normalized over time.
            w = torch.matmul(h, self.attention).squeeze(dim=2)
            w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
            # Attention-weighted temporal average.
            x = torch.sum((x * w), dim=1)
            pooled_list.append(x.squeeze(0))
        return torch.stack(pooled_list)
|
class AttentiveStatisticsPooling(nn.Module):
    """
    AttentiveStatisticsPooling
    Paper: Attentive Statistics Pooling for Deep Speaker Embedding
    Link: https://arxiv.org/pdf/1803.10963.pdf

    Args:
        input_size (int): The input feature size
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._indim = input_size
        self.sap_linear = nn.Linear(input_size, input_size)
        self.attention = nn.Parameter(torch.FloatTensor(input_size, 1))
        # FIX: torch.FloatTensor(...) allocates *uninitialized* memory, so the
        # attention weights could start as arbitrary garbage (even NaN/Inf).
        # Initialize explicitly, consistent with amsoftmax in this file.
        nn.init.xavier_normal_(self.attention, gain=1)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        # Attentive mean and std are concatenated, doubling the dimension.
        return self._indim * 2

    def forward(self, xs, xs_len):
        """
        Computes Attentive Statistics Pooling Module

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, output_size)
        """
        pooled_list = []
        for (x, x_len) in zip(xs, xs_len):
            # Only the valid frames of each sample participate.
            x = x[:x_len].unsqueeze(0)
            h = torch.tanh(self.sap_linear(x))
            w = torch.matmul(h, self.attention).squeeze(dim=2)
            w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
            # Attention-weighted mean.
            mu = torch.sum((x * w), dim=1)
            # Attention-weighted std; clamp avoids sqrt of tiny negatives.
            rh = torch.sqrt((torch.sum(((x ** 2) * w), dim=1) - (mu ** 2)).clamp(min=1e-05))
            x = torch.cat((mu, rh), 1).squeeze(0)
            pooled_list.append(x)
        return torch.stack(pooled_list)
|
class PredictorIdentity(nn.Module):
    """Placeholder predictor for SSL problems that need no prediction head."""

    def __init__(self, **kwargs):
        # Accept and ignore any configuration keywords.
        super().__init__()

    def forward(self, output: Output):
        """Return the input unchanged.

        Args:
            output (s3prl.Output): An Output module

        Return:
            output (s3prl.Output): exactly the same as input, an Output module
        """
        return output
|
class PredictorMockingjay(nn.Module):
    """Predictor head for SSL pre-training (Mockingjay, Tera, Audio Albert)."""

    def __init__(self, config, output_dim, input_dim=None, **kwargs):
        """
        Args:
            config (TransformerConfig):
                A `TransformerConfig` instance; a plain `dict` is converted
                to a `TransformerConfig` automatically.
            output_dim (int):
                The output dimension of the predictor.
            input_dim (int):
                The input dimension of the predictor; when `None`, the
                `hidden_size` from `config` is used. Default: None
        """
        super().__init__()
        if type(config) is dict:
            config = TransformerConfig(**config)
        self.output_size = output_dim
        in_features = config.hidden_size if input_dim is None else input_dim
        self.dense = nn.Linear(in_features, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.output = nn.Linear(config.hidden_size, self.output_size)

    def forward(self, inputs, output_states=False):
        """Transform, normalize, and project the input hidden states.

        Args:
            inputs:
                An object carrying `hidden_states` of shape
                [batch_size, sequence_length, input_dim].
            output_states (bool):
                Whether to also return the predictor's `hidden_states`.
                Default: False
        Return:
            Output (s3prl.Output):
                Contains `prediction` and, when requested, `hidden_states`.
        """
        transformed = self.transform_act_fn(self.dense(inputs.hidden_states))
        transformed = self.LayerNorm(transformed)
        prediction = self.output(transformed)
        if output_states:
            return Output(hidden_states=transformed, prediction=prediction)
        return Output(prediction=prediction)
|
class softmax(nn.Module):
    """Plain softmax + cross-entropy loss in the unified interface shared by
    all speaker-related softmax losses."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.fc = nn.Linear(input_size, output_size)
        # NOTE(review): attribute name keeps the historical typo ("criertion")
        # so external code referring to it keeps working.
        self.criertion = nn.CrossEntropyLoss()

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """Compute cross-entropy over L2-normalized features.

        Args:
            x (torch.Tensor): (batch_size, input_size)
            label (torch.LongTensor): (batch_size, )

        Returns:
            loss (torch.float)
            logit (torch.Tensor): (batch_size, )
        """
        assert x.size(0) == label.size(0)
        assert x.size(1) == self.input_size
        logit = self.fc(F.normalize(x, dim=1))
        return self.criertion(logit, label), logit
|
class amsoftmax(nn.Module):
    """
    AMSoftmax

    Args:
        input_size (int): The input feature size
        output_size (int): The output feature size
        margin (float): Hyperparameter denotes the margin to the decision boundry
        scale (float): Hyperparameter that scales the cosine value
    """

    def __init__(self, input_size: int, output_size: int, margin: float = 0.2, scale: float = 30):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.margin = margin
        self.scale = scale
        self.W = torch.nn.Parameter(torch.randn(input_size, output_size), requires_grad=True)
        self.ce = nn.CrossEntropyLoss()
        nn.init.xavier_normal_(self.W, gain=1)

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """Additive-margin softmax loss over cosine similarities.

        Args:
            x (torch.Tensor): (batch_size, input_size)
            label (torch.LongTensor): (batch_size, )

        Returns:
            loss (torch.float)
            logit (torch.Tensor): (batch_size, )
        """
        assert x.size(0) == label.size(0)
        assert x.size(1) == self.input_size
        # L2-normalize both features (rows) and class weights (columns).
        feat_norm = torch.div(x, torch.norm(x, p=2, dim=1, keepdim=True).clamp(min=1e-12))
        weight_norm = torch.div(self.W, torch.norm(self.W, p=2, dim=0, keepdim=True).clamp(min=1e-12))
        cosine = torch.mm(feat_norm, weight_norm)
        # Subtract the margin only at each sample's target class.
        target = label.view(-1, 1)
        if target.is_cuda:
            target = target.cpu()
        margin_delta = torch.zeros(cosine.size()).scatter_(1, target, self.margin)
        if x.is_cuda:
            margin_delta = margin_delta.cuda()
        logits = self.scale * (cosine - margin_delta)
        return self.ce(logits, label), logits
|
class TDNN(nn.Module):
    """
    TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf.

    Context size and dilation determine the frames selected, e.g.:
    context size 5 / dilation 1 -> [-2,-1,0,1,2];
    context size 3 / dilation 2 -> [-2, 0, 2];
    context size 1 / dilation 1 -> [0].

    Args:
        input_size (int): The input feature size
        output_size (int): The output feature size
        context_size (int): See example above
        dilation (int): See example above
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, True) Use batch norm for TDNN layers
    """

    def __init__(self, input_size: int, output_size: int, context_size: int, dilation: int, dropout_p: float = 0.0, batch_norm: bool = True):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.context_size = context_size
        self.dilation = dilation
        self.dropout_p = dropout_p
        self.batch_norm = batch_norm
        # One linear layer applied to every unfolded context window.
        self.kernel = nn.Linear(input_size * context_size, output_size)
        self.nonlinearity = nn.ReLU()
        if batch_norm:
            self.bn = nn.BatchNorm1d(output_size)
        if dropout_p:
            self.drop = nn.Dropout(p=dropout_p)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (torch.FloatTensor): (batch, seq_len, input_size)

        Returns:
            torch.FloatTensor: (batch, seq_len, output_size)
        """
        (_, _, feat_dim) = x.shape
        assert feat_dim == self.input_size, 'Input size was wrong. Expected ({}), got ({})'.format(self.input_size, feat_dim)
        # Gather each temporal context window into a single flattened vector.
        frames = F.unfold(
            x.unsqueeze(1),
            (self.context_size, self.input_size),
            stride=(1, self.input_size),
            dilation=(self.dilation, 1),
        ).transpose(1, 2)
        out = self.nonlinearity(self.kernel(frames))
        if self.dropout_p:
            out = self.drop(out)
        if self.batch_norm:
            # BatchNorm1d expects (batch, channels, time).
            out = self.bn(out.transpose(1, 2)).transpose(1, 2)
        return out
|
class XVectorBackbone(nn.Module):
    """
    The TDNN layers the same as in https://danielpovey.com/files/2018_odyssey_xvector_lid.pdf.

    Args:
        input_size (int): The input feature size, usually is the output size of upstream models
        output_size (int): (default, 1500) The size of the speaker embedding
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, True) Use batch norm for TDNN layers
    """

    # FIX: the annotation was `batch_norm: False = True` — the literal value
    # `False` is not a type; the intended annotation is `bool`. The docstring
    # also claimed the default was False while it is actually True.
    def __init__(self, input_size: int, output_size: int = 1500, dropout_p: float = 0.0, batch_norm: bool = True):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        # Five stacked TDNN layers with the dilations of the x-vector recipe.
        self.module = nn.Sequential(
            TDNN(input_size=input_size, output_size=512, context_size=5, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=3, dilation=2, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=3, dilation=3, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=1, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=output_size, context_size=1, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
        )

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (torch.FloatTensor): (batch, seq_len, input_size)

        output:
            torch.FloatTensor: (batch, seq_len, output_size)
        """
        x = self.module(x)
        return x
|
class _SEModule(nn.Module):
def __init__(self, channels, bottleneck=128):
super().__init__()
self.se = nn.Sequential(nn.AdaptiveAvgPool1d(1), nn.Conv1d(channels, bottleneck, kernel_size=1, padding=0), nn.ReLU(), nn.Conv1d(bottleneck, channels, kernel_size=1, padding=0), nn.Sigmoid())
def forward(self, input):
x = self.se(input)
return (input * x)
|
class _Bottle2neck(nn.Module):
    """Res2Net-style bottleneck block with dilated convolutions and an SE gate.

    Splits the channel dimension into `scale` groups of `width` channels and
    processes them with a hierarchical (cumulative) chain of dilated 1-D
    convolutions, as used in ECAPA-TDNN (https://arxiv.org/abs/2005.07143).
    Input and output are (batch, channels, time); a residual connection is
    added at the end, so `inplanes` must equal `planes` for the sum to work.
    """
    def __init__(self, inplanes, planes, kernel_size=None, dilation=None, scale=8):
        super().__init__()
        # Channels per split group; `scale` groups form the full bottleneck.
        width = int(math.floor((planes / scale)))
        self.conv1 = nn.Conv1d(inplanes, (width * scale), kernel_size=1)
        self.bn1 = nn.BatchNorm1d((width * scale))
        # The last split group is passed through untouched, so only scale-1 convs.
        self.nums = (scale - 1)
        convs = []
        bns = []
        # "Same" padding for the dilated kernel, so time length is preserved.
        num_pad = (math.floor((kernel_size / 2)) * dilation)
        for i in range(self.nums):
            convs.append(nn.Conv1d(width, width, kernel_size=kernel_size, dilation=dilation, padding=num_pad))
            bns.append(nn.BatchNorm1d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv1d((width * scale), planes, kernel_size=1)
        self.bn3 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.width = width
        self.se = _SEModule(planes)
    def forward(self, x):
        # Keep the input for the residual connection at the end.
        residual = x
        out = self.conv1(x)
        out = self.relu(out)
        out = self.bn1(out)
        # Split channels into `scale` groups of `width` each.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if (i == 0):
                sp = spx[i]
            else:
                # Hierarchical connection: fold the previous group's output in.
                sp = (sp + spx[i])
            sp = self.convs[i](sp)
            sp = self.relu(sp)
            sp = self.bns[i](sp)
            if (i == 0):
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # The final split group bypasses the convolutions (identity branch).
        out = torch.cat((out, spx[self.nums]), 1)
        out = self.conv3(out)
        out = self.relu(out)
        out = self.bn3(out)
        # Channel-wise squeeze-and-excitation gating.
        out = self.se(out)
        out += residual
        return out
|
class ECAPA_TDNN(nn.Module):
    """
    ECAPA-TDNN model as in https://arxiv.org/abs/2005.07143.

    Reference code: https://github.com/TaoRuijie/ECAPA-TDNN.

    Args:
        input_size (int): The input feature size, usually is the output size of upstream models
        output_size (int): (default, 1536) The size of the speaker embedding
        C (int): (default, 1024) The channel dimension
    """

    def __init__(self, input_size: int = 80, output_size: int = 1536, C: int = 1024, **kwargs):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.conv1 = nn.Conv1d(input_size, C, kernel_size=5, stride=1, padding=2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C)
        # Three SE-Res2Net blocks with increasing dilation.
        self.layer1 = _Bottle2neck(C, C, kernel_size=3, dilation=2, scale=8)
        self.layer2 = _Bottle2neck(C, C, kernel_size=3, dilation=3, scale=8)
        self.layer3 = _Bottle2neck(C, C, kernel_size=3, dilation=4, scale=8)
        # Aggregates the concatenated outputs of all three blocks.
        self.layer4 = nn.Conv1d(3 * C, output_size, kernel_size=1)

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.FloatTensor):
        """
        Args:
            x (torch.FloatTensor): size (batch, seq_len, input_size)

        Returns:
            x (torch.FloatTensor): size (batch, seq_len, output_size)
        """
        feat = self.bn1(self.relu(self.conv1(x.transpose(1, 2).contiguous())))
        y1 = self.layer1(feat)
        # Each block sees the running sum of all previous outputs.
        y2 = self.layer2(feat + y1)
        y3 = self.layer3(feat + y1 + y2)
        out = self.relu(self.layer4(torch.cat((y1, y2, y3), dim=1)))
        return out.transpose(1, 2).contiguous()
|
class SpeakerEmbeddingExtractor(nn.Module):
    """
    The speaker embedding extractor module.

    Args:
        input_size (int): The input feature size, usually is the output size of upstream models
        output_size (int): (default, 1500) The size of the speaker embedding
        backbone (str): (default, XVector) Use which kind of speaker model
        pooling_type (str): (default, TAP) Use which kind of pooling method
    """

    def __init__(self, input_size: int, output_size: int = 1500, backbone: str = 'XVector', pooling_type: str = 'TemporalAveragePooling'):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        if backbone == 'XVector':
            self.backbone = XVectorBackbone(input_size=input_size, output_size=output_size)
            self.offset = XVECTOR_TDNNS_LENGTH_REDUCTION
        elif backbone == 'ECAPA-TDNN':
            self.backbone = ECAPA_TDNN(input_size=input_size, output_size=output_size)
            self.offset = ECAPA_TDNNS_LENGTH_REDUCTION
        else:
            raise ValueError('{} backbone type is not defined'.format(backbone))
        # Full names and their abbreviations both map to the pooling class.
        pooling_classes = {
            'TemporalAveragePooling': TemporalAveragePooling,
            'TAP': TemporalAveragePooling,
            'TemporalStatisticsPooling': TemporalStatisticsPooling,
            'TSP': TemporalStatisticsPooling,
            'SelfAttentivePooling': SelfAttentivePooling,
            'SAP': SelfAttentivePooling,
            'AttentiveStatisticsPooling': AttentiveStatisticsPooling,
            'ASP': AttentiveStatisticsPooling,
        }
        if pooling_type not in pooling_classes:
            raise ValueError('{} pooling type is not defined'.format(pooling_type))
        self.pooling = pooling_classes[pooling_type](self.backbone.output_size)
        # Statistics poolings double the dimension, so refresh the output size.
        self._outdim = self.pooling.output_size

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor, xlen: torch.LongTensor = None):
        """
        Args:
            x (torch.Tensor): size (batch, seq_len, input_size)
            xlen (torch.LongTensor): size (batch, )

        Returns:
            x (torch.Tensor): size (batch, output_size)
        """
        x = self.backbone(x)
        if xlen is None:
            xlen = torch.LongTensor([x.shape[1]] * x.shape[0])
        else:
            # The backbone's TDNN context shrinks each sequence by `offset`.
            xlen = torch.LongTensor([max(length - self.offset, 0) for length in xlen])
        return self.pooling(x, xlen)
|
class _UtteranceExtractor(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self._indim = input_size
self._outdim = output_size
self.linear1 = nn.Linear(input_size, output_size)
self.linear2 = nn.Linear(output_size, output_size)
self.act_fn = nn.ReLU()
@property
def input_size(self):
return self._indim
@property
def output_size(self):
return self._outdim
def forward(self, x_BxH):
hid_BxH = self.linear1(x_BxH)
hid_BxH = self.act_fn(hid_BxH)
if self.training:
hid_BxH = self.linear2(hid_BxH)
hid_BxH = self.act_fn(hid_BxH)
return hid_BxH
|
class SuperbXvector(nn.Module):
    """
    The Xvector used in the SUPERB Benchmark with the exact default arguments.

    Args:
        input_size (int): The input feature size, usually is the output size of upstream models
        output_size (int): (default, 512) The size of the speaker embedding
        hidden_size (int): (default, 512) The major hidden size in the network
        aggregation_size (int): (default, 1500) The output size of the x-vector, which is usually large
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, False) Use batch norm for TDNN layers
    """

    def __init__(self, input_size: int, output_size: int = 512, hidden_size: int = 512, aggregation_size: int = 1500, dropout_p: float = 0.0, batch_norm: bool = False):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size
        # Project upstream features to the working hidden size first.
        self.projector = nn.Linear(input_size, hidden_size)
        self.tdnns = XVectorBackbone(hidden_size, aggregation_size, dropout_p=dropout_p, batch_norm=batch_norm)
        self.pooling = TemporalStatisticsPooling(self.tdnns.output_size)
        self.affine = _UtteranceExtractor(self.pooling.output_size, output_size)

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            torch.FloatTensor: (batch_size, output_size)
        """
        hidden = self.tdnns(self.projector(x))
        # The TDNN context windows shorten every sequence by a fixed amount.
        x_len = x_len - XVECTOR_TDNNS_LENGTH_REDUCTION
        assert (x_len <= 0).sum() == 0, 'The input sequence is too short for the X-vector model'
        return self.affine(self.pooling(hidden, x_len))
|
class TransformerConfig(object):
    """
    Configuration class to store the configuration of a `TransformerModel`.
    """

    def __init__(self, hidden_size: int=768, num_hidden_layers: int=3, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, initializer_range: float=0.02, layer_norm_eps: float=1e-12, share_layer: bool=False, pre_layer_norm: bool=False):
        # Store every constructor argument as a same-named attribute.
        for name, value in locals().items():
            if name != 'self':
                setattr(self, name, value)
|
def prune_linear_layer(layer, index, dim=0):
    """
    Prune a linear layer (a model parameters) to keep only entries in index.
    Return the pruned layer as a new layer with requires_grad=True.
    Used to remove heads.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    has_bias = layer.bias is not None
    if has_bias:
        # Pruning the input dimension (dim == 1) leaves the bias untouched.
        kept_bias = (layer.bias if dim == 1 else layer.bias[index]).clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=has_bias).to(layer.weight.device)
    # Copy under no_grad so autograd does not track the initialization.
    with torch.no_grad():
        new_layer.weight.copy_(kept_weight.contiguous())
        if has_bias:
            new_layer.bias.copy_(kept_bias.contiguous())
    return new_layer
|
def gelu(x):
    """
    Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    half_x = x * 0.5
    return half_x * (torch.erf(x / math.sqrt(2.0)) + 1.0)
|
def swish(x):
    """Swish activation: x * sigmoid(x) (https://arxiv.org/abs/1710.05941)."""
    gate = torch.sigmoid(x)
    return gate * x
|
class TransformerLayerNorm(nn.Module):
    """LayerNorm in the TF style: epsilon inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last dimension with biased variance.
        centered = x - x.mean(-1, keepdim=True)
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
|
class TransformerInputRepresentations(nn.Module):
    """
    Construct the input representation from spectrogram, and position encodings.
    """

    def __init__(self, config, input_dim):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.spec_transform = nn.Linear(input_dim, config.hidden_size)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, spec, pos_enc):
        # Project spectrogram frames to hidden size, add positional encodings,
        # then apply LayerNorm and dropout.
        reps = self.spec_transform(spec) + pos_enc
        return self.dropout(self.LayerNorm(reps))
|
class TransformerSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        if (config.hidden_size % config.num_attention_heads) != 0:
            raise ValueError(('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)))
        self.output_attentions = output_attentions
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(new_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores, with additive attention mask.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            probs = probs * head_mask
        context = torch.matmul(probs, v)
        if self.keep_multihead_output:
            # Keep the per-head output (and its grad) for head importance analysis.
            self.multihead_output = context
            self.multihead_output.retain_grad()
        context = context.permute(0, 2, 1, 3).contiguous()
        context = context.view(context.size()[:-2] + (self.all_head_size,))
        if self.output_attentions:
            return probs, context
        return context
|
class TransformerSelfOutput(nn.Module):
    """Projects attention output, adds the residual, and applies post-LN."""

    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        # Residual connection; LayerNorm only in the post-LN configuration
        # (in pre-LN, normalization happened before the attention block).
        projected = self.dropout(self.dense(hidden_states)) + input_tensor
        if self.pre_layer_norm:
            return projected
        return self.LayerNorm(projected)
|
class TransformerAttention(nn.Module):
    """Self-attention sub-block: attention + output projection, pre- or post-LN."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.self = TransformerSelfAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.output = TransformerSelfOutput(config)
        if self.pre_layer_norm:
            # Pre-LN reuses the output sub-module's LayerNorm on the input.
            self.LayerNorm = self.output.LayerNorm

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the q/k/v/output projections."""
        if len(heads) == 0:
            return
        keep = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            keep[head] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # The output projection is pruned along its input dimension.
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        if self.pre_layer_norm:
            self_output = self.self(self.LayerNorm(input_tensor), attention_mask, head_mask)
        else:
            self_output = self.self(input_tensor, attention_mask, head_mask)
        if self.output_attentions:
            attentions, self_output = self_output
        attention_output = self.output(self_output, input_tensor)
        return (attentions, attention_output) if self.output_attentions else attention_output
|
class TransformerIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a registered name or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
|
class TransformerOutput(nn.Module):
    """FFN contraction back to hidden size, with residual and post-LN."""

    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        # Residual connection; LayerNorm only in the post-LN configuration.
        projected = self.dropout(self.dense(hidden_states)) + input_tensor
        if self.pre_layer_norm:
            return projected
        return self.LayerNorm(projected)
|
class TransformerLayer(nn.Module):
    """One transformer block: self-attention + feed-forward, pre- or post-LN."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.attention = TransformerAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.intermediate = TransformerIntermediate(config)
        self.output = TransformerOutput(config)
        if self.pre_layer_norm:
            # Pre-LN shares the output block's LayerNorm for the FFN input.
            self.LayerNorm = self.output.LayerNorm

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_output = self.attention(hidden_states, attention_mask, head_mask)
        if self.output_attentions:
            attentions, attention_output = attention_output
        if self.pre_layer_norm:
            intermediate_output = self.intermediate(self.LayerNorm(attention_output))
        else:
            intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return (attentions, layer_output) if self.output_attentions else layer_output
|
class TransformerEncoder(nn.Module):
    """Stack of transformer layers.

    Args:
        config: A ``TransformerConfig`` instance, or a ``dict`` of its kwargs.
        output_attentions (bool): If True, ``forward`` also returns per-layer
            attention weights.
        keep_multihead_output (bool): If True, each attention module retains its
            multi-head output (with gradient) for head-importance analysis.
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False, **kwargs):
        super(TransformerEncoder, self).__init__()
        if isinstance(config, dict):
            config = TransformerConfig(**config)
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        layer = TransformerLayer(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        if config.share_layer:
            # Repeat the same instance: all layers share one set of parameters.
            self.layer = nn.ModuleList([layer for _ in range(config.num_hidden_layers)])
        else:
            self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        if self.pre_layer_norm:
            # One LayerNorm per layer output plus a final one for the last layer.
            LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.LayerNorm = nn.ModuleList([copy.deepcopy(LayerNorm) for _ in range(config.num_hidden_layers + 1)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        """Run the layer stack.

        Args:
            hidden_states: Input representations, [batch, seq_len, hidden_size].
            attention_mask: Extended additive attention mask.
            output_all_encoded_layers (bool): If True, collect the hidden states
                entering every layer as well as the final output.
            head_mask: Optional per-layer head mask (indexable by layer), or None.

        Returns:
            List of encoded layers (and, when ``output_attentions`` is enabled,
            a tuple of (all_attentions, encoded_layers)).
        """
        all_encoder_layers = []
        all_attentions = []
        for i, layer_module in enumerate(self.layer):
            if output_all_encoded_layers:
                if self.pre_layer_norm:
                    all_encoder_layers.append(self.LayerNorm[i](hidden_states))
                else:
                    all_encoder_layers.append(hidden_states)
            # Fix: the default head_mask=None used to crash on head_mask[i].
            layer_head_mask = None if head_mask is None else head_mask[i]
            hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask)
            if self.output_attentions:
                attentions, hidden_states = hidden_states
                all_attentions.append(attentions)
        if self.pre_layer_norm:
            all_encoder_layers.append(self.LayerNorm[-1](hidden_states))
        else:
            all_encoder_layers.append(hidden_states)
        if self.output_attentions:
            return (all_attentions, all_encoder_layers)
        return all_encoder_layers
|
class TransformerInitModel(nn.Module):
    """Abstract base class that handles weight initialization.

    Subclasses store their config here and call
    ``self.apply(self.init_Transformer_weights)`` after building submodules.
    """

    def __init__(self, config, output_attentions, *inputs, **kwargs):
        super(TransformerInitModel, self).__init__()
        self.config = config
        self.output_attentions = output_attentions

    def init_Transformer_weights(self, module):
        """Initialize one submodule: normal weights for Linear/Embedding,
        unit scale and zero shift for LayerNorm, zero Linear biases."""
        is_linear = isinstance(module, nn.Linear)
        if is_linear or isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, TransformerLayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
        if is_linear and module.bias is not None:
            module.bias.data.zero_()
|
class TransformerMockingjay(TransformerInitModel):
    """The Transformer model.

    Currently supporting upstream models of Mockingjay, Tera, and Audio Albert.
    """

    def __init__(self, config, input_dim, output_attentions=False, keep_multihead_output=False, with_input_module=True):
        """
        Args:
            config (TransformerConfig):
                A `TransformerConfig` class instance with the configuration to
                build a new model; can also be a `dict` that initializes the
                TransformerConfig class.
            input_dim (int):
                The input dimension of the model.
            output_attentions (bool):
                If True, also output attention weights computed by the model at
                each layer. Default: False
            keep_multihead_output (bool):
                If True, saves output of the multi-head attention module with its
                gradient, usable for head-importance metrics. Default: False
            with_input_module (bool):
                If True, set up a `TransformerInputRepresentations` front-end.
                Default: True
        """
        super(TransformerMockingjay, self).__init__(config, output_attentions)
        self.with_input_module = with_input_module
        if self.with_input_module:
            self.input_representations = TransformerInputRepresentations(config, input_dim)
        self.encoder = TransformerEncoder(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.apply(self.init_Transformer_weights)
        self.input_size = input_dim

    def prune_heads(self, heads_to_prune):
        """Prune heads of the model.

        Args:
            heads_to_prune (dict): {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """Gather all multi-head outputs.

        Returns:
            list (layers) of multihead module outputs with gradients.
        """
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, spec_input, pos_enc=None, attention_mask=None, output_all_encoded_layers=False, head_mask=None):
        """Encode a batch of acoustic features.

        Args:
            spec_input (torch.Tensor):
                [batch_size, sequence_length, feature_dimension] frames; during
                training, masked frames produced by `process_train_MAM_data()`
                in `transformer/mam.py`.
            pos_enc (torch.Tensor):
                [batch_size, sequence_length, hidden_size] positional encodings
                from `fast_position_encoding()`; used only when the input module
                is enabled.
            attention_mask (torch.Tensor):
                Optional mask with values in [0, 1], 1 for positions to attend.
            output_all_encoded_layers (bool):
                If True, return the full sequence of hidden states of every
                attention block; otherwise only the last block's.
            head_mask (torch.Tensor):
                Optional [num_heads] or [num_layers, num_heads] mask with values
                between 0 and 1; 1.0 => head is fully masked, 0.0 => head is not
                masked.

        Returns:
            Output: `hidden_states` (list of per-layer states, or only the last
            layer's states when `output_all_encoded_layers` is False) and, when
            `output_attentions` was enabled at construction, `output` with the
            per-layer attention weights.
        """
        if attention_mask is None:
            # NOTE(review): this builds a mask shaped like the 3-D spec_input
            # ([batch, seq, feat]) rather than [batch, seq]; confirm the callers
            # rely on this broadcasting behavior.
            attention_mask = torch.ones_like(spec_input)
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=spec_input.dtype)
        # Additive mask: 0 where attended, a large negative number where masked.
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # Fix: `expand_as` expects a tensor argument; `expand` is the call
                # that broadcasts the per-head mask across all layers.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=spec_input.dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        if self.with_input_module:
            input_representations = self.input_representations(spec_input, pos_enc)
        else:
            input_representations = spec_input
        encoded_layers = self.encoder(input_representations, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, head_mask=head_mask)
        if self.output_attentions:
            all_attentions, encoded_layers = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return Output(output=all_attentions, hidden_states=encoded_layers)
        return Output(hidden_states=encoded_layers)
|
class VqApcLayer(nn.Module):
    """The Vq Layer.

    Currently used in the upstream model of VQ-APC (nn/rnn_apc.py).
    Defines a VQ layer that follows an RNN layer: hidden states are quantized
    against a learned codebook with gumbel-softmax during training and a hard
    argmax at test time.
    """

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        """
        Args:
            input_size (int):
                Pre-quantized input feature size, usually the RNN hidden size.
            codebook_size (int):
                Number of codes.
            code_dim (int):
                Size of each code. If not the last layer, must equal the RNN
                hidden size.
            gumbel_temperature (float):
                Temperature for gumbel-softmax.
        """
        super(VqApcLayer, self).__init__()
        self.codebook_size = codebook_size
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        # Bias-free linear layer doubles as the codebook matrix (C x E).
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Running count of how often each code has been selected.
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing=False):
        """Quantize a batch of features.

        Args:
            inputs_BxLxI (torch.Tensor):
                3-D input features.
            testing (bool):
                True for the inference phase (hard argmax instead of gumbel).
                Default: False

        Returns:
            Output with `output` (codes_BxLxE, the VQ codes) and
            `logit` (logits_BxLxC, the VQ logits).
        """
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Hard one-hot on the argmax code.
            full_shape = logits_BxLxC.size()
            best = logits_BxLxC.max(dim=-1)[1]
            onehot_BxLxC = torch.zeros_like(logits_BxLxC).view(-1, full_shape[-1])
            onehot_BxLxC.scatter_(1, best.view(-1, 1), 1)
            onehot_BxLxC = onehot_BxLxC.view(*full_shape)
        else:
            onehot_BxLxC = gumbel_softmax(
                logits_BxLxC, tau=self.gumbel_temperature, hard=True, eps=EPS, dim=-1
            )
        # Accumulate per-code usage for the perplexity/usage reports.
        self.token_usg += (
            onehot_BxLxC.detach().cpu().reshape(-1, self.codebook_size).sum(dim=0).numpy()
        )
        return Output(output=self.codebook_CxE(onehot_BxLxC), logit=logits_BxLxC)

    def report_ppx(self):
        """Computes perplexity of distribution over codebook."""
        acc_usg = self.token_usg / sum(self.token_usg)
        entropy_bits = sum(-acc_usg * np.log2(acc_usg + EPS))
        return 2 ** entropy_bits

    def report_usg(self):
        """Computes usage of each entry in codebook, then resets the counters."""
        acc_usg = self.token_usg / sum(self.token_usg)
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
def get_optimizer(model_params, total_steps, optimizer_config):
    """Build an optimizer from a config dict.

    Args:
        model_params: List of modules whose parameters will be optimized.
        total_steps (int): Total number of training steps (for warmup schedules).
        optimizer_config (dict): Must contain 'name' (e.g. 'BertAdam_with_schedule');
            the remaining keys are forwarded to the matching `get_<name>` factory.
            The caller's dict is not mutated.

    Returns:
        The constructed optimizer.

    Raises:
        ValueError: If no `get_<name>` factory exists in this module.
    """
    optimizer_config = copy.deepcopy(optimizer_config)  # don't mutate the caller's config
    optimizer_name = optimizer_config.pop('name')
    # Look the factory up by name instead of eval() so arbitrary config strings
    # cannot execute code.
    factory = globals().get(f'get_{optimizer_name}')
    if factory is None:
        raise ValueError(f'Unknown optimizer: {optimizer_name}')
    return factory(model_params, total_steps=total_steps, **optimizer_config)
|
def get_grouped_parameters(model_params):
    """Split parameters into weight-decay and no-decay groups.

    Biases and LayerNorm parameters are exempt from weight decay, following
    the standard BERT fine-tuning recipe.

    Args:
        model_params: List of modules contributing parameters.

    Returns:
        Two param-group dicts suitable for torch-style optimizers.
    """
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    named_params = []
    for module in model_params:
        named_params.extend(module.named_parameters())

    def exempt(name):
        # True when the parameter should not receive weight decay.
        return any(marker in name for marker in no_decay)

    decayed = [param for name, param in named_params if not exempt(name)]
    undecayed = [param for name, param in named_params if exempt(name)]
    return [
        {'params': decayed, 'weight_decay': 0.01},
        {'params': undecayed, 'weight_decay': 0.0},
    ]
|
def get_BertAdam_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """BertAdam with a linear-warmup schedule over `total_steps`.

    Extra keyword arguments are accepted for config compatibility but ignored.
    """
    return BertAdam(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
    )
|
def get_AdamW_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """AdamW (via Lamb with adam=True) with a linear-warmup schedule."""
    return Lamb(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
        adam=True,
        correct_bias=True,
        **kwargs,
    )
|
def get_Lamb_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """Lamb optimizer with a linear-warmup schedule."""
    return Lamb(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
        adam=False,
        correct_bias=False,
        **kwargs,
    )
|
# NOTE(review): the following lines are non-code web-scraper residue
# (dataset-viewer UI text); commented out so the file stays parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.