code stringlengths 17 6.64M |
|---|
def label2a(a):
    """Binary label: 0 for negative inputs, 1 otherwise."""
    return 0 if a < 0 else 1
|
def label2b(a):
    """Three-way sign label: -1, 0, or 1 matching the sign of ``a``."""
    if a > 0:
        return 1
    return -1 if a < 0 else 0
|
def label7(a):
    """Seven-way label in {-3, ..., 3}.

    Negative values are bucketed with strict thresholds (-3 below -2,
    -2 below -1, -1 below 0); non-negative values with inclusive ones
    (0 at zero, 1 up to 1, 2 up to 2, 3 beyond).
    """
    if a >= 0:
        if a == 0:
            return 0
        if a <= 1:
            return 1
        return 2 if a <= 2 else 3
    if a < -2:
        return -3
    return -2 if a < -1 else -1
|
class DownstreamExpert(PhoneExpert):
    """Phone-recognition downstream expert.

    Reuses the phone linear expert wholesale, swapping in this module's
    ``Model`` as the prediction head.
    """

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super().__init__(upstream_dim, downstream_expert, **kwargs)
        # Drop the head built by the parent and rebuild it with the local Model.
        delattr(self, 'model')
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class Model(nn.Module):
    """Two-layer classifier head: Linear -> Dropout -> ReLU -> Linear."""

    def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
        super(Model, self).__init__()
        self.in_linear = nn.Linear(input_dim, hidden_size)
        self.out_linear = nn.Linear(hidden_size, output_class_num)
        self.drop = nn.Dropout(dropout)
        self.act_fn = nn.functional.relu

    def forward(self, features):
        """Project ``features`` (..., input_dim) to logits (..., output_class_num)."""
        # Dropout is applied before the activation, matching the original design.
        hidden = self.act_fn(self.drop(self.in_linear(features)))
        return self.out_linear(hidden)
|
class PhoneDataset(Dataset):
    """Bucketed frame-level phone classification dataset over LibriSpeech.

    Each dataset item is one bucket: a list of audio file paths of similar
    duration, so a whole bucket forms a single mini-batch (see collate_fn,
    which expects DataLoader batch_size == 1).
    """

    def __init__(self, split, bucket_size, libri_root, phone_path, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
        """Build the buckets for ``split`` ('train' / 'dev' / 'test').

        Args:
            split: dataset split; train/dev share train_split.txt (90%/10%).
            bucket_size: number of utterances per bucket.
            libri_root: LibriSpeech root directory used when loading audio.
            phone_path: directory holding the aligned phone labels and split lists.
            bucket_file: directory containing the pre-computed length CSV.
            sample_rate: expected audio sample rate (default 16 kHz).
            train_dev_seed: RNG seed for the deterministic train/dev shuffle.
        """
        super(PhoneDataset, self).__init__()
        self.libri_root = libri_root
        self.phone_path = phone_path
        self.sample_rate = sample_rate
        self.class_num = 41  # number of phone classes in the aligned label files
        # Map: utterance id -> list of frame-level phone ids.
        self.Y = {}
        phone_file = open(os.path.join(phone_path, 'converted_aligned_phones.txt')).readlines()
        for line in phone_file:
            line = line.strip('\n').split(' ')
            self.Y[line[0]] = [int(p) for p in line[1:]]
        if ((split == 'train') or (split == 'dev')):
            # train and dev come from the same list: shuffle deterministically,
            # then take the first 90% for train and the last 10% for dev.
            usage_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()
            random.seed(train_dev_seed)
            random.shuffle(usage_list)
            percent = int((len(usage_list) * 0.9))
            usage_list = (usage_list[:percent] if (split == 'train') else usage_list[percent:])
        elif (split == 'test'):
            usage_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()
        else:
            raise ValueError("Invalid 'split' argument for dataset: PhoneDataset!")
        # Dict used only for O(1) membership tests below.
        usage_list = {line.strip('\n'): None for line in usage_list}
        print(((((('[Dataset] - # phone classes: ' + str(self.class_num)) + ', number of data for ') + split) + ': ') + str(len(usage_list))))
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # Sort by length (descending) so each bucket groups similar-length audio.
        table = pd.read_csv(os.path.join(bucket_file, 'train-clean-100.csv')).sort_values(by=['length'], ascending=False)
        X = table['file_path'].tolist()
        X_lens = table['length'].tolist()
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            if (self._parse_x_name(x) in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    # Halve full buckets whose longest utterance exceeds
                    # HALF_BATCHSIZE_TIME (presumably a module-level frame-count
                    # threshold -- confirm) to bound per-batch memory.
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # Keep the final partial bucket if it has more than one utterance.
        if (len(batch_x) > 1):
            # NOTE(review): this re-checks only the *last* file iterated above,
            # so the leftover bucket is dropped whenever that particular file is
            # not in usage_list -- looks unintended; confirm upstream.
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)

    def _parse_x_name(self, x):
        """Return the utterance id (file stem) of a path like 'a/b/uttid.flac'."""
        return x.split('/')[(- 1)].split('.')[0]

    def _load_wav(self, wav_path):
        """Load one audio file (relative to libri_root) as a flat 1-D tensor."""
        (wav, sr) = torchaudio.load(os.path.join(self.libri_root, wav_path))
        return wav.view((- 1))

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return one bucket: (list of waveforms, list of frame-label tensors)."""
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = [torch.LongTensor(self.Y[self._parse_x_name(x_file)]) for x_file in self.X[index]]
        return (wav_batch, label_batch)

    def collate_fn(self, items):
        """Unwrap the single bucket (DataLoader batch_size is 1)."""
        return (items[0][0], items[0][1])
|
class Model(nn.Module):
    """Single linear projection from frame features to class logits."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        """Map ``features`` (..., input_dim) to logits (..., output_class_num)."""
        return self.linear(features)
|
class DownstreamExpert(PhoneExpert):
    """Phone downstream expert that swaps in the local linear ``Model``.

    Everything else (data loading, training, logging) is inherited from
    the phone linear expert.
    """

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super().__init__(upstream_dim, downstream_expert, **kwargs)
        # Remove the parent's prediction head, then attach ours.
        delattr(self, 'model')
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class QUESST14Dataset(Dataset):
    """QUESST 2014 dataset restricted to English entries.

    Serves whole waveforms (mono, 16 kHz, -3 dB gain) plus their names.
    Queries occupy indices [0, n_queries); documents follow.
    """

    def __init__(self, split, **kwargs):
        root = Path(kwargs['dataset_root'])
        docs = english_audio_paths(root, 'language_key_utterances.lst')
        queries = english_audio_paths(root, f'language_key_{split}.lst')
        self.dataset_root = root
        self.n_queries = len(queries)
        self.n_docs = len(docs)
        # Queries first so downstream code can split by n_queries.
        self.data = queries + docs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        path = self.data[idx]
        # Downmix to mono, resample to 16 kHz, attenuate by 3 dB.
        wav, _ = apply_effects_file(str(path), [['channels', '1'], ['rate', '16000'], ['gain', '-3.0']])
        return wav.squeeze(0).numpy(), path.with_suffix('').name

    def collate_fn(self, samples):
        """Collate a mini-batch of (waveform, audio name) pairs."""
        wavs, audio_names = zip(*samples)
        return wavs, audio_names
|
def english_audio_paths(dataset_root_path, lst_name):
    """Collect paths of the ``nnenglish`` audios listed in a scoring .lst file."""
    paths = []
    with open(dataset_root_path / 'scoring' / lst_name) as f:
        for line in f:
            audio_path, lang = line.strip().split()
            if lang == 'nnenglish':
                # Drop the leading directory component: the list prefixes each
                # entry with a root that differs from dataset_root_path.
                relative = re.sub('^.*?\\/', '', audio_path)
                paths.append(dataset_root_path / relative)
    return paths
|
class DownstreamExpert(nn.Module):
    """Query-by-example expert for QUESST 2014 embedding matching.

    Trains an embedding ``Model`` with a cosine-similarity hinge-style loss
    over (query, audio) pairs; at dev/eval time it collects per-segment
    embeddings and dumps scores to ``benchmark.stdlist.xml`` for the
    official scoring scripts.
    """

    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = Path(expdir)
        # Train on the dev queries and validate on the eval queries.
        self.train_dataset = QUESST14Trainset('dev', **self.datarc)
        self.valid_dataset = QUESST14Trainset('eval', **self.datarc)
        self.test_dataset = None  # created lazily in get_dataloader
        self.model = Model(input_dim=upstream_dim, **self.modelrc)

    def get_dataloader(self, mode):
        """Return the dataloader for ``mode`` in {train, valid, dev, eval}."""
        if (mode == 'train'):
            # Weighted sampling balances positive against negative pairs.
            return DataLoader(self.train_dataset, sampler=WeightedRandomSampler(weights=self.train_dataset.sample_weights, num_samples=len(self.train_dataset.sample_weights), replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.train_dataset.collate_fn)
        if (mode == 'valid'):
            return DataLoader(self.valid_dataset, sampler=WeightedRandomSampler(weights=self.valid_dataset.sample_weights, num_samples=self.datarc['valid_size'], replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.valid_dataset.collate_fn)
        if (mode in ['dev', 'eval']):
            self.test_dataset = QUESST14Testset(mode, **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        raise NotImplementedError

    def forward(self, mode, features, infos, records, **kwargs):
        """Compute the training/validation loss, or collect test embeddings.

        For train/valid, ``features`` holds batch_size query crops followed by
        the flattened audio segments of each pair, delimited by
        ``prefix_sums``; ``labels`` are +1 (positive pair) or -1 (negative).
        """
        if (mode in ['train', 'valid']):
            features = torch.stack(features)
            (prefix_sums, labels) = infos
            labels = torch.cat(labels).to(features.device)
            embs = self.model(features)
            # First batch_size rows are query embeddings; the rest are the
            # flattened audio-segment embeddings grouped by prefix_sums.
            query_embs = embs[:self.datarc['batch_size']]
            audio_embs = embs[self.datarc['batch_size']:]
            max_similarities = torch.empty(len(labels)).to(labels.device)
            for i in range(self.datarc['batch_size']):
                similarities = F.cosine_similarity(query_embs[i:(i + 1)], audio_embs[prefix_sums[i]:prefix_sums[(i + 1)]])
                max_similarities[i] = similarities.max()
            # Pull positives toward similarity 1; penalize negatives above 0.
            pos_similarities = max_similarities[(labels > 0)]
            neg_similarities = max_similarities[(labels < 0)]
            pos_loss = (1 - pos_similarities).sum()
            neg_loss = neg_similarities.clamp(0).sum()
            loss = ((pos_loss + neg_loss) / self.datarc['batch_size'])
            records['loss'].append(loss.item())
            records['similarity-positive'] += pos_similarities.tolist()
            records['similarity-negative'] += neg_similarities.tolist()
            return loss
        elif (mode in ['dev', 'eval']):
            audio_tensors = torch.stack(features)
            (prefix_sums, audio_names) = infos
            embs = self.model(audio_tensors)
            embs = embs.detach().cpu()
            # Regroup the flattened segment embeddings back per audio file.
            for i in range(len(audio_names)):
                records['embs'].append(embs[prefix_sums[i]:prefix_sums[(i + 1)]])
                records['audio_names'].append(audio_names[i])
        else:
            raise NotImplementedError

    def log_records(self, mode, records, logger, global_step, **kwargs):
        'Log training, validation information or test on a dataset.'
        if (mode in ['train', 'valid']):
            prefix = f'quesst14_embedding/{mode}'
            for (key, val) in records.items():
                average = (sum(val) / len(val))
                logger.add_scalar(f'{prefix}-{key}', average, global_step=global_step)
        elif (mode in ['dev', 'eval']):
            # Queries were enqueued before documents by the test dataset.
            query_embs = records['embs'][:self.test_dataset.n_queries]
            doc_embs = records['embs'][self.test_dataset.n_queries:]
            query_names = records['audio_names'][:self.test_dataset.n_queries]
            doc_names = records['audio_names'][self.test_dataset.n_queries:]
            results = {}
            for (query_emb, query_name) in zip(tqdm(query_embs, desc='Query', ncols=0), query_names):
                # Only the first segment of each query is used as its embedding.
                query_emb = query_emb[0:1].cuda()
                scores = []
                for (doc_emb, doc_name) in zip(tqdm(doc_embs, desc='Doc', ncols=0, leave=False), doc_names):
                    with torch.no_grad():
                        doc_emb = doc_emb.cuda()
                        similarities = F.cosine_similarity(query_emb, doc_emb)
                        score = similarities.max().detach().cpu()
                        scores.append(score)
                scores = torch.stack(scores)
                # Standardize scores per query; a near-constant score vector
                # carries no ranking signal, so zero it out instead.
                if (scores.std() < 0.1):
                    scores = torch.zeros_like(scores)
                else:
                    scores = (((scores - scores.mean()) / (scores.std() + 1e-06)) + 0.5)
                results[query_name] = list(zip(doc_names, scores.tolist()))
            # Emit the stdlist XML consumed by the official scoring tools.
            score_thresh = 0.5
            root = etree.Element('stdlist', termlist_filename='benchmark.stdlist.xml', indexing_time='1.00', language='english', index_size='1', system_id='benchmark')
            for (query_name, doc_scores) in results.items():
                term_list = etree.SubElement(root, 'detected_termlist', termid=query_name, term_search_time='1.0', oov_term_count='1')
                for (doc_name, score) in doc_scores:
                    etree.SubElement(term_list, 'term', file=doc_name, channel='1', tbeg='0.000', dur='0.00', score=f'{score:.4f}', decision=('YES' if (score > score_thresh) else 'NO'))
            etree.ElementTree(root).write(str((self.expdir / 'benchmark.stdlist.xml')), encoding='UTF-8', pretty_print=True)
        else:
            raise NotImplementedError
|
class Model(nn.Module):
    """Utterance embedder: bottleneck -> LSTM -> tanh -> soft attention pooling."""

    def __init__(self, input_dim, bottleneck_dim, hidden_dim, num_layers, **kwargs):
        super(Model, self).__init__()
        self.connector = nn.Linear(input_dim, bottleneck_dim)
        self.rnn = nn.LSTM(input_size=bottleneck_dim, hidden_size=hidden_dim, num_layers=num_layers, batch_first=True)
        self.attention_linear = nn.Linear(hidden_dim, 1)

    def forward(self, features):
        """Pool (batch, time, input_dim) features into (batch, hidden_dim) embeddings."""
        bottleneck = F.relu(self.connector(features))
        outputs, _ = self.rnn(bottleneck)
        states = torch.tanh(outputs)
        # Attention weights over the time axis sum to one per utterance.
        weights = F.softmax(self.attention_linear(states), dim=1)
        return (states * weights).sum(dim=1)
|
class QUESST14Testset(Dataset):
    """QUESST 2014 dev/eval dataset (English-only) serving fixed-size segments.

    Each item is a tuple of 3-second (48000-sample) windows with a 0.75 s
    hop, plus the utterance name. Queries occupy indices [0, n_queries).
    """

    def __init__(self, split, **kwargs):
        assert split in ['dev', 'eval']
        root = Path(kwargs['quesst2014_root'])
        docs = get_audio_paths(root, 'language_key_utterances.lst')
        queries = get_audio_paths(root, f'language_key_{split}.lst')
        self.dataset_root = root
        self.n_queries = len(queries)
        self.n_docs = len(docs)
        # Queries first so downstream code can split by n_queries.
        self.data = queries + docs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        path = self.data[idx]
        wav, _ = apply_effects_file(str(path), [['channels', '1'], ['rate', '16000'], ['norm']])
        wav = wav.squeeze(0)
        n_samples = len(wav)
        # Zero-pad to at least one window (48000 samples) and otherwise to a
        # multiple of the hop (12000 samples) so unfold covers the signal.
        if n_samples <= 48000:
            padded_len = 48000
        else:
            padded_len = (n_samples // 12000 + 1) * 12000
        wav = torch.cat([wav, torch.zeros(padded_len - n_samples)])
        segments = wav.unfold(0, 48000, 12000).unbind(0)
        return segments, path.with_suffix('').name

    def collate_fn(self, samples):
        """Flatten per-utterance segment lists; return prefix sums for regrouping."""
        segments, audio_names = zip(*samples)
        seg_counts = [len(segs) for segs in segments]
        prefix_sums = list(accumulate(seg_counts, initial=0))
        flattened = [seg for segs in segments for seg in segs]
        return flattened, (prefix_sums, audio_names)
|
def get_audio_paths(dataset_root_path, lst_name):
    """Return paths of all ``nnenglish`` audios listed in a scoring .lst file."""
    collected = []
    with open(dataset_root_path / 'scoring' / lst_name) as f:
        for line in f:
            audio_path, lang = line.strip().split()
            if lang == 'nnenglish':
                # Strip the leading directory so the path can be re-rooted
                # under dataset_root_path.
                collected.append(dataset_root_path / re.sub('^.*?\\/', '', audio_path))
    return collected
|
class QUESST14Trainset(Dataset):
    """QUESST 2014 training set of (query, audio, label) pairs.

    Index layout: indices below len(positive_pairs) yield positive pairs;
    the remaining indices pick a random negative audio for the query.
    """

    def __init__(self, split, **kwargs):
        """Load ground truth for ``split`` ('dev'/'eval') from quesst2014_root."""
        dataset_root = Path(kwargs['quesst2014_root'])
        scoring_root = (dataset_root / 'scoring')
        split_root = (scoring_root / f'groundtruth_quesst14_{split}')
        # query name -> list of audio names the query occurs in.
        query2positives = parse_rttm((split_root / f'quesst14_{split}.rttm'))
        audio_names = parse_lst((scoring_root / 'language_key_utterances.lst'))
        query_names = parse_lst((scoring_root / f'language_key_{split}.lst'))
        print(f'[QUESST2014] # of audios: {len(audio_names)}')
        print(f'[QUESST2014] # of queries: {len(query_names)}')
        audio_set = set(audio_names)
        # Negatives: every listed audio not annotated as containing the query.
        query2negatives = {query_name: list((audio_set - set((query2positives[query_name] if (query_name in query2positives) else [])))) for query_name in query_names}
        # Positives restricted to audios present in the (nnenglish) audio list.
        # query2positives is a defaultdict, so unseen queries yield empty sets.
        positive_pairs = [(query_name, audio_name) for query_name in query_names for audio_name in (set(query2positives[query_name]) & audio_set)]
        negative_pairs = [(query_name, list(negative_audio_set)) for (query_name, negative_audio_set) in query2negatives.items()]
        print(f'[QUESST2014] # of positive pairs: {len(positive_pairs)}')
        print(f'[QUESST2014] # of negative pairs: {len(negative_pairs)}')
        self.audio_root = (dataset_root / 'Audio')
        self.query_root = (dataset_root / f'{split}_queries')
        self.max_dur = 3.0  # crop/segment length in seconds
        self.positive_pairs = positive_pairs
        self.negative_pairs = negative_pairs

    def __len__(self):
        return (len(self.positive_pairs) + len(self.negative_pairs))

    def __getitem__(self, idx):
        """Return (query crop, audio segments, label) with label +1 or -1."""
        if (idx < len(self.positive_pairs)):
            (query_name, audio_name) = self.positive_pairs[idx]
        else:
            # Negative entry: sample one of this query's negative audios.
            (query_name, audio_names) = self.negative_pairs[(idx - len(self.positive_pairs))]
            audio_name = random.sample(audio_names, 1)[0]
        query_path = (self.query_root / query_name).with_suffix('.wav')
        audio_path = (self.audio_root / audio_name).with_suffix('.wav')
        query_tensor = path2tensor(query_path)
        audio_tensor = path2tensor(audio_path)
        query_segment = crop_segment(query_tensor, self.max_dur)
        audio_segments = unfold_segments(audio_tensor, self.max_dur)
        label = torch.LongTensor([(1 if (idx < len(self.positive_pairs)) else (- 1))])
        return (query_segment, audio_segments, label)

    def collate_fn(self, samples):
        'Collate a mini-batch of data.'
        (query_segments, segments_list, labels) = zip(*samples)
        flattened = [segment for segments in segments_list for segment in segments]
        lengths = [len(segments) for segments in segments_list]
        # prefix_sums[i]:prefix_sums[i+1] indexes sample i's rows in `flattened`.
        prefix_sums = list(accumulate(lengths, initial=0))
        return ((list(query_segments) + flattened), (prefix_sums, labels))

    @property
    def sample_weights(self):
        'Sample weights to balance positive and negative data.'
        n_pos = len(self.positive_pairs)
        n_neg = len(self.negative_pairs)
        return (([(1 / n_pos)] * n_pos) + ([(1 / n_neg)] * n_neg))
|
def parse_rttm(rttm_path):
    """Map each query name to the audio names it occurs in (LEXEME rows of an .rttm).

    Returns a defaultdict(list), so lookups of unseen queries yield [].
    """
    pattern = re.compile('LEXEME\\s+(quesst14_[0-9]+).*?(quesst14_(dev|eval)_[0-9]+)')
    query2audios = defaultdict(list)
    with open(rttm_path) as fd:
        for line in fd:
            hit = pattern.match(line)
            if hit is not None:
                # group(1) is the audio name, group(2) the dev/eval query name.
                query2audios[hit.group(2)].append(hit.group(1))
    return query2audios
|
def parse_lst(lst_path):
    """Return the file-stem names of all ``nnenglish`` entries in a .lst file."""
    names = []
    with open(lst_path) as fd:
        for line in fd:
            audio_path, lang = line.strip().split()
            if lang == 'nnenglish':
                names.append(Path(audio_path).with_suffix('').name)
    return names
|
def path2tensor(filepath):
    """Load an audio file as a mono, 16 kHz, peak-normalized 1-D tensor."""
    wav, _ = apply_effects_file(str(filepath), [['channels', '1'], ['rate', '16000'], ['norm']])
    return wav.squeeze(0)
|
def crop_segment(tensor, tgt_dur, sample_rate=16000):
    """Randomly crop a waveform to ``tgt_dur`` seconds after symmetric zero padding."""
    src_dur = len(tensor) / sample_rate
    # Random offset moves the crop window across the padded signal.
    shift = random.uniform(0, src_dur - tgt_dur)
    cropped, _ = apply_effects_tensor(
        tensor.unsqueeze(0),
        sample_rate,
        [['pad', f'{tgt_dur}', f'{tgt_dur}'], ['trim', f'{tgt_dur + shift}', f'{tgt_dur}']],
    )
    return cropped.squeeze(0)
|
def unfold_segments(tensor, tgt_dur, sample_rate=16000):
    """Split a waveform into ``tgt_dur``-second windows with a quarter-window hop.

    The signal is zero-padded (padding split randomly between front and tail)
    up to one full window, or to a multiple of the hop, before unfolding.
    """
    seg_len = int(tgt_dur * sample_rate)
    hop_len = seg_len // 4
    src_len = len(tensor)
    if src_len <= seg_len:
        padded_len = seg_len
    else:
        padded_len = (src_len // hop_len + 1) * hop_len
    pad_total = padded_len - src_len
    front = random.randint(0, pad_total)
    padded = torch.cat([torch.zeros(front), tensor, torch.zeros(pad_total - front)])
    return padded.unfold(0, seg_len, hop_len).unbind(0)
|
class ModelEntry():
    """Record bundling a model with the runner's bookkeeping for it.

    model: the nn.Module (possibly DDP-wrapped); name: checkpoint key;
    trainable: whether the runner optimizes it; interfaces: attribute names
    re-exposed through any DDP wrapper.
    """

    def __init__(self, model, name, trainable, interfaces):
        self.name = name
        self.model = model
        self.interfaces = interfaces
        self.trainable = trainable
|
class Runner():
    """High-level driver of an S3PRL experiment.

    Owns the upstream, featurizer, and downstream models and handles the
    training loop, evaluation loop, optimization, logging, checkpoint
    saving, and (optionally) pushing artifacts to the Hugging Face Hub.
    """

    def __init__(self, args, config):
        self.args = args
        self.config = config
        # Previous experiment state to resume from (weights, step, epoch, ...).
        self.init_ckpt = (torch.load(self.args.init_ckpt, map_location='cpu') if self.args.init_ckpt else {})
        self.upstream = self._get_upstream()
        self.featurizer = self._get_featurizer()
        self.downstream = self._get_downstream()
        self.all_entries = [self.upstream, self.featurizer, self.downstream]

    def _load_weight(self, model, name):
        """Restore ``model``'s state dict from the init checkpoint, if present."""
        init_weight = self.init_ckpt.get(name)
        if init_weight:
            show(f'[Runner] - Loading {name} weights from the previous experiment')
            model.load_state_dict(init_weight)

    def _init_model(self, model, name, trainable, interfaces=None):
        """Load weights, optionally wrap in DDP, and return a ModelEntry.

        ``interfaces`` are attribute names that must exist on the model and
        are re-exposed through the DDP wrapper so callers can still reach them.
        """
        for interface in (interfaces or []):
            assert hasattr(model, interface), interface
        self._load_weight(model, name)
        if (is_initialized() and trainable and any((p.requires_grad for p in model.parameters()))):
            model = DDP(model, device_ids=[self.args.local_rank], find_unused_parameters=True)
            for interface in (interfaces or []):
                setattr(model, interface, getattr(model.module, interface))
        return ModelEntry(model, name, trainable, interfaces)

    def _get_upstream(self):
        """Build the upstream model, from the HF Hub or the local hub registry."""
        if (('from_hf_hub' in self.args) and (self.args.from_hf_hub == True)):
            from huggingface_hub import snapshot_download
            print(f'[Runner] - Downloading upstream model {self.args.upstream} from the Hugging Face Hub')
            filepath = snapshot_download(self.args.upstream, self.args.upstream_revision, use_auth_token=True)
            sys.path.append(filepath)
            dependencies = (Path(filepath) / 'requirements.txt').resolve()
            print('[Dependency] - The downloaded upstream model requires the following dependencies. Please make sure they are installed:')
            for (idx, line) in enumerate((Path(filepath) / 'requirements.txt').open().readlines()):
                print(f'{idx}. {line.strip()}')
            print(f'You can install them by:')
            print()
            print(f'pip install -r {dependencies}')
            print()
            from expert import UpstreamExpert
            Upstream = UpstreamExpert
            ckpt_path = os.path.join(filepath, self.args.upstream_model_name)
        else:
            Upstream = getattr(hub, self.args.upstream)
            ckpt_path = self.args.upstream_ckpt
        upstream_refresh = self.args.upstream_refresh
        # Under DDP, only rank 0 downloads/refreshes; other ranks wait at the
        # barrier and reuse the cached checkpoint.
        if (is_initialized() and (get_rank() > 0)):
            torch.distributed.barrier()
            upstream_refresh = False
        model = Upstream(ckpt=ckpt_path, model_config=self.args.upstream_model_config, refresh=upstream_refresh).to(self.args.device)
        if (is_initialized() and (get_rank() == 0)):
            torch.distributed.barrier()
        return self._init_model(model=model, name='Upstream', trainable=self.args.upstream_trainable, interfaces=['get_downsample_rates'])

    def _get_featurizer(self):
        """Build the featurizer that selects/weights upstream layers."""
        model = Featurizer(upstream=self.upstream.model, feature_selection=self.args.upstream_feature_selection, layer_selection=self.args.upstream_layer_selection, upstream_device=self.args.device, normalize=self.args.upstream_feature_normalize).to(self.args.device)
        return self._init_model(model=model, name='Featurizer', trainable=True, interfaces=['output_dim', 'downsample_rate'])

    def _get_downstream(self):
        """Import and build the downstream expert named by ``args.downstream``."""
        expert = importlib.import_module(f's3prl.downstream.{self.args.downstream}.expert')
        Downstream = getattr(expert, 'DownstreamExpert')
        model = Downstream(upstream_dim=self.featurizer.model.output_dim, upstream_rate=self.featurizer.model.downsample_rate, **self.config, **vars(self.args)).to(self.args.device)
        return self._init_model(model=model, name='Downstream', trainable=True, interfaces=['get_dataloader', 'log_records'])

    def _get_optimizer(self, model_params):
        """Create the optimizer and restore its state from the init checkpoint."""
        optimizer = get_optimizer(model_params, self.config['runner']['total_steps'], self.config['optimizer'])
        self._load_weight(optimizer, 'Optimizer')
        return optimizer

    def _get_scheduler(self, optimizer):
        """Create the LR scheduler and restore its state from the init checkpoint."""
        scheduler = get_scheduler(optimizer, self.config['runner']['total_steps'], self.config['scheduler'])
        self._load_weight(scheduler, 'Scheduler')
        return scheduler

    def _create_model_card(self, path):
        """Write a README model card for the Hub repository at ``path``."""
        model_card = MODEL_CARD_MARKDOWN.format(upstream_model=self.args.upstream)
        with open(os.path.join(path, 'README.md'), 'w') as f:
            f.write(model_card)

    def train(self):
        """Run the main training loop until ``runner.total_steps`` is reached."""
        # Split entries into trainable (train mode, optimized) and frozen (eval).
        trainable_models = []
        trainable_paras = []
        for entry in self.all_entries:
            if entry.trainable:
                entry.model.train()
                trainable_models.append(entry.model)
                trainable_paras += list(entry.model.parameters())
            else:
                entry.model.eval()
        amp = self.config['runner'].get('fp16', False)
        if amp:
            print('[Runner] - Enabled fp16 training')
            scaler = torch.cuda.amp.GradScaler()
        optimizer = self._get_optimizer(trainable_models)
        scheduler = None
        if self.config.get('scheduler'):
            scheduler = self._get_scheduler(optimizer)
        specaug = None
        if self.config.get('specaug'):
            from .specaug import SpecAug
            specaug = SpecAug(**self.config['specaug'])
        # Non-leader processes silence their progress bars.
        tqdm_file = (sys.stderr if is_leader_process() else open(os.devnull, 'w'))
        pbar = tqdm(total=self.config['runner']['total_steps'], dynamic_ncols=True, desc='overall', file=tqdm_file)
        init_step = self.init_ckpt.get('Step')
        if init_step:
            pbar.n = init_step
        if is_leader_process():
            logger = SummaryWriter(self.args.expdir)
        batch_ids = []
        backward_steps = 0
        records = defaultdict(list)
        epoch = self.init_ckpt.get('Epoch', 0)
        train_split = self.config['runner'].get('train_dataloader', 'train')
        while (pbar.n < pbar.total):
            # Some downstreams accept an `epoch` kwarg (for epoch-aware
            # sampling); fall back gracefully for those that do not.
            try:
                dataloader = self.downstream.model.get_dataloader(train_split, epoch=epoch)
            except TypeError as e:
                if ("unexpected keyword argument 'epoch'" in str(e)):
                    dataloader = self.downstream.model.get_dataloader(train_split)
                    if (hasattr(dataloader, 'sampler') and isinstance(dataloader.sampler, DistributedSampler)):
                        dataloader.sampler.set_epoch(epoch)
                else:
                    raise
            for (batch_id, (wavs, *others)) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc='train', file=tqdm_file)):
                try:
                    if (pbar.n >= pbar.total):
                        break
                    global_step = (pbar.n + 1)
                    wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
                    with torch.cuda.amp.autocast(enabled=amp):
                        if self.upstream.trainable:
                            features = self.upstream.model(wavs)
                        else:
                            with torch.no_grad():
                                features = self.upstream.model(wavs)
                        features = self.featurizer.model(wavs, features)
                        if specaug:
                            (features, _) = specaug(features)
                        loss = self.downstream.model(train_split, features, *others, records=records)
                        batch_ids.append(batch_id)
                        # NOTE(review): .get() without a default returns None when
                        # the key is missing, which would break the division below;
                        # presumably the config always sets it -- confirm.
                        gradient_accumulate_steps = self.config['runner'].get('gradient_accumulate_steps')
                        loss = (loss / gradient_accumulate_steps)
                        if amp:
                            scaler.scale(loss).backward()
                        else:
                            loss.backward()
                        del loss
                except RuntimeError as e:
                    # On single-process CUDA OOM, skip the batch and carry on;
                    # under DDP we must re-raise to keep ranks in sync.
                    if ('CUDA out of memory' in str(e)):
                        print(f'[Runner] - CUDA out of memory at step {global_step}')
                        if is_initialized():
                            raise
                        with torch.cuda.device(self.args.device):
                            torch.cuda.empty_cache()
                        optimizer.zero_grad()
                        continue
                    else:
                        raise
                # Gradient accumulation: only step the optimizer every
                # gradient_accumulate_steps backward passes.
                backward_steps += 1
                if ((backward_steps % gradient_accumulate_steps) > 0):
                    continue
                if amp:
                    scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(trainable_paras, self.config['runner']['gradient_clipping'])
                if amp:
                    scaler.step(optimizer)
                    scaler.update()
                elif math.isnan(grad_norm):
                    # Skip the update entirely when gradients blew up.
                    print(f'[Runner] - grad norm is NaN at step {global_step}')
                else:
                    optimizer.step()
                optimizer.zero_grad()
                if scheduler:
                    scheduler.step()
                # Only the leader logs, evaluates, and saves checkpoints.
                if (not is_leader_process()):
                    batch_ids = []
                    records = defaultdict(list)
                    continue
                if ((global_step % self.config['runner']['log_step']) == 0):
                    self.downstream.model.log_records(train_split, records=records, logger=logger, global_step=global_step, batch_ids=batch_ids, total_batch_num=len(dataloader))
                    batch_ids = []
                    records = defaultdict(list)
                save_names = []
                if ((global_step % self.config['runner']['eval_step']) == 0):
                    for split in self.config['runner']['eval_dataloaders']:
                        save_names += self.evaluate(split, logger, global_step)
                if ((global_step % self.config['runner']['save_step']) == 0):
                    def check_ckpt_num(directory):
                        # Keep at most max_keep step checkpoints, removing the oldest.
                        max_keep = self.config['runner']['max_keep']
                        ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
                        if (len(ckpt_pths) >= max_keep):
                            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
                            for ckpt_pth in ckpt_pths[:((len(ckpt_pths) - max_keep) + 1)]:
                                os.remove(ckpt_pth)
                    check_ckpt_num(self.args.expdir)
                    save_names.append(f'states-{global_step}.ckpt')
                if (len(save_names) > 0):
                    all_states = {'Optimizer': optimizer.state_dict(), 'Step': global_step, 'Epoch': epoch, 'Args': self.args, 'Config': self.config}
                    for entry in self.all_entries:
                        if entry.trainable:
                            all_states[entry.name] = get_model_state(entry.model)
                    if scheduler:
                        all_states['Scheduler'] = scheduler.state_dict()
                    if is_initialized():
                        all_states['WorldSize'] = get_world_size()
                    save_paths = [os.path.join(self.args.expdir, name) for name in save_names]
                    tqdm.write(f'[Runner] - Save the checkpoint to:')
                    for (i, path) in enumerate(save_paths):
                        tqdm.write(f'{(i + 1)}. {path}')
                        torch.save(all_states, path)
                pbar.update(1)
            epoch += 1
        pbar.close()
        if self.args.push_to_hf_hub:
            self.push_to_huggingface_hub()
        if is_leader_process():
            logger.close()

    def evaluate(self, split=None, logger=None, global_step=0):
        'evaluate function will always be called on a single process even during distributed training'
        # With no arguments, this is a standalone evaluation (not triggered
        # from within train), logging to a throwaway temp directory.
        not_during_training = ((split is None) and (logger is None) and (global_step == 0))
        if not_during_training:
            split = self.args.evaluate_split
            tempdir = tempfile.mkdtemp()
            logger = SummaryWriter(tempdir)
        # Fix all RNGs so evaluation is reproducible.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(self.args.seed)
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()
        # Remember each model's train/eval mode so it can be restored below.
        trainings = []
        for entry in self.all_entries:
            trainings.append(entry.model.training)
            entry.model.eval()
        dataloader = self.downstream.model.get_dataloader(split)
        # evaluate_ratio < 1 evaluates only a prefix of the dataloader.
        evaluate_ratio = float(self.config['runner'].get('evaluate_ratio', 1))
        evaluate_steps = round((len(dataloader) * evaluate_ratio))
        batch_ids = []
        records = defaultdict(list)
        for (batch_id, (wavs, *others)) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split, total=evaluate_steps)):
            if (batch_id > evaluate_steps):
                break
            wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
            with torch.no_grad():
                features = self.upstream.model(wavs)
                features = self.featurizer.model(wavs, features)
                self.downstream.model(split, features, *others, records=records, batch_id=batch_id)
            batch_ids.append(batch_id)
        save_names = self.downstream.model.log_records(split, records=records, logger=logger, global_step=global_step, batch_ids=batch_ids, total_batch_num=len(dataloader))
        batch_ids = []
        records = defaultdict(list)
        if torch.cuda.is_available():
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()
        # Restore the original train/eval mode of every model.
        for (entry, training) in zip(self.all_entries, trainings):
            if training:
                entry.model.train()
        if not_during_training:
            logger.close()
            shutil.rmtree(tempdir)
        return ([] if (type(save_names) is not list) else save_names)

    def inference(self):
        """Run the downstream's inference on the single file in ``args.evaluate_split``."""
        filepath = Path(self.args.evaluate_split)
        assert filepath.is_file(), filepath
        filename = filepath.stem
        # Let the downstream load the audio if it knows how; otherwise load
        # with torchaudio and require the expected sample rate.
        if hasattr(self.downstream.model, 'load_audio'):
            wav = self.downstream.model.load_audio(filepath)
        else:
            (wav, sr) = torchaudio.load(str(filepath))
            assert (sr == SAMPLE_RATE), sr
        wavs = [wav.view((- 1)).to(self.args.device)]
        for entry in self.all_entries:
            entry.model.eval()
        with torch.no_grad():
            features = self.upstream.model(wavs)
            features = self.featurizer.model(wavs, features)
            self.downstream.model.inference(features, [filename])

    def push_to_huggingface_hub(self):
        'Creates a downstream repository on the Hub and pushes training artifacts to it.'
        if (self.args.hf_hub_org.lower() != 'none'):
            organization = self.args.hf_hub_org
        else:
            organization = os.environ.get('HF_USERNAME')
        huggingface_token = HfFolder.get_token()
        print(f'[Runner] - Organisation to push fine-tuned model to: {organization}')
        # Derive a repo name from the upstream model id plus a unique suffix.
        if (self.args.hub == 'huggingface'):
            model_info = HfApi().model_info(self.args.upstream, token=huggingface_token)
            downstream_model_id = model_info.sha
            upstream_model_id = model_info.modelId.replace('/', '__')
        else:
            upstream_model_id = self.args.upstream.replace('/', '__')
            downstream_model_id = str(uuid.uuid4())[:8]
        repo_name = f'{upstream_model_id}__{downstream_model_id}'
        repo_url = HfApi().create_repo(token=huggingface_token, name=repo_name, organization=organization, exist_ok=True, private=False)
        print(f'[Runner] - Created Hub repo: {repo_url}')
        HF_HUB_DIR = 'hf_hub'
        REPO_ROOT_DIR = os.path.join(self.args.expdir, HF_HUB_DIR, repo_name)
        REPO_TASK_DIR = os.path.join(REPO_ROOT_DIR, self.args.downstream, self.args.expname)
        print(f'[Runner] - Cloning Hub repo to {REPO_ROOT_DIR}')
        model_repo = Repository(local_dir=REPO_ROOT_DIR, clone_from=repo_url, use_auth_token=huggingface_token)
        model_repo.git_pull()
        # Copy the experiment directory into the repo, excluding the repo itself.
        shutil.copytree(self.args.expdir, REPO_TASK_DIR, dirs_exist_ok=True, ignore=shutil.ignore_patterns(HF_HUB_DIR))
        # Prefer a "best" checkpoint; fall back to the final step checkpoint.
        checkpoints = list(Path(REPO_TASK_DIR).glob('*best*.ckpt'))
        if (len(checkpoints) == 0):
            print('[Runner] - Did not find a best checkpoint! Using the final checkpoint instead ...')
            CKPT_PATH = os.path.join(REPO_TASK_DIR, f"states-{self.config['runner']['total_steps']}.ckpt")
        elif (len(checkpoints) > 1):
            print(f'[Runner] - More than one best checkpoint found! Using {checkpoints[0]} as default ...')
            CKPT_PATH = checkpoints[0]
        else:
            print(f'[Runner] - Found best checkpoint {checkpoints[0]}!')
            CKPT_PATH = checkpoints[0]
        shutil.move(CKPT_PATH, os.path.join(REPO_TASK_DIR, 'model.ckpt'))
        model_repo.lfs_track('*.ckpt')
        self._create_model_card(REPO_ROOT_DIR)
        print('[Runner] - Pushing model files to the Hub ...')
        model_repo.push_to_hub()
        print('[Runner] - Training run complete!')
|
class SeparationDataset(Dataset):
    """Speech-separation dataset built from Kaldi-style ``wav.scp`` files.

    Each item pairs a single input mixture (``src``) with its reference
    sources (``tgt``), returning both raw waveforms and STFT feature maps.

    Args:
        data_dir (str): prepared data directory.
        rate (int): expected audio sample rate.
        src (list(str)): the input condition; defaults to ['mix_clean'].
        tgt (list(str)): the desired outputs; defaults to ['s1', 's2'].
            LibriMix offers different options for the users; see
            https://github.com/JorisCos/LibriMix for details.
        n_fft (int): length of the windowed signal after padding with zeros.
        hop_length (int): number of audio samples between adjacent STFT columns.
        win_length (int): length of window for each frame.
        window (str): type of window function, only Hann window is supported.
        center (bool): whether to pad input on both sides so that the
            t-th frame is centered at time t * hop_length.

    The STFT related parameters are the same as librosa's.
    """

    def __init__(self, data_dir, rate=16000, src=None, tgt=None, n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        # Fix: the previous signature used mutable list defaults
        # (src=['mix_clean'], tgt=['s1', 's2']), which are shared across
        # calls.  None sentinels keep the same effective defaults.
        src = ['mix_clean'] if src is None else src
        tgt = ['s1', 's2'] if tgt is None else tgt
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        # Only single-input, two-target separation is supported.
        assert (len(self.src) == 1) and (len(self.tgt) == 2)
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single']
        # reco2path[uttname][cond] -> wav path, parsed from each wav.scp.
        reco2path = {}
        for cond in (src + tgt):
            assert cond in cond_list
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                for line in fh:
                    (uttname, path) = line.strip('\n').split()
                    if uttname not in reco2path:
                        reco2path[uttname] = {}
                    reco2path[uttname][cond] = path
        self.reco2path = reco2path
        # Sort for a deterministic utterance ordering.
        self.recolist = sorted(self.reco2path.keys())

    def __len__(self):
        return len(self.recolist)

    def _stft(self, samples):
        """Return the [T, D] STFT feature map of a 1-D waveform."""
        return np.transpose(librosa.stft(samples, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))

    def __getitem__(self, i):
        """Load one utterance.

        Returns:
            reco (str): name of the utterance.
            src_samp (ndarray): audio samples for the source, [T, ].
            src_feat (ndarray): STFT feature map for the source, [T1, D].
            tgt_samp_list (list(ndarray)): audio samples for the targets.
            tgt_feat_list (list(ndarray)): STFT feature maps for the targets.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=None)
        assert rate == self.rate
        src_feat = self._stft(src_samp)
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=None)
            assert rate == self.rate
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(self._stft(tgt_samp))
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        """Batch items (sorted by decreasing waveform length) into padded tensors.

        Returns:
            source_wav_list (list(tensor)): unpadded source waveforms.
            uttname_list (list(str)): utterance names.
            source_attr (dict): magnitude/phase/stft tensors for the sources.
            source_wav (tensor): padded source waveforms, [bs, max_T].
            target_attr (dict): magnitude/phase tensors for the targets.
            target_wav_list (list(tensor)): padded target waveforms.
            feat_length (tensor): STFT feature length per utterance.
            wav_length (tensor): number of samples per utterance.
        """
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        source_attr['magnitude'] = pad_sequence(mix_magnitude_list, batch_first=True)
        source_attr['phase'] = pad_sequence(mix_phase_list, batch_first=True)
        source_attr['stft'] = pad_sequence(mix_stft_list, batch_first=True)
        target_attr = {'magnitude': [], 'phase': []}
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            target_attr['magnitude'].append(pad_sequence(tgt_magnitude_list, batch_first=True))
            target_attr['phase'].append(pad_sequence(tgt_phase_list, batch_first=True))
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for multi-speaker separation.

    Runs a (bi)directional RNN over a packed feature sequence and predicts
    one mask of ``num_bins`` frequency bins per speaker through per-speaker
    linear heads followed by a pointwise non-linearity.

    Args:
        input_dim (int): feature dimension of the input sequence.
        num_bins (int): number of frequency bins per predicted mask.
        rnn (str): 'rnn' | 'lstm' | 'gru' (case-insensitive).
        num_spks (int): number of speakers / output masks.
        num_layers (int): number of stacked RNN layers.
        hidden_size (int): RNN hidden size.
        dropout (float): dropout between RNN layers and before the heads.
        non_linear (str): output activation, 'relu' | 'sigmoid' | 'tanh'.
        bidirectional (bool): use a bidirectional RNN.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if (non_linear not in ['relu', 'sigmoid', 'tanh']):
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if (rnn not in ['RNN', 'LSTM', 'GRU']):
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        self.linear = torch.nn.ModuleList([torch.nn.Linear(((hidden_size * 2) if bidirectional else hidden_size), num_bins) for _ in range(self.num_spks)])
        # Fix: torch.nn.functional.sigmoid/tanh are deprecated; use the
        # equivalent torch-level functions (identical math).  This also makes
        # this block consistent with the other SepRNN variant in this file.
        self.non_linear = {'relu': torch.nn.functional.relu, 'sigmoid': torch.sigmoid, 'tanh': torch.tanh}[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Predict per-speaker masks.

        Args:
            x (PackedSequence): packed, padded input features.
            train (bool): when False, each mask is flattened to
                [-1, num_bins] for inference-time post-processing.

        Returns:
            list(tensor): one mask per speaker.
        """
        assert isinstance(x, PackedSequence)
        (x, _) = self.rnn(x)
        (x, len_x) = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        m = []
        for linear in self.linear:
            y = linear(x)
            y = self.non_linear(y)
            if (not train):
                y = y.view((- 1), self.num_bins)
            m.append(y)
        return m
|
def main():
    """Prepare Kaldi-style files (wav.scp/utt2spk/spk2utt) for every available
    LibriMix condition under the requested sample-rate/mode/part.

    Relies on a module-level ``args`` namespace (tgt_dir, src_dir,
    sample_rate, mode, part).  Returns 0 on success; raises ValueError if the
    output directory already exists.
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        if not os.path.exists('{}/{}'.format(wav_dir, cond)):
            continue
        filelist = [f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav')]
        filelist.sort()
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # Context managers ensure the files are closed even on error.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # BUG FIX: the previous `f.strip('.wav')` removes any of the
                # characters '.', 'w', 'a', 'v' from BOTH ends (e.g.
                # 'av1.wav' -> '1').  Drop exactly the '.wav' suffix instead;
                # every f here is guaranteed to end with '.wav'.
                uttname = f[:-len('.wav')]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        # Each utterance is its own "speaker", so spk2utt == utt2spk.
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def get_utt2path(wav_scp_file):
    """Parse a Kaldi-style ``wav.scp`` file.

    Each line has the form ``<utterance> <wav path>``; returns a dict mapping
    utterance names to their wav paths.
    """
    mapping = {}
    with open(wav_scp_file, 'r') as fh:
        for raw_line in fh:
            utt, path = raw_line.strip('\n').split()
            mapping[utt] = path
    return mapping
|
def main():
    """Subsample ``args.sample`` utterances from a prepared LibriMix data dir
    and write reduced Kaldi-style files for every condition.

    Driven by a module-level ``args`` namespace (seed, src_dir, tgt_dir,
    sample).  Returns 0 on success.
    """
    random.seed(args.seed)
    uttlist = []
    with open('{}/s1/utt2spk'.format(args.src_dir), 'r') as fh:
        for raw_line in fh:
            uttlist.append(raw_line.strip('\n').split()[0])
    # Sort before shuffling so the seeded shuffle is reproducible.
    uttlist.sort()
    num_utt_ori = len(uttlist)
    random.shuffle(uttlist)
    uttlist = uttlist[:args.sample]
    uttlist.sort()
    print('Selecting {} utts from {} utts'.format(len(uttlist), num_utt_ori))
    for dset in ['mix_both', 'mix_clean', 'mix_single', 'noise', 's1', 's2']:
        src_dset = '{}/{}'.format(args.src_dir, dset)
        tgt_dset = '{}/{}'.format(args.tgt_dir, dset)
        os.makedirs(tgt_dset)
        # Every utterance is its own speaker, so utt2spk == spk2utt.
        for fname in ('utt2spk', 'spk2utt'):
            with open('{}/{}'.format(tgt_dset, fname), 'w') as fh:
                fh.writelines('{} {}\n'.format(utt, utt) for utt in uttlist)
        utt2path = get_utt2path('{}/wav.scp'.format(src_dset))
        with open('{}/wav.scp'.format(tgt_dset), 'w') as fh:
            fh.writelines('{} {}\n'.format(utt, utt2path[utt]) for utt in uttlist)
    return 0
|
class SeparationDataset(Dataset):
    """Speech-separation dataset built from Kaldi-style ``wav.scp`` files.

    Each item pairs a single input mixture (``src``) with its reference
    sources (``tgt``), returning both raw waveforms and STFT feature maps.

    Args:
        data_dir (str): prepared data directory.
        rate (int): expected audio sample rate.
        src (list(str)): the input condition; defaults to ['mix_clean'].
        tgt (list(str)): the desired outputs; defaults to ['s1', 's2'].
            LibriMix offers different options for the users; see
            https://github.com/JorisCos/LibriMix for details.
        n_fft (int): length of the windowed signal after padding with zeros.
        hop_length (int): number of audio samples between adjacent STFT columns.
        win_length (int): length of window for each frame.
        window (str): type of window function, only Hann window is supported.
        center (bool): whether to pad input on both sides so that the
            t-th frame is centered at time t * hop_length.

    The STFT related parameters are the same as librosa's.
    """

    def __init__(self, data_dir, rate=16000, src=None, tgt=None, n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        # Fix: replace shared mutable list defaults with None sentinels;
        # effective defaults are unchanged for all callers.
        src = ['mix_clean'] if src is None else src
        tgt = ['s1', 's2'] if tgt is None else tgt
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        # Only single-input, two-target separation is supported.
        assert (len(self.src) == 1) and (len(self.tgt) == 2)
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single']
        # reco2path[uttname][cond] -> wav path, parsed from each wav.scp.
        reco2path = {}
        for cond in (src + tgt):
            assert cond in cond_list
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                for line in fh:
                    (uttname, path) = line.strip('\n').split()
                    if uttname not in reco2path:
                        reco2path[uttname] = {}
                    reco2path[uttname][cond] = path
        self.reco2path = reco2path
        # Sort for a deterministic utterance ordering.
        self.recolist = sorted(self.reco2path.keys())

    def __len__(self):
        return len(self.recolist)

    def _stft(self, samples):
        """Return the [T, D] STFT feature map of a 1-D waveform."""
        return np.transpose(librosa.stft(samples, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))

    def __getitem__(self, i):
        """Load one utterance.

        Returns:
            reco (str): name of the utterance.
            src_samp (ndarray): audio samples for the source, [T, ].
            src_feat (ndarray): STFT feature map for the source, [T1, D].
            tgt_samp_list (list(ndarray)): audio samples for the targets.
            tgt_feat_list (list(ndarray)): STFT feature maps for the targets.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=None)
        assert rate == self.rate
        src_feat = self._stft(src_samp)
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=None)
            assert rate == self.rate
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(self._stft(tgt_samp))
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        """Batch items (sorted by decreasing waveform length) into padded tensors.

        Returns:
            source_wav_list (list(tensor)): unpadded source waveforms.
            uttname_list (list(str)): utterance names.
            source_attr (dict): magnitude/phase/stft tensors for the sources.
            source_wav (tensor): padded source waveforms, [bs, max_T].
            target_attr (dict): magnitude/phase tensors for the targets.
            target_wav_list (list(tensor)): padded target waveforms.
            feat_length (tensor): STFT feature length per utterance.
            wav_length (tensor): number of samples per utterance.
        """
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        source_attr['magnitude'] = pad_sequence(mix_magnitude_list, batch_first=True)
        source_attr['phase'] = pad_sequence(mix_phase_list, batch_first=True)
        source_attr['stft'] = pad_sequence(mix_stft_list, batch_first=True)
        target_attr = {'magnitude': [], 'phase': []}
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            target_attr['magnitude'].append(pad_sequence(tgt_magnitude_list, batch_first=True))
            target_attr['phase'].append(pad_sequence(tgt_phase_list, batch_first=True))
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for multi-speaker separation.

    Variant that additionally supports a 'none' (identity) output
    activation, leaving the linear-head outputs unbounded.

    Args:
        input_dim (int): feature dimension of the input sequence.
        num_bins (int): number of frequency bins per predicted mask.
        rnn (str): 'rnn' | 'lstm' | 'gru' (case-insensitive).
        num_spks (int): number of speakers / output masks.
        num_layers (int): number of stacked RNN layers.
        hidden_size (int): RNN hidden size.
        dropout (float): dropout between RNN layers and before the heads.
        non_linear (str): 'relu' | 'sigmoid' | 'tanh' | 'none'.
        bidirectional (bool): use a bidirectional RNN.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if (non_linear not in ['relu', 'sigmoid', 'tanh', 'none']):
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if (rnn not in ['RNN', 'LSTM', 'GRU']):
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        self.linear = torch.nn.ModuleList([torch.nn.Linear(((hidden_size * 2) if bidirectional else hidden_size), num_bins) for _ in range(self.num_spks)])
        # Fix: torch.nn.functional.tanh is deprecated; torch.tanh is the
        # exact equivalent (sigmoid was already the torch-level function).
        self.non_linear = {'relu': torch.nn.functional.relu, 'sigmoid': torch.sigmoid, 'tanh': torch.tanh, 'none': torch.nn.Identity()}[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Predict per-speaker masks.

        Args:
            x (PackedSequence): packed, padded input features.
            train (bool): when False, each mask is flattened to
                [-1, num_bins] for inference-time post-processing.

        Returns:
            list(tensor): one mask per speaker.
        """
        assert isinstance(x, PackedSequence)
        (x, _) = self.rnn(x)
        (x, len_x) = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        m = []
        for linear in self.linear:
            y = linear(x)
            y = self.non_linear(y)
            if (not train):
                y = y.view((- 1), self.num_bins)
            m.append(y)
        return m
|
def main():
    """Prepare Kaldi-style files (wav.scp/utt2spk/spk2utt) for every available
    LibriMix condition under the requested sample-rate/mode/part.

    Relies on a module-level ``args`` namespace (tgt_dir, src_dir,
    sample_rate, mode, part).  Returns 0 on success; raises ValueError if the
    output directory already exists.
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        if not os.path.exists('{}/{}'.format(wav_dir, cond)):
            continue
        filelist = [f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav')]
        filelist.sort()
        cond_dir = '{}/{}'.format(output_dir, cond)
        if not os.path.exists(cond_dir):
            os.makedirs(cond_dir)
        # Context managers ensure the files are closed even on error.
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # BUG FIX: the previous `f.strip('.wav')` removes any of the
                # characters '.', 'w', 'a', 'v' from BOTH ends (e.g.
                # 'av1.wav' -> '1').  Drop exactly the '.wav' suffix instead;
                # every f here is guaranteed to end with '.wav'.
                uttname = f[:-len('.wav')]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        # Each utterance is its own "speaker", so spk2utt == utt2spk.
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def get_utt2path(wav_scp_file):
    """Read a Kaldi-style ``wav.scp`` into an {utterance: wav path} dict."""
    with open(wav_scp_file, 'r') as fh:
        lines = fh.readlines()
    pairs = (line.strip('\n').split() for line in lines)
    return {utt: path for (utt, path) in pairs}
|
def main():
    """Subsample ``args.sample`` utterances from a prepared LibriMix data dir
    and write reduced Kaldi-style files for every condition.

    Driven by a module-level ``args`` namespace (seed, src_dir, tgt_dir,
    sample).  Returns 0 on success.
    """
    random.seed(args.seed)
    uttlist = []
    with open('{}/s1/utt2spk'.format(args.src_dir), 'r') as fh:
        for raw_line in fh:
            uttlist.append(raw_line.strip('\n').split()[0])
    # Sort before shuffling so the seeded shuffle is reproducible.
    uttlist.sort()
    num_utt_ori = len(uttlist)
    random.shuffle(uttlist)
    uttlist = uttlist[:args.sample]
    uttlist.sort()
    print('Selecting {} utts from {} utts'.format(len(uttlist), num_utt_ori))
    for dset in ['mix_both', 'mix_clean', 'mix_single', 'noise', 's1', 's2']:
        src_dset = '{}/{}'.format(args.src_dir, dset)
        tgt_dset = '{}/{}'.format(args.tgt_dir, dset)
        os.makedirs(tgt_dset)
        # Every utterance is its own speaker, so utt2spk == spk2utt.
        for fname in ('utt2spk', 'spk2utt'):
            with open('{}/{}'.format(tgt_dset, fname), 'w') as fh:
                fh.writelines('{} {}\n'.format(utt, utt) for utt in uttlist)
        utt2path = get_utt2path('{}/wav.scp'.format(src_dset))
        with open('{}/wav.scp'.format(tgt_dset), 'w') as fh:
            fh.writelines('{} {}\n'.format(utt, utt2path[utt]) for utt in uttlist)
    return 0
|
class DownstreamExpert(SpeakerExpert):
    """Frame-wise speaker classification expert.

    Basically the same as the utterance-level speaker expert, except that the
    single utterance label is broadcast to every frame.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, expdir, **kwargs)

    def forward(self, mode, features, labels, records, **kwargs):
        """Compute the frame-wise classification loss.

        Args:
            features: list of unpadded [T_i, D] torch.FloatTensor already on
                the device assigned by the command-line args.
            labels: the frame-wise speaker labels (one label per utterance,
                broadcast across frames here).
            records: defaultdict(list); appended contents are averaged and
                logged to Tensorboard by log_records every log_step.

        Returns:
            loss: the loss to be optimized (not detached).
        """
        feat_lengths = torch.LongTensor([len(feat) for feat in features])
        padded = pad_sequence(features, batch_first=True)
        # Broadcast the per-utterance label over every (padded) frame.
        frame_labels = labels.unsqueeze((- 1)).expand(padded.size(0), padded.size(1)).to(padded.device)
        logits = self.model(padded)
        n_classes = logits.size((- 1))
        loss = self.objective(logits.reshape((- 1), n_classes), frame_labels.reshape((- 1)))
        hits = (logits.argmax(dim=(- 1)) == frame_labels)
        # Only count accuracy over real (non-padding) frames.
        for correct, n_frames in zip(hits, feat_lengths):
            records['acc'] += correct[:n_frames].tolist()
        return loss
|
class SpeakerDataset(Dataset):
    """Bucketed LibriSpeech dataset for speaker classification.

    Groups utterances of similar length into pre-batched buckets of up to
    ``bucket_size``; each __getitem__ returns one whole bucket of waveforms
    together with a tensor of speaker ids.
    """

    def __init__(self, split, bucket_size, libri_root, split_file, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
        self.libri_root = libri_root
        self.split_file = split_file
        self.sample_rate = sample_rate
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # Sort by length (descending) so each bucket holds similar lengths.
        self.table = pd.read_csv(os.path.join(bucket_file, 'train-clean-100.csv')).sort_values(by=['length'], ascending=False)
        X = self.table['file_path'].tolist()
        X_lens = self.table['length'].tolist()
        # train/dev share train_split.txt via a deterministic, seeded 90/10
        # shuffle-split; test uses its own list.
        if (((split == 'train') or (split == 'dev')) and os.path.isfile(os.path.join(split_file, 'train_split.txt'))):
            usage_list = open(os.path.join(split_file, 'train_split.txt')).readlines()
            random.seed(train_dev_seed)
            random.shuffle(usage_list)
            percent = int((len(usage_list) * 0.9))
            usage_list = (usage_list[:percent] if (split == 'train') else usage_list[percent:])
        elif ((split == 'test') and os.path.isfile(os.path.join(split_file, 'test_split.txt'))):
            usage_list = open(os.path.join(split_file, 'test_split.txt')).readlines()
        else:
            raise NotImplementedError('Invalid `split` argument!')
        # Dict used purely for O(1) membership tests.
        usage_list = {line.strip('\n'): None for line in usage_list}
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            if (self._parse_x_name(x) in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    # Halve buckets that contain very long utterances to keep
                    # per-batch memory bounded (HALF_BATCHSIZE_TIME is a
                    # module-level constant).
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # Keep the final partial bucket only if it has more than one entry.
        # NOTE(review): re-checking only the last `x` here looks suspicious
        # (it may not even be in batch_x), and a trailing single-utterance
        # bucket is silently dropped -- confirm this is intentional.
        if (len(batch_x) > 1):
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)
        print('[Dataset] - Computing speaker class...')
        speakers = self._get_all_speakers(X)
        self.speaker2idx = self._compute_speaker2idx(speakers)
        self.class_num = len(self.speaker2idx)
        print(((((('[Dataset] - # possible speaker classes: ' + str(self.class_num)) + ', number of data for ') + split) + ': ') + str(len(usage_list))))

    def _parse_x_name(self, x):
        # e.g. 'path/to/103-1240-0000.flac' -> '103-1240-0000'
        return x.split('/')[(- 1)].split('.')[0]

    def _load_wav(self, wav_path):
        # Returns a flattened 1-D waveform tensor.
        (wav, sr) = torchaudio.load(os.path.join(self.libri_root, wav_path))
        return wav.view((- 1))

    def _get_speaker_from_path(self, x):
        # LibriSpeech file names are '<speaker>-<chapter>-<utt>.<ext>'.
        return x.split('/')[(- 1)].split('.')[0].split('-')[0]

    def _get_all_speakers(self, X):
        # Count occurrences per speaker id.
        # NOTE(review): the first utterance of each speaker counts as 0, so
        # the stored values are (occurrences - 1) -- confirm that
        # SPEAKER_THRESHOLD accounts for this off-by-one.
        speaker_set = {}
        for x in X:
            speaker = self._get_speaker_from_path(x)
            if (speaker not in speaker_set):
                speaker_set[speaker] = 0
            else:
                speaker_set[speaker] += 1
        return speaker_set

    def _compute_speaker2idx(self, speakers):
        # Deterministic (sorted) speaker -> index map, keeping only speakers
        # seen more than SPEAKER_THRESHOLD times.
        idx = 0
        speaker2idx = {}
        for speaker in sorted(speakers):
            if ((speaker not in speaker2idx) and (speakers[speaker] > SPEAKER_THRESHOLD)):
                speaker2idx[speaker] = idx
                idx += 1
        return speaker2idx

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        # One item is a whole pre-batched bucket of waveforms + labels.
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = torch.LongTensor([self.speaker2idx[self._get_speaker_from_path(x_file)] for x_file in self.X[index]])
        return (wav_batch, label_batch)

    def collate_fn(self, items):
        # The DataLoader runs with batch_size=1; unwrap the single bucket.
        return (items[0][0], items[0][1])
|
class DownstreamExpert(nn.Module):
    """Utterance-level speaker classification expert.

    Used to handle downstream-specific operations: dataset/dataloader
    construction, the downstream forward pass, metric computation, and
    contents to log.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.train_dataset = SpeakerDataset('train', self.datarc['train_batch_size'], **self.datarc)
        self.dev_dataset = SpeakerDataset('dev', self.datarc['eval_batch_size'], **self.datarc)
        self.test_dataset = SpeakerDataset('test', self.datarc['eval_batch_size'], **self.datarc)
        self.model = Model(input_dim=self.upstream_dim, output_class_num=self.train_dataset.class_num, **self.modelrc)
        self.objective = nn.CrossEntropyLoss()
        self.logging = os.path.join(expdir, 'log.log')
        # Best accuracy seen so far, keyed by the logging prefix.
        self.best = defaultdict((lambda : 0))

    def _get_train_dataloader(self, dataset):
        # batch_size=1 because the dataset pre-batches utterances into buckets.
        return DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=1, shuffle=False, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    # Dataloader specs: each dataloader yields
    #   [[wav1, wav2, ...], your_other_contents1, your_other_contents2, ...]
    # where each wav is an unpadded torch.FloatTensor in cpu with dim()==1
    # and sample_rate==16000.
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode):
        # Fix: dispatch with getattr instead of eval() -- identical behavior
        # for the supported modes without evaluating arbitrary expressions.
        return getattr(self, f'get_{mode}_dataloader')()

    def forward(self, mode, features, labels, records, **kwargs):
        """Compute the utterance-level classification loss for one bucket.

        Args:
            features: list of unpadded [T_i, D] torch.FloatTensor already on
                the device assigned by the command-line args.
            labels: the utterance-wise speaker labels.
            records: defaultdict(list); appended contents are averaged and
                logged to Tensorboard by log_records every log_step.

        Returns:
            loss: the loss to be optimized (not detached).
        """
        # Mean-pool each utterance over time -> [B, D].
        features = torch.stack([f.mean(dim=0) for f in features], dim=0)
        labels = labels.to(features.device)
        predicted = self.model(features)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log averaged accuracy; return ckpt names to save on a new best.

        Args:
            records: defaultdict(list), contents already appended by forward.
            logger: Tensorboard SummaryWriter.
            global_step: runner step, used for Tensorboard logging.
        """
        prefix = f'libri_speaker/{mode}-'
        average = torch.FloatTensor(records['acc']).mean().item()
        logger.add_scalar(f'{prefix}acc', average, global_step=global_step)
        message = f'{prefix}|step:{global_step}|acc:{average}\n'
        save_ckpt = []
        if (average > self.best[prefix]):
            self.best[prefix] = average
            message = f'best|{message}'
            name = prefix.split('/')[(- 1)].split('-')[0]
            save_ckpt.append(f'best-states-{name}.ckpt')
        with open(self.logging, 'a') as f:
            f.write(message)
        print(message)
        return save_ckpt
|
class Model(nn.Module):
    """A single linear classification head mapping features to class logits."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        """Project the input features straight to class logits."""
        return self.linear(features)
|
class SpeechCommandsBaseDataset(Dataset):
    """12-class Speech Commands base dataset.

    Subclasses fill ``self.data`` with (class_name, audio_path) pairs.
    """

    def __init__(self):
        # Map each class name to its index in the module-level CLASSES list.
        self.class2index = {name: index for (index, name) in enumerate(CLASSES)}
        self.class_num = 12
        self.data = []

    def __getitem__(self, idx):
        (class_name, audio_path) = self.data[idx]
        # Decode + apply the preprocessing EFFECTS chain in one step.
        (wav, _) = apply_effects_file(str(audio_path), EFFECTS)
        wav = wav.squeeze(0).numpy()
        # File id is '<class dir>-<file name>'.
        fileid = '-'.join(Path(audio_path).parts[(- 2):])
        return (wav, self.class2index[class_name], fileid)

    def __len__(self):
        return len(self.data)

    def collate_fn(self, samples):
        """Transpose a mini-batch of samples into per-field tuples."""
        return zip(*samples)
|
class SpeechCommandsDataset(SpeechCommandsBaseDataset):
    """Training and validation dataset.

    Keywords outside the target vocabulary are re-labelled '_unknown_', and
    the background-noise recordings are added as '_silence_' samples.
    """

    def __init__(self, data_list, **kwargs):
        super().__init__()
        # Map out-of-vocabulary keywords to the '_unknown_' class.
        data = [((class_name, audio_path) if (class_name in self.class2index.keys()) else ('_unknown_', audio_path)) for (class_name, audio_path) in data_list]
        # Every background-noise recording becomes one '_silence_' sample.
        data += [('_silence_', audio_path) for audio_path in Path(kwargs['speech_commands_root'], '_background_noise_').glob('*.wav')]
        # Inverse-frequency weights, consumed by WeightedRandomSampler for
        # class-balanced sampling.
        class_counts = {class_name: 0 for class_name in CLASSES}
        for (class_name, _) in data:
            class_counts[class_name] += 1
        sample_weights = [(len(data) / class_counts[class_name]) for (class_name, _) in data]
        self.data = data
        self.sample_weights = sample_weights

    def __getitem__(self, idx):
        (wav, label, stem) = super().__getitem__(idx)
        # For '_silence_', crop a random 16000-sample (1 s at 16 kHz) segment
        # from the long background-noise recording.
        # NOTE(review): assumes every noise file has at least 16000 samples;
        # randint() would raise ValueError otherwise -- confirm.
        if (label == self.class2index['_silence_']):
            random_offset = randint(0, (len(wav) - 16000))
            wav = wav[random_offset:(random_offset + 16000)]
        return (wav, label, stem)
|
class SpeechCommandsTestingDataset(SpeechCommandsBaseDataset):
    """Testing dataset: one sample per wav under each class directory."""

    def __init__(self, **kwargs):
        super().__init__()
        samples = []
        for class_dir in Path(kwargs['speech_commands_test_root']).iterdir():
            if not class_dir.is_dir():
                continue
            for audio_path in class_dir.glob('*.wav'):
                samples.append((class_dir.name, audio_path))
        self.data = samples
|
class DownstreamExpert(nn.Module):
    """Speech Commands keyword-spotting expert.

    Used to handle downstream-specific operations, e.g. downstream forward,
    metric computation, and contents to log.
    """

    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        (train_list, valid_list) = split_dataset(self.datarc['speech_commands_root'])
        self.train_dataset = SpeechCommandsDataset(train_list, **self.datarc)
        self.dev_dataset = SpeechCommandsDataset(valid_list, **self.datarc)
        self.test_dataset = SpeechCommandsTestingDataset(**self.datarc)
        # NOTE(review): eval() on a config value executes arbitrary code if
        # the config is untrusted; a name-table lookup would be safer.
        model_cls = eval(self.modelrc['select'])
        # Per-model config lives under a key named after the selected class.
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=self.train_dataset.class_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        # Stored as a buffer so the best score is checkpointed with the model.
        self.register_buffer('best_score', torch.zeros(1))

    def _get_balanced_train_dataloader(self, dataset, drop_last=False):
        # Class-balanced sampling; wrapped for DDP when torch.distributed is
        # initialized.
        sampler = WeightedRandomSampler(dataset.sample_weights, len(dataset.sample_weights))
        if is_initialized():
            sampler = DistributedSamplerWrapper(sampler)
        return DataLoader(dataset, sampler=sampler, batch_size=self.datarc['batch_size'], drop_last=drop_last, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_balanced_dev_dataloader(self, dataset, drop_last=False):
        return DataLoader(dataset, sampler=WeightedRandomSampler(dataset.sample_weights, len(dataset.sample_weights)), batch_size=self.datarc['batch_size'], drop_last=drop_last, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_dataloader(self, dataset):
        # Plain sequential loader (used for the test set).
        return DataLoader(dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def get_dataloader(self, mode):
        if (mode == 'train'):
            return self._get_balanced_train_dataloader(self.train_dataset, drop_last=True)
        elif (mode == 'dev'):
            return self._get_balanced_dev_dataloader(self.dev_dataset, drop_last=False)
        elif (mode == 'test'):
            return self._get_dataloader(self.test_dataset)
        else:
            raise NotImplementedError

    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Project, classify and record metrics for one batch; return loss."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['loss'].append(loss.item())
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['filename'] += filenames
        # Keep human-readable predictions/truths for the dump files below.
        records['predict'] += [CLASSES[idx] for idx in predicted_classid.cpu().tolist()]
        records['truth'] += [CLASSES[idx] for idx in labels.cpu().tolist()]
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Average and log metrics, dump predictions, return ckpts to save."""
        save_names = []
        for key in ['loss', 'acc']:
            values = records[key]
            average = (sum(values) / len(values))
            logger.add_scalar(f'speech_commands/{mode}-{key}', average, global_step=global_step)
            with open(Path(self.expdir, 'log.log'), 'a') as f:
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    # Only the dev accuracy drives best-checkpoint selection.
                    if ((mode == 'dev') and (average > self.best_score)):
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        with open((Path(self.expdir) / f'{mode}_predict.txt'), 'w') as file:
            lines = [f'''{f} {i}
''' for (f, i) in zip(records['filename'], records['predict'])]
            file.writelines(lines)
        with open((Path(self.expdir) / f'{mode}_truth.txt'), 'w') as file:
            lines = [f'''{f} {i}
''' for (f, i) in zip(records['filename'], records['truth'])]
            file.writelines(lines)
        return save_names
|
def split_dataset(root_dir: Union[(str, Path)], max_uttr_per_class=((2 ** 27) - 1)) -> Tuple[(List[Tuple[(str, str)]], List[Tuple[(str, str)]])]:
    """Split Speech Commands into train/valid (test is the implicit remainder).

    The stable SHA1-hash bucketing keeps every speaker (the part of the
    filename before ``_nohash_``) in a single split, as in the original
    Speech Commands paper.

    Args:
        root_dir: speech commands dataset root dir
        max_uttr_per_class: predefined value in the original paper

    Return:
        (train_list, valid_list), each a list of (class_name, audio_path)
    """
    train_split: List[Tuple[str, str]] = []
    valid_split: List[Tuple[str, str]] = []
    for class_dir in Path(root_dir).iterdir():
        # Only class directories count; the noise folder is excluded entirely.
        if class_dir.name == '_background_noise_' or not class_dir.is_dir():
            continue
        for wav_file in class_dir.glob('*.wav'):
            speaker_stem = re.sub('_nohash_.*$', '', wav_file.name)
            digest = hashlib.sha1(speaker_stem.encode('utf-8')).hexdigest()
            bucket = (int(digest, 16) % (max_uttr_per_class + 1)) * (100.0 / max_uttr_per_class)
            if bucket < 10:
                valid_split.append((class_dir.name, wav_file))
            elif bucket >= 20:
                # 10 <= bucket < 20 is reserved for the test split elsewhere.
                train_split.append((class_dir.name, wav_file))
    return (train_split, valid_split)
|
class Model(nn.Module):
    """Two-layer classifier head with mean pooling over time.

    Not used in the SUPERB Benchmark.
    """

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        hidden_dim = kwargs['hidden_dim']
        # Attribute names are kept so existing checkpoints still load.
        self.connector = nn.Linear(input_dim, hidden_dim)
        self.fc1 = nn.Linear(hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_class_num)

    def forward(self, features):
        hidden = self.connector(features)
        hidden = F.relu(hidden)
        hidden = self.fc1(hidden)
        # Average over the time axis before the final projection.
        pooled = hidden.mean(dim=1)
        return self.fc2(pooled)
|
class AdditionalDataset():
    """Holds one text column of a TSV file and tokenizes rows into fairseq
    target tensors (used for the auxiliary ASR objective)."""

    @classmethod
    def from_tsv(cls, file, key, bpe_tokenizer=None, pre_tokenizer=None):
        # NOTE(review): the two optional args are forwarded *positionally*
        # into __init__, landing in its `dictionary` and `bpe_tokenizer`
        # slots. The existing caller passes (dictionary, bpe_tokenizer)
        # positionally, so behavior lines up, but these parameter names are
        # misleading — confirm before ever calling with keywords.
        column = []
        with open(file, 'r') as handle:
            reader = csv.DictReader(handle, delimiter='\t', quotechar=None,
                                    doublequote=False, lineterminator='\n',
                                    quoting=csv.QUOTE_NONE)
            for row in reader:
                column.append(row[key])
        return cls(column, bpe_tokenizer, pre_tokenizer)

    def __init__(self, data, dictionary, bpe_tokenizer=None, pre_tokenizer=None):
        self.data = data
        self.dictionary = dictionary
        self.bpe_tokenizer = bpe_tokenizer
        self.pre_tokenizer = pre_tokenizer

    def _tokenize_text(self, text):
        # Word-level tokenizer first, then BPE; either may be absent.
        if self.pre_tokenizer is not None:
            text = self.pre_tokenizer.encode(text)
        if self.bpe_tokenizer is not None:
            text = self.bpe_tokenizer.encode(text)
        return text

    def _create_target(self, index):
        tokenized = self._tokenize_text(self.data[index])
        return self.dictionary.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()

    def get_addtional_input(self, id_list):
        """Collate targets for `id_list` into a fairseq-style batch dict.

        (Method name typo kept intentionally: callers use it as-is.)
        """
        targets = [self._create_target(idx) for idx in id_list]
        batched_target = fairseq.data.data_utils.collate_tokens(
            targets, self.dictionary.pad(), self.dictionary.eos(),
            left_pad=False, move_eos_to_beginning=False)
        prev_output_tokens = fairseq.data.data_utils.collate_tokens(
            targets, self.dictionary.pad(), self.dictionary.eos(),
            left_pad=False, move_eos_to_beginning=True)
        target_lengths = torch.tensor([t.size(0) for t in targets], dtype=torch.long)
        ntokens = sum(t.size(0) for t in targets)
        return {'target': batched_target, 'prev_output_tokens': prev_output_tokens, 'target_lengths': target_lengths, 'ntokens': ntokens}
|
class S3prl_SpeechToTextTask(SpeechToTextTask):
    """fairseq SpeechToTextTask variant that builds S3PRL-aware datasets
    and models fed by raw waveforms / upstream features."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def load_dataset(self, split, max_feature_len=-1, epoch=1, combine=False, **kwargs):
        # Tokenizers are built first; the dataset creator needs both.
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = S3prl_SpeechToTextDatasetCreator.from_tsv(
            self.args.data, self.data_cfg, split, self.tgt_dict,
            pre_tokenizer, bpe_tokenizer,
            is_train_split=split.startswith('train'),
            epoch=epoch,
            seed=self.args.seed,
            upstream_rate=self.upstream_rate,
            max_feature_len=max_feature_len,
        )

    def build_model(self, args, input_dim):
        args.input_feat_per_channel = input_dim
        args.input_channels = self.data_cfg.input_channels
        # Deliberately bypass SpeechToTextTask.build_model and call its
        # parent's implementation directly.
        return super(SpeechToTextTask, self).build_model(args)
|
class S3prl_SpeechToTextDatasetCreator(SpeechToTextDatasetCreator):
    """Dataset creator that additionally reads a per-utterance sample-rate
    column (`sr`) and forwards S3PRL-specific knobs to the dataset."""

    KEY_SAMPLE_RATE = 'sr'
    DEFAULT_SAMPLE_RATE = 16000

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def _from_list(cls, split_name: str, is_train_split, samples: List[List[Dict]], data_cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, upstream_rate, max_feature_len) -> SpeechToTextDataset:
        audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []
        speakers, src_langs, tgt_langs, srs = [], [], [], []
        # Flatten the list-of-sample-lists, one manifest row at a time.
        for chunk in samples:
            for row in chunk:
                ids.append(row[cls.KEY_ID])
                audio_paths.append(op.join(data_cfg.audio_root, row[cls.KEY_AUDIO]))
                n_frames.append(int(row[cls.KEY_N_FRAMES]))
                tgt_texts.append(row[cls.KEY_TGT_TEXT])
                src_texts.append(row.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT))
                speakers.append(row.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER))
                src_langs.append(row.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG))
                tgt_langs.append(row.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG))
                srs.append(int(row.get(cls.KEY_SAMPLE_RATE, cls.DEFAULT_SAMPLE_RATE)))
        return S3prl_SpeechToTextDataset(
            split_name, is_train_split, data_cfg, audio_paths, n_frames,
            src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers,
            src_langs=src_langs, tgt_langs=tgt_langs, srs=srs, ids=ids,
            tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer,
            bpe_tokenizer=bpe_tokenizer, upstream_rate=upstream_rate,
            max_feature_len=max_feature_len)

    @classmethod
    def from_tsv(cls, root: str, data_cfg: S2TDataConfig, splits: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, upstream_rate: int, max_feature_len: int) -> SpeechToTextDataset:
        _splits = splits.split(',')
        assert len(_splits) == 1, 'do not support multiple files training'
        split = _splits[0]
        tsv_path = op.join(root, f'{split}.tsv')
        if not op.isfile(tsv_path):
            raise FileNotFoundError(f'Dataset not found: {tsv_path}')
        with open(tsv_path) as f:
            reader = csv.DictReader(f, delimiter='\t', quotechar=None,
                                    doublequote=False, lineterminator='\n',
                                    quoting=csv.QUOTE_NONE)
            samples = [dict(e) for e in reader]
        assert len(samples) > 0
        return cls._from_list(split, is_train_split, [samples], data_cfg,
                              tgt_dict, pre_tokenizer, bpe_tokenizer,
                              upstream_rate, max_feature_len)
|
class S3prl_SpeechToTextDataset(SpeechToTextDataset):
    """SpeechToTextDataset that serves raw waveforms for S3PRL upstreams.

    Each utterance is resampled to TARGET_RATE, mixed down to mono, and
    optionally truncated so the resulting upstream feature sequence fits
    within `max_feature_len` frames.
    """
    TARGET_RATE = 16000

    def __init__(self, *args, srs: Optional[List[int]] = None, upstream_rate=160, max_feature_len=(-1), **kwargs):
        """
        Args:
            srs: per-utterance source sample rates (required).
            upstream_rate: audio samples per upstream feature frame.
            max_feature_len: cap on upstream frames; -1 disables truncation.
        """
        super().__init__(*args, **kwargs)
        # FIX: the original signature was `srs=Optional[List[int]]`, which
        # makes the *typing object* the default value; calling without `srs`
        # crashed later inside `set(srs)`. Use a real annotation with a None
        # default and fail fast with a clear message instead.
        if srs is None:
            raise ValueError('`srs` (per-utterance sample rates) is required')
        self.srs = srs
        self.max_feature_len = max_feature_len
        self.max_wav_len = (max_feature_len * upstream_rate)
        # One resampler instance per distinct source rate.
        self.resamplers = {}
        for sr in set(srs):
            self.resamplers[sr] = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.TARGET_RATE)
        # Re-express n_frames in upstream-frame units after resampling,
        # clamped by max_feature_len when truncation is enabled.
        for i in range(len(self.n_frames)):
            new_n_frames = (((self.n_frames[i] * self.TARGET_RATE) / self.srs[i]) / upstream_rate)
            if (self.max_feature_len > 0) and (new_n_frames > max_feature_len):
                new_n_frames = max_feature_len
            self.n_frames[i] = int(new_n_frames)

    def __getitem__(self, index: int) -> Tuple[(str, int, torch.Tensor, Optional[torch.Tensor])]:
        """Return (utt_id, index, mono 16 kHz waveform, target token tensor)."""
        (source, sr) = torchaudio.load(self.audio_paths[index])
        assert (self.srs[index] == sr)
        source = self.resamplers[sr](source)
        # Mix all channels down to mono, then flatten.
        source = torch.mean(source, dim=0)
        source = source.view((-1))
        if self.feature_transforms is not None:
            assert (not self.data_cfg.use_audio_input)
            source = self.feature_transforms(source)
        source = source.float()
        if self.max_feature_len > 0:
            if source.size(0) > self.max_wav_len:
                print(f'wav too long({source.size(0)}), truncate to {self.max_wav_len} (id={index})')
                source = source[:self.max_wav_len]
        target = None
        if self.tgt_texts is not None:
            tokenized = self.tokenize_text(self.tgt_texts[index])
            target = self.tgt_dict.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()
            if self.data_cfg.prepend_tgt_lang_tag:
                # Prepend the language-tag token id to the target sequence.
                lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])
                lang_tag_idx = self.tgt_dict.index(lang_tag)
                target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
        return (self.ids[index], index, source, target)

    def collater(self, samples: List[Tuple[(str, int, torch.Tensor, torch.Tensor)]]):
        """Batch samples, keeping utterance ids and extracting the unpadded
        waveforms (as numpy arrays) for the S3PRL upstream."""
        ids = [sample[0] for sample in samples]
        samples = [sample[1:] for sample in samples]
        output_dict = super().collater(samples)
        output_dict['utt_id'] = ids
        wavs = []
        for i in range(output_dict['nsentences']):
            wav = output_dict['net_input']['src_tokens'][i]
            length = output_dict['net_input']['src_lengths'][i].item()
            # Strip padding before handing the wav to the upstream.
            wavs.append(wav[:length].numpy())
        return (wavs, output_dict)
|
class DownstreamExpert(nn.Module):
    """Speech-translation downstream expert built on fairseq's S2T stack.

    Handles dataloading, the forward/loss computation (optionally with an
    auxiliary CTC-based ASR head), beam-search inference, and metric logging
    (BLEU for ST; CER/WER for the auxiliary ASR).
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        """
        Args:
            upstream_dim: feature dimension produced by the upstream model.
            upstream_rate: samples per upstream frame (160 = 10 ms, 320 = 20 ms).
            downstream_expert: the 'downstream_expert' dict from config.yaml.
            expdir: experiment directory; all results/logs are saved here.
            **kwargs: remaining command-line/config fields (unused here).
        """
        super(DownstreamExpert, self).__init__()
        print(downstream_expert)
        self.expdir = expdir
        self.src_lang = downstream_expert['src_lang']
        self.tgt_lang = downstream_expert['tgt_lang']
        self.post_process = downstream_expert['post_process']
        self.output_prefix = downstream_expert['output_prefix']
        self.datarc = downstream_expert['datarc']
        self.max_positions = downstream_expert['modelrc']['max_source_positions']
        # Config may request a coarser frame rate than the upstream provides;
        # a negative value means "use the upstream's own rate".
        # (The original also assigned `self.upstream_rate = upstream_rate`
        # beforehand — a dead store, removed.)
        self.upstream_rate = downstream_expert.get('upstream_rate', upstream_rate)
        if self.upstream_rate < 0:
            self.upstream_rate = upstream_rate
        assert (self.upstream_rate % upstream_rate) == 0
        self.downsample_ratio = int(self.upstream_rate / upstream_rate)
        self.downsample_method = downstream_expert.get('downsample_method', 'drop')
        if self.downsample_method == 'concat':
            # Concatenating consecutive frames multiplies the feature dim.
            upstream_dim *= self.downsample_ratio
        self.task = S3prl_SpeechToTextTask.setup_task(Namespace(**downstream_expert['taskrc']))
        self.task.upstream_rate = self.upstream_rate
        self.data_dir = downstream_expert['taskrc']['data']
        self.criterion = self.task.build_criterion(Namespace(**downstream_expert['criterionrc']))
        modelrc = Namespace(**downstream_expert['modelrc'])
        assert modelrc.arch in fairseq.models.ARCH_CONFIG_REGISTRY
        # Fill in architecture defaults for the chosen arch in-place.
        fairseq.models.ARCH_CONFIG_REGISTRY[modelrc.arch](modelrc)
        self.model = self.task.build_model(modelrc, upstream_dim)
        self.generator = self.task.build_generator([self.model], Namespace(**downstream_expert['generatorrc']))
        self.batch_itr = {}
        self.use_asr = downstream_expert['taskrc']['use_asr']
        if self.use_asr:
            # Auxiliary CTC ASR head sharing the ST encoder.
            rc = downstream_expert['asrrc']
            self.asr_datarc = rc['datarc']
            self.asr_weight = rc['weight']
            self.asr_dict = Dictionary.load(f"{self.data_dir}/{rc['vocab_file']}")
            asr_bperc = rc['bpe_tokenizer'].copy()
            asr_bperc['sentencepiece_model'] = f"{self.data_dir}/{asr_bperc['sentencepiece_model']}"
            self.asr_bpe = encoders.build_bpe(Namespace(**asr_bperc))
            self.asr_task = S3prl_SpeechToTextTask.setup_task(Namespace(**downstream_expert['taskrc']))
            self.asr_dict.add_symbol('<blank>')
            self.asr_task.tgt_dict = self.asr_dict
            self.asr_head = nn.Linear(modelrc.encoder_embed_dim, len(self.asr_dict))
        self.additional_dataset = {}
        self.register_buffer('best_score', torch.zeros(1))

    def get_dataloader(self, split, epoch: int = 0):
        """Return a fresh fairseq epoch iterator for `split` ('train'/'dev'/'test').

        Batches yield ([wav1, wav2, ...], input_dict) where each wav is a
        variable-length, 16 kHz, 1-D array on CPU.
        """
        data_split = self.datarc[split]
        if data_split not in self.batch_itr:
            self.task.load_dataset(split=data_split, max_feature_len=self.max_positions)
            self.batch_itr[data_split] = self.task.get_batch_iterator(
                self.task.dataset(data_split),
                max_tokens=self.datarc['max_tokens'],
                max_positions=self.max_positions,
                num_workers=self.datarc['num_workers'],
                ignore_invalid_inputs=False,
                epoch=(epoch + 1))
        return self.batch_itr[data_split].next_epoch_itr()

    def forward(self, mode, features, input_dict, records, **kwargs):
        """Compute the ST loss (plus optional CTC ASR loss) for one batch and
        collect records for logging; runs inference in dev/test modes.

        Returns the (non-detached) loss to be optimized.
        """
        device = features[0].device
        features = self.downsample(features)
        features_length = torch.LongTensor([len(feature) for feature in features])
        features = pad_sequence(features, batch_first=True, padding_value=0.0)
        input_dict['net_input']['src_tokens'] = features
        input_dict['net_input']['src_lengths'] = features_length
        input_dict = fairseq.utils.move_to_cuda(input_dict, device=device)
        if self.use_asr:
            asr_input_dict = self._create_asr_input_dict(input_dict, mode)
            asr_input_dict = fairseq.utils.move_to_cuda(asr_input_dict, device=device)
        # FIX: the original used `torch.FloatTensor(0)`, which constructs an
        # *empty* (size-0) tensor, not scalar zero — `.item()` on it raises
        # when this default is returned in test mode. Use a scalar zero.
        loss = torch.zeros(1)
        if mode in ['train', 'dev']:
            encoder_out = self.model.encoder(src_tokens=input_dict['net_input']['src_tokens'], src_lengths=input_dict['net_input']['src_lengths'])
            st_decoder_out = self.model.decoder(prev_output_tokens=input_dict['net_input']['prev_output_tokens'], encoder_out=encoder_out)
            (st_loss, _) = self.criterion.compute_loss(self.model, st_decoder_out, input_dict)
            loss = st_loss
            if self.use_asr:
                asr_loss = self.count_asr_loss(encoder_out, asr_input_dict)
                loss = (((1 - self.asr_weight) * st_loss) + (self.asr_weight * asr_loss))
            # Normalize by batch size (criterion reduces with 'sum').
            loss /= input_dict['nsentences']
            records['loss'].append(loss.item())
            if self.use_asr:
                records['st_loss'].append(st_loss.item())
                records['asr_loss'].append(asr_loss.item())
        if mode in ['dev', 'test']:
            records['ids'] += input_dict['id'].cpu().tolist()
            records['utt_ids'] += input_dict['utt_id']
            (hyps, refs) = self._inference_step(input_dict)
            records['hyps'] += hyps
            records['refs'] += refs
            if self.use_asr:
                (asr_hyps, asr_refs) = self._inference_step_asr(asr_input_dict)
                records['asr_hyps'] += asr_hyps
                records['asr_refs'] += asr_refs
        return loss

    def downsample(self, features):
        """Reduce each feature sequence's frame rate by `downsample_ratio`
        using the configured method ('drop' / 'concat' / 'average')."""
        if self.downsample_ratio == 1:
            return features
        new_features = []
        for feature in features:
            if self.downsample_method == 'drop':
                feature = feature[::self.downsample_ratio]
            elif self.downsample_method == 'concat':
                # Zero-pad to a multiple of the ratio, then stack frames.
                N = feature.size(0) % self.downsample_ratio
                if N != 0:
                    feature = F.pad(feature, (0, 0, 0, self.downsample_ratio - N))
                feature = feature.view((feature.size(0) // self.downsample_ratio), (feature.size(1) * self.downsample_ratio))
            elif self.downsample_method == 'average':
                N = feature.size(0) % self.downsample_ratio
                if N != 0:
                    feature = F.pad(feature, (0, 0, 0, self.downsample_ratio - N))
                feature = feature.view((feature.size(0) // self.downsample_ratio), self.downsample_ratio, feature.size(1)).mean(dim=1)
            else:
                raise NotImplementedError
            new_features.append(feature)
        return new_features

    def _create_asr_input_dict(self, input_dict, mode):
        """Build an ASR batch dict mirroring `input_dict` but with transcript
        targets loaded (lazily, per split) from the manifest TSV."""
        if mode not in self.additional_dataset:
            dataset = AdditionalDataset.from_tsv(f'{self.data_dir}/{self.datarc[mode]}.tsv', self.asr_datarc['key'], self.asr_dict, self.asr_bpe)
            self.additional_dataset[mode] = dataset
        additional_data = self.additional_dataset[mode].get_addtional_input(input_dict['id'])
        # Shallow-copy so ST's target fields stay untouched.
        asr_input_dict = input_dict.copy()
        asr_input_dict['net_input'] = input_dict['net_input'].copy()
        asr_input_dict['net_input']['prev_output_tokens'] = additional_data['prev_output_tokens']
        asr_input_dict['target'] = additional_data['target']
        asr_input_dict['target_lengths'] = additional_data['target_lengths']
        asr_input_dict['ntokens'] = additional_data['ntokens']
        return asr_input_dict

    def count_asr_loss(self, encoder_out, input_dict):
        """CTC loss of the ASR head over the shared encoder output."""
        hidden = encoder_out['encoder_out'][0]
        log_prob = self.asr_head(hidden).log_softmax(2)
        # Encoder subsampling shortens the sequence; recompute lengths.
        hidden_length = self.model.encoder.subsample.get_out_seq_lens_tensor(input_dict['net_input']['src_lengths'])
        targets = input_dict['target']
        target_lengths = input_dict['target_lengths']
        loss = nn.functional.ctc_loss(log_prob, targets, hidden_length, target_lengths, blank=self.asr_dict.index('<blank>'), reduction='sum', zero_infinity=True)
        return loss

    def _decode(self, toks, dictionary):
        """Detokenize `toks` (padding stripped) with the given dictionary."""
        toks = toks[(toks != dictionary.pad())]
        s = dictionary.string(toks.int().cpu(), self.post_process)
        return (s if s else '<unk>')

    def _inference_step(self, input_dict):
        """Beam-search translate one batch; returns (hyps, refs) strings."""
        output = self.generator.generate([self.model], input_dict)
        hyps = []
        refs = []
        for i in range(len(output)):
            hyps.append(self._decode(output[i][0]['tokens'], self.task.target_dictionary))
            refs.append(self._decode(input_dict['target'][i], self.task.target_dictionary))
        return (hyps, refs)

    def _inference_step_asr(self, input_dict):
        """Greedy CTC decode of the ASR head; returns (hyps, refs) strings."""
        encoder_out = self.model.encoder(src_tokens=input_dict['net_input']['src_tokens'], src_lengths=input_dict['net_input']['src_lengths'])
        hidden = encoder_out['encoder_out'][0]
        logit = self.asr_head(hidden)
        predict = logit.argmax(dim=(-1)).transpose(0, 1)
        hyps = []
        refs = []
        for i in range(len(predict)):
            # Collapse repeats and drop blanks (standard CTC decoding).
            predict_ids = predict[i].unique_consecutive()
            predict_ids = predict_ids[(predict_ids != self.asr_dict.index('<blank>'))]
            hyps.append(self._decode(predict_ids, self.asr_dict))
            refs.append(self._decode(input_dict['target'][i], self.asr_dict))
        return (hyps, refs)

    def _metric(self, hyps, refs):
        """Corpus BLEU (Chinese tokenizer for zh targets, 13a otherwise)."""
        tok = ('zh' if (self.tgt_lang == 'zh') else '13a')
        bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize=tok)
        return bleu

    def _asr_metric(self, hyps, refs):
        """Character and word error rates after lowercasing and stripping
        punctuation (apostrophes and hyphens are kept)."""
        ce = 0
        we = 0
        c_total = 0
        w_total = 0
        for (hyp, ref) in zip(hyps, refs):
            normalized_hyp = hyp.translate(str.maketrans('', '', ''.join(list((set(string.punctuation) - set("'-")))))).lower()
            normalized_ref = ref.translate(str.maketrans('', '', ''.join(list((set(string.punctuation) - set("'-")))))).lower()
            ce += editdistance.eval(normalized_hyp, normalized_ref)
            c_total += len(normalized_ref)
            hyp_w = normalized_hyp.split()
            ref_w = normalized_ref.split()
            we += editdistance.eval(hyp_w, ref_w)
            w_total += len(ref_w)
        cer = (ce / c_total)
        wer = (we / w_total)
        return (cer, wer)

    def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """Log losses (train/dev) and BLEU/CER/WER (dev/test); write hyp/ref
        TSVs and return checkpoint names to save on a new best dev BLEU."""
        save_names = []
        if mode in ['train', 'dev']:
            ave_loss = (sum(records['loss']) / len(records['loss']))
            logger.add_scalar(f'st/{mode}-loss', ave_loss, global_step=global_step)
            if self.use_asr:
                ave_st_loss = (sum(records['st_loss']) / len(records['st_loss']))
                logger.add_scalar(f'st/{mode}-st_loss', ave_st_loss, global_step=global_step)
                ave_asr_loss = (sum(records['asr_loss']) / len(records['asr_loss']))
                logger.add_scalar(f'st/{mode}-asr_loss', ave_asr_loss, global_step=global_step)
        if mode in ['dev', 'test']:
            bleu = self._metric(records['hyps'], records['refs'])
            logger.add_scalar(f'st/{mode}-bleu', bleu.score, global_step=global_step)
            for i in range(4):
                # n-gram precisions BLEU-1..4
                logger.add_scalar(f'st/{mode}-bleu{(i + 1)}', bleu.precisions[i], global_step=global_step)
            if (bleu.score > self.best_score) and (mode == 'dev'):
                self.best_score = (torch.ones(1) * bleu.score)
                save_names.append(f'{mode}-best.ckpt')
            with open(f'{self.expdir}/{self.output_prefix}-st-{mode}.tsv', 'w') as f:
                print('utt_id', 'hyp', 'ref', sep='\t', file=f)
                # Sort rows back into dataset order by numeric id.
                results = list(zip(records['ids'], records['hyps'], records['refs'], records['utt_ids']))
                results.sort(key=(lambda x: x[0]))
                for (idx, hyp, ref, utt_id) in results:
                    print(utt_id, hyp, ref, sep='\t', file=f)
            print(bleu)
            if self.use_asr:
                (cer, wer) = self._asr_metric(records['asr_hyps'], records['asr_refs'])
                logger.add_scalar(f'st/{mode}-asr-cer', cer, global_step=global_step)
                logger.add_scalar(f'st/{mode}-asr-wer', wer, global_step=global_step)
                with open(f'{self.expdir}/{self.output_prefix}-asr-{mode}.tsv', 'w') as f:
                    print('utt_id', 'hyp', 'ref', sep='\t', file=f)
                    results = list(zip(records['ids'], records['asr_hyps'], records['asr_refs'], records['utt_ids']))
                    results.sort(key=(lambda x: x[0]))
                    for (idx, hyp, ref, utt_id) in results:
                        print(utt_id, hyp, ref, sep='\t', file=f)
                tqdm.write(f'[cer]:{cer}, [wer]:{wer}')
        return save_names
|
def verbose(args, text):
    """Print `text` only when --verbose was requested."""
    if not args.verbose:
        return
    print(text)
|
def length(s):
    """Number of whitespace-separated tokens in `s`."""
    tokens = s.split()
    return len(tokens)
|
def verbose(args, text):
    """Echo `text` to stdout when verbose mode is on."""
    if getattr(args, 'verbose'):
        print(text)
|
def create_sentencepiece(filenames, model_type, vocab_size, output_prefix):
    """Train a SentencePiece model and dump a fairseq-style vocab file.

    Trains on the given text files, checks the four special tokens landed on
    their reserved ids, then writes every ordinary piece as "<piece> 1" (one
    per line, in id order) to `<output_prefix>.txt`.
    """
    sp.SentencePieceTrainer.train(
        input=','.join(filenames), model_prefix=output_prefix,
        vocab_size=vocab_size, model_type=model_type, character_coverage=1.0,
        unk_id=UNK_TOKEN_ID, bos_id=BOS_TOKEN_ID, eos_id=EOS_TOKEN_ID,
        pad_id=PAD_TOKEN_ID)
    spm = sp.SentencePieceProcessor(model_file=f'{output_prefix}.model')
    pieces = {idx: spm.IdToPiece(idx) for idx in range(spm.GetPieceSize())}
    # The trainer must have placed the specials exactly where we asked.
    expected = {UNK_TOKEN_ID: UNK_TOKEN, BOS_TOKEN_ID: BOS_TOKEN,
                EOS_TOKEN_ID: EOS_TOKEN, PAD_TOKEN_ID: PAD_TOKEN}
    for idx, token in expected.items():
        assert pieces.get(idx) == token
    specials = set(expected.values())
    with open(f'{output_prefix}.txt', 'w') as out:
        for idx in sorted(pieces):
            piece = pieces[idx]
            if piece not in specials:
                print(f'{piece} 1', file=out)
|
def verbose(args, text):
    """Emit a status line iff verbose mode is enabled."""
    if args.verbose is True or args.verbose:
        print(text)
|
class SpeakerVerifi_train(Dataset):
    """Speaker-classification training set with cached trimmed-wav lengths.

    For each dataset root, the trimmed length of every wav is computed once
    (in parallel) and cached under `.wav_lengths/`; utterances whose trimmed
    length exceeds `vad_config['min_sec']` are kept.
    """

    def __init__(self, vad_config, key_list, file_path, meta_data, max_timestep=None, n_jobs=12):
        # NOTE(review): `meta_data` is accepted but never used here — presumably
        # only the dev/test datasets need it; confirm against callers.
        self.roots = file_path
        self.root_key = key_list
        self.max_timestep = max_timestep
        self.vad_c = vad_config
        self.dataset = []
        self.all_speakers = []
        for index in range(len(self.root_key)):
            # Per-root cache file storing (wav_tags, wav_lengths).
            cache_path = ((Path(os.path.dirname(__file__)) / '.wav_lengths') / f'{self.root_key[index]}_length.pt')
            cache_path.parent.mkdir(exist_ok=True)
            root = Path(self.roots[index])
            if (not cache_path.is_file()):
                def trimmed_length(path):
                    # Length (in samples) after applying the EFFECTS trim chain.
                    (wav_sample, _) = apply_effects_file(path, EFFECTS)
                    wav_sample = wav_sample.squeeze(0)
                    length = wav_sample.shape[0]
                    return length
                wav_paths = find_files(root)
                # Lengths are computed in parallel with joblib.
                wav_lengths = Parallel(n_jobs=n_jobs)((delayed(trimmed_length)(path) for path in tqdm.tqdm(wav_paths, desc='Preprocessing')))
                # Only the last 3 path components (speaker/session/file) are
                # cached, so the cache survives moving the dataset root.
                wav_tags = [Path(path).parts[(- 3):] for path in wav_paths]
                torch.save([wav_tags, wav_lengths], str(cache_path))
            else:
                (wav_tags, wav_lengths) = torch.load(str(cache_path))
                wav_paths = [root.joinpath(*tag) for tag in wav_tags]
            speaker_dirs = [f.stem for f in root.iterdir() if f.is_dir()]
            self.all_speakers.extend(speaker_dirs)
            for (path, length) in zip(wav_paths, wav_lengths):
                # NOTE(review): `length` is in samples but the key is named
                # 'min_sec' — the threshold is presumably expressed in samples
                # despite the name; confirm against the config.
                if (length > self.vad_c['min_sec']):
                    self.dataset.append(path)
        self.all_speakers.sort()
        self.speaker_num = len(self.all_speakers)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        path = self.dataset[idx]
        (wav, _) = apply_effects_file(str(path), EFFECTS)
        wav = wav.squeeze(0)
        length = wav.shape[0]
        # Randomly crop utterances longer than max_timestep samples.
        if (self.max_timestep != None):
            if (length > self.max_timestep):
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
        # Utterance id "<speaker>-<session>-<file>" without the .wav suffix;
        # the speaker label is the index of the top-level directory name.
        tags = Path(path).parts[(- 3):]
        utterance_id = '-'.join(tags).replace('.wav', '')
        label = self.all_speakers.index(tags[0])
        return (wav.numpy(), utterance_id, label)

    def collate_fn(self, samples):
        # Transpose [(wav, utt_id, label), ...] into three parallel tuples.
        return zip(*samples)
|
class SpeakerVerifi_test(Dataset):
    """Verification-trial dataset: loads each unique trial utterance once.

    The meta file holds one trial per line: "<label> <path1> <path2>".
    """

    def __init__(self, vad_config, file_path, meta_data):
        self.root = file_path
        self.meta_data = meta_data
        self.necessary_dict = self.processing()
        self.vad_c = vad_config
        self.dataset = self.necessary_dict['spk_paths']
        self.pair_table = self.necessary_dict['pair_table']

    def processing(self):
        """Parse the trial meta file into unique paths and a pair table."""
        pair_table = []
        spk_paths = set()
        with open(self.meta_data, 'r') as meta:
            trial_lines = meta.readlines()
        for line in trial_lines:
            fields = line.split()
            first = os.path.join(self.root, fields[1])
            second = os.path.join(self.root, fields[2])
            spk_paths.add(first)
            spk_paths.add(second)
            pair_table.append([fields[0], first, second])
        return {'spk_paths': list(spk_paths), 'total_spk_num': None, 'pair_table': pair_table}

    def __len__(self):
        return len(self.necessary_dict['spk_paths'])

    def __getitem__(self, idx):
        utt_path = self.dataset[idx]
        wav, _ = apply_effects_file(utt_path, EFFECTS)
        wav = wav.squeeze(0)
        # The path itself doubles as the utterance name.
        return (wav.numpy(), utt_path)

    def collate_fn(self, data_sample):
        wavs, names = zip(*data_sample)
        return (wavs, names)
|
class DownstreamExpert(nn.Module):
'\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n\n Note 1.\n dataloaders should output in the following format:\n\n [[wav1, wav2, ...], your_other_contents, ...]\n\n where wav1, wav2 ... are in variable length\n and wav1 is in torch.FloatTensor\n '
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.downstream = downstream_expert
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
self.expdir = expdir
train_file_path = ((Path(self.datarc['file_path']) / 'dev') / 'wav')
test_file_path = ((Path(self.datarc['file_path']) / 'test') / 'wav')
train_config = {'vad_config': self.datarc['vad_config'], 'file_path': [train_file_path], 'key_list': ['Voxceleb1'], 'meta_data': self.datarc['train_meta_data'], 'max_timestep': self.datarc['max_timestep']}
self.train_dataset = SpeakerVerifi_train(**train_config)
dev_config = {'vad_config': self.datarc['vad_config'], 'file_path': train_file_path, 'meta_data': self.datarc['dev_meta_data']}
self.dev_dataset = SpeakerVerifi_test(**dev_config)
test_config = {'vad_config': self.datarc['vad_config'], 'file_path': test_file_path, 'meta_data': self.datarc['test_meta_data']}
self.test_dataset = SpeakerVerifi_test(**test_config)
self.connector = nn.Linear(self.upstream_dim, self.modelrc['input_dim'])
agg_dim = self.modelrc['module_config'][self.modelrc['module']].get('agg_dim', self.modelrc['input_dim'])
ModelConfig = {'input_dim': self.modelrc['input_dim'], 'agg_dim': agg_dim, 'agg_module_name': self.modelrc['agg_module'], 'module_name': self.modelrc['module'], 'hparams': self.modelrc['module_config'][self.modelrc['module']], 'utterance_module_name': self.modelrc['utter_module']}
self.model = Model(**ModelConfig)
objective_config = {'speaker_num': self.train_dataset.speaker_num, 'hidden_dim': self.modelrc['input_dim'], **self.modelrc['LossConfig'][self.modelrc['ObjectiveLoss']]}
self.objective = eval(self.modelrc['ObjectiveLoss'])(**objective_config)
self.score_fn = nn.CosineSimilarity(dim=(- 1))
self.eval_metric = EER
self.register_buffer('best_score', (torch.ones(1) * 100))
def get_dataloader(self, mode):
"\n Args:\n mode: string\n 'train', 'dev' or 'test'\n\n Return:\n a torch.utils.data.DataLoader returning each batch in the format of:\n\n [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. directly loaded by torchaudio\n "
if (mode == 'train'):
return self._get_train_dataloader(self.train_dataset)
elif (mode == 'dev'):
return self._get_eval_dataloader(self.dev_dataset)
elif (mode == 'test'):
return self._get_eval_dataloader(self.test_dataset)
def _get_train_dataloader(self, dataset):
sampler = (DistributedSampler(dataset) if is_initialized() else None)
return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def _get_eval_dataloader(self, dataset):
return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def get_train_dataloader(self):
return self._get_train_dataloader(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
def forward(self, mode, features, utter_idx, labels=None, records=None, **kwargs):
'\n Args:\n features:\n the features extracted by upstream\n put in the device assigned by command-line args\n\n labels:\n the speaker labels\n\n records:\n defaultdict(list), by appending scalars into records,\n these scalars will be averaged and logged on Tensorboard\n\n logger:\n Tensorboard SummaryWriter, given here for logging/debugging\n convenience, please use "self.downstream/your_content_name" as key\n name to log your customized contents\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n '
features_pad = pad_sequence(features, batch_first=True)
if (self.modelrc['module'] == 'XVector'):
attention_mask = [torch.ones((feature.shape[0] - 14)) for feature in features]
else:
attention_mask = [torch.ones(feature.shape[0]) for feature in features]
attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
attention_mask_pad = ((1.0 - attention_mask_pad) * (- 100000.0))
features_pad = self.connector(features_pad)
if (mode == 'train'):
agg_vec = self.model(features_pad, attention_mask_pad.cuda())
labels = torch.LongTensor(labels).to(features_pad.device)
loss = self.objective(agg_vec, labels)
records['loss'].append(loss.item())
return loss
elif (mode in ['dev', 'test']):
agg_vec = self.model.inference(features_pad, attention_mask_pad.cuda())
agg_vec = torch.nn.functional.normalize(agg_vec, dim=(- 1))
utt_name = utter_idx
for idx in range(len(agg_vec)):
records[utt_name[idx]] = agg_vec[idx].cpu().detach()
return torch.tensor(0)
def log_records(self, mode, records, logger, global_step, **kwargs):
    """Log scalars and, in eval modes, score trial pairs and dump results.

    Args:
        mode: 'train', 'dev' or 'test'.
        records: defaultdict(list) filled by forward(); in eval modes it also
            acts as a per-utterance embedding cache keyed by utterance name.
        logger: Tensorboard SummaryWriter.
        global_step: runner step, used as the Tensorboard x-axis.

    Returns:
        List of checkpoint names to save ('dev-best.ckpt' on a new best EER).
    """
    save_names = []
    if (mode == 'train'):
        loss = torch.FloatTensor(records['loss']).mean().item()
        logger.add_scalar(f'sv-voxceleb1/{mode}-loss', loss, global_step=global_step)
        print(f'sv-voxceleb1/{mode}-loss: {loss}')
    elif (mode in ['dev', 'test']):
        trials = self.test_dataset.pair_table
        labels = []
        scores = []
        def names(name):
            # 'a/b/c/d.wav' -> 'b-c-d' trial identifier.
            return '-'.join(name.split('/')[(- 3):]).split('.')[0]
        for (label, name1, name2) in trials:
            labels.append(label)
            score = self.score_fn(records[name1], records[name2]).numpy()
            records['scores'].extend([score])
            records['pair_names'].extend([f'{names(name1)}_{names(name2)}'])
            scores.append(score)
        (eer, *others) = self.eval_metric(np.array(labels, dtype=int), np.array(scores))
        logger.add_scalar(f'sv-voxceleb1/{mode}-EER', eer, global_step=global_step)
        print(f'sv-voxceleb1/{mode}-EER: {eer}')
        # Lower EER is better.
        if ((eer < self.best_score) and (mode == 'dev')):
            self.best_score = (torch.ones(1) * eer)
            save_names.append(f'{mode}-best.ckpt')
        with open((Path(self.expdir) / f'{mode}_predict.txt'), 'w') as file:
            line = [f'{name} {score}\n' for (name, score) in zip(records['pair_names'], records['scores'])]
            file.writelines(line)
        with open((Path(self.expdir) / f'{mode}_truth.txt'), 'w') as file:
            # Bug fix: ground-truth labels were zipped against the never-
            # populated records['labels'] (always empty for a defaultdict),
            # so the truth file came out blank. Zip against the local list.
            line = [f'{name} {label}\n' for (name, label) in zip(records['pair_names'], labels)]
            file.writelines(line)
    return save_names
def separate_data(self, agg_vec):
    """Split a batch of vectors into two equal halves (enroll / test sides)."""
    assert (len(agg_vec) % 2) == 0
    half = len(agg_vec) // 2
    return (agg_vec[:half], agg_vec[half:])
|
def collect_speaker_ids(roots, speaker_num):
    """Gather speaker ids under `roots` and hold out `speaker_num` random dev speakers.

    Returns (train_speaker, dev_speaker) as two disjoint lists of directory names.
    """
    speaker_dirs = [entry.path for entry in os.scandir(roots) if entry.is_dir()]
    # Keep only directories whose great-grandparent segment matches the
    # dataset directory name embedded in `roots`.
    parent_name = roots.split('/')[-2]
    vox1 = [p.split('/')[-1] for p in speaker_dirs if p.split('/')[-3] == parent_name]
    dev_speaker = random.sample(vox1, k=speaker_num)
    train_speaker = [sid for sid in vox1 if sid not in dev_speaker]
    return (train_speaker, dev_speaker)
|
def construct_dev_speaker_id_txt(dev_speakers, dev_txt_name):
    """Dump dev speaker ids to `dev_txt_name`, one id per line."""
    with open(dev_txt_name, 'w') as handle:
        handle.writelines(f'{speaker}\n' for speaker in dev_speakers)
|
def sample_wavs_and_dump_txt(root, dev_ids, numbers, meta_data_name):
    """Randomly draw `numbers` trial pairs and write them to `meta_data_name`.

    Each line is "<label> <wav1> <wav2>" where label '1' means same speaker
    and '0' means different speakers (roughly 50/50).
    """
    wav_list = []
    count_positive = 0
    print(f'generate {numbers} sample pairs')
    for _ in trange(numbers):
        # Coin flip decides negative (different speakers) vs positive pair.
        if random.random() > 0.5:
            spk_a, spk_b = random.sample(dev_ids, 2)
            label = '0'
        else:
            (spk_a,) = random.sample(dev_ids, 1)
            spk_b = spk_a
            label = '1'
            count_positive += 1
        sample1 = '/'.join(random.choice(find_files(os.path.join(root, spk_a))).split('/')[-3:])
        sample2 = '/'.join(random.choice(find_files(os.path.join(root, spk_b))).split('/')[-3:])
        wav_list.append(' '.join([label, sample1, sample2]))
    print('finish, then dump file ..')
    with open(meta_data_name, 'w') as handle:
        for pair in wav_list:
            handle.write(pair + '\n')
    return wav_list
|
def EER(labels, scores):
    """Compute the equal error rate (EER) and its score threshold.

    Args:
        labels: (N,) array-like of 0/1 ground-truth labels.
        scores: (N,) array-like of scores (higher = more likely positive).

    Returns:
        (eer, thresh): EER in [0, 1] and the score threshold achieving it.
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)
    # Interpolate the ROC curve once (the original rebuilt interp1d on every
    # brentq iteration and left an unused interpolant behind); EER is the
    # point where FNR (= 1 - TPR) equals FPR.
    tpr_at = interp1d(fpr, tpr)
    eer = brentq(lambda x: (1.0 - x) - tpr_at(x), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """Compute the equal error rate by binary-searching a score threshold.

    Args:
        labels: (N,) ground-truth labels with value 0 or 1.
        scores: (N,) scores within [-1, 1].

    Returns:
        (equal_error_rate, threshold)
    """
    # Sort pairs by score so prefix sums give error counts per threshold index.
    joints = sorted(zip(scores, labels), key=(lambda x: x[0]))
    (sorted_scores, sorted_labels) = zip(*joints)
    total_ones = sum(sorted_labels)
    total_zeros = (len(sorted_labels) - total_ones)
    # prefsum_ones[i] = positives among the i lowest scores (predicted negative
    # under threshold index i) = false negatives; prefsum_zeros analogously.
    prefsum_ones = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=1), initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=0), initial=0))
    # Sentinels so thresholds below/above every observed score are representable.
    ext_scores = [(- 1.0), *sorted_scores, 1.0]
    (thresh_left, thresh_right) = (0, len(ext_scores))
    # Binary search the crossing point: FN is non-decreasing and FP is
    # non-increasing in the threshold index, so the search converges.
    while True:
        if (thresh_left == thresh_right):
            break
        thresh_idx = ((thresh_left + thresh_right) // 2)
        nb_false_positives = (total_zeros - prefsum_zeros[thresh_idx])
        nb_false_negatives = prefsum_ones[thresh_idx]
        if (nb_false_positives > nb_false_negatives):
            thresh_left = thresh_idx
        elif (nb_false_positives < nb_false_negatives):
            thresh_right = thresh_idx
        else:
            break
    # Report the threshold midway between the two scores around the crossing.
    thresh = ((ext_scores[thresh_idx] + ext_scores[(thresh_idx + 1)]) / 2)
    false_negative_ratio = (nb_false_negatives / len(labels))
    false_positive_ratio = (nb_false_positives / len(labels))
    equal_error_rate = ((false_positive_ratio + false_negative_ratio) / 2)
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel):
    """Pair up enrollment/test embeddings and score them.

    NOTE(review): this module-level function references `self.score_fn`, which
    is undefined in this scope — calling it raises NameError. It appears to
    have been lifted from a method; confirm the intended owner before use.

    The input batch is laid out as the first len(ylabel) embeddings (side 1)
    followed by len(ylabel) embeddings (side 2).
    """
    wav1 = []
    wav2 = []
    for i in range(len(ylabel)):
        wav1.append(input_x_speaker[i].unsqueeze(0))
        wav2.append(input_x_speaker[(len(ylabel) + i)].unsqueeze(0))
    wav1 = torch.stack(wav1)
    wav2 = torch.stack(wav2)
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    scores = self.score_fn(wav1, wav2).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
class DownstreamExpert(nn.Module):
    """Query-by-example spoken term detection (QbE-STD) downstream expert.

    Trains an attention-pooling embedder (Model) on SWS2013 (audio, query)
    pairs with a cosine-embedding loss, and evaluates by sliding a query
    embedding over document segment embeddings (QUESST14 / SWS2013), dumping
    the scores as a benchmark.stdlist.xml for the official scoring tools.
    """

    def __init__(self, upstream_dim: int, downstream_expert: dict, expdir: str, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.lossrc = downstream_expert['lossrc']
        self.expdir = Path(expdir)
        # Datasets are created lazily in get_dataloader().
        self.train_dataset = None
        self.valid_dataset = None
        self.test_dataset = None
        self.model = Model(input_dim=upstream_dim, **self.modelrc)
        # Labels are +1 / -1, matching SWS2013Dataset.
        self.objective = nn.CosineEmbeddingLoss(**self.lossrc)

    def get_dataloader(self, mode):
        """Return the DataLoader for `mode`; builds the datasets on first use."""
        if (mode == 'train'):
            # Train/valid both come from SWS2013; sampling is weighted so
            # positive and negative pairs are drawn with equal total mass.
            self.train_dataset = SWS2013Dataset('dev', **self.datarc)
            self.valid_dataset = SWS2013Dataset('eval', **self.datarc)
            return DataLoader(self.train_dataset, sampler=WeightedRandomSampler(weights=self.train_dataset.sample_weights, num_samples=len(self.train_dataset.sample_weights), replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.train_dataset.collate_fn)
        if (mode == 'valid'):
            # NOTE(review): assumes get_dataloader('train') ran first, otherwise
            # self.valid_dataset is still None — confirm runner call order.
            return DataLoader(self.valid_dataset, sampler=WeightedRandomSampler(weights=self.valid_dataset.sample_weights, num_samples=self.datarc['valid_size'], replacement=True), batch_size=self.datarc['batch_size'], drop_last=True, num_workers=self.datarc['num_workers'], collate_fn=self.valid_dataset.collate_fn)
        if (mode in ['dev', 'eval']):
            self.test_dataset = QUESST14Dataset(mode, **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        if (mode == 'sws2013_eval'):
            self.test_dataset = SWS2013Testset('eval', **self.datarc)
            return DataLoader(self.test_dataset, shuffle=False, batch_size=self.datarc['batch_size'], drop_last=False, num_workers=self.datarc['num_workers'], collate_fn=self.test_dataset.collate_fn)
        raise NotImplementedError

    def forward(self, mode, features, labels, records, **kwargs):
        """Compute the training loss, or collect per-utterance embeddings.

        Returns the loss tensor in train/valid mode; returns None in eval
        modes (embeddings are accumulated into `records` instead).
        """
        if (mode in ['train', 'valid']):
            # Batch layout from SWS2013Dataset.collate_fn: first half are
            # documents, second half the matching queries.
            audio_tensors = torch.stack(features[:(len(features) // 2)])
            query_tensors = torch.stack(features[(len(features) // 2):])
            labels = torch.cat(labels).to(audio_tensors.device)
            audio_embs = self.model(audio_tensors)
            query_embs = self.model(query_tensors)
            # CosineEmbeddingLoss expects labels in {1, -1}.
            loss = self.objective(audio_embs, query_embs, labels)
            records['loss'].append(loss.item())
            with torch.no_grad():
                similarities = F.cosine_similarity(audio_embs, query_embs)
                records['similarity-positive'] += similarities[(labels > 0)].tolist()
                records['similarity-negative'] += similarities[(labels < 0)].tolist()
            return loss
        elif (mode in ['dev', 'eval', 'sws2013_eval']):
            audio_tensors = torch.stack(features)
            (lengths, audio_names) = labels
            embs = self.model(audio_tensors)
            embs = embs.detach().cpu()
            # Re-group the flat segment embeddings back into per-utterance chunks.
            offset = 0
            for (length, audio_name) in zip(lengths, audio_names):
                records['embs'].append(embs[offset:(offset + length)])
                records['audio_names'].append(audio_name)
                offset += length
        else:
            raise NotImplementedError

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log training scalars, or score every query against every document
        and write benchmark.stdlist.xml under the experiment directory."""
        if (mode in ['train', 'valid']):
            prefix = f'sws2013/{mode}'
            for (key, val) in records.items():
                average = (sum(val) / len(val))
                logger.add_scalar(f'{prefix}-{key}', average, global_step=global_step)
        elif (mode in ['dev', 'eval', 'sws2013_eval']):
            # records['embs'] holds queries first, then documents (dataset order).
            query_embs = records['embs'][:self.test_dataset.n_queries]
            doc_embs = records['embs'][self.test_dataset.n_queries:]
            query_names = records['audio_names'][:self.test_dataset.n_queries]
            doc_names = records['audio_names'][self.test_dataset.n_queries:]
            results = {}
            for (query_emb, query_name) in zip(tqdm(query_embs, desc='Query', ncols=0), query_names):
                # Only the first segment of each query represents it.
                query_emb = query_emb[0:1].cuda()
                scores = []
                for (doc_emb, doc_name) in zip(tqdm(doc_embs, desc='Doc', ncols=0, leave=False), doc_names):
                    with torch.no_grad():
                        doc_emb = doc_emb.cuda()
                        similarities = F.cosine_similarity(query_emb, doc_emb)
                        # A document's score is its best-matching segment.
                        score = similarities.max().detach().cpu()
                    scores.append(score)
                scores = torch.stack(scores)
                # Per-query z-score normalization before thresholding.
                scores = ((scores - scores.mean()) / (scores.std() + 1e-06))
                results[query_name] = list(zip(doc_names, scores.tolist()))
            score_thresh = 0
            root = etree.Element('stdlist', termlist_filename='benchmark.stdlist.xml', indexing_time='1.00', language='english', index_size='1', system_id='benchmark')
            for (query_name, doc_scores) in results.items():
                term_list = etree.SubElement(root, 'detected_termlist', termid=query_name, term_search_time='1.0', oov_term_count='1')
                for (doc_name, score) in doc_scores:
                    etree.SubElement(term_list, 'term', file=doc_name, channel='1', tbeg='0.000', dur='0.00', score=f'{score:.4f}', decision=('YES' if (score > score_thresh) else 'NO'))
            tree = etree.ElementTree(root)
            tree.write(str((self.expdir / 'benchmark.stdlist.xml')), encoding='UTF-8', pretty_print=True)
        else:
            raise NotImplementedError
|
class Model(nn.Module):
    """Self-attentive pooling head: bottleneck -> tanh hidden -> weighted sum."""

    def __init__(self, input_dim, bottleneck_dim, hidden_dim, **kwargs):
        super(Model, self).__init__()
        self.connector = nn.Linear(input_dim, bottleneck_dim)
        self.fc1 = nn.Linear(bottleneck_dim, hidden_dim)
        self.attention_linear = nn.Linear(hidden_dim, 1)

    def forward(self, features):
        """Pool (batch, time, input_dim) features into (batch, hidden_dim) embeddings."""
        bottleneck = F.relu(self.connector(features))
        hidden = torch.tanh(self.fc1(bottleneck))
        # Softmax over the time axis yields per-frame weights summing to one.
        weights = F.softmax(self.attention_linear(hidden), dim=1)
        return (hidden * weights).sum(dim=1)
|
class QUESST14Dataset(Dataset):
    """QUESST 2014 dataset (English-only).

    Items are queries first, then documents; each utterance is VAD-trimmed
    and cut into overlapping 3-second segments.
    """

    def __init__(self, split, **kwargs):
        assert (split in ['dev', 'eval'])
        dataset_root = Path(kwargs['quesst2014_root'])
        doc_paths = get_audio_paths(dataset_root, 'language_key_utterances.lst')
        query_paths = get_audio_paths(dataset_root, f'language_key_{split}.lst')
        self.dataset_root = dataset_root
        self.n_queries = len(query_paths)
        self.n_docs = len(doc_paths)
        # Queries first, documents after — downstream scoring relies on this order.
        self.data = (query_paths + doc_paths)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        audio_path = self.data[idx]
        # Sox chain: mono 16 kHz, normalize, trim silence from both ends
        # (VAD twice via reverse), then pad 3 s of silence at the end so even
        # short clips yield at least one full window.
        (wav, _) = apply_effects_file(str(audio_path), [['channels', '1'], ['rate', '16000'], ['norm'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['pad', '0', '3']])
        # 3-second windows (48000 samples) with a 0.75-second hop (12000 samples).
        segments = wav.squeeze(0).unfold(0, 48000, 12000).unbind(0)
        return (segments, len(segments), audio_path.with_suffix('').name)

    def collate_fn(self, samples):
        """Flatten variable-count segments; `lengths` allows re-grouping later."""
        (segments, lengths, audio_names) = zip(*samples)
        segments = [seg for segs in segments for seg in segs]
        return (segments, (lengths, audio_names))
|
def get_audio_paths(dataset_root_path, lst_name):
    """Read a scoring .lst file and return audio paths under the dataset root."""
    lst_path = dataset_root_path / 'scoring' / lst_name
    paths = []
    with open(lst_path) as handle:
        for row in handle:
            rel_path, _lang = row.strip().split()
            # Drop the leading top-level directory (everything up to the first '/').
            rel_path = re.sub('^.*?\\/', '', rel_path)
            paths.append(dataset_root_path / rel_path)
    return paths
|
class SWS2013Dataset(Dataset):
    """SWS 2013 training dataset of (audio, query, label) pairs.

    Positive pairs couple a query tensor with the audio region it occurs in
    (from the .rttm alignments); negative pairs couple a query with a random
    audio file it does NOT occur in. Labels are +1 / -1, matching
    nn.CosineEmbeddingLoss.
    """

    def __init__(self, split, **kwargs):
        assert (split in ['dev', 'eval'])
        dataset_root = Path(kwargs['sws2013_root'])
        split_root = (Path(kwargs['sws2013_scoring_root']) / f'sws2013_{split}')
        audio2dur = parse_ecf((split_root / 'sws2013.ecf.xml'))
        query2audios = parse_rttm((split_root / f'sws2013_{split}.rttm'))
        query2tensors = find_queries((dataset_root / f'{split}_queries'))
        print(f'[SWS2013] # of audios: {len(audio2dur)}')
        print(f'[SWS2013] # of queries: {len(query2tensors)}')
        all_audio_set = set(audio2dur.keys())
        # For each query: the set of audios containing it, and its complement
        # (candidates for negative sampling).
        query2audio_set = {query: set((audio_info['audio'] for audio_info in audio_infos)) for (query, audio_infos) in query2audios.items()}
        query2audio_compl_set = {query: (all_audio_set - audio_set) for (query, audio_set) in query2audio_set.items()}
        (positive_pairs, negative_pairs) = ([], [])
        for (query, tensors) in query2tensors.items():
            for query_tensor in tensors:
                # Every query tensor gets one negative pair; queries absent
                # from the rttm may be paired with any audio.
                negative_pairs.append({'query_tensor': query_tensor, 'audio_set': (query2audio_compl_set[query] if (query in query2audio_compl_set) else all_audio_set)})
                if (query not in query2audios):
                    continue
                for audio_info in query2audios[query]:
                    positive_pairs.append({'query_tensor': query_tensor, 'audio': audio_info['audio'], 'offset': audio_info['offset'], 'duration': audio_info['duration']})
        print(f'[SWS2013] # of positive pairs: {len(positive_pairs)}')
        self.audio_dir = (dataset_root / 'Audio')
        self.audio2dur = audio2dur
        self.max_dur = 3.0  # seconds per training segment
        self.positive_pairs = positive_pairs
        self.negative_pairs = negative_pairs

    def __len__(self):
        return (len(self.positive_pairs) + len(self.negative_pairs))

    def __getitem__(self, idx):
        # Positives are indexed first, then negatives.
        if (idx < len(self.positive_pairs)):
            pair = self.positive_pairs[idx]
            audio_path = (self.audio_dir / pair['audio']).with_suffix('.wav')
            audio_tensor = path2segment(audio_path, pair['duration'], self.max_dur, pair['offset'])
        else:
            pair = self.negative_pairs[(idx - len(self.positive_pairs))]
            # Bug fix: random.sample() no longer accepts sets (TypeError on
            # Python >= 3.11), so materialize the candidate set first.
            sample_audio = random.sample(list(pair['audio_set']), 1)[0]
            audio_dur = self.audio2dur[sample_audio]
            audio_path = (self.audio_dir / sample_audio).with_suffix('.wav')
            audio_tensor = path2segment(audio_path, audio_dur, self.max_dur, 0.0)
        audio_tensor = audio_tensor.squeeze(0)
        query_tensor = tensor2segment(pair['query_tensor'], self.max_dur)
        label = torch.LongTensor([(1 if (idx < len(self.positive_pairs)) else (- 1))])
        return (audio_tensor, query_tensor, label)

    def collate_fn(self, samples):
        """Concatenate audios then queries so forward() can split the batch in half."""
        (audio_tensors, query_tensors, labels) = zip(*samples)
        return ((audio_tensors + query_tensors), labels)

    @property
    def sample_weights(self):
        """Per-item weights giving positives and negatives equal total mass."""
        n_pos = len(self.positive_pairs)
        n_neg = len(self.negative_pairs)
        return (([(1 / n_pos)] * n_pos) + ([(1 / n_neg)] * n_neg))
|
def parse_rttm(rttm_path):
    """Parse LEXEME lines of an .rttm file into query -> matching-audio records."""
    pattern = re.compile('LEXEME\\s+(sws2013_[0-9]+).*?([0-9]\\.[0-9]+)\\s+([0-9]\\.[0-9]+)\\s+(sws2013_(dev|eval)_[0-9]+)')
    query2audios = defaultdict(list)
    with open(rttm_path) as handle:
        for row in handle:
            hit = pattern.match(row)
            if hit is None:
                continue
            audio, offset, duration, query = hit.group(1, 2, 3, 4)
            query2audios[query].append({'audio': audio, 'offset': float(offset), 'duration': float(duration)})
    return query2audios
|
def parse_ecf(ecf_path):
    """Map audio basenames to their durations, read from sws2013.ecf.xml."""
    root = ET.parse(str(ecf_path)).getroot()
    return {
        excerpt.attrib['audio_filename'].replace('Audio/', '').replace('.wav', ''): float(excerpt.attrib['dur'])
        for excerpt in root.findall('excerpt')
    }
|
def find_queries(query_dir_path):
    """Load and VAD-trim every query wav under sws2013_dev & sws2013_eval.

    Returns a dict mapping the query base name (numbered '_NN' variants are
    merged) to a list of 1-D waveform tensors.
    """
    suffix_pattern = re.compile('(_[0-9]{2})?\\.wav')
    query2tensors = defaultdict(list)
    query_paths = list(query_dir_path.glob('*.wav'))
    for query_path in tqdm(query_paths, ncols=0, desc='Load queries'):
        query_name = suffix_pattern.sub('', query_path.name)
        (wav_tensor, sample_rate) = apply_effects_file(str(query_path), [['channels', '1'], ['rate', '16000'], ['norm']])
        (trimmed, _) = apply_effects_tensor(wav_tensor, sample_rate, [['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse']])
        # Keep the trimmed version only when at least 0.5 s survives the VAD.
        if trimmed.size(1) >= (sample_rate * 0.5):
            wav_tensor = trimmed
        query2tensors[query_name].append(wav_tensor.squeeze(0))
    return query2tensors
|
def path2segment(filepath, src_dur, tgt_dur, offset):
    """Cut a random tgt_dur-second window from [offset, offset + src_dur] of a wav file."""
    shift = random.uniform(0, (src_dur - tgt_dur))
    effects = [
        ['channels', '1'],
        ['rate', '16000'],
        ['norm'],
        # Pad tgt_dur of silence on both sides so the trim window never underflows.
        ['pad', f'{tgt_dur}', f'{tgt_dur}'],
        ['trim', f'{((tgt_dur + offset) + shift)}', f'{tgt_dur}'],
    ]
    audio_tensor, _ = apply_effects_file(str(filepath), effects)
    return audio_tensor
|
def tensor2segment(tensor, tgt_dur, sample_rate=16000):
    """Cut a random tgt_dur-second window out of an in-memory 1-D waveform."""
    src_dur = len(tensor) / sample_rate
    shift = random.uniform(0, (src_dur - tgt_dur))
    effects = [
        # Pad tgt_dur of silence on both sides so the trim window never underflows.
        ['pad', f'{tgt_dur}', f'{tgt_dur}'],
        ['trim', f'{(tgt_dur + shift)}', f'{tgt_dur}'],
    ]
    trimmed, _ = apply_effects_tensor(tensor.unsqueeze(0), sample_rate, effects)
    return trimmed.squeeze(0)
|
class SWS2013Testset(Dataset):
    """SWS 2013 test set: queries first, then documents, each VAD-trimmed and
    cut into overlapping 3-second segments."""

    def __init__(self, split, **kwargs):
        assert (split in ['dev', 'eval'])
        scoring_root = Path(kwargs['sws2013_scoring_root'])
        audio_names = parse_ecf(((scoring_root / f'sws2013_{split}') / 'sws2013.ecf.xml'))
        query_names = parse_tlist(((scoring_root / f'sws2013_{split}') / f'sws2013_{split}.tlist.xml'))
        self.dataset_root = Path(kwargs['sws2013_root'])
        self.split = split
        self.n_queries = len(query_names)
        self.n_docs = len(audio_names)
        # Queries first, documents after — downstream scoring relies on this order.
        self.data = (query_names + audio_names)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        audio_name = self.data[idx]
        # Queries live under {split}_queries/, documents under Audio/.
        audio_path = (((self.dataset_root / f'{self.split}_queries') / audio_name) if (idx < self.n_queries) else ((self.dataset_root / 'Audio') / audio_name))
        audio_path = audio_path.with_suffix('.wav')
        # Sox chain: mono 16 kHz, normalize, trim silence from both ends
        # (VAD twice via reverse), then pad 3 s of silence at the end.
        (wav, _) = apply_effects_file(str(audio_path), [['channels', '1'], ['rate', '16000'], ['norm'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['pad', '0', '3']])
        # 3-second windows (48000 samples) with a 0.75-second hop (12000 samples).
        segments = wav.squeeze(0).unfold(0, 48000, 12000).unbind(0)
        return (segments, len(segments), audio_name)

    def collate_fn(self, samples):
        """Flatten variable-count segments; `lengths` allows re-grouping later."""
        (segments, lengths, audio_names) = zip(*samples)
        segments = [seg for segs in segments for seg in segs]
        return (segments, (lengths, audio_names))
|
def parse_ecf(ecf_path):
    """List audio basenames found in sws2013.ecf.xml."""
    root = ET.parse(str(ecf_path)).getroot()
    return [
        excerpt.attrib['audio_filename'].replace('Audio/', '').replace('.wav', '')
        for excerpt in root.findall('excerpt')
    ]
|
def parse_tlist(tlist_path):
    """List query term ids found in sws2013_{dev,eval}.tlist.xml."""
    root = ET.parse(str(tlist_path)).getroot()
    return [term.attrib['termid'] for term in root.findall('term')]
|
class DownstreamExpert(PhoneExpert):
    """Phone expert that swaps in the model class selected by modelrc['select']."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        # Drop the parent's default probe before installing the configured one.
        delattr(self, 'model')
        selected = self.modelrc['select']
        # NOTE(review): eval() on a config-provided class name; assumes trusted configs.
        model_cls = eval(selected)
        model_conf = self.modelrc[selected]
        self.model = model_cls(self.upstream_dim, output_class_num=self.train_dataset.class_num, **model_conf)
|
def timit_posteriorgram_local(ckpt, *args, **kwargs):
    """Build the posteriorgram upstream from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def timit_posteriorgram_url(ckpt, refresh=False, *args, **kwargs):
    """Build the posteriorgram upstream from a URL (downloads/caches the ckpt).

    ckpt (str): URL of the checkpoint.
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return timit_posteriorgram_local(local_path, *args, **kwargs)
|
def timit_posteriorgram(refresh=False, *args, **kwargs):
    """Build the default posteriorgram upstream.

    refresh (bool): re-download the checkpoint/config even if cached.
    """
    default_ckpt = 'https://www.dropbox.com/s/fb2hkvetp26wges/convbank.ckpt?dl=1'
    kwargs['ckpt'] = default_ckpt
    return timit_posteriorgram_url(*args, refresh=refresh, **kwargs)
|
class ConvBank(nn.Module):
    """Convolution-bank classifier head.

    Projects input features to `hidden_size`, applies a bank of parallel 1-D
    convolutions with different kernel widths, concatenates their outputs
    along the channel axis, and classifies every frame.

    Args:
        input_dim: feature dimension of the upstream representation.
        output_class_num: number of output classes per frame.
        kernels: non-empty list of kernel widths, one Conv1d per entry.
        cnn_size: output channels of each Conv1d.
        hidden_size: width of the input projection.
        dropout: dropout probability.
    """

    def __init__(self, input_dim, output_class_num, kernels, cnn_size, hidden_size, dropout, **kwargs):
        super(ConvBank, self).__init__()
        self.drop_p = dropout
        self.in_linear = nn.Linear(input_dim, hidden_size)
        latest_size = hidden_size
        self.cnns = nn.ModuleList()
        assert len(kernels) > 0
        for kernel in kernels:
            # kernel // 2 padding keeps the time length unchanged for odd kernels.
            self.cnns.append(nn.Conv1d(latest_size, cnn_size, kernel, padding=(kernel // 2)))
        latest_size = cnn_size * len(kernels)
        self.out_linear = nn.Linear(latest_size, output_class_num)

    def forward(self, features):
        """(batch, time, input_dim) -> per-frame logits (batch, time, output_class_num)."""
        # Bug fix: pass training=self.training so dropout is disabled in eval
        # mode (F.dropout defaults to training=True regardless of module mode).
        hidden = F.dropout(F.relu(self.in_linear(features)), p=self.drop_p, training=self.training)
        conv_feats = []
        # Conv1d wants (batch, channels, time).
        hidden = hidden.transpose(1, 2).contiguous()
        for cnn in self.cnns:
            conv_feats.append(cnn(hidden))
        hidden = torch.cat(conv_feats, dim=1).transpose(1, 2).contiguous()
        hidden = F.dropout(F.relu(hidden), p=self.drop_p, training=self.training)
        predicted = self.out_linear(hidden)
        return predicted
|
class UpstreamExpert(nn.Module):
    """Upstream wrapper turning a trained TIMIT phone classifier into a
    posteriorgram extractor: wav -> upstream features -> per-frame phone
    posteriors (softmax over classes).
    """

    def __init__(self, ckpt, **kwargs):
        super(UpstreamExpert, self).__init__()
        ckpt = torch.load(ckpt, map_location='cpu')
        args = ckpt['Args']
        # Rebuild the exact upstream that was used at training time.
        self.upstream = getattr(s3prl.hub, args.upstream)()
        self.featurizer = Featurizer(self.upstream, 'last_hidden_state', 'cpu')
        config = ckpt['Config']
        modelrc = config['downstream_expert']['modelrc']
        # NOTE(review): eval() on a checkpoint-provided class name; assumes
        # trusted checkpoints.
        model_cls = eval(modelrc['select'])
        model_conf = modelrc[modelrc['select']]
        self.model = model_cls(self.featurizer.output_dim, output_class_num=TIMIT_PHONE_CLASSES, **model_conf)
        self.model.load_state_dict(UpstreamExpert._fix_state_key(ckpt['Downstream']))

    @staticmethod
    def _fix_state_key(states):
        """Strip the leading dotted prefix from every state-dict key (mutates
        and returns `states`); the key list is snapshotted first so mutation
        during iteration is safe."""
        keys = list(states.keys())
        for key in keys:
            new_key = '.'.join(key.split('.')[1:])
            states[new_key] = states[key]
            states.pop(key)
        return states

    def get_downsample_rates(self, key: str) -> int:
        # Delegate: posteriors are frame-aligned with the wrapped upstream.
        return self.upstream.get_downsample_rates(key)

    def forward(self, wavs):
        """Extract phone posteriorgrams.

        Args:
            wavs:
                list of unpadded wavs [wav1, wav2, ...]
                each wav is in torch.FloatTensor with sample rate 16000
                and already put in the device assigned by command-line args

        Return:
            dict with the padded per-frame posteriors under
            'last_hidden_state' and 'hidden_states'.
        """
        feats = self.upstream(wavs)
        feats = self.featurizer(wavs, feats)
        feats_length = [len(f) for f in feats]
        feats = pad_sequence(feats, batch_first=True)
        posteriors = self.model(feats)
        # Softmax only over the valid (unpadded) frames of each utterance.
        posteriors = [F.softmax(p[:l], dim=(- 1)) for (p, l) in zip(posteriors, feats_length)]
        posteriors = pad_sequence(posteriors, batch_first=True)
        return {'last_hidden_state': posteriors, 'hidden_states': [posteriors]}
|
class DownstreamExpert(PhoneExpert):
    """Same as the phone linear expert, but probing with the hidden-layer Model."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        delattr(self, 'model')  # replace the parent's default probe
        self.model = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
|
class Model(nn.Module):
    """One-hidden-layer probe: linear -> dropout -> ReLU -> linear."""

    def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
        super(Model, self).__init__()
        self.in_linear = nn.Linear(input_dim, hidden_size)
        self.out_linear = nn.Linear(hidden_size, output_class_num)
        self.drop = nn.Dropout(dropout)
        self.act_fn = nn.functional.relu

    def forward(self, features):
        """Map (..., input_dim) features to (..., output_class_num) logits."""
        hidden = self.act_fn(self.drop(self.in_linear(features)))
        return self.out_linear(hidden)
|
class PhoneDataset(Dataset):
    """TIMIT frame-level phone classification dataset (39 classes).

    Utterances are pre-bucketed by length (see
    preprocess/generate_len_for_bucket.py); each dataset item is one bucket
    of wavs plus their frame-aligned phone labels.
    """

    def __init__(self, split, bucket_size, data_root, phone_path, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
        super(PhoneDataset, self).__init__()
        self.data_root = data_root
        self.phone_path = phone_path
        self.sample_rate = sample_rate
        self.class_num = 39
        # Frame-level labels keyed by upper-cased utterance id.
        self.Y = {}
        phone_file = open(os.path.join(phone_path, 'converted_aligned_phones.txt')).readlines()
        for line in phone_file:
            line = line.strip('\n').split(' ')
            self.Y[line[0]] = [int(p) for p in line[1:]]
        if (split == 'train'):
            # TIMIT convention: train only on SI/SX sentences (SA excluded).
            train_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()
            usage_list = [line for line in train_list if (line.split('-')[2][:2] in ('SI', 'SX'))]
        elif ((split == 'dev') or (split == 'test')):
            test_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()
            usage_list = [line for line in test_list if (line.split('-')[2][:2] != 'SA')]
            # dev/test are disjoint: core test speakers go to test, rest to dev.
            if (split == 'dev'):
                usage_list = [line for line in usage_list if (not (line.split('-')[1].lower() in TEST_SPEAKERS))]
            else:
                usage_list = [line for line in usage_list if (line.split('-')[1].lower() in TEST_SPEAKERS)]
        else:
            raise ValueError("Invalid 'split' argument for dataset: PhoneDataset!")
        # Dict for O(1) membership tests while keeping insertion order.
        usage_list = {line.strip('\n'): None for line in usage_list}
        print(f'[Dataset] - # phone classes: {self.class_num}, number of data for {split}: {len(usage_list)}')
        assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
        # Longest-first ordering so each bucket holds similar-length utterances.
        table = pd.read_csv(os.path.join(bucket_file, ('TRAIN.csv' if (split == 'train') else 'TEST.csv'))).sort_values(by=['length'], ascending=False)
        X = table['file_path'].tolist()
        X_lens = table['length'].tolist()
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            if (self._parse_x_name(x).upper() in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                if (len(batch_x) == bucket_size):
                    # Halve buckets of overly long utterances to bound memory.
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        if (len(batch_x) > 1):
            # Bug fix: match the .upper() lookup used everywhere else in this
            # class; the lower-case key never hit usage_list, silently
            # dropping the final partial bucket.
            if (self._parse_x_name(x).upper() in usage_list):
                self.X.append(batch_x)

    def _parse_x_name(self, x):
        """'dir0/dir1/.../file.ext' -> 'dir1-...-file' utterance id."""
        return '-'.join(x.split('.')[0].split('/')[1:])

    def _load_wav(self, wav_path):
        (wav, sr) = torchaudio.load(os.path.join(self.data_root, wav_path))
        assert (sr == self.sample_rate), f'Sample rate mismatch: real {sr}, config {self.sample_rate}'
        return wav.view((- 1))

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        # Each item is a whole bucket of wavs and their frame labels.
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = [torch.LongTensor(self.Y[self._parse_x_name(x_file).upper()]) for x_file in self.X[index]]
        return (wav_batch, label_batch)

    def collate_fn(self, items):
        # DataLoader batch_size is one bucket; unwrap the singleton batch.
        return (items[0][0], items[0][1])
|
class Model(nn.Module):
    """Single linear probe over upstream features."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        """Map (..., input_dim) features to (..., output_class_num) logits."""
        return self.linear(features)
|
class DownstreamExpert(PhoneExpert):
    """Phone linear expert variant: re-installs the linear Model defined above."""

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, **kwargs)
        # Swap the parent's probe for this module's Model.
        delattr(self, 'model')
        probe = Model(
            input_dim=self.upstream_dim,
            output_class_num=self.train_dataset.class_num,
            **self.modelrc,
        )
        self.model = probe
|
class SpeakerClassifiDataset(Dataset):
    """VoxCeleb1 speaker-classification dataset.

    Splits are defined by the first field of each line in `meta_data`
    (1=train, 2=dev, 3=test). Resolved file lists are cached as pickles under
    CACHE_PATH so the glob search runs only once per split.
    """

    def __init__(self, mode, file_path, meta_data, max_timestep=None):
        self.root = file_path
        self.speaker_num = 1251
        self.meta_data = meta_data
        self.max_timestep = max_timestep
        self.usage_list = open(self.meta_data, 'r').readlines()
        cache_path = os.path.join(CACHE_PATH, f'{mode}.pkl')
        if os.path.isfile(cache_path):
            print(f'[SpeakerClassifiDataset] - Loading file paths from {cache_path}')
            with open(cache_path, 'rb') as cache:
                dataset = pickle.load(cache)
        else:
            # getattr instead of eval(): resolves self.train/self.dev/self.test
            # without executing arbitrary text.
            dataset = getattr(self, mode)()
            os.makedirs(os.path.dirname(cache_path), exist_ok=True)
            with open(cache_path, 'wb') as cache:
                pickle.dump(dataset, cache)
        print(f'[SpeakerClassifiDataset] - there are {len(dataset)} files found')
        self.dataset = dataset
        self.label = self.build_label(self.dataset)

    def build_label(self, train_path_list):
        """Map each path's id-directory (idXXXXX) to a 0-based class label."""
        y = []
        for path in train_path_list:
            id_string = path.split('/')[(- 3)]
            y.append((int(id_string[2:]) - 10001))
        return y

    @classmethod
    def label2speaker(self, labels):
        """Inverse of build_label: 0-based labels back to 'idXXXXX' strings."""
        return [f'id{(label + 10001)}' for label in labels]

    def _search_split(self, split_index, split_name):
        """Collect wav paths whose meta-data index field equals `split_index`.

        Shared implementation of train()/dev()/test(), which previously
        triplicated this loop.
        """
        dataset = []
        print(f'search specified wav name for {split_name} set')
        for string in tqdm.tqdm(self.usage_list):
            pair = string.split()
            index = pair[0]
            if (int(index) == split_index):
                x = list(self.root.glob(('*/wav/' + pair[1])))
                dataset.append(str(x[0]))
        print(f'finish searching {split_name} set wav')
        return dataset

    def train(self):
        return self._search_split(1, 'training')

    def dev(self):
        return self._search_split(2, 'dev')

    def test(self):
        return self._search_split(3, 'test')

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        (wav, sr) = torchaudio.load(self.dataset[idx])
        wav = wav.squeeze(0)
        length = wav.shape[0]
        # Randomly crop overly long utterances down to max_timestep samples.
        if (self.max_timestep is not None):
            if (length > self.max_timestep):
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
                length = self.max_timestep

        def path2name(path):
            # 'a/b/idXXXXX/clip/file.wav' -> 'idXXXXX-clip-file'
            return Path('-'.join(Path(path).parts[(- 3):])).stem
        path = self.dataset[idx]
        return (wav.numpy(), self.label[idx], path2name(path))

    def collate_fn(self, samples):
        return zip(*samples)
|
class DownstreamExpert(nn.Module):
'\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n '
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
    """Build datasets, the projector + classifier, and the training objective.

    Args:
        upstream_dim: feature dimension of the upstream representation.
        downstream_expert: config dict with 'datarc' and 'modelrc' sections.
        expdir: experiment directory for logs and prediction dumps.
    """
    super(DownstreamExpert, self).__init__()
    self.upstream_dim = upstream_dim
    self.downstream = downstream_expert
    self.datarc = downstream_expert['datarc']
    self.modelrc = downstream_expert['modelrc']
    self.expdir = expdir
    root_dir = Path(self.datarc['file_path'])
    # Only training uses the random max_timestep crop.
    self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
    self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
    self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])
    # NOTE(review): eval() on a config-provided class name; assumes trusted configs.
    model_cls = eval(self.modelrc['select'])
    model_conf = self.modelrc.get(self.modelrc['select'], {})
    self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
    self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=self.train_dataset.speaker_num, **model_conf)
    self.objective = nn.CrossEntropyLoss()
    # Buffer so the best score is saved/restored with checkpoints.
    self.register_buffer('best_score', torch.zeros(1))
def _get_train_dataloader(self, dataset):
    """Shuffled (or distributed-sampled) DataLoader for training."""
    sampler = DistributedSampler(dataset) if is_initialized() else None
    return DataLoader(
        dataset,
        batch_size=self.datarc['train_batch_size'],
        shuffle=(sampler is None),
        sampler=sampler,
        num_workers=self.datarc['num_workers'],
        collate_fn=dataset.collate_fn,
    )
def _get_eval_dataloader(self, dataset):
    """Sequential (unshuffled) DataLoader for evaluation."""
    return DataLoader(
        dataset,
        batch_size=self.datarc['eval_batch_size'],
        shuffle=False,
        num_workers=self.datarc['num_workers'],
        collate_fn=dataset.collate_fn,
    )
def get_train_dataloader(self):
    """DataLoader over the training split."""
    train_set = self.train_dataset
    return self._get_train_dataloader(train_set)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
def get_dataloader(self, mode):
return eval(f'self.get_{mode}_dataloader')()
def forward(self, mode, features, labels, filenames, records, **kwargs):
device = features[0].device
features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
features = pad_sequence(features, batch_first=True)
features = self.projector(features)
(predicted, _) = self.model(features, features_len)
labels = torch.LongTensor(labels).to(features.device)
loss = self.objective(predicted, labels)
predicted_classid = predicted.max(dim=(- 1)).indices
records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
records['loss'].append(loss.item())
records['filename'] += filenames
records['predict_speaker'] += SpeakerClassifiDataset.label2speaker(predicted_classid.cpu().tolist())
records['truth_speaker'] += SpeakerClassifiDataset.label2speaker(labels.cpu().tolist())
return loss
def log_records(self, mode, records, logger, global_step, **kwargs):
save_names = []
for key in ['acc', 'loss']:
average = torch.FloatTensor(records[key]).mean().item()
logger.add_scalar(f'voxceleb1/{mode}-{key}', average, global_step=global_step)
with open((Path(self.expdir) / 'log.log'), 'a') as f:
if (key == 'acc'):
print(f'{mode} {key}: {average}')
f.write(f'''{mode} at step {global_step}: {average}
''')
if ((mode == 'dev') and (average > self.best_score)):
self.best_score = (torch.ones(1) * average)
f.write(f'''New best on {mode} at step {global_step}: {average}
''')
save_names.append(f'{mode}-best.ckpt')
if (mode in ['dev', 'test']):
with open((Path(self.expdir) / f'{mode}_predict.txt'), 'w') as file:
lines = [f'''{f} {p}
''' for (f, p) in zip(records['filename'], records['predict_speaker'])]
file.writelines(lines)
with open((Path(self.expdir) / f'{mode}_truth.txt'), 'w') as file:
lines = [f'''{f} {l}
''' for (f, l) in zip(records['filename'], records['truth_speaker'])]
file.writelines(lines)
return save_names
|
class DownstreamExpert(SpeakerExpert):
    """Frame-level speaker classification expert.

    Reuses SpeakerExpert's setup but computes a per-frame cross-entropy loss:
    each utterance label is broadcast over its frames, and padded frames are
    ignored via the -100 ignore-index convention.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__(upstream_dim, downstream_expert, expdir, **kwargs)

    def forward(self, mode, features, lengths, labels, records, **kwargs):
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        predicted, _ = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        # Broadcast each utterance label across its frames; pad with ignore_index.
        frame_labels = [labels[index].expand(features_len[index]) for index in range(len(labels))]
        frame_labels = pad_sequence(frame_labels, padding_value=(- 100), batch_first=True)
        # CrossEntropyLoss expects (batch, class, time), hence the transpose.
        loss = self.objective(predicted.transpose((- 1), (- 2)), frame_labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        valid = (frame_labels != (- 100))
        hits = (predicted_classid[valid] == frame_labels[valid]).view((- 1))
        records['acc'] += hits.cpu().float().tolist()
        records['loss'].append(loss.item())
        return loss
|
class SpeakerVerifi_train(Dataset):
    """Training dataset for speaker verification.

    For every root, either loads a cached {speaker: [wav, ...]} table or scans
    the speaker directories (keeping wavs longer than the VAD threshold), and
    maps each speaker id to an integer class label.
    """

    def __init__(self, vad_config, file_path, meta_data, max_timestep=None):
        self.roots = file_path
        self.root_key = list(self.roots.keys())
        self.max_timestep = max_timestep
        self.vad_c = vad_config
        self.dataset = []
        self.all_speakers = []
        for key in self.root_key:
            cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_{key}.p'
            p = Path(self.roots[key])
            if os.path.isfile(cache_path):
                # Fast path: reuse the cached {speaker: [relative wav path]} table.
                cache_wavs_dict = pickle.load(open(cache_path, 'rb'))
                self.all_speakers.extend(list(cache_wavs_dict.keys()))
                for speaker_id in list(cache_wavs_dict.keys()):
                    for wavs in cache_wavs_dict[speaker_id]:
                        self.dataset.append(str(((p / speaker_id) / wavs)))
            else:
                speaker_wav_dict = {}
                speaker_dirs = [f.path.split('/')[(- 1)] for f in os.scandir(self.roots[key]) if f.is_dir()]
                self.all_speakers.extend(speaker_dirs)
                print('search all wavs paths')
                start = time.time()
                for speaker in tqdm.tqdm(speaker_dirs):
                    speaker_dir = (p / speaker)
                    wav_list = find_files(speaker_dir)
                    speaker_wav_dict[speaker] = []
                    for wav_path in wav_list:
                        # BUGFIX: the decoded waveform used to be assigned to the
                        # same variable as the wav path, so the path operations
                        # below ran on a tensor and crashed. Keep them separate.
                        wav, _ = apply_effects_file(str((speaker_dir / wav_path)), EFFECTS)
                        wav = wav.squeeze(0)
                        length = wav.shape[0]
                        # NOTE(review): 'min_sec' is compared against a sample
                        # count here, not seconds -- confirm intended units.
                        if (length > self.vad_c['min_sec']):
                            self.dataset.append(str((speaker_dir / wav_path)))
                            speaker_wav_dict[speaker].append('/'.join(wav_path.split('/')[(- 2):]))
                end = time.time()
                print(f'search all wavs paths costs {(end - start)} seconds')
                print(f'save wav paths to {cache_path}! so we can directly load all_path in next time!')
                pickle.dump(speaker_wav_dict, open(cache_path, 'wb'))
        self.speaker_num = len(self.all_speakers)
        self.necessary_dict = self.processing()
        self.label_mapping_spk_id = {}
        self.build_label_mapping()
        self.label = self.build_label(self.dataset)

    def processing(self):
        """Summarize speaker bookkeeping for downstream consumers."""
        speaker_num = len(self.all_speakers)
        return {'spk_paths': self.all_speakers, 'total_spk_num': speaker_num, 'pair_table': None}

    def build_label_mapping(self):
        """Assign consecutive integer labels to speakers in discovery order."""
        for spk_count, speaker_id in enumerate(self.all_speakers):
            self.label_mapping_spk_id[speaker_id.split('/')[(- 1)]] = spk_count

    def build_label(self, train_path_list):
        """Map each wav path to its speaker label (speaker id is the 3rd-last path part)."""
        return [self.label_mapping_spk_id[path.split('/')[(- 3)]] for path in train_path_list]

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        (wav, _) = torchaudio.load(self.dataset[idx])
        wav = wav.squeeze(0)
        length = wav.shape[0]
        if self.max_timestep is not None:
            if (length > self.max_timestep):
                # Random crop to max_timestep samples.
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
                length = self.max_timestep
        return (wav, torch.tensor([self.label[idx]]).long())

    def collate_fn(self, samples):
        """Split samples into wav/label lists; the three None lists keep the
        runner's expected 5-tuple interface."""
        wavs = [wav for (wav, _) in samples]
        labels = [label for (_, label) in samples]
        return (wavs, labels, [None] * len(samples), [None] * len(samples), [None] * len(samples))
|
class SpeakerVerifi_dev(Dataset):
    """Dev-split dataset for segment-level speaker verification.

    Reads the trial list, slices every utterance into fixed-size windows, and
    caches the resulting segment table on disk for reuse.
    """

    def __init__(self, vad_config, segment_config, file_path, meta_data):
        self.root = file_path
        self.meta_data = meta_data
        self.segment_config = segment_config
        self.vad_c = vad_config
        self.pair_dict = self.preprocessing()
        cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_dev_segment.p'
        if os.path.isfile(cache_path):
            self.dataset = pickle.load(open(cache_path, 'rb'))
        else:
            self.dataset = self.segment_processing()
            pickle.dump(self.dataset, open(cache_path, 'wb'))

    def segment_processing(self):
        """Expand every trial wav into [label, pair, utter, seg_num, start, end, path] rows."""
        segment_rows = []
        print('processing test set to segments')
        for utter_idx, (label_info, pair_info, wav_path) in enumerate(tqdm.tqdm(self.pair_dict['wav_table'])):
            waveform, _ = apply_effects_file(wav_path, EFFECTS)
            waveform = waveform.squeeze(0)
            window = self.segment_config['window']
            stride = self.segment_config['stride']
            last_start = (len(waveform) - window)
            segment_num = (last_start // stride)
            if (last_start < 0):
                # Utterance shorter than one window: keep it whole.
                segment_rows.append([int(label_info), pair_info, str(utter_idx), segment_num, 0, len(waveform), wav_path])
            else:
                for offset in range(0, last_start, stride):
                    segment_rows.append([int(label_info), pair_info, str(utter_idx), segment_num, offset, (offset + window), wav_path])
        return segment_rows

    def preprocessing(self):
        """Parse the meta-data trial list: one 'label path1 path2' per line."""
        wav_table = []
        with open(self.meta_data, 'r') as f:
            trial_lines = f.readlines()
        for pair_id, line in enumerate(trial_lines):
            fields = line.split()
            for rel_path in fields[1:3]:
                wav_table.append((fields[0], str(pair_id), os.path.join(self.root, rel_path)))
        return {'wav_table': wav_table}

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        label_info, pair_id, utter_id, seg_info, start, end, path = self.dataset[idx]
        waveform, _ = torchaudio.load(path)
        segment = waveform.squeeze(0)[start:end]
        return (label_info, pair_id, utter_id, seg_info, segment)

    def collate_fn(self, data_sample):
        """Transpose samples into (segments, labels, pair ids, utter ids, seg counts)."""
        if not data_sample:
            return ([], [], [], [], [])
        labels, pairs, utter_ids, seg_nums, segments = (list(col) for col in zip(*data_sample))
        return (segments, labels, pairs, utter_ids, seg_nums)
|
class SpeakerVerifi_test(Dataset):
    """Test-split dataset for segment-level speaker verification.

    Reads the trial list, slices every utterance into fixed-size windows, and
    caches the resulting segment table on disk for reuse.
    """

    def __init__(self, vad_config, segment_config, file_path, meta_data):
        self.root = file_path
        self.meta_data = meta_data
        self.segment_config = segment_config
        self.vad_c = vad_config
        self.pair_dict = self.preprocessing()
        cache_path = f'./downstream/voxceleb2_amsoftmax_segment_eval/cache_wav_paths/cache_test_segment.p'
        if os.path.isfile(cache_path):
            self.dataset = pickle.load(open(cache_path, 'rb'))
        else:
            self.dataset = self.segment_processing()
            pickle.dump(self.dataset, open(cache_path, 'wb'))

    def segment_processing(self):
        """Expand every trial wav into [label, pair, utter, seg_num, start, end, path] rows."""
        segment_rows = []
        print('processing test set to segments')
        for utter_idx, (label_info, pair_info, wav_path) in enumerate(tqdm.tqdm(self.pair_dict['wav_table'])):
            waveform, _ = torchaudio.load(wav_path)
            waveform = waveform.squeeze(0)
            window = self.segment_config['window']
            stride = self.segment_config['stride']
            last_start = (len(waveform) - window)
            segment_num = (last_start // stride)
            if (last_start < 0):
                # Utterance shorter than one window: keep it whole.
                segment_rows.append([int(label_info), pair_info, str(utter_idx), segment_num, 0, len(waveform), wav_path])
            else:
                for offset in range(0, last_start, stride):
                    segment_rows.append([int(label_info), pair_info, str(utter_idx), segment_num, offset, (offset + window), wav_path])
        return segment_rows

    def preprocessing(self):
        """Parse the meta-data trial list: one 'label path1 path2' per line."""
        wav_table = []
        with open(self.meta_data, 'r') as f:
            trial_lines = f.readlines()
        for pair_id, line in enumerate(trial_lines):
            fields = line.split()
            for rel_path in fields[1:3]:
                wav_table.append((fields[0], str(pair_id), os.path.join(self.root, rel_path)))
        return {'wav_table': wav_table}

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        label_info, pair_id, utter_id, seg_info, start, end, path = self.dataset[idx]
        waveform, _ = torchaudio.load(path)
        segment = waveform.squeeze(0)[start:end]
        return (label_info, pair_id, utter_id, seg_info, segment)

    def collate_fn(self, data_sample):
        """Transpose samples into (segments, labels, pair ids, utter ids, seg counts)."""
        if not data_sample:
            return ([], [], [], [], [])
        labels, pairs, utter_ids, seg_nums, segments = (list(col) for col in zip(*data_sample))
        return (segments, labels, pairs, utter_ids, seg_nums)
|
class DownstreamExpert(nn.Module):
'\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n\n Note 1.\n dataloaders should output in the following format:\n\n [[wav1, wav2, ...], your_other_contents, ...]\n\n where wav1, wav2 ... are in variable length\n and wav1 is in torch.FloatTensor\n '
def __init__(self, upstream_dim, downstream_expert, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.downstream = downstream_expert
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
self.train_dataset = SpeakerVerifi_train(self.datarc['vad_config'], **self.datarc['train'])
self.dev_dataset = SpeakerVerifi_dev(self.datarc['vad_config'], self.datarc['segment_config'], **self.datarc['dev'])
self.test_dataset = SpeakerVerifi_test(self.datarc['vad_config'], self.datarc['segment_config'], **self.datarc['test'])
self.connector = nn.Linear(self.upstream_dim, self.modelrc['input_dim'])
self.model = Model(input_dim=self.modelrc['input_dim'], agg_dim=self.modelrc['agg_dim'], agg_module=self.modelrc['agg_module'], config=self.modelrc)
self.objective = AdMSoftmaxLoss(self.modelrc['input_dim'], self.train_dataset.speaker_num, s=30.0, m=0.4)
self.score_fn = nn.CosineSimilarity(dim=(- 1))
self.eval_metric = EER
def get_dataloader(self, mode):
"\n Args:\n mode: string\n 'train', 'dev' or 'test'\n\n Return:\n a torch.utils.data.DataLoader returning each batch in the format of:\n\n [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. directly loaded by torchaudio\n "
if (mode == 'train'):
return self._get_train_dataloader(self.train_dataset)
elif (mode == 'dev'):
return self._get_eval_dataloader(self.dev_dataset)
elif (mode == 'test'):
return self._get_eval_dataloader(self.test_dataset)
def _get_train_dataloader(self, dataset):
return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=True, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def _get_eval_dataloader(self, dataset):
return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def get_train_dataloader(self):
return self._get_train_dataloader(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
def forward(self, mode, features, labels, pair_list, utterid_list, seg_num_list, records, **kwargs):
'\n Args:\n features:\n the features extracted by upstream\n put in the device assigned by command-line args\n\n labels:\n the speaker labels\n\n records:\n defaultdict(list), by appending scalars into records,\n these scalars will be averaged and logged on Tensorboard\n\n logger:\n Tensorboard SummaryWriter, given here for logging/debugging\n convenience, please use "self.downstream/your_content_name" as key\n name to log your customized contents\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n '
features_pad = pad_sequence(features, batch_first=True)
if (self.modelrc['module'] == 'XVector'):
attention_mask = [torch.ones((feature.shape[0] - 14)) for feature in features]
else:
attention_mask = [torch.ones(feature.shape[0]) for feature in features]
attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
attention_mask_pad = ((1.0 - attention_mask_pad) * (- 100000.0))
features_pad = self.connector(features_pad)
agg_vec = self.model(features_pad, attention_mask_pad.cuda())
if self.training:
labels = torch.LongTensor(labels).to(features_pad.device)
loss = self.objective(agg_vec, labels)
return loss
else:
agg_vec = (agg_vec / torch.norm(agg_vec, dim=(- 1)).unsqueeze((- 1)))
if (len(labels) > 1):
agg_vec_list = [vec for vec in agg_vec]
for index in range(len(agg_vec)):
records[f'utterid_{utterid_list[index]}'].append(agg_vec[index])
records[f'utterid_info'].append(f'utterid_{utterid_list[index]}')
records[f'pairid_info'].append(f'pairid_{pair_list[index]}')
records[f'pairid_{pair_list[index]}'].append(f'utterid_{utterid_list[index]}')
records[f'pairid_{pair_list[index]}_label'].append(labels[index])
else:
records[f'utterid_{utterid_list[0]}'].append(agg_vec[0])
records[f'utterid_info'].append(f'utterid_{utterid_list[0]}')
records[f'pairid_info'].append(f'pairid_{pair_list[0]}')
records[f'pairid_{pair_list[0]}'].append(f'utterid_{utterid_list[0]}')
records[f'pairid_{pair_list[0]}_label'].append(labels[0])
return torch.tensor(0)
def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
"\n Args:\n records:\n defaultdict(list), contents already appended\n\n logger:\n Tensorboard SummaryWriter\n please use f'{prefix}your_content_name' as key name\n to log your customized contents\n\n prefix:\n used to indicate downstream and train/test on Tensorboard\n eg. 'phone/train-'\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n "
if (not self.training):
records = self.declutter(records)
EER_result = self.eval_metric(np.array(records['ylabels']), np.array(records['scores']))
records['EER'] = EER_result[0]
logger.add_scalar((f'{mode}-' + 'EER'), records['EER'], global_step=global_step)
def declutter(self, records):
utterance_ids = set(records['utterid_info'])
for index in utterance_ids:
records[index] = torch.mean(torch.stack(records[index]), dim=0)
pair_ids = set(records['pairid_info'])
for index in pair_ids:
wav_set = list(set(records[index]))
if (len(wav_set) == 1):
wavs1 = records[wav_set[0]]
wavs2 = records[wav_set[0]]
else:
wavs1 = records[wav_set[0]]
wavs2 = records[wav_set[1]]
score = self.score_fn(wavs1, wavs2).squeeze().cpu().detach().tolist()
ylabel = list(set(records[f'{index}_label']))[0]
records['ylabels'].append(ylabel)
records['scores'].append(score)
return records
|
def EER(labels, scores):
    """Compute the equal error rate and its threshold from a ROC curve.

    Args:
        labels: (N,) array-like of 0/1 ground-truth pair labels
        scores: (N,) array-like of similarity scores (e.g. cosine, in [-1, 1])

    Returns:
        (eer, thresh): the equal error rate and the score threshold achieving it
    """
    (fpr, tpr, thresholds) = roc_curve(labels, scores)
    # Build the interpolator once; the previous version rebuilt it on every
    # root-finder evaluation and also left an unused copy behind.
    tpr_at = interp1d(fpr, tpr)
    # EER is the point where FPR == FNR, i.e. where 1 - x == tpr(x).
    eer = brentq((lambda x: ((1.0 - x) - tpr_at(x))), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """Compute an equal error rate and threshold via binary search.

    Args:
        labels: (N,1) with value being 0 or 1
        scores: (N,1) within [-1, 1]

    Returns:
        equal_error_rates
        threshold
    """
    # Sort scores ascending, carrying their labels along.
    joints = sorted(zip(scores, labels), key=(lambda x: x[0]))
    (sorted_scores, sorted_labels) = zip(*joints)
    total_ones = sum(sorted_labels)
    total_zeros = (len(sorted_labels) - total_ones)
    # Prefix counts: prefsum_ones[i] = number of 1-labels among the first i sorted
    # items (those falling below a threshold placed at index i); same for zeros.
    # `initial=0` requires Python 3.8+.
    prefsum_ones = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=1), initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=0), initial=0))
    # Pad the score range so thresholds below/above every score are representable.
    # NOTE(review): assumes scores lie in [-1, 1] (e.g. cosine similarity).
    ext_scores = [(- 1.0), *sorted_scores, 1.0]
    (thresh_left, thresh_right) = (0, len(ext_scores))
    # Binary search for the index where false positives == false negatives.
    while True:
        if (thresh_left == thresh_right):
            break
        thresh_idx = ((thresh_left + thresh_right) // 2)
        # Items at/above the threshold predict positive.
        nb_false_positives = (total_zeros - prefsum_zeros[thresh_idx])
        nb_false_negatives = prefsum_ones[thresh_idx]
        if (nb_false_positives > nb_false_negatives):
            # NOTE(review): when thresh_right == thresh_left + 1 this makes no
            # progress (thresh_idx == thresh_left) -- possible infinite loop;
            # verify termination on adversarial inputs.
            thresh_left = thresh_idx
        elif (nb_false_positives < nb_false_negatives):
            thresh_right = thresh_idx
        else:
            break
    # Place the threshold midway between the two bracketing scores.
    thresh = ((ext_scores[thresh_idx] + ext_scores[(thresh_idx + 1)]) / 2)
    false_negative_ratio = (nb_false_negatives / len(labels))
    false_positive_ratio = (nb_false_positives / len(labels))
    equal_error_rate = ((false_positive_ratio + false_negative_ratio) / 2)
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel, score_fn=None):
    """Score the two halves of an embedding batch against each other.

    The batch layout is [pair1_a, pair2_a, ..., pair1_b, pair2_b, ...]: the
    i-th embedding pairs with the (len(ylabel)+i)-th one.

    Args:
        input_x_speaker: indexable batch of embedding tensors, length 2*len(ylabel)
        ylabel: list of 0/1 label tensors, one per pair
        score_fn: optional similarity module; defaults to cosine similarity.
            (BUGFIX: the original referenced `self.score_fn` in this free
            function, which raised NameError on every call.)

    Returns:
        (scores, ylabel): similarity scores and integer labels as Python lists
    """
    if score_fn is None:
        score_fn = nn.CosineSimilarity(dim=(- 1))
    half = len(ylabel)
    wav1 = torch.stack([input_x_speaker[i].unsqueeze(0) for i in range(half)])
    wav2 = torch.stack([input_x_speaker[(half + i)].unsqueeze(0) for i in range(half)])
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    scores = score_fn(wav1, wav2).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
class DownstreamExpert(nn.Module):
    """Speaker verification expert trained with the GE2E objective.

    The train loader groups several utterances per speaker so GE2E can
    contrast within-speaker vs cross-speaker embeddings; dev/test compute
    pairwise cosine scores for EER.
    """

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.seed = kwargs['seed']
        self.train_dataset = AudioBatchData(**self.datarc['train'], batch_size=self.datarc['train_batch_size'])
        self.dev_dataset = SpeakerVerifi_dev(**self.datarc['dev'])
        self.test_dataset = SpeakerVerifi_test(**self.datarc['test'])
        self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
        self.model = Model(input_dim=self.modelrc['input_dim'], agg_module=self.modelrc['agg_module'], config=self.modelrc)
        self.objective = GE2E()
        self.score_fn = nn.CosineSimilarity(dim=(- 1))
        self.eval_metric = EER

    def _get_train_dataloader(self, dataset):
        # NOTE(review): the `dataset` argument is ignored; AudioBatchData builds
        # its own loader. Kept for signature parity with the eval counterpart.
        return self.train_dataset.getDataLoader(batchSize=1, numWorkers=0)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def forward(self, features, lengths, labels, records=None, logger=None, prefix=None, global_step=0, **kwargs):
        """Training: GE2E loss on the (speaker, utterance, dim) embedding matrix.
        Eval: cosine-score utterance pairs and stash scores/labels in records.

        Args:
            features: list of variable-length upstream feature tensors
            lengths: unused here (masking is rebuilt from feature shapes)
            labels: pair labels at eval time
            records: defaultdict(list) accumulating scores/labels

        Returns:
            the loss to optimize during training, else a zero tensor
        """
        features_pad = pad_sequence(features, batch_first=True)
        attention_mask = [torch.ones(feature.shape[0]) for feature in features]
        attention_mask_pad = pad_sequence(attention_mask, batch_first=True)
        # Padded positions get a large negative bias (pre-softmax masking).
        attention_mask_pad = ((1.0 - attention_mask_pad) * (- 100000.0))
        features_pad = self.connector(features_pad)
        # BUGFIX: follow the features' device instead of hard-coding .cuda(),
        # so CPU-only runs work too.
        agg_vec = self.model(features_pad, attention_mask_pad.to(features_pad.device))
        # L2-normalize embeddings before GE2E / cosine scoring.
        agg_vec = (agg_vec / torch.norm(agg_vec, dim=(- 1)).unsqueeze((- 1)))
        if self.training:
            # Reshape to (speakers, utterances-per-speaker, dim) for GE2E.
            GE2E_matrix = agg_vec.reshape((- 1), self.train_dataset.utter_number, agg_vec.shape[(- 1)])
            loss = self.objective(GE2E_matrix)
            return loss
        else:
            # The first half of the batch pairs with the second half.
            (vec1, vec2) = self.separate_data(agg_vec, labels)
            scores = self.score_fn(vec1, vec2).squeeze().cpu().detach().tolist()
            ylabels = torch.stack(labels).cpu().detach().long().tolist()
            if (len(ylabels) > 1):
                records['scores'].extend(scores)
                records['ylabels'].extend(ylabels)
            else:
                records['scores'].append(scores)
                records['ylabels'].append(ylabels)
            return torch.tensor(0)

    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """At eval time, compute and log the EER over all accumulated pairs."""
        if (not self.training):
            EER_result = self.eval_metric(np.array(records['ylabels']), np.array(records['scores']))
            records['EER'] = EER_result[0]
            logger.add_scalar((f'{prefix}' + 'EER'), records['EER'], global_step=global_step)

    def separate_data(self, agg_vec, ylabel):
        """Split the batch embeddings into the two sides of each trial pair."""
        total_num = len(ylabel)
        feature1 = agg_vec[:total_num]
        feature2 = agg_vec[total_num:]
        return (feature1, feature2)
|
def collect_speaker_ids(roots, speaker_num):
    """Split the speaker ids under the dataset roots into train/dev lists.

    Speakers whose grandparent directory name matches the Voxceleb1 root go
    into the vox1 pool (from which `speaker_num` dev speakers are randomly
    sampled); the Voxceleb2 pool is always kept for training.
    """
    speaker_paths = []
    for root in roots.values():
        speaker_paths.extend(entry.path for entry in os.scandir(root) if entry.is_dir())
    tagged = [(path.split('/')[-3], path.split('/')[-1]) for path in speaker_paths]
    vox1_tag = roots['Voxceleb1'].split('/')[-2]
    vox2_tag = roots['Voxceleb2'].split('/')[-2]
    vox1 = [speaker for tag, speaker in tagged if tag == vox1_tag]
    vox2 = [speaker for tag, speaker in tagged if tag == vox2_tag]
    dev_speaker = random.sample(vox1, k=speaker_num)
    train_speaker = [speaker for speaker in vox1 if speaker not in dev_speaker]
    train_speaker.extend(vox2)
    return (train_speaker, dev_speaker)
|
def construct_dev_speaker_id_txt(dev_speakers, dev_txt_name):
    """Write the dev speaker ids to `dev_txt_name`, one id per line.

    Args:
        dev_speakers: iterable of speaker-id strings
        dev_txt_name: output text file path
    """
    # Context manager guarantees the handle is flushed and closed even on
    # error (the original leaked it if a write raised).
    with open(dev_txt_name, 'w') as f:
        for dev in dev_speakers:
            f.write(f'{dev}\n')
|
# Trailing page text from the dataset-viewer scrape (not code), preserved as a comment:
# "Subsets and Splits / No community queries yet / The top public SQL queries
# from the community will appear here once available."