code stringlengths 17 6.64M |
|---|
def generate_sample(embeddings, this_spk, other_spks, label):
    """Score every embedding of `this_spk` against embeddings of `other_spks`.

    Each pair yields a cosine similarity; the given `label` marks the pairs
    as positive (1, same speaker) or negative (0, different speaker) samples.

    Returns:
        list of (cosine_similarity, label) tuples.
    """
    anchor_embs = embeddings[this_spk]
    compare_embs = list(chain.from_iterable(embeddings[spk] for spk in other_spks))
    return [
        (get_cosine_similarity(anchor, other), label)
        for anchor in anchor_embs
        for other in compare_embs
    ]
|
def calculate_equal_error_rate(labels, scores):
    """Compute the equal error rate (EER) and its decision threshold.

    labels: (N,) iterable of 0/1 ground-truth labels
    scores: (N,) iterable of similarity scores in [-1, 1]
    """
    fpr, tpr, thresholds = roc_curve(labels, scores)

    # EER is where the false-positive rate equals the false-negative rate,
    # i.e. the root of (1 - x) - tpr(x) with x the FPR.
    def fnr_minus_tpr_gap(x):
        return (1.0 - x) - interp1d(fpr, tpr)(x)

    equal_error_rate = brentq(fnr_minus_tpr_gap, 0.0, 1.0)
    threshold = interp1d(fpr, thresholds)(equal_error_rate)
    return (equal_error_rate, threshold)
|
def calculate_threshold(data_root, task, device, query='E3*.wav'):
    """Estimate the ASV decision threshold at the equal error rate.

    Extracts speaker embeddings for every speaker of the given task, scores
    all positive (same-speaker) and negative (cross-speaker) pairs, and
    returns (EER, threshold) as floats.

    Raises:
        NotImplementedError: if `task` is neither 'task1' nor 'task2'.
    """
    # BUG FIX: the original used two independent `if` statements, so
    # task == 'task1' fell through to the second statement's `else` branch
    # and raised NotImplementedError. `elif` restores the intended dispatch.
    if task == 'task1':
        spks = SRCSPKS + TRGSPKS_TASK1
    elif task == 'task2':
        spks = SRCSPKS + TRGSPKS_TASK2
    else:
        raise NotImplementedError
    encoder = load_asv_model(device)
    embeddings = defaultdict(list)
    for spk in spks:
        wav_list = find_files(os.path.join(data_root, spk), query)
        print(f'Extracting speaker embedding for {len(wav_list)} files of {spk}')
        for wav_path in wav_list:
            embedding = get_embedding(wav_path, encoder)
            embeddings[spk].append(embedding)
    samples = []
    for spk in spks:
        negative_spks = [_spk for _spk in spks if _spk != spk]
        # Same-speaker pairs are positives (label 1), cross-speaker negatives (label 0).
        samples += generate_sample(embeddings, spk, [spk], 1)
        samples += generate_sample(embeddings, spk, negative_spks, 0)
    print(f'[INFO]: Number of samples: {len(samples)}')
    scores = [x[0] for x in samples]
    labels = [x[1] for x in samples]
    equal_error_rate, threshold = calculate_equal_error_rate(labels, scores)
    return (float(equal_error_rate), float(threshold))
|
def calculate_accept(x_path, y_path, encoder, threshold):
    """Return True when the two utterances are accepted as the same speaker."""
    similarity = get_cosine_similarity(
        get_embedding(x_path, encoder),
        get_embedding(y_path, encoder),
    )
    return similarity > threshold
|
class SequenceDataset(Dataset):
    """Length-bucketed LibriSpeech dataset for CTC ASR.

    Waveforms are grouped into pre-computed length buckets (csv files produced
    by preprocess/generate_len_for_bucket.py). Each __getitem__ returns one
    whole bucket, so the DataLoader should use batch_size=1.
    """

    def __init__(self, split, bucket_size, dictionary, libri_root, bucket_file, **kwargs):
        super(SequenceDataset, self).__init__()
        self.dictionary = dictionary
        self.libri_root = libri_root
        self.sample_rate = SAMPLE_RATE
        # e.g. kwargs['train'] lists the LibriSpeech subsets for this split
        self.split_sets = kwargs[split]
        assert os.path.isdir(bucket_file), 'Please first run `python3 preprocess/generate_len_for_bucket.py -h` to get bucket file.'
        table_list = []
        for item in self.split_sets:
            file_path = os.path.join(bucket_file, (item + '.csv'))
            if os.path.exists(file_path):
                table_list.append(pd.read_csv(file_path))
            else:
                logging.warning(f'{item} is not found in bucket_file: {bucket_file}, skipping it.')
        table_list = pd.concat(table_list)
        # longest-first so each bucket holds utterances of similar length
        table_list = table_list.sort_values(by=['length'], ascending=False)
        X = table_list['file_path'].tolist()
        X_lens = table_list['length'].tolist()
        assert (len(X) != 0), f'0 data found for {split}'
        # keep only utterances that actually have a transcript
        Y = self._load_transcript(X)
        x_names = set([self._parse_x_name(x) for x in X])
        y_names = set(Y.keys())
        usage_list = list((x_names & y_names))
        Y = {key: Y[key] for key in usage_list}
        # encode transcripts into token-id tensors once, up front
        self.Y = {k: self.dictionary.encode_line(v, line_tokenizer=(lambda x: x.split())).long() for (k, v) in Y.items()}
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in tqdm(zip(X, X_lens), total=len(X), desc=f'ASR dataset {split}', dynamic_ncols=True):
            if (self._parse_x_name(x) in usage_list):
                batch_x.append(x)
                batch_len.append(x_len)
                # a full bucket is halved when it contains very long utterances
                # so the padded batch stays within memory limits
                if (len(batch_x) == bucket_size):
                    if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME)):
                        self.X.append(batch_x[:(bucket_size // 2)])
                        self.X.append(batch_x[(bucket_size // 2):])
                    else:
                        self.X.append(batch_x)
                    (batch_x, batch_len) = ([], [])
        # flush the trailing partial bucket (single leftover samples are dropped)
        if (len(batch_x) > 1):
            # NOTE(review): this re-checks only the last `x` of the loop and
            # looks redundant, since batch_x only ever holds usable files —
            # confirm against upstream.
            if (self._parse_x_name(x) in usage_list):
                self.X.append(batch_x)

    def _parse_x_name(self, x):
        # utterance id == file stem (basename without extension)
        return x.split('/')[(- 1)].split('.')[0]

    def _load_wav(self, wav_path):
        # Load a mono waveform as a flat 1-D tensor, enforcing the config rate.
        (wav, sr) = torchaudio.load(os.path.join(self.libri_root, wav_path))
        assert (sr == self.sample_rate), f'Sample rate mismatch: real {sr}, config {self.sample_rate}'
        return wav.view((- 1))

    def _load_transcript(self, x_list):
        """Load the transcripts for Librispeech"""
        def process_trans(transcript):
            # 'HI THERE' -> 'H I | T H E R E |' (char tokens, '|' marks word boundary)
            transcript = transcript.upper()
            return (' '.join(list(transcript.replace(' ', '|'))) + ' |')
        trsp_sequences = {}
        # one <speaker>-<chapter>.trans.txt file per speaker/chapter directory
        split_spkr_chap_list = list(set(('/'.join(x.split('/')[:(- 1)]) for x in x_list)))
        for dir in split_spkr_chap_list:
            parts = dir.split('/')
            trans_path = f'{parts[(- 2)]}-{parts[(- 1)]}.trans.txt'
            path = os.path.join(self.libri_root, dir, trans_path)
            assert os.path.exists(path)
            with open(path, 'r') as trans_f:
                for line in trans_f:
                    lst = line.strip().split()
                    # lst[0] is the utterance id, the rest is the transcript
                    trsp_sequences[lst[0]] = process_trans(' '.join(lst[1:]))
        return trsp_sequences

    def _build_dictionary(self, transcripts, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8):
        """Build a fairseq Dictionary from raw transcripts (offline utility)."""
        d = Dictionary()
        transcript_list = list(transcripts.values())
        Dictionary.add_transcripts_to_dictionary(transcript_list, d, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        # one item == one whole bucket of utterances
        wav_batch = [self._load_wav(x_file).numpy() for x_file in self.X[index]]
        label_batch = [self.Y[self._parse_x_name(x_file)].numpy() for x_file in self.X[index]]
        filename_batch = [Path(x_file).stem for x_file in self.X[index]]
        return (wav_batch, label_batch, filename_batch)

    def collate_fn(self, items):
        # DataLoader batch_size is 1: unwrap the single bucket
        assert (len(items) == 1)
        return (items[0][0], items[0][1], items[0][2])
|
class Dictionary(fairseq_Dictionary):
    """Dictionary inherited from FairSeq."""

    @staticmethod
    def _add_transcripts_to_dictionary_single_worker(transcripts, eos_word, worker_id=0, num_workers=1):
        """Count symbol occurrences in this worker's contiguous share of `transcripts`.

        One `eos_word` count is added per transcript line.

        Returns:
            collections.Counter mapping symbol -> count.
        """
        counter = Counter()
        size = len(transcripts)
        chunk_size = size // num_workers
        offset = worker_id * chunk_size
        # BUG FIX: the original computed `end = min(size + 1, offset + chunk_size)`,
        # which silently dropped the trailing `size % num_workers` transcripts
        # whenever num_workers did not divide len(transcripts). The last worker
        # now takes everything up to the end of the list.
        end = size if worker_id == num_workers - 1 else offset + chunk_size
        for line in transcripts[offset:end]:
            for word in line.split():
                counter.update([word])
            counter.update([eos_word])
        return counter

    @staticmethod
    def add_transcripts_to_dictionary(transcripts, dict, num_workers):
        """Populate `dict` with symbol counts gathered from `transcripts`.

        Uses a spawn-based process pool when num_workers > 1. Symbols are merged
        in sorted order so the resulting dictionary is deterministic.
        """
        def merge_result(counter):
            for (w, c) in sorted(counter.items()):
                dict.add_symbol(w, c)

        if num_workers > 1:
            pool = get_context('spawn').Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(
                    Dictionary._add_transcripts_to_dictionary_single_worker,
                    (transcripts, dict.eos_word, worker_id, num_workers)))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_transcripts_to_dictionary_single_worker(transcripts, dict.eos_word))
|
def token_to_word(text):
    """Collapse a spaced character sequence back into words.

    Character tokens are separated by spaces and word boundaries are marked
    with '|', e.g. 'H I | T H E R E' -> 'HI THERE'.
    """
    without_spaces = text.replace(' ', '')
    return without_spaces.replace('|', ' ').strip()
|
def get_decoder(decoder_args_dict, dictionary):
    """Build a KenLM beam-search decoder, or return None for greedy decoding."""
    args = Namespace(**decoder_args_dict)
    if args.decoder_type != 'kenlm':
        return None
    from .w2l_decoder import W2lKenLMDecoder
    args.beam_size_token = len(dictionary)
    # config files may express unk_weight as an expression string (e.g. '-math.inf')
    if isinstance(args.unk_weight, str):
        args.unk_weight = eval(args.unk_weight)
    return W2lKenLMDecoder(args, dictionary)
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        """
        Args:
            upstream_dim: int
                representation dimension given by the upstream model
            upstream_rate: int
                160 for upstreams with 10 ms per frame, 320 for 20 ms per frame
            downstream_expert: dict
                the 'downstream_expert' field of the downstream config file
            expdir: string
                experiment directory; all results and logs are saved here
            **kwargs: dict
                all other command-line arguments / config fields
        """
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.upstream_rate = upstream_rate
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.expdir = expdir
        # falls back to the character dictionary shipped next to this file
        self.dictionary = Dictionary.load(self.datarc.get('dict_path', str((Path(__file__).parent / 'char.dict'))))
        self.projector = nn.Linear(upstream_dim, self.modelrc['project_dim'])
        # NOTE(review): the model class is resolved by name from the config via
        # eval() — config files are assumed trusted.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc[self.modelrc['select']]
        self.model = model_cls(self.modelrc['project_dim'], len(self.dictionary.symbols), upstream_rate, **model_conf)
        # the CTC blank symbol reuses the dictionary's BOS id
        self.blank = self.dictionary.bos()
        self.objective = nn.CTCLoss(blank=self.blank, zero_infinity=self.datarc['zero_infinity'])
        decoder_args = self.datarc.get('decoder_args')
        self.decoder = get_decoder(decoder_args, self.dictionary)
        # best (lowest) dev WER observed so far; starts at a high sentinel
        self.register_buffer('best_score', (torch.ones(1) * 100))

    def get_dataloader(self, split):
        """
        Return a DataLoader for `split` (train/dev/test-clean/test-other).

        Each batch is ([wav1, wav2, ...], labels, filenames); the wavs are
        variable-length 1-D 16 kHz FloatTensors loaded by torchaudio.
        """
        # datasets are created lazily and cached as `{split}_dataset` attributes
        if (not hasattr(self, f'{split}_dataset')):
            batch_size = (self.datarc['batch_size'] if (split == 'train') else self.datarc['eval_batch_size'])
            setattr(self, f'{split}_dataset', SequenceDataset(split, batch_size, self.dictionary, **self.datarc))
        if (split == 'train'):
            return self._get_train_dataloader(self.train_dataset)
        else:
            return self._get_eval_dataloader(getattr(self, f'{split}_dataset'))

    def _get_train_dataloader(self, dataset):
        # shard across workers when torch.distributed is initialized
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=1, shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=1, shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _compute_metrics(self, pred_tokens_all, pred_words_all, target_tokens_all, target_words_all):
        """Computes WER and UER given the prediction and true transcriptions"""
        unit_error_sum = 0.0
        word_error_sum = 0.0
        unit_length_sum = 0
        word_length_sum = 0
        for (pred_tokens, pred_words, target_tokens, target_words) in zip(pred_tokens_all, pred_words_all, target_tokens_all, target_words_all):
            # token strings are space-separated; words are already lists
            pred_tokens = pred_tokens.split()
            target_tokens = target_tokens.split()
            unit_error_sum += editdistance.eval(pred_tokens, target_tokens)
            unit_length_sum += len(target_tokens)
            word_error_sum += editdistance.eval(pred_words, target_words)
            word_length_sum += len(target_words)
        # default to 100% error when the reference side is empty
        (uer, wer) = (100.0, 100.0)
        if (unit_length_sum > 0):
            uer = ((100.0 * unit_error_sum) / unit_length_sum)
        if (word_length_sum > 0):
            wer = ((100.0 * word_error_sum) / word_length_sum)
        return (uer, wer)

    def _decode(self, log_probs, input_lens):
        """Decoder that take log probabilities as input and outputs decoded seq"""
        pred_tokens_batch = []
        pred_words_batch = []
        for (log_prob, in_len) in zip(log_probs, input_lens):
            log_prob = log_prob[:in_len].unsqueeze(0)
            decoded = None
            # LM beam-search decoding is only used at evaluation time
            if ((self.decoder is not None) and (not self.training)):
                decoded = self.decoder.decode(log_prob)
                if (len(decoded) >= 1):
                    decoded = decoded[0]  # first (and only) utterance
                    # best hypothesis, or None if the beam came back empty
                    decoded = (None if (len(decoded) < 1) else decoded[0])
            # greedy CTC path: argmax, collapse repeats, drop blanks
            pred_token_ids = log_prob.argmax(dim=(- 1)).unique_consecutive()
            pred_token_ids = pred_token_ids[(pred_token_ids != self.blank)].tolist()
            pred_tokens = self.dictionary.string(pred_token_ids)
            # prefer the LM decoder's word sequence when available
            if ((decoded is not None) and ('words' in decoded)):
                pred_words = decoded['words']
            else:
                pred_words = token_to_word(pred_tokens).split()
            pred_tokens_batch.append(pred_tokens)
            pred_words_batch.append(pred_words)
        return (pred_tokens_batch, pred_words_batch)

    def _get_log_probs(self, features):
        # Pad the variable-length features, project, and run the acoustic model.
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features])
        features = pad_sequence(features, batch_first=True).to(device=device)
        features = self.projector(features)
        (logits, log_probs_len) = self.model(features, features_len)
        log_probs = nn.functional.log_softmax(logits, dim=(- 1))
        return (log_probs, log_probs_len)

    def inference(self, features, filenames):
        """Decode `features` to word hypotheses; optionally dump them to an ark file."""
        (log_probs, log_probs_len) = self._get_log_probs(features)
        (_, pred_words_batch) = self._decode(log_probs.float().contiguous().cpu(), log_probs_len)
        hyps = [' '.join(hyp) for hyp in pred_words_batch]
        if (filenames != []):
            with open((Path(self.expdir) / 'inference.ark'), 'w') as file:
                for (hyp, filename) in zip(hyps, filenames):
                    # NOTE(review): '(unknown)' looks like a mangled placeholder
                    # for {filename} — confirm against the upstream repository.
                    file.write(f'''(unknown) {hyp}
''')
        return hyps

    def forward(self, split, features, labels, filenames, records, **kwargs):
        """
        Compute the CTC loss for one batch and stash decoding results into
        `records` for later metric computation and logging.

        Args:
            split: train/dev/test-clean/test-other
            features: list of unpadded feature tensors, already on device
            labels: list of target token-id sequences
            filenames: utterance ids for this batch
            records: defaultdict(list), later consumed by self.log_records

        Returns:
            loss: scalar CTC loss (still attached to the graph)
        """
        (log_probs, log_probs_len) = self._get_log_probs(features)
        device = features[0].device
        labels = [torch.IntTensor(l) for l in labels]
        labels_len = torch.IntTensor([len(label) for label in labels]).to(device=device)
        labels = pad_sequence(labels, batch_first=True, padding_value=self.dictionary.pad()).to(device=device)
        # CTCLoss expects (T, B, V) log-probs
        loss = self.objective(log_probs.transpose(0, 1), labels, log_probs_len, labels_len)
        records['loss'].append(loss.item())
        target_tokens_batch = []
        target_words_batch = []
        for label in labels:
            # strip padding and EOS before rendering the reference text
            label_idx = ((label != self.dictionary.pad()) & (label != self.dictionary.eos()))
            target_token_ids = label[label_idx].tolist()
            target_tokens = self.dictionary.string(target_token_ids)
            target_words = token_to_word(target_tokens).split()
            target_tokens_batch.append(target_tokens)
            target_words_batch.append(target_words)
        with torch.no_grad():
            (pred_tokens_batch, pred_words_batch) = self._decode(log_probs.float().contiguous().cpu(), log_probs_len)
        records['target_tokens'] += target_tokens_batch
        records['target_words'] += target_words_batch
        records['pred_tokens'] += pred_tokens_batch
        records['pred_words'] += pred_words_batch
        records['filenames'] += filenames
        return loss

    def log_records(self, split, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """
        Log loss/UER/WER to tensorboard, dump hypothesis/reference ark files
        for dev/test splits, and return checkpoint names to save whenever the
        dev-clean WER improves.
        """
        loss = torch.FloatTensor(records['loss']).mean().item()
        print(f'{split} loss: {loss}')
        (uer, wer) = self._compute_metrics(records['pred_tokens'], records['pred_words'], records['target_tokens'], records['target_words'])
        logger.add_scalar(f'asr/{split}-loss', loss, global_step=global_step)
        logger.add_scalar(f'asr/{split}-uer', uer, global_step=global_step)
        logger.add_scalar(f'asr/{split}-wer', wer, global_step=global_step)
        print(f'{split} uer: {uer}')
        print(f'{split} wer: {wer}')
        save_names = []
        # keep a checkpoint whenever the clean dev WER improves
        if ((split == 'dev-clean') and (wer < self.best_score)):
            self.best_score = (torch.ones(1) * wer)
            save_names.append(f'{split}-best.ckpt')
        if (('test' in split) or ('dev' in split)):
            lm = ('noLM' if (self.decoder is None) else 'LM')
            hyp_ark = open(os.path.join(self.expdir, f'{split}-{lm}-hyp.ark'), 'w')
            ref_ark = open(os.path.join(self.expdir, f'{split}-{lm}-ref.ark'), 'w')
            for (filename, hyp, ref) in zip(records['filenames'], records['pred_words'], records['target_words']):
                hyp = ' '.join(hyp)
                ref = ' '.join(ref)
                # NOTE(review): '(unknown)' looks like a mangled placeholder
                # for {filename} — confirm against the upstream repository.
                hyp_ark.write(f'''(unknown) {hyp}
''')
                ref_ark.write(f'''(unknown) {ref}
''')
            hyp_ark.close()
            ref_ark.close()
        return save_names
|
class W2lDecoder(object):
    """Base wav2letter-style CTC decoder over a fairseq target dictionary."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        # number of hypotheses to keep per utterance
        self.nbest = args.nbest
        self.criterion_type = CriterionType.CTC
        # blank token: dedicated '<ctc_blank>' if present, otherwise BOS
        self.blank = (tgt_dict.index('<ctc_blank>') if ('<ctc_blank>' in tgt_dict.indices) else tgt_dict.bos())
        # silence / word-separator token: '<sep>' or '|' if present, else EOS
        if ('<sep>' in tgt_dict.indices):
            self.silence = tgt_dict.index('<sep>')
        elif ('|' in tgt_dict.indices):
            self.silence = tgt_dict.index('|')
        else:
            self.silence = tgt_dict.eos()
        self.asg_transitions = None

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # drop teacher-forcing inputs; the encoder only needs the features
        encoder_input = {k: v for (k, v) in sample['net_input'].items() if (k != 'prev_output_tokens')}
        emissions = self.get_emissions(models, encoder_input)
        return self.decode(emissions)

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions"""
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, 'get_logits'):
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        # encoder emits time-first; the decoder expects a batch-first,
        # contiguous float CPU tensor
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs):
        """Normalize tokens by handling CTC blank, ASG replabels, etc."""
        # collapse consecutive repeats, then drop blanks
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter((lambda x: (x != self.blank)), idxs)
        return torch.LongTensor(list(idxs))
|
class W2lKenLMDecoder(W2lDecoder):
    """Lexicon-based (or lexicon-free) KenLM beam-search CTC decoder."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.unit_lm = getattr(args, 'unit_lm', False)
        if args.lexicon:
            # lexicon decoding: build a trie of word spellings, each scored
            # by the KenLM unigram probability of its word
            self.lexicon = load_words(args.lexicon)
            self.word_dict = create_word_dict(self.lexicon)
            self.unk_word = self.word_dict.get_index('<unk>')
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.trie = Trie(self.vocab_size, self.silence)
            start_state = self.lm.start(False)
            for (i, (word, spellings)) in enumerate(self.lexicon.items()):
                word_idx = self.word_dict.get_index(word)
                (_, score) = self.lm.score(start_state, word_idx)
                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    # every spelling token must exist in the acoustic vocabulary
                    assert (tgt_dict.unk() not in spelling_idxs), f'{spelling} {spelling_idxs}'
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)
            self.decoder_opts = LexiconDecoderOptions(beam_size=args.beam, beam_size_token=int(getattr(args, 'beam_size_token', len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type)
            # CTC decoding uses no ASG transition matrix
            if (self.asg_transitions is None):
                N = 768
                self.asg_transitions = []
            self.decoder = LexiconDecoder(self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, self.unit_lm)
        else:
            # lexicon-free decoding: each acoustic unit is its own "word"
            assert args.unit_lm, 'lexicon free decoding can only be done with a unit language model'
            from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
            d = {w: [[w]] for w in tgt_dict.symbols}
            self.word_dict = create_word_dict(d)
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.decoder_opts = LexiconFreeDecoderOptions(beam_size=args.beam, beam_size_token=int(getattr(args, 'beam_size_token', len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type)
            self.decoder = LexiconFreeDecoder(self.decoder_opts, self.lm, self.silence, self.blank, [])

    def get_timesteps(self, token_idxs: List[int]) -> List[int]:
        """Returns frame numbers corresponding to every non-blank token.

        Parameters
        ----------
        token_idxs : List[int]
            IDs of decoded tokens.

        Returns
        -------
        List[int]
            Frame numbers corresponding to every non-blank token.
        """
        timesteps = []
        for (i, token_idx) in enumerate(token_idxs):
            if (token_idx == self.blank):
                continue
            # count a token only at the start of a repeated run
            if ((i == 0) or (token_idx != token_idxs[(i - 1)])):
                timesteps.append(i)
        return timesteps

    def decode(self, emissions):
        """Beam-search decode a (B, T, N) emissions tensor into n-best hypotheses."""
        (B, T, N) = emissions.size()
        hypos = []
        for b in range(B):
            # raw pointer into the tensor data: 4 bytes per float32 element,
            # advanced by b rows of stride(0) — assumes float32 emissions
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            # negative word ids are fillers; map the rest back to strings
            hypos.append([{'tokens': self.get_tokens(result.tokens), 'score': result.score, 'timesteps': self.get_timesteps(result.tokens), 'words': [self.word_dict.get_entry(x) for x in result.words if (x >= 0)]} for result in nbest_results])
        return hypos
|
class AtisDataset(Dataset):
    """ATIS intent-classification dataset backed by a pandas dataframe."""

    def __init__(self, df, base_path, Sy_intent, type):
        self.df = df
        self.base_path = base_path
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        self.Sy_intent = Sy_intent
        self.type = type

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, self.type, row['id'] + '.wav')
        wav, sr = torchaudio.load(wav_path)
        wav = wav.squeeze(0)
        # map each slot value ('intent' only) to its integer label id
        label = [self.Sy_intent[slot][row[slot]] for slot in ['intent']]
        return (wav, torch.tensor(label).long())

    def collate_fn(self, samples):
        """Split (wav, label) pairs into parallel lists; wavs stay unpadded."""
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        return (wavs, labels)
|
class Identity(nn.Module):
    """Pass-through module: returns its input wrapped in a one-element list."""

    def __init__(self, config):
        super(Identity, self).__init__()

    def forward(self, feature, att_mask, head_mask):
        # list wrapper mirrors encoders that return all hidden layers
        return [feature]
|
class Mean(nn.Module):
    """Mean pooling over the leading valid frames of each sequence."""

    def __init__(self, out_dim):
        super(Mean, self).__init__()

    def forward(self, feature, att_mask):
        """Average each sequence's frames up to its masked boundary.

        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - [BxT]   attention-mask logits; negative entries mark padding
        """
        pooled = []
        for seq, mask in zip(feature, att_mask):
            # boundary = first position with a negative mask logit (inclusive)
            length = torch.nonzero(mask < 0, as_tuple=False)[0][0] + 1
            pooled.append(seq[:length].mean(dim=0))
        return torch.stack(pooled)
|
class SAP(nn.Module):
    """Self-attention pooling that honours an additive attention mask."""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """Pool [BxTxD] features into [BxD] utterance vectors.

        att_mask carries additive logits; large negative values suppress frames.
        """
        activated = self.act_fn(feature)
        return self.sap_layer(activated, att_mask)
|
class SelfAttentionPooling(nn.Module):
    """Self-Attention Pooling.

    Original paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf

    A single linear layer scores every frame; the softmax-normalised scores
    weight the frames into one utterance-level vector.
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep, att_mask):
        """Pool batch_rep (N, T, H) into utter_rep (N, H).

        att_mask: (N, T) additive logits added to the attention scores before
        the softmax, so padded frames can be pushed to near-zero weight.
        """
        scores = self.W(batch_rep).squeeze(-1) + att_mask
        weights = nn.functional.softmax(scores, dim=-1).unsqueeze(-1)
        return (batch_rep * weights).sum(dim=1)
|
class Model(nn.Module):
    """Encoder + pooling + linear classifier for utterance-level prediction."""

    def __init__(self, input_dim, agg_module, output_dim, config):
        super(Model, self).__init__()
        # NOTE: pooling module and encoder class are resolved by name from the
        # config via eval(); config files are assumed trusted.
        self.agg_method = eval(agg_module)(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)
        self.model = eval(config['module'])(Namespace(**config['hparams']))
        self.head_mask = [None] * config['hparams']['num_hidden_layers']

    def forward(self, features, att_mask):
        encoded = self.model(features, att_mask.unsqueeze(-1), head_mask=self.head_mask, output_all_encoded_layers=False)
        # pool the last hidden layer into one vector per utterance
        utterance_vector = self.agg_method(encoded[0], att_mask)
        return self.linear(utterance_vector)
|
class AudioSLUDataset(Dataset):
    """Snips-style SLU dataset: per-speaker mp3 audio with intent labels."""

    def __init__(self, df, base_path, Sy_intent, speaker_name):
        self.df = df
        self.base_path = base_path
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        self.Sy_intent = Sy_intent
        self.speaker_name = speaker_name
        # source clips ship at ORIGINAL_SAMPLE_RATE; resample once per clip
        self.resampler = torchaudio.transforms.Resample(ORIGINAL_SAMPLE_RATE, SAMPLE_RATE)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, 'audio_' + self.speaker_name, 'snips', row['u_id'] + '.mp3')
        wav, sr = torchaudio.load(wav_path)
        assert sr == ORIGINAL_SAMPLE_RATE
        wav = self.resampler(wav)
        wav = wav.squeeze(0)
        # map each slot value ('intent' only) to its integer label id
        label = [self.Sy_intent[slot][row[slot]] for slot in ['intent']]
        return (wav, torch.tensor(label).long())

    def collate_fn(self, samples):
        """Split (wav, label) pairs into parallel lists; wavs stay unpadded."""
        wavs = [w for w, _ in samples]
        labels = [l for _, l in samples]
        return (wavs, labels)
|
class Identity(nn.Module):
    """No-op encoder stand-in; returns the features as a one-element list."""

    def __init__(self, config, **kwargs):
        super(Identity, self).__init__()

    def forward(self, feature, att_mask, head_mask, **kwargs):
        # the single-element list mimics encoders that return all hidden layers
        return [feature]
|
class Mean(nn.Module):
    """Mean pooling over each sequence's valid frames."""

    def __init__(self, out_dim):
        super(Mean, self).__init__()

    def forward(self, feature, att_mask):
        """Average frames up to the masked boundary of every sequence.

        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - [BxT]   attention-mask logits; negative entries mark padding
        """
        batch_size = len(feature)
        means = []
        for idx in range(batch_size):
            # first index with a negative mask logit bounds the sequence (inclusive)
            valid_len = torch.nonzero(att_mask[idx] < 0, as_tuple=False)[0][0] + 1
            means.append(torch.mean(feature[idx][:valid_len], dim=0))
        return torch.stack(means)
|
class SAP(nn.Module):
    """Self-attention pooling wrapper that applies a Tanh then pools."""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """Pool [BxTxD] features into [BxD] vectors; att_mask holds additive logits."""
        return self.sap_layer(self.act_fn(feature), att_mask)
|
class SelfAttentionPooling(nn.Module):
    """Self-Attention Pooling.

    Original paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep, att_mask):
        """Pool batch_rep (N, T, H) into utter_rep (N, H).

        att_mask (N, T) holds additive logits applied to the per-frame scores
        before the softmax, letting callers suppress padded frames.
        """
        attn = self.W(batch_rep).squeeze(-1)
        attn = attn + att_mask
        attn = torch.softmax(attn, dim=-1)
        return torch.sum(batch_rep * attn.unsqueeze(-1), dim=1)
|
class Model(nn.Module):
    """Encoder + pooling + linear classifier emitting log-probabilities."""

    def __init__(self, input_dim, agg_module, output_dim, config):
        super(Model, self).__init__()
        # NOTE: pooling module and encoder class are resolved by name from the
        # config via eval(); config files are assumed trusted.
        self.agg_method = eval(agg_module)(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)
        self.model = eval(config['module'])(Namespace(**config['hparams']))
        self.head_mask = [None] * config['hparams']['num_hidden_layers']

    def forward(self, features, att_mask):
        encoded = self.model(features, att_mask.unsqueeze(-1), head_mask=self.head_mask, output_all_encoded_layers=False)
        # pool the last hidden layer into one vector per utterance
        utterance_vector = self.agg_method(encoded[0], att_mask)
        logits = self.linear(utterance_vector)
        return F.log_softmax(logits, dim=-1)
|
class CommonVoiceDataset(Dataset):
    """Common Voice dataset over pre-processed tab-separated split files.

    __getitem__ returns a bucket of (file_path, encoded_text) pairs when
    bucket_size > 1, otherwise a single pair.
    """

    def __init__(self, split, tokenizer, bucket_size, path, ascending=False, ratio=1.0, offset=0, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        for s in split:
            with open(s, 'r') as fp:
                rows = csv.reader(fp, delimiter='\t')
                # NOTE(review): file_list/text are re-initialised for every
                # file in `split`, so only the LAST file contributes samples —
                # confirm `split` is always a single file.
                (file_list, text) = ([], [])
                for (i, row) in enumerate(rows):
                    if (i == 0):
                        continue  # skip the header row
                    file_list.append(join(path, row[0]))
                    text.append(tokenizer.encode(row[1]))
        print(f'Found {len(file_list)} samples.')
        # optional deterministic sub-sampling: keep every (1/ratio)-th sample
        # starting at `offset`
        if (ratio < 1.0):
            print(f'Ratio = {ratio}, offset = {offset}')
            skip = int((1.0 / ratio))
            (file_list, text) = (file_list[offset::skip], text[offset::skip])
        total_len = 0.0
        # duration estimated from file size / 32000 bytes per second
        # (presumably 16 kHz 16-bit mono wav — TODO confirm)
        for f in file_list:
            total_len += (getsize(f) / 32000.0)
        print('Total audio len = {:.2f} mins = {:.2f} hours'.format((total_len / 60.0), (total_len / 3600.0)))
        (self.file_list, self.text) = (file_list, text)

    def __getitem__(self, index):
        if (self.bucket_size > 1):
            # clamp so a full bucket can always be taken starting at `index`
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])

    def __len__(self):
        return len(self.file_list)
|
def normalize(sent, language):
    """Normalise a transcript for `language`; return '' to reject the sentence.

    Applies NFKC normalisation, uppercasing, the module-level `translator`
    punctuation table, and whitespace squeezing, then language-specific
    filters that drop sentences containing out-of-language characters.
    """
    sent = unicodedata.normalize('NFKC', sent).upper()
    sent = sent.translate(translator)
    sent = re.sub(' +', ' ', sent)
    # CJK scripts use no word separator
    if language in ('zh-TW', 'zh-CN', 'ja'):
        sent = sent.replace(' ', '')
    # reject sentences containing ASCII letters (or apostrophes) in
    # non-latin-script languages
    if language in ('zh-TW', 'zh-CN', 'ja', 'ar', 'ru'):
        if any(c.encode('UTF-8').isalpha() or c == "'" for c in sent):
            return ''
    if language == 'zh-CN':
        if zhcn_exception.intersection(sent):
            return ''
    if language == 'es':
        if spanish_exception.intersection(sent):
            return ''
    if language == 'en':
        # English keeps only A-Z, space and apostrophe
        for c in sent:
            if not (('A' <= c <= 'Z') or c == ' ' or c == "'"):
                return ''
    return sent.strip()
|
def read_tsv(path, corpus_root, language, accent=None, hours=(- 1)):
    """Read a Common Voice tsv and return a list of normalised entries.

    Filters by accent (Spanish keeps 'mexicano'; English keeps `accent`),
    drops sentences rejected by normalize(), and optionally stops once the
    accumulated audio exceeds `hours` hours.

    Returns:
        list of dicts with keys 'path', 'sentence', 'accent', 'len' (seconds).
    """
    data_list = []
    total_len = 0
    with open(path, 'r') as fp:
        iterator = tqdm(enumerate(csv.reader(fp, delimiter='\t')))
        for i, row in iterator:
            if i == 0:
                continue  # header row
            # accent filters: column 7 holds the speaker accent
            if language == 'es' and row[7] != 'mexicano':
                continue
            if language == 'en' and row[7] != accent:
                continue
            audio = MP3(join(corpus_root, row[1]))
            secs = audio.info.length
            sent_normed = normalize(row[2], language)
            if sent_normed == '':
                continue  # rejected by normalisation
            data_list.append({'path': row[1], 'sentence': sent_normed, 'accent': row[7] if row[7] != '' else 'unk', 'len': secs})
            total_len += secs
            # stop early once the requested amount of audio is collected
            if hours > 0 and total_len / 3600.0 > hours:
                iterator.close()
                break
    print(f'Read {len(data_list)} files')
    print('Total {:.2f} hours'.format(total_len / 3600.0))
    return data_list
|
def write_tsv(data, out_path):
    """Write (path, sentence) rows as tab-separated values.

    The audio extension is rewritten from .mp3 to .wav to match the
    converted corpus layout.
    """
    with open(out_path, 'w') as fp:
        writer = csv.writer(fp, delimiter='\t')
        writer.writerow(['path', 'sentence'])
        writer.writerows((d['path'][:-3] + 'wav', d['sentence']) for d in data)
|
def write_txt(data, out_path):
    """Write one normalised sentence per line (e.g. for LM training)."""
    with open(out_path, 'w') as fp:
        fp.writelines(d['sentence'] + '\n' for d in data)
|
def main():
    """CLI entry: filter/normalize Common Voice splits into an output directory.

    For each of train/dev/test, reads the raw tsv, sorts utterances longest
    first when durations are present, and writes the processed tsv (plus a
    plain-text transcript file for train).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, help='Root of Common Voice 7.0 directory.')
    parser.add_argument('--lang', type=str, help='Language abbreviation.')
    parser.add_argument('--out', type=str, help='Path to output directory.')
    parser.add_argument('--accent', type=str, default='none', help='English accent')
    parser.add_argument('--hours', type=float, default=(-1), help='Maximum hours used.')
    args = parser.parse_args()
    os.makedirs(args.out, exist_ok=True)
    os.makedirs(join(args.out, args.lang), exist_ok=True)
    for s in ['train', 'dev', 'test']:
        data_list = read_tsv(join(args.root, args.lang, (s + '.tsv')), join(args.root, args.lang, 'clips'), args.lang, accent=args.accent, hours=args.hours)
        # Guard against an empty split: previously `data_list[0]` raised
        # IndexError when every row was filtered out.
        if data_list and data_list[0].get('len', -1) > 0:
            data_list = sorted(data_list, reverse=True, key=lambda x: x['len'])
        write_tsv(data_list, join(args.out, args.lang, (s + '.tsv')))
        if s == 'train':
            write_txt(data_list, join(args.out, args.lang, (s + '.txt')))
|
def read_processed_tsv(path):
    """Collect audio paths from a processed tsv, mapping wav names back to mp3."""
    file_list = []
    with open(path, 'r') as fp:
        for i, row in enumerate(csv.reader(fp, delimiter='\t')):
            if i == 0:
                continue  # skip header
            file_list.append(row[0][:-3] + 'mp3')
    return file_list
|
def main():
    """CLI entry: convert every mp3 listed in a processed tsv to 16 kHz wav."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, help='Directory of the dataset.')
    parser.add_argument('--tsv', type=str, help='Path to processed tsv file.')
    args = parser.parse_args()
    for name in tqdm(read_processed_tsv(args.tsv)):
        src = join(args.root, str(name))
        wav, sample_rate = torchaudio.load(src)
        # Resample the mono waveform to 16 kHz and save next to the source.
        resampled = resample(wav.squeeze(0).numpy(), sample_rate, 16000, res_type='kaiser_best')
        out = torch.FloatTensor(resampled).unsqueeze(0)
        torchaudio.save(src[:-3] + 'wav', out, 16000)
|
def parse_lexicon(line, tokenizer):
    """Parse one lexicon line ("WORD PH1 PH2 ...") into (word, [phonemes]).

    str.split() with no argument splits on any whitespace, so both tab- and
    space-separated lexicons are accepted. (The previous
    `line.replace('\t', ' ')` discarded its result — a no-op — and was removed.)

    Raises:
        AssertionError: if a phoneme is not in the tokenizer vocabulary.
    """
    word, *phonemes = line.split()
    for p in phonemes:
        assert p in tokenizer._vocab2idx, f'unknown phoneme {p!r} in lexicon line {line!r}'
    return (word, phonemes)
|
def read_text(file, word2phonemes, tokenizer):
    """Return the tokenized phoneme sequence for one LibriSpeech utterance.

    The chapter's `<spk>-<chap>.trans.txt` (sibling of `file`) lists
    `<utt-id> <transcript>` per line; the matching transcript is mapped
    word-by-word through `word2phonemes` and encoded. Re-opening the file per
    utterance is redundant but keeps the function thread-friendly.
    Returns None when the utterance id is absent.
    """
    trans_path = '-'.join(file.split('-')[:-1]) + '.trans.txt'
    utt_id = file.split('/')[-1].split('.')[0]
    with open(trans_path, 'r') as fp:
        for line in fp:
            if utt_id != line.split(' ')[0]:
                continue
            transcript = line[:-1].split(' ', 1)[1]
            phonemes = []
            for word in transcript.split():
                phonemes += word2phonemes[word]
            return tokenizer.encode(' '.join(phonemes))
|
class LibriPhoneDataset(Dataset):
    """LibriSpeech dataset yielding (flac path, phoneme-token sequence) pairs.

    A lexicon maps each word to a phoneme sequence; transcripts are converted
    word-by-word and utterances are sorted by token length for bucketing.
    """

    def __init__(self, split, tokenizer, bucket_size, path, lexicon, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Merge all lexicon files; a word may carry several pronunciations.
        word2phonemes_all = defaultdict(list)
        for lexicon_file in lexicon:
            with open(lexicon_file, 'r') as file:
                lines = [line.strip() for line in file.readlines()]
                for line in lines:
                    (word, phonemes) = parse_lexicon(line, tokenizer)
                    word2phonemes_all[word].append(phonemes)
        # Resolve ambiguous words deterministically: keep the first pronunciation.
        word2phonemes = {}
        for (word, phonemes_all) in word2phonemes_all.items():
            if (len(phonemes_all) > 1):
                print(f'[LibriPhone] - {len(phonemes_all)} of phoneme sequences found for {word}.')
                for (idx, phonemes) in enumerate(phonemes_all):
                    print(f'{idx}. {phonemes}')
            word2phonemes[word] = phonemes_all[0]
        print(f'[LibriPhone] - Taking the first phoneme sequences for a deterministic behavior.')
        # Gather every flac under each requested split directory.
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.flac'))
            assert (len(split_list) > 0), 'No data found @ {}'.format(join(path, s))
            file_list += split_list
        # Convert each utterance's transcript into phoneme token ids.
        text = []
        for f in tqdm(file_list, desc='word -> phonemes'):
            text.append(read_text(str(f), word2phonemes, tokenizer))
        # Sort by token-sequence length (descending unless ascending=True).
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])

    def __getitem__(self, index):
        if (self.bucket_size > 1):
            # Return a full bucket; clamp so the last bucket is full-sized.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])

    def __len__(self):
        return len(self.file_list)
|
def read_text(file):
    """Return the transcription of one LibriSpeech utterance, or None.

    Looks up the utterance id (wav/flac basename) in the chapter's sibling
    `<spk>-<chap>.trans.txt` file.
    """
    trans_path = '-'.join(file.split('-')[:-1]) + '.trans.txt'
    utt_id = file.split('/')[-1].split('.')[0]
    with open(trans_path, 'r') as fp:
        for line in fp:
            if utt_id == line.split(' ')[0]:
                # Drop the trailing newline, keep everything after the id.
                return line[:-1].split(' ', 1)[1]
|
class LibriDataset(Dataset):
    """LibriSpeech dataset yielding (flac path, token sequence) pairs,
    sorted by transcript length to support bucketed batching."""

    def __init__(self, split, tokenizer, bucket_size, path, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Gather every flac under each requested split directory.
        file_list = []
        for s in split:
            found = list(Path(join(path, s)).rglob('*.flac'))
            assert len(found) > 0, 'No data found @ {}'.format(join(path, s))
            file_list += found
        # Tokenize each utterance's transcription.
        text = []
        for f in tqdm(file_list, desc='Read text'):
            text.append(tokenizer.encode(read_text(str(f))))
        # Sort by token-sequence length (descending unless ascending=True).
        pairs = sorted(zip(file_list, text), reverse=not ascending, key=lambda p: len(p[1]))
        self.file_list, self.text = zip(*pairs)

    def __getitem__(self, index):
        if self.bucket_size > 1:
            # Return a full bucket; clamp so the last bucket is full-sized.
            index = min(len(self.file_list) - self.bucket_size, index)
            return [(f_path, txt) for f_path, txt in
                    zip(self.file_list[index:index + self.bucket_size],
                        self.text[index:index + self.bucket_size])]
        return (self.file_list[index], self.text[index])

    def __len__(self):
        return len(self.file_list)
|
class SnipsDataset(Dataset):
    """Snips SLU dataset: wavs filtered by speaker prefix, paired with plain
    or IOB slot-annotated transcripts, sorted by length for bucketing."""

    def __init__(self, split, tokenizer, bucket_size, path, num_workers=12, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Speaker subset is configured per split, e.g. kwargs['train_speakers'].
        self.speaker_list = (kwargs[f'{split}_speakers'] if (type(split) == str) else kwargs[f'{split[0]}_speakers'])
        # Slot tokenizers consume the IOB-annotated file; others the plain transcripts.
        transcripts_file = open(join(self.path, ('all.iob.snips.txt' if ('-slot' in tokenizer.token_type) else 'all-trans.txt'))).readlines()
        # Map utterance id -> transcript text.
        transcripts = {}
        for line in transcripts_file:
            line = line.strip().split(' ')
            index = line[0]
            sent = ' '.join(line[1:])
            transcripts[index] = sent
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.wav'))
            new_list = []
            uf = 0  # wavs with no transcript entry
            for i in trange(len(split_list), desc='checking files'):
                # Utterance id is the wav basename without its extension.
                uid = str(split_list[i]).split('/')[(- 1)].split('.wav', 1)[0].split('/')[(- 1)]
                if (uid in transcripts):
                    # Keep only the requested speakers (speaker is a prefix of the id).
                    for spk in self.speaker_list:
                        if (uid[:len(spk)] == spk):
                            new_list.append(split_list[i])
                            break
                else:
                    print(split_list[i], 'Not Found')
                    uf += 1
            print(('%d wav file with label not found in text file!' % uf))
            split_list = new_list
            print(f'loaded audio from {len(self.speaker_list)} speakers {str(self.speaker_list)} with {len(split_list)} examples.')
            assert (len(split_list) > 0), 'No data found @ {}'.format(join(path, s))
            file_list += split_list
        text = [transcripts[str(f).split('.wav', 1)[0].split('/')[(- 1)]] for f in file_list]
        text = [tokenizer.encode(txt) for txt in tqdm(text, desc='tokenizing')]
        # Sort by token-sequence length (descending unless ascending=True).
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])

    def __getitem__(self, index):
        if (self.bucket_size > 1):
            # Return a full bucket; clamp so the last bucket is full-sized.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])

    def __len__(self):
        return len(self.file_list)
|
def collect_audio_batch(batch, split, half_batch_size_wav_len=300000):
    """Collate [(audio_path, token_list), ...] into (waveforms, token arrays,
    file ids), sorted by waveform length (longest first).

    During training, a batch whose first utterance exceeds
    `half_batch_size_wav_len` samples is halved to bound memory use.
    """
    def read_wav(filepath):
        wav, _ = torchaudio.load(filepath)
        return wav.reshape(-1)

    # Bucketed datasets yield [[(f, txt), ...]]; unwrap one nesting level.
    if type(batch[0]) is not tuple:
        batch = batch[0]
    first_len = read_wav(str(batch[0][0])).size(0)
    if split == 'train' and first_len > half_batch_size_wav_len and len(batch) > 1:
        batch = batch[:len(batch) // 2]
    files, feats, feat_lens, texts = [], [], [], []
    with torch.no_grad():
        for item in batch:
            files.append(str(item[0]).split('/')[-1].split('.')[0])
            wav = read_wav(str(item[0])).numpy()
            feats.append(wav)
            feat_lens.append(len(wav))
            texts.append(torch.LongTensor(item[1]).numpy())
    # Sort every parallel list by waveform length, descending.
    ordered = sorted(zip(feat_lens, files, feats, texts), reverse=True, key=lambda x: x[0])
    feat_lens, files, feats, texts = zip(*ordered)
    return (feats, texts, files)
|
def create_dataset(split, tokenizer, name, bucketing, batch_size, **kwargs):
    """Instantiate the corpus-specific Dataset and the loader batch size."""
    corpus = name.lower()
    if corpus == 'librispeech':
        from .corpus.librispeech import LibriDataset as Dataset
    elif corpus == 'snips':
        from .corpus.snips import SnipsDataset as Dataset
    elif corpus == 'libriphone':
        from .corpus.libriphone import LibriPhoneDataset as Dataset
    elif corpus in {'common_voice', 'sbcsae'}:
        from .corpus.common_voice import CommonVoiceDataset as Dataset
    else:
        raise NotImplementedError
    if split != 'train':
        # Evaluation: fixed batch size, no bucketing.
        return (Dataset(kwargs[split], tokenizer, 1, **kwargs), EVAL_BATCH_SIZE)
    kwargs['ratio'] = 1.0
    kwargs['offset'] = 0
    # With bucketing the Dataset yields whole buckets, so the loader batch is 1.
    loader_bs = 1 if bucketing else batch_size
    bucket_size = batch_size if bucketing else 1
    return (Dataset(kwargs['train'], tokenizer, bucket_size, **kwargs), loader_bs)
|
def load_dataset(split, tokenizer, corpus):
    """Build a (possibly distributed) DataLoader for the given split."""
    num_workers = corpus.pop('num_workers', 12)
    dataset, loader_bs = create_dataset(split, tokenizer, num_workers=num_workers, **corpus)
    collate = partial(collect_audio_batch, split=split)
    if split != 'train':
        return DataLoader(dataset, batch_size=loader_bs, shuffle=False,
                          collate_fn=collate, num_workers=num_workers)
    # Shuffle only when no DistributedSampler is in charge of sharding.
    sampler = DistributedSampler(dataset) if is_initialized() else None
    return DataLoader(dataset, batch_size=loader_bs, shuffle=(sampler is None),
                      sampler=sampler, collate_fn=collate, num_workers=num_workers)
|
class DownstreamExpert(nn.Module):
    """CTC-based ASR downstream expert.

    Handles downstream-specific operations: dataloader creation, the CTC
    forward pass, metric computation, and logging/checkpoint selection.
    """

    def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.expdir = expdir
        self.upstream_dim = upstream_dim
        self.corpus = downstream_expert['corpus']
        self.tokenizer = load_text_encoder(**downstream_expert['text'])
        modelrc = downstream_expert['model']
        # Project upstream features to the model's input dimension.
        self.projector = nn.Linear(upstream_dim, modelrc['project_dim'])
        model_select = downstream_expert['model']['select']
        # NOTE(review): eval() on a config-provided class name — fine for
        # trusted configs, unsafe on untrusted input.
        self.model = eval(model_select)(modelrc['project_dim'], self.tokenizer.vocab_size, upstream_rate=upstream_rate, **modelrc.get(model_select, {}))
        # CTC blank is reused as the tokenizer's pad id.
        self.objective = nn.CTCLoss(blank=self.tokenizer.pad_idx, zero_infinity=modelrc['zero_infinity'])
        self.save_best_on = downstream_expert.get('save_best_on', 'dev')
        self.metrics = downstream_expert['metric']
        self.metric_higher_better = downstream_expert['metric_higher_better']
        # Initialize best_score at the worst possible value for the metric direction.
        self.register_buffer('best_score', (torch.ones(1) * (0 if self.metric_higher_better else (1 << 31))))

    def _get_task_name(self):
        # Logging namespace, e.g. 'ctc-librispeech'.
        return f"ctc-{self.corpus['name'].lower()}"

    def get_dataloader(self, split):
        return load_dataset(split, self.tokenizer, self.corpus)

    def forward(self, split, features, labels, filenames, records, **kwargs):
        """One CTC training/eval step; accumulates loss and decoded text in `records`."""
        device = features[0].device
        labels = [torch.LongTensor(label) for label in labels]
        features_len = torch.IntTensor([len(feat) for feat in features])
        labels_len = torch.IntTensor([len(label) for label in labels])
        features = pad_sequence(features, batch_first=True)
        labels = pad_sequence(labels, batch_first=True, padding_value=self.tokenizer.pad_idx).to(device=device)
        features = self.projector(features)
        (logits, log_probs_len) = self.model(features, features_len)
        log_probs = nn.functional.log_softmax(logits, dim=(- 1))
        # CTCLoss expects (T, N, C); hence the transpose.
        loss = self.objective(log_probs.transpose(0, 1), labels, log_probs_len, labels_len)
        records['loss'].append(loss.item())
        # Greedy CTC decoding: argmax, collapse repeats, drop blank/eos.
        pred_tokens = log_probs.argmax(dim=(- 1))
        filtered_tokens = []
        for pred_token in pred_tokens:
            pred_token = pred_token.unique_consecutive()
            filtered_token = [token for token in pred_token.tolist() if ((token != self.tokenizer.pad_idx) and (token != self.tokenizer.eos_idx))]
            filtered_tokens.append(filtered_token)
        hypothesis = [self.tokenizer.decode(h) for h in filtered_tokens]
        groundtruth = [self.tokenizer.decode(g.tolist()) for g in labels]
        records['hypothesis'] += hypothesis
        records['groundtruth'] += groundtruth
        records['filename'] += filenames
        return loss

    def log_records(self, split, records, logger, global_step, **kwargs):
        """Compute metrics, log them, track the best checkpoint, dump ark files.

        Returns the list of checkpoint names to save (empty if no new best).
        """
        loss = torch.FloatTensor(records['loss']).mean().item()
        results = {'loss': loss}
        # NOTE(review): eval() resolves metric names (cer/wer/...) from config.
        for metric in self.metrics:
            results[metric] = eval(metric)(hypothesis=records['hypothesis'], groundtruth=records['groundtruth'])
        save_names = []
        for (key, value) in results.items():
            print(f'{split} {key}: {value}')
            logger.add_scalar(f'{self._get_task_name()}/{split}-{key}', value, global_step=global_step)
            # Only the first configured metric drives checkpoint selection.
            if (key == self.metrics[0]):
                save_criterion = ((value > self.best_score) if self.metric_higher_better else (value < self.best_score))
                if ((split in self.save_best_on) and save_criterion):
                    self.best_score = (torch.ones(1) * value)
                    save_names.append(f'{split}-best.ckpt')
        if (('test' in split) or ('dev' in split)):
            hyp_ark = open(os.path.join(self.expdir, f'{split}-hyp.ark'), 'w')
            ref_ark = open(os.path.join(self.expdir, f'{split}-ref.ark'), 'w')
            for (filename, hyp, ref) in zip(records['filename'], records['hypothesis'], records['groundtruth']):
                # NOTE(review): `filename` is unpacked but never written — the
                # ark lines start with the literal "(unknown)". Kaldi-style ark
                # lines usually begin with the utterance id; confirm intent.
                hyp_ark.write(f'''(unknown) {hyp}
''')
                ref_ark.write(f'''(unknown) {ref}
''')
            hyp_ark.close()
            ref_ark.close()
        return save_names
|
def cer(hypothesis, groundtruth, **kwargs):
    """Character error rate: total edit distance over total reference length."""
    total_err = 0.0
    total_ref = 0
    for hyp, ref in zip(hypothesis, groundtruth):
        total_err += float(ed.eval(hyp, ref))
        total_ref += len(ref)
    return total_err / total_ref
|
def per(*args, **kwargs):
    """Phoneme error rate: computed exactly like WER over phoneme tokens."""
    return wer(*args, **kwargs)
|
def wer(hypothesis, groundtruth, **kwargs):
    """Word error rate: edit distance on space-split tokens, summed over pairs."""
    total_err = 0.0
    total_ref = 0
    for hyp, ref in zip(hypothesis, groundtruth):
        hyp_words = hyp.split(' ')
        ref_words = ref.split(' ')
        total_err += float(ed.eval(hyp_words, ref_words))
        total_ref += len(ref_words)
    return total_err / total_ref
|
def clean(ref):
    """Strip slot boundary markers: 'B-xxx ' prefixes and ' E-xxx' suffixes."""
    without_begin = re.sub('B\\-(\\S+) ', '', ref)
    return re.sub(' E\\-(\\S+)', '', without_begin)
|
def parse(hyp, ref):
    """Extract slot annotations from hypothesis/reference strings.

    Returns (ref_text, hyp_text, ref_slots, hyp_slots): the de-annotated
    texts plus ';'-joined 'value:slot_name' strings ('' when no slots).
    """
    slot_pattern = re.compile('B\\-(\\S+) (.+?) E\\-\\1')
    hyp = re.sub(' +', ' ', hyp)
    ref = re.sub(' +', ' ', ref)
    hyp_matches = slot_pattern.findall(hyp)
    ref_matches = slot_pattern.findall(ref)
    ref_slots = ';'.join(':'.join([value, name]) for (name, value) in ref_matches)
    if len(hyp_matches) > 0:
        # Hypothesis slot values may themselves contain stray markers; strip them.
        hyp_slots = ';'.join(':'.join([clean(value), name]) for (name, value) in hyp_matches)
    else:
        hyp_slots = ''
    ref = clean(ref)
    hyp = clean(hyp)
    return (ref, hyp, ref_slots, hyp_slots)
|
def slot_type_f1(hypothesis, groundtruth, **kwargs):
    """Average per-utterance F1 over slot *types* (presence only, not values)."""
    def to_dict(slot_str):
        # 'v1:k1;v2:k2' -> {slot_type: [values...]}; '' -> {}
        d = {}
        if slot_str.split(';')[0] != '':
            for entry in slot_str.split(';'):
                v, k = entry.split(':')
                d.setdefault(k, []).append(v)
        return d

    scores = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slots, hyp_slots = parse(hyp, ref)
        ref_dict = to_dict(ref_slots)
        hyp_dict = to_dict(hyp_slots)
        if not ref_dict and not hyp_dict:
            # Both empty: treat as a perfect match.
            f1 = 1.0
        elif not ref_dict or not hyp_dict:
            f1 = 0.0
        else:
            recall = sum(1.0 for k in ref_dict if k in hyp_dict) / len(ref_dict)
            precision = sum(1.0 for k in hyp_dict if k in ref_dict) / len(hyp_dict)
            f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
        scores.append(f1)
    return sum(scores) / len(scores)
|
def slot_value_cer(hypothesis, groundtruth, **kwargs):
    """CER over slot values: each reference value is paired with the
    closest-by-CER hypothesis value of the same slot type ('' if absent)."""
    def to_dict(slot_str):
        # 'v1:k1;v2:k2' -> {slot_type: [values...]}; '' -> {}
        d = {}
        if slot_str.split(';')[0] != '':
            for entry in slot_str.split(';'):
                v, k = entry.split(':')
                d.setdefault(k, []).append(v)
        return d

    paired_hyps = []
    paired_refs = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slots, hyp_slots = parse(hyp, ref)
        ref_dict = to_dict(ref_slots)
        hyp_dict = to_dict(hyp_slots)
        for slot, ref_values in ref_dict.items():
            for ref_v in ref_values:
                if slot not in hyp_dict:
                    # Missing slot type: score against the empty string.
                    paired_refs.append(ref_v)
                    paired_hyps.append('')
                    continue
                # First hypothesis value achieving the lowest CER (below 100).
                best_v, best_cer = '', 100
                for cand in hyp_dict[slot]:
                    cand_cer = cer([cand], [ref_v])
                    if cand_cer < best_cer:
                        best_v, best_cer = cand, cand_cer
                paired_refs.append(ref_v)
                paired_hyps.append(best_v)
    return cer(paired_hyps, paired_refs)
|
def slot_value_wer(hypothesis, groundtruth, **kwargs):
    """WER over slot values: each reference value is paired with the
    closest-by-WER hypothesis value of the same slot type ('' if absent)."""
    def to_dict(slot_str):
        # 'v1:k1;v2:k2' -> {slot_type: [values...]}; '' -> {}
        d = {}
        if slot_str.split(';')[0] != '':
            for entry in slot_str.split(';'):
                v, k = entry.split(':')
                d.setdefault(k, []).append(v)
        return d

    paired_hyps = []
    paired_refs = []
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slots, hyp_slots = parse(hyp, ref)
        ref_dict = to_dict(ref_slots)
        hyp_dict = to_dict(hyp_slots)
        for slot, ref_values in ref_dict.items():
            for ref_v in ref_values:
                if slot not in hyp_dict:
                    # Missing slot type: score against the empty string.
                    paired_refs.append(ref_v)
                    paired_hyps.append('')
                    continue
                # First hypothesis value achieving the lowest WER (below 100).
                best_v, best_wer = '', 100
                for cand in hyp_dict[slot]:
                    cand_wer = wer([cand], [ref_v])
                    if cand_wer < best_wer:
                        best_v, best_wer = cand, cand_wer
                paired_refs.append(ref_v)
                paired_hyps.append(best_v)
    return wer(paired_hyps, paired_refs)
|
def slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot, **kwargs):
    """Micro-averaged slot F1 (in percent) over exact value matches.

    Per slot type: a reference value is a TP when the hypothesis holds an
    identical value for that type; otherwise it adds an FN, plus an FP when
    the hypothesis has (different) values for the type. When
    `loop_over_all_slot` is True, hypothesis-only slot types contribute FPs
    too ("full" variant); otherwise they are ignored ("part" variant).

    Removed unused accumulators (`test_case`, `TPs`, `FNs`, `FPs`) from the
    original; returns 0.0 instead of dividing by zero when no slots exist.
    """
    def to_dict(slot_str):
        # 'v1:k1;v2:k2' -> {slot_type: [values...]}; '' -> {}
        d = {}
        if slot_str.split(';')[0] != '':
            for entry in slot_str.split(';'):
                v, k = entry.split(':')
                d.setdefault(k, []).append(v)
        return d

    slot2counts = {}  # slot type -> [TP, FN, FP]
    for hyp, ref in zip(hypothesis, groundtruth):
        _, _, ref_slots, hyp_slots = parse(hyp, ref)
        ref_dict = to_dict(ref_slots)
        hyp_dict = to_dict(hyp_slots)
        slots = list(ref_dict.keys())
        if loop_over_all_slot:
            slots += [s for s in hyp_dict if s not in ref_dict]
        for slot in slots:
            tp = fn = fp = 0
            if slot not in ref_dict:
                # Hypothesis-only slot type: every value is a false positive.
                fp += len(hyp_dict[slot])
            else:
                for ref_v in ref_dict[slot]:
                    if slot not in hyp_dict:
                        fn += 1
                    elif ref_v in hyp_dict[slot]:
                        tp += 1
                    else:
                        fn += 1
                        fp += 1
            counts = slot2counts.setdefault(slot, [0, 0, 0])
            counts[0] += tp
            counts[1] += fn
            counts[2] += fp
    all_tp = sum(c[0] for c in slot2counts.values())
    all_fn = sum(c[1] for c in slot2counts.values())
    all_fp = sum(c[2] for c in slot2counts.values())
    denominator = (2 * all_tp) + all_fp + all_fn
    # Previously raised ZeroDivisionError when no slots appeared at all.
    return (100.0 * 2 * all_tp) / denominator if denominator else 0.0
|
def slot_edit_f1_full(hypothesis, groundtruth, **kwargs):
    """Slot edit F1 counting hypothesis-only slot types as false positives."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=True, **kwargs)
|
def slot_edit_f1_part(hypothesis, groundtruth, **kwargs):
    """Slot edit F1 restricted to slot types present in the reference."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=False, **kwargs)
|
class _BaseTextEncoder(abc.ABC):
@abc.abstractmethod
def encode(self, s):
raise NotImplementedError
@abc.abstractmethod
def decode(self, ids, ignore_repeat=False):
raise NotImplementedError
@abc.abstractproperty
def vocab_size(self):
raise NotImplementedError
@abc.abstractproperty
def token_type(self):
raise NotImplementedError
@abc.abstractclassmethod
def load_from_file(cls, vocab_file):
raise NotImplementedError
@property
def pad_idx(self):
return 0
@property
def eos_idx(self):
return 1
@property
def unk_idx(self):
return 2
def __repr__(self):
return '<{} vocab_size={}>'.format(type(self).__name__, self.vocab_size)
|
class CharacterTextEncoder(_BaseTextEncoder):
    """Character-level tokenizer; ids 0-2 are reserved for <pad>/<eos>/<unk>."""

    def __init__(self, vocab_list):
        # Special tokens occupy the first three ids.
        self._vocab_list = ['<pad>', '<eos>', '<unk>'] + vocab_list
        self._vocab2idx = {char: i for i, char in enumerate(self._vocab_list)}

    def encode(self, s):
        stripped = s.strip('\r\n ')
        return [self.vocab_to_idx(char) for char in stripped] + [self.eos_idx]

    def decode(self, idxs, ignore_repeat=False):
        chars = []
        for pos, idx in enumerate(idxs):
            if idx == self.eos_idx:
                break
            if idx == self.pad_idx:
                continue
            if ignore_repeat and pos > 0 and idx == idxs[pos - 1]:
                continue  # collapse repeats (CTC-style decoding)
            chars.append(self.idx_to_vocab(idx))
        return ''.join(chars)

    @classmethod
    def load_from_file(cls, vocab_file):
        """Build the encoder from a file listing one vocabulary entry per line."""
        with open(vocab_file, 'r') as f:
            vocab_list = [line.strip('\r\n') for line in f]
        return cls(vocab_list)

    @property
    def vocab_size(self):
        return len(self._vocab_list)

    @property
    def token_type(self):
        return 'character'

    def vocab_to_idx(self, vocab):
        # Unknown characters map to <unk>.
        return self._vocab2idx.get(vocab, self.unk_idx)

    def idx_to_vocab(self, idx):
        return self._vocab_list[idx]
|
class CharacterTextSlotEncoder(_BaseTextEncoder):
    """Character tokenizer with extra ids for IOB slot boundary tokens.

    Slot tokens ('B-xxx'/'E-xxx') are assigned ids after the character vocab.
    """

    def __init__(self, vocab_list, slots):
        self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
        self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
        self.slots = slots
        # Slot ids follow directly after the character vocabulary.
        self.slot2id = {self.slots[i]: (i + len(self._vocab_list)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self._vocab_list)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, s):
        # Input format: '<sos> w1 w2 ... <eos>\t<sos> iob1 iob2 ... <eos>'
        # (first/last elements are dropped by the [1:-1] slices).
        (sent, iobs) = s.strip('\r\n ').split('\t')
        sent = sent.split(' ')[1:(- 1)]
        iobs = iobs.split(' ')[1:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Skip bare punctuation tokens.
            # NOTE(review): if the final word is punctuation, this `continue`
            # also skips the eos append below — confirm that never happens
            # in practice.
            if (wrd in '?!.,;-'):
                continue
            if (wrd == '&'):
                wrd = 'AND'
            # Emit B-slot before the first word of a slot span.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            tokens += [self.vocab_to_idx(v) for v in wrd]
            # Emit E-slot after the last word of a slot span.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
            if (i == (len(sent) - 1)):
                tokens.append(self.eos_idx)
            else:
                tokens.append(self.vocab_to_idx(' '))
        return tokens

    def decode(self, idxs, ignore_repeat=False):
        vocabs = []
        for (t, idx) in enumerate(idxs):
            v = self.idx_to_vocab(idx)
            # Skip pad and (optionally) CTC-style repeated tokens.
            if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            elif (idx == self.eos_idx):
                break
            else:
                vocabs.append(v)
        return ''.join(vocabs)

    @classmethod
    def load_from_file(cls, vocab_file, slots_file):
        """Build the encoder from a character vocab file and a slots list file
        (first slots line is a header and is skipped)."""
        with open(vocab_file, 'r') as f:
            vocab_list = [line.strip('\r\n') for line in f]
        org_slots = open(slots_file).read().split('\n')
        slots = []
        for slot in org_slots[1:]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(vocab_list, slots)

    @property
    def vocab_size(self):
        return (len(self._vocab_list) + len(self.slots))

    @property
    def token_type(self):
        return 'character-slot'

    def vocab_to_idx(self, vocab):
        # Unknown characters map to <unk>.
        return self._vocab2idx.get(vocab, self.unk_idx)

    def idx_to_vocab(self, idx):
        idx = int(idx)
        if (idx < len(self._vocab_list)):
            return self._vocab_list[idx]
        else:
            # Slot tokens render with a space on the word-facing side.
            token = self.id2slot[idx]
            if (token[0] == 'B'):
                return (token + ' ')
            elif (token[0] == 'E'):
                return (' ' + token)
            else:
                raise ValueError('id2slot get:', token)
|
class SubwordTextEncoder(_BaseTextEncoder):
    """Sentencepiece-backed subword tokenizer (pad=0, eos=1, unk=2 enforced)."""

    def __init__(self, spm):
        # The special-token ids must match this project's fixed convention.
        if spm.pad_id() != 0 or spm.eos_id() != 1 or spm.unk_id() != 2:
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm

    def encode(self, s):
        return self.spm.encode_as_ids(s)

    def decode(self, idxs, ignore_repeat=False):
        kept = []
        for pos, idx in enumerate(idxs):
            if idx == self.eos_idx:
                break
            if idx == self.pad_idx:
                continue
            if ignore_repeat and pos > 0 and idx == idxs[pos - 1]:
                continue  # collapse repeats (CTC-style decoding)
            kept.append(idx)
        return self.spm.decode_ids(kept)

    @classmethod
    def load_from_file(cls, filepath):
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        # Always append <eos> when encoding.
        spm.set_encode_extra_options(':eos')
        return cls(spm)

    @property
    def vocab_size(self):
        return len(self.spm)

    @property
    def token_type(self):
        return 'subword'
|
class SubwordTextSlotEncoder(_BaseTextEncoder):
    """Sentencepiece subword tokenizer with extra ids for IOB slot tokens.

    Slot ids follow directly after the sentencepiece vocabulary.
    """

    def __init__(self, spm, slots):
        # The special-token ids must match this project's fixed convention.
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm
        self.slots = slots
        self.slot2id = {self.slots[i]: (i + len(self.spm)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self.spm)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, s):
        # Input format: '<sos> w1 w2 ... <eos>\t<sos> iob1 iob2 ... <eos>'.
        (sent, iobs) = s.strip().split('\t')
        sent = sent.split(' ')[1:(- 1)]
        iobs = iobs.split(' ')[1:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Skip bare punctuation tokens.
            if (wrd in '?!.,;-'):
                continue
            if (wrd == '&'):
                wrd = 'AND'
            # Emit B-slot before the first word of a slot span.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            # Word pieces; [:-1] drops the per-word <eos> appended by ':eos'.
            tokens += self.spm.encode_as_ids(wrd)[:(- 1)]
            # Emit E-slot after the last word of a slot span.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
        # Terminate the sequence with eos (id 1) if not already present.
        if (tokens[(- 1)] != 1):
            tokens.append(1)
        return tokens

    def decode(self, idxs, ignore_repeat=False):
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        sent = []
        ret = []
        for (i, x) in enumerate(crop_idx):
            if (x >= len(self.spm)):
                # NOTE(review): decode_ids returns a str here, and `str + list`
                # raises TypeError; `sent` is also never cleared after a slot
                # token. This branch looks broken — verify before relying on it.
                ret.append((self.spm.decode_ids(sent) + [self.id2slot[x]]))
            else:
                sent.append(x)
        return ret

    @classmethod
    def load_from_file(cls, filepath, slots_file):
        """Build the encoder from a sentencepiece model and a slots list file
        (first slots line is a header and is skipped)."""
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        # Always append <eos> when encoding.
        spm.set_encode_extra_options(':eos')
        org_slots = open(slots_file).read().split('\n')
        slots = []
        for slot in org_slots[1:]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(spm, slots)

    @property
    def vocab_size(self):
        return (len(self.spm) + len(self.slots))

    @property
    def token_type(self):
        return 'subword-slot'
|
class WordTextEncoder(CharacterTextEncoder):
    """Word-level tokenizer: splits on single spaces and reuses the character
    encoder's vocabulary plumbing."""

    def encode(self, s):
        words = s.strip('\r\n ').split(' ')
        return [self.vocab_to_idx(w) for w in words] + [self.eos_idx]

    def decode(self, idxs, ignore_repeat=False):
        words = []
        for pos, idx in enumerate(idxs):
            if idx == self.eos_idx:
                break
            if idx == self.pad_idx:
                continue
            if ignore_repeat and pos > 0 and idx == idxs[pos - 1]:
                continue  # collapse repeats (CTC-style decoding)
            words.append(self.idx_to_vocab(idx))
        return ' '.join(words)

    @property
    def token_type(self):
        return 'word'
|
class BertTextEncoder(_BaseTextEncoder):
    """Wraps a HuggingFace BERT tokenizer, shifting ids down by BERT_FIRST_IDX
    so this project's special ids (pad=0, eos=1, unk=2) remain free.

    https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/tokenization_bert.py
    """

    def __init__(self, tokenizer):
        self._tokenizer = tokenizer
        self._tokenizer.pad_token = '<pad>'
        self._tokenizer.eos_token = '<eos>'
        self._tokenizer.unk_token = '<unk>'

    def encode(self, s):
        reduced_idx = []
        for idx in self._tokenizer.encode(s):
            r_idx = idx - BERT_FIRST_IDX
            # Ids at or below BERT_FIRST_IDX are reserved; map them to <unk>.
            # (Replaces the old `assert` inside a bare `except`, which both
            # swallowed unrelated errors and silently broke under `python -O`.)
            reduced_idx.append(r_idx if r_idx > 0 else self.unk_idx)
        reduced_idx.append(self.eos_idx)
        return reduced_idx

    def decode(self, idxs, ignore_repeat=False):
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                # Shift back into BERT's native id space before decoding.
                crop_idx.append((idx + BERT_FIRST_IDX))
        return self._tokenizer.decode(crop_idx)

    @property
    def vocab_size(self):
        return ((BERT_LAST_IDX - BERT_FIRST_IDX) + 1)

    @property
    def token_type(self):
        return 'bert'

    @classmethod
    def load_from_file(cls, vocab_file):
        from pytorch_transformers import BertTokenizer
        return cls(BertTokenizer.from_pretrained(vocab_file))

    @property
    def pad_idx(self):
        return 0

    @property
    def eos_idx(self):
        return 1

    @property
    def unk_idx(self):
        return 2
|
def load_text_encoder(mode, vocab_file, slots_file=None):
    """Factory: build the text encoder matching `mode` from its vocab file(s)."""
    if mode == 'character':
        return CharacterTextEncoder.load_from_file(vocab_file)
    if mode == 'character-slot':
        return CharacterTextSlotEncoder.load_from_file(vocab_file, slots_file)
    if mode == 'subword':
        return SubwordTextEncoder.load_from_file(vocab_file)
    if mode == 'subword-slot':
        return SubwordTextSlotEncoder.load_from_file(vocab_file, slots_file)
    if mode == 'word':
        return WordTextEncoder.load_from_file(vocab_file)
    if mode.startswith('bert-'):
        # e.g. 'bert-base-uncased': mode doubles as the pretrained model name.
        return BertTextEncoder.load_from_file(mode)
    raise NotImplementedError('`{}` is not yet supported.'.format(mode))
|
class Model(nn.Module):
    """Frame-wise classifier: optional LSTM encoder plus a linear head.

    With rnn_layers == 0 the model degenerates to a per-frame linear layer.
    """

    def __init__(self, input_dim, output_class_num, rnn_layers, hidden_size, **kwargs):
        super(Model, self).__init__()
        self.use_rnn = rnn_layers > 0
        if self.use_rnn:
            self.rnn = nn.LSTM(input_dim, hidden_size, num_layers=rnn_layers, batch_first=True)
            self.linear = nn.Linear(hidden_size, output_class_num)
        else:
            self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        features = features.float()
        if not self.use_rnn:
            return self.linear(features)
        hidden, _ = self.rnn(features)
        return self.linear(hidden)
|
def get_wav_paths(data_dirs):
    """Map wav basenames (no extension) to their paths truncated to start at
    the 'Session...' directory component."""
    wav_dict = {}
    for full_path in find_files(data_dirs):
        name = splitext(basename(full_path))[0]
        # Keep the path relative to the IEMOCAP session directory.
        wav_dict[name] = full_path[full_path.find('Session'):]
    return wav_dict
|
def preprocess(data_dirs, paths, out_path):
    """Build IEMOCAP metadata json for the given session directories.

    Scans each session's label txt files, keeps the four emotion classes
    ('exc' merged into 'hap'), and writes {'labels', 'meta_data'} to `out_path`.
    """
    meta_data = []
    for session in paths:
        wav_paths = get_wav_paths(path_join(data_dirs, session, WAV_DIR_PATH))
        label_dir = path_join(data_dirs, session, LABEL_DIR_PATH)
        label_files = [p for p in os.listdir(label_dir) if splitext(p)[1] == '.txt']
        for label_file in label_files:
            with open(path_join(label_dir, label_file)) as f:
                for line in f:
                    # Annotation lines start with a '[start - end]' time span.
                    if line[0] != '[':
                        continue
                    fields = list(filter(None, re.split('[\t\n]', line)))
                    # Keep the 4-class subset; 'exc' (excited) folds into 'hap'.
                    if fields[2] not in ['neu', 'hap', 'ang', 'sad', 'exc']:
                        continue
                    if fields[1] not in wav_paths:
                        continue
                    meta_data.append({
                        'path': wav_paths[fields[1]],
                        'label': fields[2].replace('exc', 'hap'),
                        'speaker': re.split('_', basename(wav_paths[fields[1]]))[0],
                    })
    data = {'labels': {'neu': 0, 'hap': 1, 'ang': 2, 'sad': 3}, 'meta_data': meta_data}
    with open(out_path, 'w') as f:
        json.dump(data, f)
|
def main(data_dir):
    """Create leave-one-session-out train/test metadata for each IEMOCAP session."""
    sessions = sorted(p for p in os.listdir(data_dir) if p[:7] == 'Session')
    out_dir = os.path.join(data_dir, 'meta_data')
    os.makedirs(out_dir, exist_ok=True)
    for i, session in enumerate(sessions):
        os.makedirs(f'{out_dir}/{session}', exist_ok=True)
        # Train on every other session; test on the held-out one.
        preprocess(data_dir, sessions[:i] + sessions[i + 1:], path_join(f'{out_dir}/{session}', 'train_meta_data.json'))
        preprocess(data_dir, [session], path_join(f'{out_dir}/{session}', 'test_meta_data.json'))
|
class IEMOCAPDataset(Dataset):
    """IEMOCAP emotion dataset: yields (waveform ndarray, label id, utterance stem).

    Waveforms are resampled to SAMPLE_RATE; with pre_load=True all audio is
    decoded into memory up front.
    """

    def __init__(self, data_dir, meta_path, pre_load=True):
        self.data_dir = data_dir
        self.pre_load = pre_load
        with open(meta_path, 'r') as f:
            self.data = json.load(f)
        self.class_dict = self.data['labels']
        self.idx2emotion = {value: key for (key, value) in self.class_dict.items()}
        self.class_num = len(self.class_dict)
        self.meta_data = self.data['meta_data']
        # Probe the first file's sample rate to configure a shared resampler.
        # Assumes every file in the corpus shares that rate.
        (_, origin_sr) = torchaudio.load(path_join(self.data_dir, self.meta_data[0]['path']))
        self.resampler = Resample(origin_sr, SAMPLE_RATE)
        if self.pre_load:
            self.wavs = self._load_all()

    def _load_wav(self, path):
        # Load, resample, and drop the channel dimension.
        (wav, _) = torchaudio.load(path_join(self.data_dir, path))
        wav = self.resampler(wav).squeeze(0)
        return wav

    def _load_all(self):
        # Eagerly decode every utterance (memory for speed trade-off).
        wavforms = []
        for info in self.meta_data:
            wav = self._load_wav(info['path'])
            wavforms.append(wav)
        return wavforms

    def __getitem__(self, idx):
        label = self.meta_data[idx]['label']
        label = self.class_dict[label]
        if self.pre_load:
            wav = self.wavs[idx]
        else:
            wav = self._load_wav(self.meta_data[idx]['path'])
        return (wav.numpy(), label, Path(self.meta_data[idx]['path']).stem)

    def __len__(self):
        return len(self.meta_data)
|
def collate_fn(samples):
    """Transpose a batch of (wav, label, name) tuples into per-field iterables."""
    transposed = zip(*samples)
    return transposed
|
class DownstreamExpert(nn.Module):
    """IEMOCAP emotion-recognition downstream expert.

    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log.
    Builds leave-one-session-out train/dev/test splits from pre-generated
    metadata and wraps one of the CNN classifiers defined in this file.
    """
    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        # upstream_dim: feature size produced by the upstream model.
        # downstream_expert: the 'downstream_expert' section of the config.
        # expdir: directory where logs and predictions are written.
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        DATA_ROOT = self.datarc['root']
        meta_data = self.datarc['meta_data']
        # The test fold may come from the config or the CLI downstream variant;
        # defaults to 'fold1'.
        self.fold = (self.datarc.get('test_fold') or kwargs.get('downstream_variant'))
        if (self.fold is None):
            self.fold = 'fold1'
        print(f'[Expert] - using the testing fold: "{self.fold}". Ps. Use -o config.downstream_expert.datarc.test_fold=fold2 to change test_fold in config.')
        # 'foldN' maps onto the 'SessionN' metadata generated by preprocess()/main().
        train_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'train_meta_data.json')
        print(f'[Expert] - Training path: {train_path}')
        test_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'test_meta_data.json')
        print(f'[Expert] - Testing path: {test_path}')
        dataset = IEMOCAPDataset(DATA_ROOT, train_path, self.datarc['pre_load'])
        # Deterministic train/dev split of the training sessions (seeded below).
        trainlen = int(((1 - self.datarc['valid_ratio']) * len(dataset)))
        lengths = [trainlen, (len(dataset) - trainlen)]
        torch.manual_seed(0)
        (self.train_dataset, self.dev_dataset) = random_split(dataset, lengths)
        self.test_dataset = IEMOCAPDataset(DATA_ROOT, test_path, self.datarc['pre_load'])
        # NOTE(review): eval() on a config-supplied string; assumes 'select'
        # names a class defined in this module (e.g. DeepModel).
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=dataset.class_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        # Buffer so the best dev score persists through checkpoint save/load.
        self.register_buffer('best_score', torch.zeros(1))
    def get_downstream_name(self):
        # e.g. 'fold1' -> 'emotion1'; used as the task name in logging.
        return self.fold.replace('fold', 'emotion')
    def _get_train_dataloader(self, dataset):
        # Use a DistributedSampler when torch.distributed is initialized.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)
    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)
    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)
    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)
    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)
    def get_dataloader(self, mode):
        # Dispatches to get_train/dev/test_dataloader by mode name.
        return eval(f'self.get_{mode}_dataloader')()
    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Forward one batch: project upstream features, classify, record metrics."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['loss'].append(loss.item())
        records['filename'] += filenames
        # Human-readable emotion strings for the prediction/truth dump files.
        records['predict'] += [self.test_dataset.idx2emotion[idx] for idx in predicted_classid.cpu().tolist()]
        records['truth'] += [self.test_dataset.idx2emotion[idx] for idx in labels.cpu().tolist()]
        return loss
    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log averaged metrics, track the best dev accuracy, dump predictions."""
        save_names = []
        for key in ['acc', 'loss']:
            values = records[key]
            average = torch.FloatTensor(values).mean().item()
            logger.add_scalar(f'emotion-{self.fold}/{mode}-{key}', average, global_step=global_step)
            with open((Path(self.expdir) / 'log.log'), 'a') as f:
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    # New best on dev: remember the score and request a checkpoint.
                    if ((mode == 'dev') and (average > self.best_score)):
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        if (mode in ['dev', 'test']):
            # Dump per-utterance predictions and ground truth for offline scoring.
            with open((Path(self.expdir) / f'{mode}_{self.fold}_predict.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['predict'])]
                file.writelines(line)
            with open((Path(self.expdir) / f'{mode}_{self.fold}_truth.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['truth'])]
                file.writelines(line)
        return save_names
|
class SelfAttentionPooling(nn.Module):
    """Self-attention pooling layer.

    Collapses a frame sequence (N, T, H) into one utterance vector (N, H)
    via a learned softmax-weighted average over time.

    Reference: "Self-Attention Encoding and Pooling for Speaker Recognition",
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # One scalar attention logit per frame.
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask=None):
        """Pool batch_rep (N, T, H) into (N, H).

        att_mask holds additive logits (e.g. a large negative value on padded
        frames) so masked positions get ~zero attention weight.
        """
        logits = self.W(batch_rep).squeeze(-1)
        if att_mask is not None:
            # Additive mask applied before the softmax over time.
            logits = att_mask + logits
        weights = self.softmax(logits, dim=-1).unsqueeze(-1)
        return (batch_rep * weights).sum(dim=1)
|
class CNNSelfAttention(nn.Module):
    """Three-layer 1-D CNN encoder followed by self-attention pooling and an MLP head."""

    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(CNNSelfAttention, self).__init__()
        # Temporal downsampling first (AvgPool1d with stride=pooling), then
        # three conv blocks with dropout in between.
        self.model_seq = nn.Sequential(
            nn.AvgPool1d(kernel_size, pooling, padding),
            nn.Dropout(p=dropout),
            nn.Conv1d(input_dim, hidden_dim, kernel_size, padding=padding),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size, padding=padding),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size, padding=padding),
        )
        self.pooling = SelfAttentionPooling(hidden_dim)
        self.out_layer = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_class_num),
        )

    def forward(self, features, att_mask):
        # Conv1d expects (N, C, T); features arrive as (N, T, C).
        hidden = self.model_seq(features.transpose(1, 2)).transpose(1, 2)
        pooled = self.pooling(hidden, att_mask).squeeze(-1)
        return self.out_layer(pooled)
|
class FCN(nn.Module):
    """AlexNet-style fully convolutional classifier over upstream features.

    Note: hidden_dim, kernel_size, padding and pooling are accepted only for
    signature parity with the sibling models; the channel sizes and kernels
    below are fixed.
    """
    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(FCN, self).__init__()
        # Fixed conv/LRN/ReLU/max-pool stack ending in 256 channels.
        self.model_seq = nn.Sequential(nn.Conv1d(input_dim, 96, 11, stride=4, padding=5), nn.LocalResponseNorm(96), nn.ReLU(), nn.MaxPool1d(3, 2), nn.Dropout(p=dropout), nn.Conv1d(96, 256, 5, padding=2), nn.LocalResponseNorm(256), nn.ReLU(), nn.MaxPool1d(3, 2), nn.Dropout(p=dropout), nn.Conv1d(256, 384, 3, padding=1), nn.LocalResponseNorm(384), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(384, 384, 3, padding=1), nn.LocalResponseNorm(384), nn.ReLU(), nn.Conv1d(384, 256, 3, padding=1), nn.LocalResponseNorm(256), nn.MaxPool1d(3, 2))
        self.pooling = SelfAttentionPooling(256)
        self.out_layer = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, output_class_num))
    def forward(self, features, att_mask):
        # Conv1d expects (N, C, T).
        features = features.transpose(1, 2)
        features = self.model_seq(features)
        out = features.transpose(1, 2)
        # NOTE(review): att_mask is not forwarded to the pooling layer — the
        # strided convs/pools change the time length so the mask would no
        # longer align; confirm that ignoring it here is intended.
        out = self.pooling(out).squeeze((- 1))
        predicted = self.out_layer(out)
        return predicted
|
class DeepNet(nn.Module):
    """Deep 1-D CNN emotion classifier with small fixed channel sizes.

    Note: hidden_dim, kernel_size, padding and pooling are accepted only for
    signature parity with the sibling models; the layer shapes below are fixed.
    """
    def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs):
        super(DeepNet, self).__init__()
        # Fixed conv stack: 10 -> 40 -> 80 channels with max-pooling and
        # non-affine batch norm between groups.
        self.model_seq = nn.Sequential(nn.Conv1d(input_dim, 10, 9), nn.ReLU(), nn.Conv1d(10, 10, 5), nn.ReLU(), nn.Conv1d(10, 10, 3), nn.MaxPool1d(3, 1), nn.BatchNorm1d(10, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(10, 40, 3), nn.ReLU(), nn.Conv1d(40, 40, 3), nn.MaxPool1d(2, 1), nn.BatchNorm1d(40, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(40, 80, 10), nn.ReLU(), nn.Conv1d(80, 80, 1), nn.MaxPool1d(2, 1), nn.BatchNorm1d(80, affine=False), nn.ReLU(), nn.Dropout(p=dropout), nn.Conv1d(80, 80, 1))
        self.pooling = SelfAttentionPooling(80)
        self.out_layer = nn.Sequential(nn.Linear(80, 30), nn.ReLU(), nn.Linear(30, output_class_num))
    def forward(self, features, att_mask):
        # Conv1d expects (N, C, T).
        features = features.transpose(1, 2)
        features = self.model_seq(features)
        out = features.transpose(1, 2)
        # NOTE(review): att_mask is not forwarded to the pooling layer — the
        # unpadded convolutions shorten the sequence so the mask would no
        # longer align; confirm that ignoring it here is intended.
        out = self.pooling(out).squeeze((- 1))
        predicted = self.out_layer(out)
        return predicted
|
class DeepModel(nn.Module):
    """Wrapper that instantiates one of the CNN classifiers by name and builds its attention mask."""

    def __init__(self, input_dim, output_dim, model_type, pooling, **kwargs):
        super(DeepModel, self).__init__()
        self.pooling = pooling
        # NOTE(review): eval() on a config-supplied string; assumes model_type
        # names a class defined in this module (e.g. FCN, DeepNet). Avoid with
        # untrusted configs.
        self.model = eval(model_type)(input_dim=input_dim, output_class_num=output_dim, pooling=pooling, **kwargs)

    def forward(self, features, features_len):
        # One mask entry per pooled frame; padded positions receive a large
        # negative additive logit so they vanish after the pooling softmax.
        mask_rows = [torch.ones(math.ceil(l / self.pooling)) for l in features_len]
        attention_mask = pad_sequence(mask_rows, batch_first=True)
        attention_mask = (1.0 - attention_mask) * -100000.0
        predicted = self.model(features, attention_mask.to(features.device))
        return (predicted, None)
|
class SeparationDataset(Dataset):
    """Source-separation dataset backed by Kaldi-style ``wav.scp`` files.

    Args:
        data_dir (str): prepared data directory containing one sub-directory
            per condition, each holding a ``wav.scp`` file.
        rate (int): expected audio sample rate.
        src (list(str)): single input condition, e.g. ['mix_clean'].
        tgt (list(str)): one or more target conditions, e.g. ['s1', 's2'] for
            clean two-speaker separation. LibriMix offers several options;
            see https://github.com/JorisCos/LibriMix for details.
        n_fft (int): length of the windowed signal after padding with zeros.
        hop_length (int): number of audio samples between adjacent STFT columns.
        win_length (int): length of window for each frame.
        window (str): type of window function, only supports the Hann window.
        center (bool): whether to pad input on both sides so that the t-th
            frame is centered at time t * hop_length.

    The STFT related parameters are the same as librosa's.
    """

    def __init__(self, data_dir, rate=16000, src=['mix_clean'], tgt=['s1', 's2'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        # Exactly one source condition and at least one target condition.
        # Fixed: the previous `len(self.tgt) == 1` assertion contradicted the
        # documented two-speaker default tgt=['s1', 's2'] and the n_srcs loops.
        assert (len(self.src) == 1) and (len(self.tgt) >= 1)
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single', 'noisy', 'clean']
        # Build utterance name -> {condition: wav path} from each wav.scp.
        reco2path = {}
        for cond in (src + tgt):
            assert (cond in cond_list)
            scp_path = '{}/{}/wav.scp'.format(self.data_dir, cond)
            assert os.path.exists(scp_path)
            with open(scp_path, 'r') as fh:
                for line in fh.readlines():
                    (uttname, path) = line.strip('\n').split()
                    if (uttname not in reco2path):
                        reco2path[uttname] = {}
                    reco2path[uttname][cond] = path
        self.reco2path = reco2path
        # Sorted for a deterministic utterance order across runs.
        self.recolist = sorted(self.reco2path.keys())

    def __len__(self):
        return len(self.recolist)

    def __getitem__(self, i):
        """Load the i-th utterance and compute STFT features.

        Returns:
            reco (str): name of the utterance.
            src_samp (ndarray): audio samples for the source [T, ].
            src_feat (ndarray): STFT feature map for the source [T1, D].
            tgt_samp_list (list(ndarray)): audio samples for each target.
            tgt_feat_list (list(ndarray)): STFT feature map for each target.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=SAMPLE_RATE)
        assert (rate == self.rate)
        # Transpose so the feature map is (frames, bins).
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=SAMPLE_RATE)
            assert (rate == self.rate)
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        """Pad a batch (sorted by decreasing source length) into dense tensors.

        Returns:
            source_wav_list (list(tensor)): unpadded source waveforms.
            uttname_list (list(str)): utterance names.
            source_attr (dict): padded magnitude/phase/stft for the source.
            source_wav (tensor): padded source waveforms [bs, max_T].
            target_attr (dict): per-target padded magnitude/phase lists.
            target_wav_list (list(tensor)): padded waveforms per target.
            feat_length (tensor): STFT frame count per utterance.
            wav_length (tensor): sample count per utterance.
        """
        # Sort longest-first so downstream packing/padding is consistent.
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        source_attr['magnitude'] = pad_sequence(mix_magnitude_list, batch_first=True)
        source_attr['phase'] = pad_sequence(mix_phase_list, batch_first=True)
        source_attr['stft'] = pad_sequence(mix_stft_list, batch_first=True)
        target_attr = {'magnitude': [], 'phase': []}
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            target_attr['magnitude'].append(pad_sequence(tgt_magnitude_list, batch_first=True))
            target_attr['phase'].append(pad_sequence(tgt_phase_list, batch_first=True))
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for source separation.

    An (optionally bidirectional) RNN encoder followed by one linear head per
    speaker; each head emits a (batch, T, num_bins) mask through the chosen
    non-linearity.

    Args:
        input_dim (int): feature dimension of the input sequence.
        num_bins (int): number of frequency bins per output mask.
        rnn (str): recurrent cell type, one of 'rnn', 'lstm', 'gru'.
        num_spks (int): number of speakers (output heads).
        num_layers, hidden_size, dropout, bidirectional: RNN hyper-parameters.
        non_linear (str): output activation, one of 'relu', 'sigmoid', 'tanh'.

    Raises:
        ValueError: on an unsupported `non_linear` or `rnn` value.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if (non_linear not in ['relu', 'sigmoid', 'tanh']):
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if (rnn not in ['RNN', 'LSTM', 'GRU']):
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # One linear mask head per speaker; double width for bidirectional RNNs.
        self.linear = torch.nn.ModuleList([torch.nn.Linear(((hidden_size * 2) if bidirectional else hidden_size), num_bins) for _ in range(self.num_spks)])
        # Fixed: torch.nn.functional.sigmoid/tanh are deprecated; use
        # torch.sigmoid/torch.tanh (matches the sibling SepRNN in this file).
        self.non_linear = {'relu': torch.nn.functional.relu, 'sigmoid': torch.sigmoid, 'tanh': torch.tanh}[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Return a list with one mask tensor per speaker.

        Args:
            x (PackedSequence): packed, padded input batch.
            train (bool): when False, each mask is flattened to (-1, num_bins).
        """
        assert isinstance(x, PackedSequence)
        (x, _) = self.rnn(x)
        (x, len_x) = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        m = []
        for linear in self.linear:
            y = self.non_linear(linear(x))
            if (not train):
                y = y.view((- 1), self.num_bins)
            m.append(y)
        return m
|
def main():
    """Write Kaldi-style wav.scp/utt2spk/spk2utt files for each LibriMix condition.

    Reads `args` (tgt_dir, src_dir, sample_rate, mode, part) from the enclosing
    script's argument parser.

    Raises:
        ValueError: if the output directory already exists.
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        # Fixed: the '{}' placeholder was never filled in before.
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        filelist = sorted(f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if (not os.path.exists(cond_dir)):
            os.makedirs(cond_dir)
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Fixed: str.strip('.wav') strips the character SET {'.','w','a','v'}
                # from both ends and corrupts names like 'wav1.wav' -> '1';
                # splitext removes only the extension.
                uttname = os.path.splitext(f)[0]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                # No speaker labels here, so each utterance is its own speaker.
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def main():
    """Write Kaldi-style wav.scp/utt2spk/spk2utt files for the VCTK-noisy corpus.

    Reads `args` (tgt_dir, src_dir, sample_rate, part) from the enclosing
    script's argument parser. Speakers p226 and p287 are excluded from the
    train and dev splits.

    Raises:
        ValueError: if the output directory exists or `args.part` is unknown.
    """
    output_dir = '{}/wav{}/{}'.format(args.tgt_dir, args.sample_rate, args.part)
    if os.path.exists(output_dir):
        # Fixed: the '{}' placeholder was never filled in before.
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    if ((args.part == 'train') or (args.part == 'dev')):
        dset = 'trainset_28spk_wav_16k'
    elif (args.part == 'test'):
        dset = 'testset_wav_16k'
    else:
        # Fixed: an unknown part previously fell through to a NameError on `dset`.
        raise ValueError('Unsupported part: {}'.format(args.part))
    for cond in ['clean', 'noisy']:
        wav_dir = '{}/{}_{}'.format(args.src_dir, cond, dset)
        filelist = sorted(f for f in os.listdir(wav_dir) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if (not os.path.exists(cond_dir)):
            os.makedirs(cond_dir)
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Fixed: str.strip('.wav') strips the character SET {'.','w','a','v'}
                # from both ends and corrupts names ending/starting with those
                # characters; splitext removes only the extension.
                uttname = os.path.splitext(f)[0]
                # Hold out speakers p226/p287 from the train and dev splits.
                if uttname.startswith(('p226', 'p287')) and (args.part in ('train', 'dev')):
                    continue
                wav_scp_file.write('{} {}/{}\n'.format(uttname, wav_dir, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
class SeparationDataset(Dataset):
    """Speech-enhancement dataset (noisy -> clean) backed by Kaldi-style wav.scp files."""
    def __init__(self, data_dir, rate=16000, src=['noisy'], tgt=['clean'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        super(SeparationDataset, self).__init__()
        '\n Args:\n data_dir (str):\n prepared data directory\n\n rate (int):\n audio sample rate\n\n src and tgt (list(str)):\n the input and desired output.\n\n n_fft (int):\n length of the windowed signal after padding with zeros.\n\n hop_length (int):\n number of audio samples between adjacent STFT columns.\n\n win_length (int):\n length of window for each frame\n\n window (str):\n type of window function, only support Hann window now\n\n center (bool):\n whether to pad input on both sides so that the\n t-th frame is centered at time t * hop_length\n\n The STFT related parameters are the same as librosa.\n '
        self.data_dir = data_dir
        self.rate = rate
        self.src = src
        self.tgt = tgt
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        # Number of desired output streams (1 for enhancement: ['clean']).
        self.n_srcs = len(self.tgt)
        # Enhancement maps exactly one source condition to one target condition.
        assert ((len(self.src) == 1) and (len(self.tgt) == 1))
        cond_list = ['noisy', 'clean']
        # Map utterance name -> {condition: wav path}, read from each wav.scp.
        reco2path = {}
        for cond in (src + tgt):
            assert (cond in cond_list)
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                content = fh.readlines()
            for line in content:
                line = line.strip('\n')
                (uttname, path) = line.split()
                if (uttname not in reco2path):
                    reco2path[uttname] = {}
                reco2path[uttname][cond] = path
        self.reco2path = reco2path
        self.recolist = list(self.reco2path.keys())
        # Sort for a deterministic utterance order across runs.
        self.recolist.sort()
    def __len__(self):
        return len(self.recolist)
    def __getitem__(self, i):
        """Load the i-th utterance and compute STFT features for source and target(s)."""
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        (src_samp, rate) = librosa.load(src_path, sr=SAMPLE_RATE)
        assert (rate == self.rate)
        # Transpose so the feature map is (frames, bins).
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=SAMPLE_RATE)
            assert (rate == self.rate)
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        '\n reco (str):\n name of the utterance\n\n src_sample (ndarray):\n audio samples for the source [T, ]\n\n src_feat (ndarray):\n the STFT feature map for the source with shape [T1, D]\n\n tgt_samp_list (list(ndarray)):\n list of audio samples for the targets\n\n tgt_feat_list (list(ndarray)):\n list of STFT feature map for the targets\n '
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)
    def collate_fn(self, batch):
        """Pad a batch (sorted by decreasing source length) into dense tensors."""
        # Sort longest-first so downstream packing/padding is consistent.
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        mix_magnitude = pad_sequence(mix_magnitude_list, batch_first=True)
        mix_phase = pad_sequence(mix_phase_list, batch_first=True)
        mix_stft = pad_sequence(mix_stft_list, batch_first=True)
        source_attr['magnitude'] = mix_magnitude
        source_attr['phase'] = mix_phase
        source_attr['stft'] = mix_stft
        target_attr = {}
        target_attr['magnitude'] = []
        target_attr['phase'] = []
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_magnitude = pad_sequence(tgt_magnitude_list, batch_first=True)
            tgt_phase = pad_sequence(tgt_phase_list, batch_first=True)
            target_attr['magnitude'].append(tgt_magnitude)
            target_attr['phase'].append(tgt_phase)
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        '\n source_wav_list (list(tensor)):\n list of audio samples for the source\n\n uttname_list (list(str)):\n list of utterance names\n\n source_attr (dict):\n dictionary containing magnitude and phase information for the sources\n\n source_wav (tensor):\n padded version of source_wav_list, with size [bs, max_T]\n\n target_attr (dict):\n dictionary containing magnitude and phase information for the targets\n\n feat_length (tensor):\n length of the STFT feature for each utterance\n\n wav_length (tensor):\n number of samples in each utterance\n '
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
|
class SepRNN(torch.nn.Module):
    """Recurrent mask-estimation network for speech enhancement/separation.

    An (optionally bidirectional) RNN encoder followed by one linear head per
    speaker; each head emits a (batch, T, num_bins) mask through the chosen
    non-linearity.

    Args:
        input_dim (int): feature dimension of the input sequence.
        num_bins (int): number of frequency bins per output mask.
        rnn (str): recurrent cell type, one of 'rnn', 'lstm', 'gru'.
        num_spks (int): number of speakers (output heads).
        num_layers, hidden_size, dropout, bidirectional: RNN hyper-parameters.
        non_linear (str): output activation: 'relu', 'sigmoid', 'tanh' or 'none'.

    Raises:
        ValueError: on an unsupported `non_linear` or `rnn` value.
    """

    def __init__(self, input_dim, num_bins, rnn='lstm', num_spks=2, num_layers=3, hidden_size=896, dropout=0.0, non_linear='relu', bidirectional=True):
        super(SepRNN, self).__init__()
        if (non_linear not in ['relu', 'sigmoid', 'tanh', 'none']):
            raise ValueError('Unsupported non-linear type:{}'.format(non_linear))
        self.num_spks = num_spks
        rnn = rnn.upper()
        if (rnn not in ['RNN', 'LSTM', 'GRU']):
            raise ValueError('Unsupported rnn type: {}'.format(rnn))
        self.rnn = getattr(torch.nn, rnn)(input_dim, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.drops = torch.nn.Dropout(p=dropout)
        # One linear mask head per speaker; double width for bidirectional RNNs.
        self.linear = torch.nn.ModuleList([torch.nn.Linear(((hidden_size * 2) if bidirectional else hidden_size), num_bins) for _ in range(self.num_spks)])
        # Fixed: torch.nn.functional.tanh is deprecated; use torch.tanh
        # (torch.sigmoid was already used here).
        self.non_linear = {'relu': torch.nn.functional.relu, 'sigmoid': torch.sigmoid, 'tanh': torch.tanh, 'none': torch.nn.Identity()}[non_linear]
        self.num_bins = num_bins

    def forward(self, x, train=True):
        """Return a list with one mask tensor per speaker.

        Args:
            x (PackedSequence): packed, padded input batch.
            train (bool): when False, each mask is flattened to (-1, num_bins).
        """
        assert isinstance(x, PackedSequence)
        (x, _) = self.rnn(x)
        (x, len_x) = pad_packed_sequence(x, batch_first=True)
        x = self.drops(x)
        m = []
        for linear in self.linear:
            y = self.non_linear(linear(x))
            if (not train):
                y = y.view((- 1), self.num_bins)
            m.append(y)
        return m
|
def main():
    """Write Kaldi-style wav.scp/utt2spk/spk2utt files for each LibriMix condition.

    Reads `args` (tgt_dir, src_dir, sample_rate, mode, part) from the enclosing
    script's argument parser.

    Raises:
        ValueError: if the output directory already exists.
    """
    output_dir = '{}/wav{}/{}/{}'.format(args.tgt_dir, args.sample_rate, args.mode, args.part)
    if os.path.exists(output_dir):
        # Fixed: the '{}' placeholder was never filled in before.
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    wav_dir = '{}/wav{}/{}/{}'.format(args.src_dir, args.sample_rate, args.mode, args.part)
    assert os.path.exists(wav_dir)
    for cond in ['s1', 's2', 'mix_clean', 'mix_both', 'mix_single', 'noise']:
        filelist = sorted(f for f in os.listdir('{}/{}'.format(wav_dir, cond)) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if (not os.path.exists(cond_dir)):
            os.makedirs(cond_dir)
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Fixed: str.strip('.wav') strips the character SET {'.','w','a','v'}
                # from both ends and corrupts names like 'wav1.wav' -> '1';
                # splitext removes only the extension.
                uttname = os.path.splitext(f)[0]
                wav_scp_file.write('{} {}/{}/{}\n'.format(uttname, wav_dir, cond, f))
                # No speaker labels here, so each utterance is its own speaker.
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
def main():
    """Write Kaldi-style wav.scp/utt2spk/spk2utt files for the VCTK-noisy corpus.

    Reads `args` (tgt_dir, src_dir, sample_rate, part) from the enclosing
    script's argument parser. Speakers p226 and p287 are excluded from the
    train and dev splits.

    Raises:
        ValueError: if the output directory exists or `args.part` is unknown.
    """
    output_dir = '{}/wav{}/{}'.format(args.tgt_dir, args.sample_rate, args.part)
    if os.path.exists(output_dir):
        # Fixed: the '{}' placeholder was never filled in before.
        raise ValueError('Warning: {} already exists, please check!'.format(output_dir))
    os.makedirs(output_dir)
    if ((args.part == 'train') or (args.part == 'dev')):
        dset = 'trainset_28spk_wav_16k'
    elif (args.part == 'test'):
        dset = 'testset_wav_16k'
    else:
        # Fixed: an unknown part previously fell through to a NameError on `dset`.
        raise ValueError('Unsupported part: {}'.format(args.part))
    for cond in ['clean', 'noisy']:
        wav_dir = '{}/{}_{}'.format(args.src_dir, cond, dset)
        filelist = sorted(f for f in os.listdir(wav_dir) if f.endswith('.wav'))
        cond_dir = '{}/{}'.format(output_dir, cond)
        if (not os.path.exists(cond_dir)):
            os.makedirs(cond_dir)
        with open('{}/wav.scp'.format(cond_dir), 'w') as wav_scp_file, open('{}/utt2spk'.format(cond_dir), 'w') as utt2spk_file:
            for f in filelist:
                # Fixed: str.strip('.wav') strips the character SET {'.','w','a','v'}
                # from both ends and corrupts names ending/starting with those
                # characters; splitext removes only the extension.
                uttname = os.path.splitext(f)[0]
                # Hold out speakers p226/p287 from the train and dev splits.
                if uttname.startswith(('p226', 'p287')) and (args.part in ('train', 'dev')):
                    continue
                wav_scp_file.write('{} {}/{}\n'.format(uttname, wav_dir, f))
                utt2spk_file.write('{} {}\n'.format(uttname, uttname))
        wav_scp_file.close()
        utt2spk_file.close()
        shutil.copyfile('{}/utt2spk'.format(cond_dir), '{}/spk2utt'.format(cond_dir))
    return 0
|
class RandomDataset(Dataset):
    """Toy dataset that yields random-length random waveforms with random class labels."""

    def __init__(self, **kwargs):
        # Fixed number of pseudo classes for the example task.
        self.class_num = 48

    def __getitem__(self, idx):
        num_samples = random.randint(EXAMPLE_WAV_MIN_SEC * SAMPLE_RATE, EXAMPLE_WAV_MAX_SEC * SAMPLE_RATE)
        wav = torch.randn(num_samples)
        label = random.randint(0, self.class_num - 1)
        return (wav, label)

    def __len__(self):
        return EXAMPLE_DATASET_SIZE

    def collate_fn(self, samples):
        """Split a batch of (wav, label) pairs into parallel lists."""
        wavs = [wav for (wav, _) in samples]
        labels = [label for (_, label) in samples]
        return (wavs, labels)
|
class DownstreamExpert(nn.Module):
'\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n '
def __init__(self, upstream_dim, upstream_rate, downstream_expert, expdir, **kwargs):
"\n Args:\n upstream_dim: int\n Different upstream will give different representation dimension\n You might want to first project them to the same dimension\n\n upstream_rate: int\n 160: for upstream with 10 ms per frame\n 320: for upstream with 20 ms per frame\n \n downstream_expert: dict\n The 'downstream_expert' field specified in your downstream config file\n eg. downstream/example/config.yaml\n\n expdir: string\n The expdir from command-line argument, you should save all results into\n this directory, like some logging files.\n\n **kwargs: dict\n All the arguments specified by the argparser in run_downstream.py\n and all the other fields in config.yaml, in case you need it.\n \n Note1. Feel free to add new argument for __init__ as long as it is\n a command-line argument or a config field. You can check the constructor\n code in downstream/runner.py\n "
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
self.train_dataset = RandomDataset(**self.datarc)
self.dev_dataset = RandomDataset(**self.datarc)
self.test_dataset = RandomDataset(**self.datarc)
self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
self.model = Model(output_class_num=self.train_dataset.class_num, **self.modelrc)
self.objective = nn.CrossEntropyLoss()
self.register_buffer('best_score', torch.zeros(1))
def get_dataloader(self, split, epoch: int=0):
"\n Args:\n split: string\n 'train'\n will always be called before the training loop\n\n 'dev', 'test', or more\n defined by the 'eval_dataloaders' field in your downstream config\n these will be called before the evaluation loops during the training loop\n\n Return:\n a torch.utils.data.DataLoader returning each batch in the format of:\n\n [wav1, wav2, ...], your_other_contents1, your_other_contents2, ...\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. directly loaded by torchaudio\n "
if (split == 'train'):
return self._get_train_dataloader(self.train_dataset, epoch)
elif (split == 'dev'):
return self._get_eval_dataloader(self.dev_dataset)
elif (split == 'test'):
return self._get_eval_dataloader(self.test_dataset)
def _get_train_dataloader(self, dataset, epoch: int):
from s3prl.utility.data import get_ddp_sampler
sampler = get_ddp_sampler(dataset, epoch)
return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def _get_eval_dataloader(self, dataset):
return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def forward(self, split, features, your_other_contents1, records, **kwargs):
"\n Args:\n split: string\n 'train'\n when the forward is inside the training loop\n\n 'dev', 'test' or more\n when the forward is inside the evaluation loop\n\n features:\n list of unpadded features [feat1, feat2, ...]\n each feat is in torch.FloatTensor and already\n put in the device assigned by command-line args\n\n your_other_contents1, ... :\n in the order defined by your dataloader (dataset + collate_fn)\n these are all in cpu, and you can move them to the same device\n as features\n\n records:\n defaultdict(list), by appending contents into records,\n these contents can be averaged and logged on Tensorboard\n later by self.log_records (also customized by you)\n\n Note1. downstream/runner.py will call self.log_records\n 1. every `log_step` during training\n 2. once after evalute the whole dev/test dataloader\n\n Note2. `log_step` is defined in your downstream config\n eg. downstream/example/config.yaml\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n a single scalar in torch.FloatTensor\n "
features = pad_sequence(features, batch_first=True)
features = self.connector(features)
predicted = self.model(features)
utterance_labels = your_other_contents1
labels = torch.LongTensor(utterance_labels).to(features.device)
loss = self.objective(predicted, labels)
predicted_classid = predicted.max(dim=(- 1)).indices
records['loss'].append(loss.item())
records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
return loss
def log_records(self, split, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
    """Average every recorded metric, log it to Tensorboard, and return the
    checkpoint names to save (dev-best on a new best accuracy).

    Returns:
        list of checkpoint filenames; empty when nothing should be saved.
    """
    to_save = []
    for metric_name, metric_values in records.items():
        mean_value = torch.FloatTensor(metric_values).mean().item()
        logger.add_scalar(f'example/{split}-{metric_name}', mean_value, global_step=global_step)
        new_best = (split == 'dev') and (metric_name == 'acc') and (mean_value > self.best_score)
        if new_best:
            # stored as a 1-element tensor, matching the original bookkeeping
            self.best_score = torch.ones(1) * mean_value
            to_save.append(f'{split}-best.ckpt')
    return to_save
|
class Model(nn.Module):
    """Minimal downstream probe: mean-pool over time, then one linear layer."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        # single projection from the pooled feature to class logits
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        # features: (batch, time, input_dim) -> temporal average -> logits
        time_averaged = features.mean(dim=1)
        return self.linear(time_averaged)
|
class FluentCommandsDataset(Dataset):
    """Fluent Speech Commands dataset yielding (waveform, slot labels, utt id)."""

    def __init__(self, df, base_path, Sy_intent):
        self.df = df                # dataframe with 'path' plus slot columns
        self.base_path = base_path  # root directory of the wav files
        # kept for parity with the original recipe (not enforced here)
        self.max_length = (SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC)
        self.Sy_intent = Sy_intent  # slot -> value -> integer id mapping

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, row.path)
        wav, sr = torchaudio.load(wav_path)
        wav = wav.squeeze(0)
        # encode the three intent slots into one integer vector
        label = [self.Sy_intent[slot][row[slot]] for slot in ['action', 'object', 'location']]
        return wav.numpy(), np.array(label), Path(wav_path).stem

    def collate_fn(self, samples):
        # transpose a batch of sample tuples into tuples of batches
        return zip(*samples)
|
def get_downstream_model(input_dim, output_dim, config):
    """Instantiate the downstream model class named by config['select'].

    The optional sub-dict config[config['select']] supplies extra kwargs.

    NOTE(review): `eval` resolves the class name from the (presumably trusted)
    yaml config; it would execute arbitrary expressions on untrusted input.
    """
    selected = config['select']
    model_cls = eval(selected)
    extra_conf = config.get(selected, {})
    return model_cls(input_dim, output_dim, **extra_conf)
|
class FrameLevel(nn.Module):
    """Frame-wise head: optional MLP hidden stack followed by a linear layer."""

    def __init__(self, input_dim, output_dim, hiddens=None, activation='ReLU', **kwargs):
        super().__init__()
        layers = []
        current_dim = input_dim
        for hidden_dim in (hiddens or []):
            layers.append(nn.Linear(current_dim, hidden_dim))
            layers.append(getattr(nn, activation)())
            current_dim = hidden_dim
        self.hiddens = nn.Sequential(*layers)
        self.linear = nn.Linear(current_dim, output_dim)

    def forward(self, hidden_state, features_len=None):
        # frame count is unchanged, so features_len passes through untouched
        return self.linear(self.hiddens(hidden_state)), features_len
|
class UtteranceLevel(nn.Module):
    """Utterance head: optional pre-net, temporal pooling, then a post-net."""

    def __init__(self, input_dim, output_dim, pooling='MeanPooling', activation='ReLU', pre_net=None, post_net={'select': 'FrameLevel'}, **kwargs):
        super().__init__()
        latest_dim = input_dim
        # optional frame-level transform applied before pooling
        self.pre_net = get_downstream_model(latest_dim, latest_dim, pre_net) if isinstance(pre_net, dict) else None
        # pooling class resolved by name (e.g. MeanPooling / AttentivePooling)
        self.pooling = eval(pooling)(input_dim=latest_dim, activation=activation)
        self.post_net = get_downstream_model(latest_dim, output_dim, post_net)

    def forward(self, hidden_state, features_len=None):
        if self.pre_net is not None:
            hidden_state, features_len = self.pre_net(hidden_state, features_len)
        pooled, features_len = self.pooling(hidden_state, features_len)
        # post_net already returns the (logit, features_len) pair
        return self.post_net(pooled, features_len)
|
class MeanPooling(nn.Module):
    """Average frames over the valid (unpadded) time steps of each utterance."""

    def __init__(self, **kwargs):
        super(MeanPooling, self).__init__()

    def forward(self, feature_BxTxH, features_len, **kwargs):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            features_len  - [B] number of valid frames per utterance
        """
        pooled = [feature_BxTxH[b][:features_len[b]].mean(dim=0) for b in range(len(feature_BxTxH))]
        # output is one pooled "frame" per utterance, hence lengths of 1
        return torch.stack(pooled), torch.ones(len(feature_BxTxH)).long()
|
class AttentivePooling(nn.Module):
    """Attentive pooling over time that ignores padded frames."""

    def __init__(self, input_dim, activation, **kwargs):
        super(AttentivePooling, self).__init__()
        self.sap_layer = AttentivePoolingModule(input_dim, activation)

    def forward(self, feature_BxTxH, features_len):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            features_len  - [B] number of valid frames per utterance
        """
        device = feature_BxTxH.device
        # boolean validity mask: True where t < features_len[b]
        valid = torch.lt(torch.arange(features_len.max()).unsqueeze(0).to(device), features_len.unsqueeze(1))
        # BUGFIX: AttentivePoolingModule *adds* this mask to the attention
        # logits before the softmax, so it must be additive: 0 on valid
        # frames and a large negative value on padding. The previous code
        # passed the boolean mask directly, which merely added +1 to valid
        # positions and left padded frames with normal attention weight.
        additive_mask = (~valid).to(feature_BxTxH.dtype) * (-1e9)
        sap_vec, _ = self.sap_layer(feature_BxTxH, additive_mask)
        # pooled output is a single "frame" per utterance
        return sap_vec, torch.ones(len(feature_BxTxH)).long()
|
class AttentivePoolingModule(nn.Module):
    """Attentive pooling: softmax-weighted average of frames.

    The per-frame logits are W(act(W_a(x))); `att_mask` is *added* to the
    logits before the softmax, so callers should pass an additive mask.
    """

    def __init__(self, input_dim, activation='ReLU', **kwargs):
        super(AttentivePoolingModule, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = getattr(nn, activation)()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : (B, T, H) batch of frame sequences
            att_mask  : additive mask broadcastable to (B, T)
        return:
            utter_rep : (B, H) pooled representation
            att_w     : (B, T, 1) attention weights
        """
        scores = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        scores = scores + att_mask
        att_w = self.softmax(scores, dim=-1).unsqueeze(-1)
        utter_rep = (batch_rep * att_w).sum(dim=1)
        return utter_rep, att_w
|
class VCC18SegmentalDataset(Dataset):
    """VCC2018 MOS dataset serving each utterance as overlapping 1-second segments.

    Each dataframe row holds (wav name, mean score, MOS, judge id). Judge ids
    are remapped to contiguous integers through an "idtable" that is generated
    once (on the training split) and reloaded from disk afterwards.
    """

    def __init__(self, dataframe, base_path, idtable='', valid=False):
        self.base_path = Path(base_path)
        self.dataframe = dataframe
        # segment duration in seconds, consumed by unfold_segments
        self.segments_durations = 1
        # NOTE(review): callers appear to pass a Path for `idtable`; the
        # default '' (str) would make Path.is_file('') raise — verify callers.
        if Path.is_file(idtable):
            # reuse a previously generated judge-id mapping
            self.idtable = torch.load(idtable)
            for (i, judge_i) in enumerate(self.dataframe['JUDGE']):
                # chained assignment: mutates the caller's dataframe in place
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
        elif (not valid):
            # no table on disk and not a validation split: build and persist one
            self.gen_idtable(idtable)

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        (wav_name, mean, mos, judge_id) = self.dataframe.loc[idx]
        wav_path = ((self.base_path / 'Converted_speech_of_submitted_systems') / wav_name)
        # sox effects: mono, 16 kHz, peak-normalized
        (wav, _) = apply_effects_file(str(wav_path), [['channels', '1'], ['rate', '16000'], ['norm']])
        wav = wav.view((- 1))
        wav_segments = unfold_segments(wav, self.segments_durations)
        # system tag built from filename prefix + a 4-char slice before the
        # extension — presumably encodes the conversion system; verify naming
        system_name = (wav_name[:3] + wav_name[(- 8):(- 4)])
        return (wav_segments, mean, system_name, mos, judge_id)

    def collate_fn(self, samples):
        (wavs_segments, means, system_names, moss, judge_ids) = zip(*samples)
        # flatten the per-utterance segment lists into one batch dimension
        flattened_wavs_segments = [wav_segment for wav_segments in wavs_segments for wav_segment in wav_segments]
        wav_segments_lengths = [len(wav_segments) for wav_segments in wavs_segments]
        # prefix_sums[i]:prefix_sums[i+1] indexes utterance i's segments
        prefix_sums = list(accumulate(wav_segments_lengths, initial=0))
        segment_judge_ids = []
        for i in range((len(prefix_sums) - 1)):
            # repeat the utterance's judge id once per segment
            segment_judge_ids.extend(([judge_ids[i]] * (prefix_sums[(i + 1)] - prefix_sums[i])))
        return (torch.stack(flattened_wavs_segments), prefix_sums, torch.FloatTensor(means), system_names, torch.FloatTensor(moss), torch.LongTensor(segment_judge_ids))

    def gen_idtable(self, idtable_path):
        """Build the judge-id -> int mapping from this dataframe and save it."""
        if (idtable_path == ''):
            idtable_path = './idtable.pkl'
        self.idtable = {}
        count = 0
        for (i, judge_i) in enumerate(self.dataframe['JUDGE']):
            if (judge_i not in self.idtable.keys()):
                self.idtable[judge_i] = count
                count += 1
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
            else:
                self.dataframe['JUDGE'][i] = self.idtable[judge_i]
        torch.save(self.idtable, idtable_path)
|
class VCC16SegmentalDataset(Dataset):
    """VCC2016 evaluation set served as overlapping 1-second segments."""

    def __init__(self, wav_list, base_path):
        self.wav_dir = Path(base_path)
        self.wav_list = wav_list        # list of wav Paths
        self.segments_durations = 1     # seconds per segment

    def __len__(self):
        return len(self.wav_list)

    def __getitem__(self, idx):
        wav_name = self.wav_list[idx]
        # sox effects: mono, 16 kHz, peak-normalized
        wav, _ = apply_effects_file(str(self.wav_dir / wav_name), [['channels', '1'], ['rate', '16000'], ['norm']])
        segments = unfold_segments(wav.view(-1), self.segments_durations)
        # system tag is the filename prefix before the first underscore
        system_name = wav_name.name.split('_')[0]
        return segments, system_name

    def collate_fn(self, samples):
        segment_lists, system_names = zip(*samples)
        # concatenate all segments; prefix sums recover each utterance's span
        flat_segments = [seg for segs in segment_lists for seg in segs]
        prefix_sums = list(accumulate((len(segs) for segs in segment_lists), initial=0))
        # None slots pad the tuple to the 6-slot layout of the VCC2018 loader
        return torch.stack(flat_segments), prefix_sums, None, system_names, None, None
|
def unfold_segments(tensor, tgt_duration, sample_rate=16000):
    """Cut a 1-D waveform into half-overlapping windows of tgt_duration seconds.

    The waveform is zero-padded on the right (short inputs up to one full
    window, longer ones up to the next hop multiple) and then unfolded with a
    hop of half a window. Returns a tuple of 1-D segment views.
    """
    window = int(tgt_duration * sample_rate)
    hop = window // 2
    n = len(tensor)
    if n <= window:
        padded_len = window
    else:
        padded_len = (n // hop + 1) * hop
    padded = torch.cat([tensor, torch.zeros(padded_len - n)])
    return padded.unfold(0, window, hop).unbind(0)
|
class DownstreamExpert(nn.Module):
    """MBNet-style MOS predictor over VCC2018/VCC2016 segmental datasets.

    forward() consumes flattened 1-second segments; per-utterance scores are
    the mean of the segment scores inside each utterance's prefix-sum span.
    """

    def __init__(self, upstream_dim, downstream_expert, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        # judge-id table shared by all splits so annotator ids stay consistent
        idtable = (Path(kwargs['expdir']) / 'idtable.pkl')
        self.train_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'train_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable)
        # NOTE(review): dev/test pass valid=False, so they would regenerate
        # the idtable if the file does not exist yet — confirm intended.
        self.dev_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'valid_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable, valid=False)
        self.vcc2018_test_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'test_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable, valid=False)
        self.vcc2018_system_mos = pd.read_csv(Path(self.datarc['vcc2018_file_path'], 'VCC2018_Results/system_mos_all_trackwise.csv'))
        self.vcc2016_test_dataset = VCC16SegmentalDataset(list(Path.iterdir(Path(self.datarc['vcc2016_file_path'], 'unified_speech'))), Path(self.datarc['vcc2016_file_path'], 'unified_speech'))
        self.vcc2016_system_mos = pd.read_csv(Path(self.datarc['vcc2016_file_path'], 'system_mos.csv'), index_col=False)
        self.connector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = Model(input_dim=self.modelrc['projector_dim'], clipping=self.modelrc.get('clipping', False), attention_pooling=self.modelrc.get('attention_pooling', False), num_judges=5000)
        self.objective = nn.MSELoss()
        # loss mixing weights for the MBNet mean-net / bias-net terms
        self.segment_weight = self.modelrc['segment_weight']
        self.bias_weight = self.modelrc['bias_weight']
        self.best_scores = {'dev_loss': np.inf, 'dev_LCC': (- np.inf), 'dev_SRCC': (- np.inf), 'vcc2016_test_LCC': (- np.inf), 'vcc2016_test_SRCC': (- np.inf)}

    def get_dataloader(self, mode):
        """Return the dataloader matching `mode` (None for unknown modes)."""
        if (mode == 'train'):
            return self._get_train_dataloader(self.train_dataset)
        elif (mode == 'dev'):
            return self._get_eval_dataloader(self.dev_dataset)
        elif (mode == 'vcc2018_test'):
            return self._get_eval_dataloader(self.vcc2018_test_dataset)
        elif (mode == 'vcc2016_test'):
            return self._get_eval_dataloader(self.vcc2016_test_dataset)

    def _get_train_dataloader(self, dataset):
        # shuffle only when not under a DistributedSampler
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def forward(self, mode, features, prefix_sums, means, system_names, moses, judge_ids, records, **kwargs):
        """One step over a batch of flattened segments.

        Args:
            mode: 'train', 'dev', 'vcc2018_test' or 'vcc2016_test'.
            features: list of equal-length segment features (stackable).
            prefix_sums: prefix_sums[i]:prefix_sums[i+1] spans utterance i.
            means / moses: per-utterance score targets (unused for vcc2016).
            judge_ids: per-segment annotator ids (used in 'train' only).
            records: defaultdict(list) consumed later by log_records.

        Returns:
            The training loss in 'train' mode, 0 otherwise.
        """
        features = torch.stack(features)
        features = self.connector(features)
        uttr_scores = []
        bias_scores = []
        if (mode == 'train'):
            means = means.to(features.device)
            judge_ids = judge_ids.to(features.device)
            moses = moses.to(features.device)
            # mean-net and judge-conditioned bias-net scores per segment
            (segments_scores, segments_bias_scores) = self.model(features, judge_ids=judge_ids)
            segments_loss = 0
            uttr_loss = 0
            bias_loss = 0
            for i in range((len(prefix_sums) - 1)):
                # slice out the segments belonging to utterance i
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
                current_bias_scores = segments_bias_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
                uttr_score = current_segment_scores.mean(dim=(- 1))
                uttr_scores.append(uttr_score.detach().cpu())
                bias_score = current_bias_scores.mean(dim=(- 1))
                bias_scores.append(bias_score.detach().cpu())
                segments_loss += self.objective(current_segment_scores, means[i])
                uttr_loss += self.objective(uttr_score, means[i])
                bias_loss += self.objective(bias_score, moses[i])
            segments_loss /= (len(prefix_sums) - 1)
            uttr_loss /= (len(prefix_sums) - 1)
            bias_loss /= (len(prefix_sums) - 1)
            # MBNet objective: weighted segment + bias terms plus utterance term
            loss = (((self.segment_weight * segments_loss) + (self.bias_weight * bias_loss)) + uttr_loss)
            records['segment loss'].append(segments_loss.item())
            records['utterance loss'].append(uttr_loss.item())
            records['bias loss'].append(bias_loss.item())
            records['total loss'].append(loss.item())
            records['pred_scores'] += uttr_scores
            records['true_scores'] += means.detach().cpu().tolist()
        if ((mode == 'dev') or (mode == 'vcc2018_test')):
            means = means.to(features.device)
            # no judge ids at evaluation time: mean-net scores only
            segments_scores = self.model(features)
            segments_loss = 0
            uttr_loss = 0
            for i in range((len(prefix_sums) - 1)):
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
                uttr_score = current_segment_scores.mean(dim=(- 1))
                uttr_scores.append(uttr_score.detach().cpu())
                segments_loss += self.objective(current_segment_scores, means[i])
                uttr_loss += self.objective(uttr_score, means[i])
            segments_loss /= (len(prefix_sums) - 1)
            uttr_loss /= (len(prefix_sums) - 1)
            loss = (segments_loss + uttr_loss)
            records['total loss'].append(loss.item())
            records['pred_scores'] += uttr_scores
            records['true_scores'] += means.detach().cpu().tolist()
        if (mode == 'vcc2016_test'):
            segments_scores = self.model(features)
            for i in range((len(prefix_sums) - 1)):
                current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
                uttr_score = current_segment_scores.mean(dim=(- 1))
                uttr_scores.append(uttr_score.detach().cpu())
            # accumulate per-system utterance scores across batches
            if (len(records['system']) == 0):
                records['system'].append(defaultdict(list))
            for i in range(len(system_names)):
                records['system'][0][system_names[i]].append(uttr_scores[i].tolist())
        if (mode == 'train'):
            return loss
        return 0

    def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
        """Aggregate recorded metrics, log them, and pick checkpoints to save."""
        save_names = []
        if (mode == 'train'):
            avg_uttr_loss = torch.FloatTensor(records['utterance loss']).mean().item()
            avg_frame_loss = torch.FloatTensor(records['segment loss']).mean().item()
            avg_bias_loss = torch.FloatTensor(records['bias loss']).mean().item()
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-utterance loss', avg_uttr_loss, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-segment loss', avg_frame_loss, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-bias loss', avg_bias_loss, global_step=global_step)
        if ((mode == 'train') or (mode == 'dev')):
            avg_total_loss = torch.FloatTensor(records['total loss']).mean().item()
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-total loss', avg_total_loss, global_step=global_step)
        if ((mode == 'dev') or (mode == 'vcc2018_test')):
            # utterance-level regression metrics
            all_pred_scores = np.array(records['pred_scores'])
            all_true_scores = np.array(records['true_scores'])
            MSE = np.mean(((all_true_scores - all_pred_scores) ** 2))
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level MSE', MSE, global_step=global_step)
            (pearson_rho, _) = pearsonr(all_true_scores, all_pred_scores)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level LCC', pearson_rho, global_step=global_step)
            (spearman_rho, _) = spearmanr(all_true_scores.T, all_pred_scores.T)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level SRCC', spearman_rho, global_step=global_step)
            tqdm.write(f'[{mode}] Utterance-level MSE = {MSE:.4f}')
            tqdm.write(f'[{mode}] Utterance-level LCC = {pearson_rho:.4f}')
            tqdm.write(f'[{mode}] Utterance-level SRCC = {spearman_rho:.4f}')
        if ((mode == 'dev') or (mode == 'vcc2018_test')):
            system_level_mos = self.vcc2018_system_mos
        if (mode == 'vcc2016_test'):
            system_level_mos = self.vcc2016_system_mos
        # FIX: forward() only fills records['system'] in vcc2016_test mode, so
        # guard on it being non-empty — the previous code unconditionally
        # indexed records['system'][0] for dev/vcc2018_test and raised
        # IndexError there.
        if ((mode in ('dev', 'vcc2018_test', 'vcc2016_test')) and records['system']):
            all_system_pred_scores = []
            all_system_true_scores = []
            for (key, values) in records['system'][0].items():
                all_system_pred_scores.append(np.mean(values))
                all_system_true_scores.append(system_level_mos[key].iloc[0])
            all_system_pred_scores = np.array(all_system_pred_scores)
            all_system_true_scores = np.array(all_system_true_scores)
            MSE = np.mean(((all_system_true_scores - all_system_pred_scores) ** 2))
            (pearson_rho, _) = pearsonr(all_system_true_scores, all_system_pred_scores)
            (spearman_rho, _) = spearmanr(all_system_true_scores, all_system_pred_scores)
            tqdm.write(f'[{mode}] System-level MSE = {MSE:.4f}')
            tqdm.write(f'[{mode}] System-level LCC = {pearson_rho:.4f}')
            tqdm.write(f'[{mode}] System-level SRCC = {spearman_rho:.4f}')
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level MSE', MSE, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level LCC', pearson_rho, global_step=global_step)
            logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level SRCC', spearman_rho, global_step=global_step)
        if (mode == 'dev'):
            if (avg_total_loss < self.best_scores['dev_loss']):
                # FIX: update the same 'dev_loss' entry that is compared
                # against; the original wrote to best_scores[mode] ('dev'),
                # so this threshold never moved and dev-best.ckpt was saved
                # on every dev evaluation.
                self.best_scores['dev_loss'] = avg_total_loss
                save_names.append(f'{mode}-best.ckpt')
            if (pearson_rho > self.best_scores['dev_LCC']):
                self.best_scores['dev_LCC'] = pearson_rho
                save_names.append(f'{mode}-LCC-best.ckpt')
            if (spearman_rho > self.best_scores['dev_SRCC']):
                self.best_scores['dev_SRCC'] = spearman_rho
                save_names.append(f'{mode}-SRCC-best.ckpt')
        if (mode == 'vcc2016_test'):
            if (pearson_rho > self.best_scores['vcc2016_test_LCC']):
                self.best_scores['vcc2016_test_LCC'] = pearson_rho
                save_names.append(f'{mode}-LCC-best.ckpt')
            if (spearman_rho > self.best_scores['vcc2016_test_SRCC']):
                self.best_scores['vcc2016_test_SRCC'] = spearman_rho
                save_names.append(f'{mode}-SRCC-best.ckpt')
        return save_names
|
def preprocess(base_path, txt_file):
    """Load the metadata csv `txt_file` under `base_path` into a DataFrame."""
    return pd.read_csv(Path(base_path, txt_file), index_col=False)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # one learned scalar score per frame
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension

        attention_weight:
            att_w : size (N, T, 1)

        return:
            utter_rep: size (N, H)
        """
        # FIX: softmax dim was implicit, which triggers PyTorch's deprecated
        # legacy dim inference; the intended axis is time (the last dim of
        # the squeezed (N, T) scores, identical to what the legacy rule chose).
        att_w = nn.functional.softmax(self.W(batch_rep).squeeze(-1), dim=-1).unsqueeze(-1)
        utter_rep = torch.sum((batch_rep * att_w), dim=1)
        return utter_rep
|
class Model(nn.Module):
    """MBNet-style MOS head: a mean net plus a judge-conditioned bias net.

    Without `judge_ids` only the mean-net score is returned; with them, the
    bias net predicts a per-judge correction that is added on top of the
    mean-net score and both are returned.
    """

    def __init__(self, input_dim, clipping=False, attention_pooling=False, num_judges=5000, **kwargs):
        super(Model, self).__init__()
        self.mean_net_linear = nn.Linear(input_dim, 1)
        self.mean_net_clipping = clipping
        self.mean_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.bias_net_linear = nn.Linear(input_dim, 1)
        self.bias_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        # one learned embedding per annotator id
        self.judge_embbeding = nn.Embedding(num_embeddings=num_judges, embedding_dim=input_dim)

    def _score(self, feats, pooling, linear):
        # shared scoring path: attention-pool then project, or project every
        # frame and average the per-frame scores over time
        if pooling is not None:
            return linear(pooling(feats))
        return linear(feats).squeeze(-1).mean(dim=-1)

    def forward(self, features, judge_ids=None):
        segment_score = self._score(features, self.mean_net_pooling, self.mean_net_linear)
        if self.mean_net_clipping:
            # squash the prediction into the MOS range [1, 5]
            segment_score = torch.tanh(segment_score) * 2 + 3
        if judge_ids is None:
            return segment_score.squeeze(-1)
        # broadcast each judge embedding across the time axis
        judge_emb = self.judge_embbeding(judge_ids)
        judge_emb = torch.stack([judge_emb] * features.shape[1], dim=1)
        bias_score = self._score(features + judge_emb, self.bias_net_pooling, self.bias_net_linear)
        bias_score = bias_score + segment_score
        return segment_score.squeeze(-1), bias_score.squeeze(-1)
|
class MOSEIDataset(Dataset):
    """CMU-MOSEI segmented-audio dataset yielding (waveform, label) pairs."""

    def __init__(self, split, data, path):
        self.split = split   # subdirectory name: 'train' / 'dev' / 'test'
        self.data = data     # list of (filename, label) pairs
        self.path = path     # dataset root directory

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        filename, label = self.data[idx]
        wav_path = os.path.join(self.path, 'Segmented_Audio', self.split, filename)
        wav, sr = torchaudio.load(wav_path)
        return wav.view(-1), torch.tensor(label).long()

    def collate_fn(self, samples):
        # split the batch of pairs into parallel lists
        wavs = [wav for wav, _ in samples]
        labels = [label for _, label in samples]
        return wavs, labels
|
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        """Build the MOSEI sentiment task.

        Args:
            upstream_dim: feature dimension produced by the upstream model.
            downstream_expert: the 'downstream_expert' section of the config
                (datarc/modelrc sub-dicts).
            expdir: experiment directory for logs and prediction dumps.
        """
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        self.train_data, self.dev_data, self.test_data = [], [], []
        df = pd.read_csv(self.datarc['label_path'], encoding='latin-1')
        for row in df.itertuples():
            filename = row.file + '_' + str(row.index) + '.wav'
            # pick the annotation column (and non-negative offset) matching
            # the configured number of sentiment classes
            if self.datarc['num_class'] == 2:
                label = row.label2a
            elif self.datarc['num_class'] == 3:
                label = row.label2b + 1
            elif self.datarc['num_class'] == 6:
                label = row.label6
            elif self.datarc['num_class'] == 7:
                label = row.label7 + 3
            else:
                raise ValueError('Unsupported num_class')
            if row.split == 0:
                self.train_data.append((filename, label))
            elif row.split == 1:
                self.dev_data.append((filename, label))
            elif row.split == 2:
                self.test_data.append((filename, label))
        self.train_dataset = MOSEIDataset('train', self.train_data, self.datarc['data_dir'])
        self.dev_dataset = MOSEIDataset('dev', self.dev_data, self.datarc['data_dir'])
        self.test_dataset = MOSEIDataset('test', self.test_data, self.datarc['data_dir'])
        self.connector = nn.Linear(upstream_dim, self.modelrc['input_dim'])
        self.model = Model(output_class_num=self.datarc['num_class'], **self.modelrc)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        self.logging = os.path.join(self.expdir, 'log.log')
        # best accuracy seen so far, per logging prefix
        self.best = defaultdict(lambda: 0)
        self.answer = []

    def _get_train_dataloader(self, dataset, epoch: int):
        from s3prl.utility.data import get_ddp_sampler
        # shuffle only when no DDP sampler is in charge of the ordering
        sampler = get_ddp_sampler(dataset, epoch)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
    '\n    Datalaoder Specs:\n        Each dataloader should output a list in the following format:\n\n        [[wav1, wav2, ...], your_other_contents1, your_other_contents2, ...]\n\n        where wav1, wav2 ... are in variable length\n        each wav is torch.FloatTensor in cpu with:\n            1. dim() == 1\n            2. sample_rate == 16000\n            3. directly loaded by torchaudio without any preprocessing\n    '

    def get_train_dataloader(self, epoch: int):
        return self._get_train_dataloader(self.train_dataset, epoch)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode, epoch: int = 0):
        """Dispatch to get_{mode}_dataloader; the train loader needs `epoch`."""
        # FIX: resolve the loader with getattr instead of eval() on a
        # format string — same lookup, no arbitrary-code evaluation.
        loader_fn = getattr(self, f'get_{mode}_dataloader')
        if mode == 'train':
            return loader_fn(epoch)
        return loader_fn()

    def forward(self, mode, features, labels, records, **kwargs):
        """Classify each utterance and record accuracy/predictions.

        Args:
            mode: 'train', 'dev' or 'test'.
            features: list of unpadded FloatTensors on the target device.
            labels: per-utterance class ids, in dataloader order (cpu).
            records: defaultdict(list) consumed later by log_records.

        Returns:
            The scalar cross-entropy loss to optimize (not detached).
        """
        features = pad_sequence(features, batch_first=True)
        features = self.connector(features)
        predicted = self.model(features)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=-1).indices
        records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
        records['predicted'] += predicted_classid.cpu().float().tolist()
        records['original'] += labels.cpu().float().tolist()
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log accuracy/F1, dump predictions for dev/test, and name the
        checkpoint to save when a new best accuracy is reached."""
        prefix = f'mosei/{mode}-'
        average = torch.FloatTensor(records['acc']).mean().item()
        f1 = sklearn.metrics.f1_score(records['original'], records['predicted'], average='macro')
        logger.add_scalar(f'{prefix}acc', average, global_step=global_step)
        if mode in ['dev', 'test']:
            print(f'{prefix}acc: {average}')
        message = f'{mode} at step {global_step}: {average} (acc), {f1} (f1)\n'
        save_ckpt = []
        if average > self.best[prefix]:
            self.best[prefix] = average
            message = f'New best on {message}'
            name = prefix.split('/')[-1].split('-')[0]
            save_ckpt.append(f'{name}-best.ckpt')
        if mode in ['dev', 'test']:
            # dump the raw predictions and ground truth for offline analysis
            with open(Path(self.expdir) / f'{mode}_predict.txt', 'w') as file:
                file.writelines([f'{f}\n' for f in records['predicted']])
            with open(Path(self.expdir) / f'{mode}_truth.txt', 'w') as file:
                file.writelines([f'{f}\n' for f in records['original']])
        with open(self.logging, 'a') as f:
            f.write(message)
        return save_ckpt
|
class Model(nn.Module):
    """Linear probe over temporally averaged upstream features."""

    def __init__(self, input_dim, output_class_num, **kwargs):
        super(Model, self).__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        # collapse the time dimension by averaging, then classify
        return self.linear(features.mean(dim=1))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.