code stringlengths 17 6.64M |
|---|
def qsub(args):
    """Submit the given command line as an SGE batch job via qsub."""
    job_name = qsub_name_from_args(args)
    cmd = ['qsub', '-cwd', '-S', '/bin/bash', '-j', 'yes', '-o', 'fullsum-scores']
    cmd += qsub_opts
    cmd += ['-N', job_name]
    # The actual command line is passed on stdin.
    run(cmd, input=' '.join(args).encode('utf8'))
|
def get_wers(fn):
    """Parse a recog scores file into {epoch: wer}; each line is '<key>: <value>'."""
    result = {}
    with open(fn) as f:
        lines = f.read().splitlines()
    for line in lines:
        key, value = line.split(':', 1)
        # `r_epoch` is a module-level regex extracting the epoch number from the key.
        epoch_str = r_epoch.match(key).group(1)
        result[int(epoch_str)] = float(value)
    return result
|
def get_best_epoch(model):
    """Return (score, epoch) of the best recog epoch for `model`.

    "Best" respects Settings.recog_score_lower_is_better.
    """
    fn = 'scores/%s.recog.%ss.txt' % (model, Settings.recog_metric_name)
    assert os.path.exists(fn)
    pairs = [(score, ep) for ep, score in get_wers(fn).items()]
    pairs.sort(reverse=not Settings.recog_score_lower_is_better)
    return pairs[0]
|
def get_train_scores(train_scores_file):
    """Parse a Returnn train-info file into {key: {epoch: value}}.

    Only keys containing 'error' or 'score' (or the empty key) are collected.
    """
    scores = {}
    with open(train_scores_file) as f:
        lines = f.read().splitlines()
    for line in lines:
        match = re.match('epoch +([0-9]+) ?(.*): *(.*)', line)
        if not match:
            continue
        ep, key, value = match.groups()
        if ('error' in key) or ('score' in key) or (not key):
            scores.setdefault(key, {})[int(ep)] = float(value)
    return scores
|
def open_res(fn):
    """Read a results file, strip repr artifacts, and evaluate it into a dict.

    '<...>' reprs become None and 'NumbersDict({...})' wrappers are unwrapped
    so the text becomes a valid Python literal expression.
    """
    with open(fn) as f:
        text = f.read()
    text = re.sub('<.*>', 'None', text)
    text = re.sub('NumbersDict\\({', '({', text)
    try:
        parsed = eval(text)
    except Exception as exc:
        print('Parse exception:', exc)
        print('txt:')
        print(text)
        raise
    assert isinstance(parsed, dict)
    return parsed
|
def check_sge_job_exists(args):
    """Return True iff an SGE job named after `args` shows up in qstat."""
    from subprocess import Popen, DEVNULL
    job_name = qsub_name_from_args(args)
    proc = Popen(['qstat', '-j', job_name], stdout=DEVNULL, stderr=DEVNULL)
    # qstat exits 0 exactly when the job exists.
    return proc.wait() == 0
|
def main():
    """Report per-model recog/training scores and full-sum scores for all variants.

    For each model: print best recog epoch and WER, the cross-validation frame
    error rate, softmax prior info if available, and for each scoring variant
    either the existing full-sum score or (depending on --calc) schedule its
    computation locally or on SGE.
    """
    argparser = ArgumentParser()
    argparser.add_argument('--calc', help='none, local or sge')
    args = argparser.parse_args()
    # NOTE(review): `models`, `variants`, `num_labels` and `sil_label_id` are
    # module-level globals not visible in this chunk.
    for model in models:
        (score, ep) = get_best_epoch(model)
        print(('model %s, best epoch: %s' % (model, ep)))
        print((' WER (dev): %.1f%%' % score))
        train_scores_fn = ('scores/%s.train.info.txt' % model)
        assert os.path.exists(train_scores_fn)
        train_scores = get_train_scores(train_scores_fn)
        dev_err_key = 'dev_error'
        if (dev_err_key not in train_scores):
            # Fallback key used by configs that report per-output-layer errors.
            dev_err_key = 'dev_error_output'
        print((' FER (cv): %.1f%%' % (train_scores[dev_err_key][ep] * 100.0)))
        prefix = ('fullsum-scores/out.%s.ep%03i.' % (model, ep))
        sm_prior_fn = (prefix + 'softmax-prior.txt')
        print(' sm prior exists:', os.path.exists(sm_prior_fn))
        if os.path.exists(sm_prior_fn):
            sm_prior = numpy.loadtxt(sm_prior_fn)
            assert (sm_prior.shape == (num_labels,))
            # Prior file stores log-probabilities, hence the exp().
            print((' sm prior sil label prob: %.1f%%' % (numpy.exp(sm_prior[sil_label_id]) * 100.0)))
        res = None
        # Without a softmax prior file only the first variant can be computed.
        for variant in variants[:(len(variants) if os.path.exists(sm_prior_fn) else 1)]:
            prior = variant['prior']
            tdp_scale = variant.get('tdp_scale', 1.0)
            am_scale = variant.get('am_scale', 1.0)
            prior_scale = variant.get('prior_scale', 1.0)
            res_fn = (prefix + ('fullsum-scores.prior_%s.am_scale_%f.prior_scale_%f.tdp_scale_%f.txt' % (prior, float(am_scale), float(prior_scale), float(tdp_scale))))
            print((' variant %r exists:' % variant), os.path.exists(res_fn))
            if (not os.path.exists(res_fn)):
                cmd_args = ['./calc_full_sum_score.py', '--model', model, '--epoch', str(ep), '--prior', prior, '--tdp_scale', str(tdp_scale), '--am_scale', str(am_scale), '--prior_scale', str(prior_scale)]
                if (not args.calc):
                    print(' not calculating. SGE job exists:', check_sge_job_exists(cmd_args))
                elif (args.calc == 'local'):
                    run(cmd_args)
                elif (args.calc == 'sge'):
                    if check_sge_job_exists(cmd_args):
                        print(' SGE job already exists')
                    else:
                        qsub(cmd_args)
                else:
                    raise Exception(('invalid calc %r' % args.calc))
            else:
                res = open_res(res_fn)
                print((' fullsum score: %.3f' % res['scores']['cost:output_fullsum']))
|
def cp(src_dir, dst_dir, filename):
    """Copy `filename` from `src_dir` to `dst_dir`, creating target dirs as needed.

    :param str src_dir: source base directory
    :param str dst_dir: destination base directory
    :param str filename: path relative to both base directories
    """
    src_fn = os.path.join(src_dir, filename)
    dst_fn = os.path.join(dst_dir, filename)
    assert os.path.exists(src_fn), ('%r does not exist' % src_fn)
    dst_parent = os.path.dirname(dst_fn)
    if dst_parent:  # filename may have no directory component
        # exist_ok=True replaces the old try/except os.error: pass pattern.
        os.makedirs(dst_parent, exist_ok=True)
    print(('copy (%s) %s' % (dst_dir, filename)))
    shutil.copyfile(src_fn, dst_fn)
|
def main():
    """Copy base files and per-experiment configs/scores for both corpora."""
    corpora = [
        (quaero_src_base_dir, quaero_dst_base_dir, quaero_experiments),
        (swb_src_base_dir, swb_dst_base_dir, swb_experiments),
    ]
    per_setup_patterns = (
        'config-train/%s.config',
        'scores/%s.recog.wers.txt',
        'scores/%s.train.info.txt',
    )
    for src, dst, experiments in corpora:
        for fn in base_files:
            cp(src_dir=src, dst_dir=dst, filename=fn)
        for setup_name in experiments:
            for pattern in per_setup_patterns:
                cp(src_dir=src, dst_dir=dst, filename=(pattern % setup_name))
|
def EpochData(learningRate, error):
    """Build one epoch record: the learning rate plus all entries of `error`.

    Entries in `error` override 'learning_rate' if present, as before.
    """
    return {'learning_rate': learningRate, **error}
|
def add_suggest(ep, temp=None, reason=None):
    """Register a suggestion for epoch `ep` in the global `ds`, unless already set."""
    if ep not in ds:
        ds[ep] = {'epoch': ep, 'temporary_suggestion': temp, 'reason': reason}
|
def main():
    """Convert a Returnn search output file ('py' format) into a text dict file.

    N-best entries are reduced to the best hypothesis and BPE markers removed.
    """
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format")
    argparser.add_argument('--out', required=True, help='output filename')
    args = argparser.parse_args()
    data = eval(open(args.file, 'r').read())
    assert isinstance(data, dict)
    assert not os.path.exists(args.out)
    with open(args.out, 'w') as out:
        out.write('{\n')
        for seq_tag, txt in sorted(data.items()):
            if isinstance(txt, list):
                # n-best list of (score, text) pairs: take the first/best.
                _, txt = txt[0]
            assert isinstance(txt, str)
            out.write('%r: %r,\n' % (seq_tag, txt.replace('@@ ', '')))
        out.write('}\n')
    print('# Done.')
|
def run(args, **kwargs):
    """Run a subprocess with check=True, echoing the command line first.

    The 'input' kwarg value is masked in the echo (can be large binary data).
    """
    import subprocess
    opts = dict(kwargs)
    shown = {k: (v if k != 'input' else '...') for k, v in opts.items()}
    print('$ %s' % ' '.join(args), shown)
    try:
        subprocess.run(args, **opts, check=True)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
        sys.exit(1)
|
def check_recog_bpe_file():
    """Check that `recog_bpe_file` is creatable/writable: touch it, then remove it."""
    open(recog_bpe_file, 'w').close()
    os.remove(recog_bpe_file)
|
def handle_part(name, keep_existing_ogg):
    """Convert one corpus part (STM + SPH) into per-segment Ogg files plus a meta file.

    :param str name: "train", "dev" or "test"
    :param bool keep_existing_ogg: if True, keep and skip already-converted Ogg files
    """
    dirname = ('%s/%s/stm' % (BaseDir, name))
    assert os.path.isdir(dirname)
    dest_dirname = ('%s/%s' % (DestDir, name))
    dest_meta_filename = ('%s/%s.txt' % (DestDir, name))
    dest_meta_file = open(dest_meta_filename, 'w')
    # The meta file is a Python-literal list of per-segment dicts.
    dest_meta_file.write('[\n')
    os.makedirs(dest_dirname, exist_ok=True)
    for seq in stm_reader.read_stm_dir(dirname):
        # One SPH audio file per speaker; segments are cut out of it via ffmpeg.
        sph_filename = ('%s/%s/sph/%s.sph' % (BaseDir, name, seq.speaker))
        assert os.path.isfile(sph_filename)
        assert (seq.start < seq.end)
        duration = (seq.end - seq.start)
        assert (duration > 0)
        dest_filename = ('%s/%s_%s_%s.ogg' % (dest_dirname, seq.speaker, seq.start, seq.end))
        if (os.path.exists(dest_filename) and keep_existing_ogg):
            print(('already exists, skip: %s' % os.path.basename(dest_filename)))
        else:
            if os.path.exists(dest_filename):
                print(('already exists, delete: %s' % os.path.basename(dest_filename)))
                os.remove(dest_filename)
            # Cut [start, start+duration) out of the SPH file and convert to Ogg.
            cmd = ['ffmpeg', '-i', sph_filename, '-ss', str(seq.start), '-t', str(duration), dest_filename]
            print(('$ %s' % ' '.join(cmd)))
            check_call(cmd)
        # Meta entry is written for every segment, converted or skipped.
        dest_meta_file.write(("{'text': %r, 'tags': %r, 'file': %r, 'duration': %s},\n" % (seq.text, seq.tags, os.path.basename(dest_filename), duration)))
    dest_meta_file.write(']\n')
    dest_meta_file.close()
|
def print_stats(name):
    """Print sequence count, total duration and total char count for one part.

    :param str name: "train", "dev" or "test"
    """
    print('%s:' % name)
    filename = '%s/%s.txt' % (DestDir, name)
    assert os.path.isfile(filename)
    data = eval(open(filename).read())
    assert isinstance(data, list)
    print(' num seqs:', len(data))
    print(' total duration:', sum((seq['duration'] for seq in data), 0.0))
    print(' total num chars:', sum(len(seq['text']) for seq in data))
|
def main():
    """Convert all corpus parts to Ogg (unless --stats_only), then print stats."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--keep_existing_ogg', action='store_true')
    arg_parser.add_argument('--stats_only', action='store_true')
    args = arg_parser.parse_args()
    assert os.path.isdir(BaseDir)
    parts = ('train', 'dev', 'test')
    if not args.stats_only:
        os.makedirs(DestDir, exist_ok=True)
        for part in parts:
            handle_part(part, keep_existing_ogg=args.keep_existing_ogg)
    for part in parts:
        print_stats(part)
|
def parse_stm_seq(line):
    """Parse one STM line into an StmSeq, or None for unscored segments.

    :param str line:
    :rtype: StmSeq|None
    """
    m = StmSeqRegExp.match(line)
    if not m:
        # For a helpful error, show how far a prefix of the pattern matches.
        m2 = re.match(StmSeqRegExpPattern[:-1], line)
        prefix = line[:m2.end()] if m2 else None
        raise Exception('line %r, no match to %r. but prefix: %r' % (line, StmSeqRegExp, prefix))
    name1, name2, start, end, tags, text = m.groups()
    if text == 'ignore_time_segment_in_scoring':
        return None
    return StmSeq(speaker=name1, start=Decimal(start), end=Decimal(end), tags=tags, text=text)
|
def read_stm(filename):
    """Yield an StmSeq for every scored line of an STM file.

    :param str filename:
    :rtype: yields StmSeq
    """
    for line in open(filename).read().splitlines():
        seq = parse_stm_seq(line)
        if seq:
            yield seq
|
def read_stm_dir(dirname):
    """Yield StmSeq objects from every *.stm file in a directory.

    :param str dirname:
    :rtype: yields StmSeq
    """
    stm_files = glob(dirname + '/*.stm')
    assert stm_files, 'no stm files in %r found' % dirname
    for stm_fn in stm_files:
        yield from read_stm(stm_fn)
|
def main():
    """Print the text of every sequence of a zip dataset file."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('file')
    args = arg_parser.parse_args()
    assert os.path.exists(args.file)
    name, ext = os.path.splitext(os.path.basename(args.file))
    assert ext == '.zip'
    # The zip contains a Python-literal list of seq dicts under <name>.txt.
    data = eval(ZipFile(args.file).open('%s.txt' % name).read())
    for seq in data:
        print(seq['text'])
|
def EpochData(learningRate, error):
    """Return a dict holding the learning rate and all entries of `error`.

    As before, keys in `error` take precedence over 'learning_rate'.
    """
    record = {'learning_rate': learningRate}
    record.update(error)
    return record
|
def add_suggest(ep, temp=None, reason=None):
    """Add a suggestion entry for epoch `ep` to the global `ds` if not present."""
    if ep in ds:
        return
    entry = {'epoch': ep, 'temporary_suggestion': temp, 'reason': reason}
    ds[ep] = entry
|
def main():
    """Convert a Returnn search output ('py' format) into a plain dict text file.

    N-best lists are reduced to the best hypothesis; BPE joiners are removed.
    """
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format")
    argparser.add_argument('--out', required=True, help='output filename')
    args = argparser.parse_args()
    search_output = eval(open(args.file, 'r').read())
    assert isinstance(search_output, dict)
    assert not os.path.exists(args.out)
    with open(args.out, 'w') as out:
        out.write('{\n')
        for seq_tag, txt in sorted(search_output.items()):
            if isinstance(txt, list):
                # n-best list of (score, text); keep only the best entry.
                _, txt = txt[0]
            assert isinstance(txt, str)
            out.write('%r: %r,\n' % (seq_tag, txt.replace('@@ ', '')))
        out.write('}\n')
    print('# Done.')
|
def run(args, **kwargs):
    """Execute a command via subprocess.run(check=True), echoing it first.

    Any 'input' kwarg is shown as '...' in the echo to avoid dumping data.
    """
    import subprocess
    call_kwargs = dict(kwargs)
    display = {key: ('...' if key == 'input' else val) for key, val in call_kwargs.items()}
    print('$ %s' % ' '.join(args), display)
    try:
        subprocess.run(args, **call_kwargs, check=True)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
        sys.exit(1)
|
def check_recog_bpe_file():
    """Verify `recog_bpe_file` can be created: write an empty file, then delete it."""
    with open(recog_bpe_file, 'w'):
        pass
    os.remove(recog_bpe_file)
|
def main():
    """Full experiment pipeline for ASR training with synthetic TTS data.

    Prepares LibriSpeech data and subwords, trains baseline and SpecAugment ASR
    models, trains a TTS model and a feature-to-linear (f2l) model, synthesizes
    audio for train-clean-360, retrains both ASR variants with the synthetic
    corpus, and finally decodes/evaluates every model on all evaluation sets.
    """
    with tk.block('data_preparation'):
        (bliss_dict, zip_dict, transcription_text_dict) = prepare_data_librispeech()
        (bpe_codes, bpe_vocab, num_classes) = build_subwords([bliss_dict['train-clean-100']], num_segments=10000, name='librispeech-100')
        (mean, stddev) = get_asr_dataset_stats(zip_dict['train-clean-100'])
        asr_global_parameter_dict = {'ext_norm_mean': mean, 'ext_norm_std_dev': stddev, 'ext_bpe_file': bpe_codes, 'ext_vocab_file': bpe_vocab, 'ext_num_classes': num_classes}
    # Short initial training (80 epochs) used as a warm-start checkpoint for all
    # continued trainings below.
    initial_checkpoint_training_params = {'ext_partition_epoch': 20, 'ext_training_zips': [zip_dict['train-clean-100']], 'ext_dev_zips': [zip_dict['dev-clean'], zip_dict['dev-other']], 'ext_num_epochs': 80}
    models = {}  # experiment name -> (training job, best epoch variable)
    with tk.block('baseline_training'):
        initial_checkpoint_training_params.update(asr_global_parameter_dict)
        asr_training_config = Path('returnn_configs/asr/train-clean-100.exp3.ctc.ogg.lrwarmupextra10.config')
        initial_training_job = train_asr_config(asr_training_config, 'librispeech-100-initial-training', initial_checkpoint_training_params)
        baseline_training_params = copy.deepcopy(initial_checkpoint_training_params)
        baseline_training_params['ext_num_epochs'] = 250
        baseline_training_params['ext_partition_epoch'] = 5
        baseline_training_params['load'] = initial_training_job.models[80].model
        baseline_training_params.update(asr_global_parameter_dict)
        continued_training_job = train_asr_config(asr_training_config, 'librispeech-100-baseline-training', baseline_training_params)
        from recipe.returnn.search import GetBestEpoch
        best_epoch = GetBestEpoch(continued_training_job.model_dir, continued_training_job.learning_rates, key='dev_score_output/output_prob').out_var
        models['baseline'] = (continued_training_job, best_epoch)
    with tk.block('specaug_training'):
        asr_specaug_config = Path('returnn_configs/asr/train-clean-100.exp3.ctc.ogg.lrwarmupextra10.specaug.config')
        continued_training_job = train_asr_config(asr_specaug_config, 'librispeech-100-specaug-training', baseline_training_params)
        from recipe.returnn.search import GetBestEpoch
        best_epoch = GetBestEpoch(continued_training_job.model_dir, continued_training_job.learning_rates, key='dev_score_output/output_prob').out_var
        models['specaug'] = (continued_training_job, best_epoch)
    # TTS side: only the clean corpora are used.
    tts_bliss_dict = {k: v for (k, v) in bliss_dict.items() if (k in ['dev-clean', 'train-clean-100', 'train-clean-360'])}
    (tts_bliss_corpora, tts_zip_corpora, char_vocab) = prepare_ttf_data(tts_bliss_dict)
    (mean, stddev) = get_ttf_dataset_stats(tts_zip_corpora['tts-train-clean-100'])
    tts_global_parameter_dict = {'ext_norm_mean_value': mean, 'ext_norm_std_dev_value': stddev, 'ext_char_vocab': char_vocab, 'ext_training_zips': [tts_zip_corpora['tts-train-clean-100']], 'ext_dev_zips': [tts_zip_corpora['tts-dev-clean']], 'ext_num_epochs': 200, 'ext_partition_epoch': 3}
    tts_training_config = Path('returnn_configs/tts/tts-clean-100.dec640.enc256.enclstm512.config', hash_overwrite='TTS_DEC640_ENC256_ENCLSTM512_v1')
    tts_training_job = train_ttf_config(tts_training_config, name='tts-baseline-training', parameter_dict=tts_global_parameter_dict)
    # f2l model converts TTS features to linear spectrograms; it needs no vocab.
    f2l_global_parameter_dict = copy.deepcopy(tts_global_parameter_dict)
    f2l_global_parameter_dict['ext_num_epochs'] = 100
    f2l_global_parameter_dict['ext_partition_epoch'] = 1
    f2l_global_parameter_dict.pop('ext_char_vocab')
    f2l_training_config = Path('returnn_configs/f2l/f2l.2layer.blstm.residual.config', hash_overwrite='F2L_2LAYER_ENC256_ENCLSTM512_v1')
    f2l_training_job = train_ttf_config(f2l_training_config, name='f2l-baseline-training', parameter_dict=f2l_global_parameter_dict)
    # Speaker embeddings from train-clean-100 are distributed onto train-clean-360.
    embeddings = generate_speaker_embeddings(config_file=tts_training_config, model_dir=tts_training_job.model_dir, epoch=200, zip_corpus=tts_zip_corpora['tts-train-clean-100'], name='tts-baseline', default_parameter_dict=tts_global_parameter_dict)
    from recipe.tts.corpus import DistributeSpeakerEmbeddings
    dist_speaker_embeds_job = DistributeSpeakerEmbeddings(tts_bliss_dict['train-clean-360'], embeddings, use_full_seq_name=False, options=None)
    tk.register_output('embed_dist.hdf', dist_speaker_embeds_job.out)
    # Synthesis is split into chunks; each chunk is verified and cleaned up,
    # and the verification result serializes the chunks (via add_input).
    TTS_GENERATION_SPLITS = 10
    segment_job = SegmentCorpus(tts_bliss_corpora['tts-train-clean-360'], TTS_GENERATION_SPLITS)
    segments = segment_job.segment_files
    verification_result = None
    corpora = []
    for i in range(TTS_GENERATION_SPLITS):
        (unstacked_features, decode_job, convert_job) = decode_with_speaker_embeddings(config_file=tts_training_config, model_dir=tts_training_job.model_dir, epoch=200, zip_corpus=tts_zip_corpora['tts-train-clean-360'], speaker_hdf=dist_speaker_embeds_job.out, segment_file=segments[i], name=('tts-baseline_decode_%i' % i), default_parameter_dict=tts_global_parameter_dict)
        if verification_result:
            decode_job.add_input(verification_result)
        (linear_features, f2l_job) = convert_with_f2l(f2l_training_config, name=('tts-baseline_forward_%i' % i), features=unstacked_features, model_dir=f2l_training_job.model_dir, epoch=100)
        (generated_audio_bliss, gl_job) = griffin_lim_ogg(linear_features, name=('tts-baseline_gl_%i' % i))
        verification_result = VerifyCorpus(generated_audio_bliss).out
        cleanup_success = AutoCleanup([decode_job, convert_job, f2l_job], verification_result).out
        tk.register_output(('cleanup_result/cleanup_%i' % i), cleanup_success)
        corpora.append(generated_audio_bliss)
    merge_job = MergeCorpora(corpora, 'synthetic-ls-360', subcorpora=False)
    merge_job.add_input(verification_result)
    synthetic_audio_corpus = merge_job.merged_corpus
    # Re-attach the original transcriptions to the synthesized audio.
    synthetic_corpus = BlissAddTextFromBliss(synthetic_audio_corpus, bliss_dict['train-clean-360']).out
    synthetic_zip_corpus = BlissToZipDataset('synthetic-ls-360', synthetic_corpus, use_full_seq_name=False).out
    tk.register_output('synthetic_data/synthetic_librispeech_360h.zip', synthetic_zip_corpus)
    with tk.block('baseline_with_synthetic'):
        synthetic_training_params = copy.deepcopy(initial_checkpoint_training_params)
        synthetic_training_params['ext_num_epochs'] = 250
        synthetic_training_params['load'] = initial_training_job.models[80].model
        synthetic_training_params.update(asr_global_parameter_dict)
        synthetic_training_params['ext_training_zips'] = [zip_dict['train-clean-100'], synthetic_zip_corpus]
        synthetic_training_job = train_asr_config(asr_training_config, 'librispeech-100-synthetic-training', synthetic_training_params)
        from recipe.returnn.search import GetBestEpoch
        # Fix: pick the best epoch of the synthetic training job itself.
        # Previously this reused `continued_training_job` (the specaug job from
        # the 'specaug_training' block), selecting the wrong model's epoch.
        best_epoch = GetBestEpoch(synthetic_training_job.model_dir, synthetic_training_job.learning_rates, key='dev_score_output/output_prob').out_var
        models['baseline+synthetic'] = (synthetic_training_job, best_epoch)
    with tk.block('specaug_with_synthetic'):
        synthetic_training_params = copy.deepcopy(initial_checkpoint_training_params)
        synthetic_training_params['ext_num_epochs'] = 250
        synthetic_training_params['load'] = initial_training_job.models[80].model
        synthetic_training_params.update(asr_global_parameter_dict)
        synthetic_training_params['ext_training_zips'] = [zip_dict['train-clean-100'], synthetic_zip_corpus]
        synthetic_training_job = train_asr_config(asr_specaug_config, 'librispeech-100-specaug-synthetic-training', synthetic_training_params)
        from recipe.returnn.search import GetBestEpoch
        best_epoch = GetBestEpoch(synthetic_training_job.model_dir, synthetic_training_job.learning_rates, key='dev_score_output/output_prob').out_var
        models['specaug+synthetic'] = (synthetic_training_job, best_epoch)
    # Decode every trained model at its best epoch on all evaluation sets.
    for (experiment_name, (training_job, best_epoch)) in models.items():
        with tk.block(('%s_decoding' % experiment_name)):
            for key in transcription_text_dict:
                wer = decode_and_evaluate_asr_config(key, asr_training_config, training_job.model_dir, epoch=best_epoch, zip_corpus=zip_dict[key], text=transcription_text_dict[key], parameter_dict=asr_global_parameter_dict, training_name=experiment_name)
                tk.register_output(('results/%s_%s.wer' % (experiment_name, key)), wer)
|
def get_asr_dataset_stats(zip_dataset):
    """Compute global mean/stddev over a zip corpus for OggZipDataset normalization.

    :param zip_dataset: zip corpus to analyze
    :return: (mean file, stddev file)
    """
    from recipe.returnn.dataset import ExtractDatasetStats
    dataset_config = {'train': {'class': 'OggZipDataset', 'audio': {}, 'targets': None, 'path': zip_dataset}}
    stats_job = ExtractDatasetStats(dataset_config)
    stats_job.add_alias('data/stats/ExtractDatasetStats')
    tk.register_output('data/stats/norm.mean.txt', stats_job.mean_file)
    tk.register_output('data/stats/norm.std_dev.txt', stats_job.std_dev_file)
    return (stats_job.mean_file, stats_job.std_dev_file)
|
def train_asr_config(config, name, parameter_dict=None):
    """Create and register a RETURNN ASR training job.

    :param config: training config file path
    :param str name: alias/output name
    :param dict|None parameter_dict: config parameter overrides
    :return: the training job
    """
    job = RETURNNTrainingFromFile(config, parameter_dict=parameter_dict, mem_rqmt=16)
    job.add_alias('asr_training/' + name)
    job.rqmt['time'] = 167
    job.rqmt['cpu'] = 8
    tk.register_output('asr_training/' + name + '_model', job.model_dir)
    tk.register_output('asr_training/' + name + '_training-scores', job.learning_rates)
    return job
|
def decode_and_evaluate_asr_config(name, config_file, model_path, epoch, zip_corpus, text, parameter_dict, training_name=None):
    """Create the RETURNN decoding/search job, convert the output into word form
    for scoring and compute the WER score.

    :param str name: name of the decoding, usually the evaluation set name and decoding options
    :param Path config_file: training config or special decoding config file path
    :param Path model_path: .model_dir variable of the training job, or None if the
        config itself defines which model to load
    :param int|tk.Variable epoch: the epoch to select from the model folder
    :param Path zip_corpus: zip corpus for decoding
    :param Path text: text dictionary file for WER computation
    :param dict parameter_dict: network options
    :param str training_name: optional name of the trained model for alias and output naming
    :return: WER score output
    """
    with tk.block('evaluation'):
        path_prefix = 'asr_evaluation/'
        if training_name:
            path_prefix += (training_name + '/')
        local_parameter_dict = {'ext_eval_zip': zip_corpus, 'ext_decoding': True, 'ext_model': model_path, 'ext_load_epoch': epoch}
        if model_path is None:  # fix: identity check instead of `== None` (PEP 8)
            # No explicit model given: the config defines what to load.
            local_parameter_dict.pop('ext_model')
            local_parameter_dict.pop('ext_load_epoch')
        local_parameter_dict.update(parameter_dict)
        asr_recog_job = RETURNNSearchFromFile(config_file, parameter_dict=local_parameter_dict, mem_rqmt=12, time_rqmt=1, output_mode='py')
        asr_recog_job.add_alias((path_prefix + ('search_%s/recognition' % name)))
        tk.register_output((path_prefix + ('search_%s/asr_out' % name)), asr_recog_job.out)
        bpe_to_words_job = SearchBPEtoWords(asr_recog_job.out)
        bpe_to_words_job.add_alias((path_prefix + ('search_%s/bpe_to_words' % name)))
        tk.register_output((path_prefix + ('search_%s/words_out' % name)), bpe_to_words_job.out)
        wer_score_job = ReturnnScore(bpe_to_words_job.out, text)
        wer_score_job.add_alias((path_prefix + ('search_%s/wer_scoring' % name)))
        tk.register_output((path_prefix + ('search_%s/WER' % name)), wer_score_job.out)
        return wer_score_job.out
|
def prepare_data_librispeech():
    """Create LibriSpeech corpora in Bliss and zip format, plus text dicts for scoring.

    :return: (bliss dict, zip dict, transcription dict), each keyed by dataset name
    """
    dataset_names = ['dev-clean', 'dev-other', 'test-clean', 'test-other', 'train-clean-100', 'train-clean-360']
    evaluation_names = ['dev-clean', 'dev-other', 'test-clean', 'test-other']
    bliss_corpora = {}
    zip_corpora = {}
    transcriptions = {}
    for ds_name in dataset_names:
        raw_path = Path('../data/dataset-raw/LibriSpeech/%s/' % ds_name)
        to_bliss = LibriSpeechToBliss(corpus_path=raw_path, name=ds_name)
        to_bliss.add_alias('data/LibriSpeechToBliss/%s' % ds_name)
        bliss_corpora[ds_name] = to_bliss.out
        tk.register_output('data/bliss/%s.xml.gz' % ds_name, to_bliss.out)
        to_zip = BlissToZipDataset(name=ds_name, corpus_file=to_bliss.out, use_full_seq_name=False)
        to_zip.add_alias('data/BlissToZipDataset/%s' % ds_name)
        zip_corpora[ds_name] = to_zip.out
        tk.register_output('data/asr_zip/%s.zip' % ds_name, to_zip.out)
    for ds_name in evaluation_names:
        # Text dictionaries are only needed for WER scoring on the eval sets.
        to_text = BlissExtractTextDictionary(bliss_corpora[ds_name], segment_key_only=True)
        to_text.add_alias('data/BlissExtractTextDictionary/%s' % ds_name)
        transcriptions[ds_name] = to_text.out
    return (bliss_corpora, zip_corpora, transcriptions)
|
def build_subwords(bliss_corpora, num_segments, name):
    """Create BPE codes, vocabulary and vocab size for the given bliss corpora.

    :param list bliss_corpora: bliss corpora for subword training
    :param int num_segments: number of BPE merge operations / segments
    :param str name: name of the subwords
    :return: (codes, vocab, vocab size)
    """
    from recipe.text import Concatenate
    extracted_texts = [BlissExtractRawText(corpus).out for corpus in bliss_corpora]
    full_text = Concatenate(extracted_texts).out
    subwords_job = CreateSubwordsAndVocab(text=full_text, num_segments=num_segments)
    subwords_job.add_alias('data/subwords/CreateSubwordsAndVocab-%s' % name)
    tk.register_output('data/subwords/%s.bpe.codes' % name, subwords_job.out_bpe)
    tk.register_output('data/subwords/%s.bpe.vocab' % name, subwords_job.out_vocab)
    tk.register_output('data/subwords/%s.bpe.vocab_size' % name, subwords_job.out_vocab_size)
    return (subwords_job.out_bpe, subwords_job.out_vocab, subwords_job.out_vocab_size)
|
def train_f2l_config(config_file, name, parameter_dict=None):
    """Create and register a RETURNN feature-to-linear (f2l) training job.

    :param config_file: training config file path
    :param str name: alias/output name
    :param dict|None parameter_dict: config parameter overrides
    :return: the training job
    """
    from recipe.returnn import RETURNNTrainingFromFile
    job = RETURNNTrainingFromFile(config_file, parameter_dict=parameter_dict, mem_rqmt=16)
    job.add_alias('f2l_training/' + name)
    job.rqmt['time'] = 96
    job.rqmt['cpu'] = 8
    tk.register_output('f2l_training/' + name + '_model', job.model_dir)
    tk.register_output('f2l_training/' + name + '_training-scores', job.learning_rates)
    return job
|
def convert_with_f2l(config_file, name, model_dir, epoch, features):
    """Run the f2l forward pass to convert features into linear spectrograms.

    :param config_file: f2l forward config
    :param str name: alias name
    :param model_dir: model directory of the f2l training job
    :param int epoch: epoch to load
    :param features: unstacked feature HDF input
    :return: (linear features HDF, forward job)
    """
    from recipe.returnn.forward import RETURNNForwardFromFile
    params = {'ext_forward': True, 'ext_model': model_dir, 'ext_load_epoch': epoch, 'ext_eval_features': features}
    forward_job = RETURNNForwardFromFile(config_file, parameter_dict=params, hdf_outputs=['linear_features'], mem_rqmt=8)
    forward_job.add_alias('f2l_forward/' + name)
    forward_job.rqmt['qsub_args'] = ('-l qname=%s' % '*080*') + ' -l h_fsize=400G'
    forward_job.rqmt['time'] = 24
    forward_job.rqmt['mem'] = 16
    return (forward_job.outputs['linear_features'], forward_job)
|
def griffin_lim_ogg(linear_hdf, name, iterations=1):
    """Convert a linear-spectrogram HDF to audio via Griffin-Lim.

    :param linear_hdf: linear features HDF
    :param str name: alias/output name
    :param int iterations: Griffin-Lim iterations
    :return: (output bliss corpus, Griffin-Lim job)
    """
    from recipe.tts.toolchain import GriffinLim
    gl = GriffinLim(linear_hdf, iterations=iterations, sample_rate=16000, window_shift=0.0125, window_size=0.05, preemphasis=0.97)
    gl.add_alias('gl_conversion/' + name)
    tk.register_output('generated_audio/' + name + '_audio', gl.out_folder)
    return (gl.out_corpus, gl)
|
def process_corpus(bliss_corpus, char_vocab, silence_duration):
    """Prepare a bliss corpus for TTS training.

    Adds an end token, removes long silence via ffmpeg, and recovers durations.

    :param bliss_corpus: input bliss corpus
    :param char_vocab: character vocabulary for text processing
    :param float silence_duration: silence length (seconds) threshold for removal
    :return: processed bliss corpus
    """
    from recipe.text.bliss import ProcessBlissText
    from recipe.corpus.ffmpeg import BlissFFMPEGJob, BlissRecoverDuration
    with_end_token = ProcessBlissText(bliss_corpus, [('end_token', {'token': '~'})], vocabulary=char_vocab)
    filter_string = ('-af "silenceremove=stop_periods=-1:stop_duration=%f:stop_threshold=-40dB"' % silence_duration)
    no_silence = BlissFFMPEGJob(with_end_token.out, filter_string, ffmpeg_binary=FFMPEG_BINARY, output_format='wav')
    no_silence.rqmt['time'] = 24
    # Durations change after silence removal, so they must be recomputed.
    return BlissRecoverDuration(no_silence.out).out
|
def prepare_ttf_data(bliss_dict):
    """Build a character vocabulary and TTS-processed bliss/zip corpora.

    :param dict bliss_dict: name -> bliss corpus
    :return: (processed bliss corpora, processed zip corpora, char vocab);
        corpus keys are prefixed with 'tts-'
    """
    from recipe.returnn.vocabulary import BuildCharacterVocabulary
    char_vocab = BuildCharacterVocabulary(uppercase=True).out
    bliss_out = {}
    zip_out = {}
    for name, corpus in bliss_dict.items():
        tts_name = 'tts-' + name
        processed = process_corpus(bliss_corpus=corpus, char_vocab=char_vocab, silence_duration=0.1)
        bliss_out[tts_name] = processed
        tk.register_output('data/bliss/%s.processed.xml.gz' % name, processed)
        zip_out[tts_name] = BlissToZipDataset(tts_name, processed).out
    return (bliss_out, zip_out, char_vocab)
|
def get_ttf_dataset_stats(zip_dataset):
    """Compute mean/stddev statistics of TTS mel features over a zip corpus.

    :param zip_dataset: zip corpus to analyze
    :return: (mean, std_dev)
    """
    from recipe.returnn.dataset import ExtractDatasetStats
    audio_opts = {'feature_options': {'fmin': 60}, 'features': 'db_mel_filterbank', 'num_feature_filters': 80, 'peak_normalization': False, 'preemphasis': 0.97, 'step_len': 0.0125, 'window_len': 0.05}
    dataset_config = {'train': {'class': 'OggZipDataset', 'audio': audio_opts, 'targets': None, 'path': zip_dataset}}
    stats_job = ExtractDatasetStats(dataset_config)
    stats_job.add_alias('data/tts_stats/ExtractDatasetStats')
    # NOTE(review): uses .mean/.std_dev while get_asr_dataset_stats uses
    # .mean_file/.std_dev_file — confirm which attributes the job provides.
    tk.register_output('data/tts_stats/norm.mean.txt', stats_job.mean)
    tk.register_output('data/tts_stats/norm.std_dev.txt', stats_job.std_dev)
    return (stats_job.mean, stats_job.std_dev)
|
def train_ttf_config(config, name, parameter_dict=None):
    """Create and register a RETURNN TTS training job.

    :param config: training config file path
    :param str name: alias/output name
    :param dict|None parameter_dict: config parameter overrides
    :return: the training job
    """
    from recipe.returnn import RETURNNTrainingFromFile
    job = RETURNNTrainingFromFile(config, parameter_dict=parameter_dict, mem_rqmt=16)
    job.add_alias('tts_training/' + name)
    job.rqmt['time'] = 167
    job.rqmt['cpu'] = 8
    tk.register_output('tts_training/' + name + '_model', job.model_dir)
    tk.register_output('tts_training/' + name + '_training-scores', job.learning_rates)
    return job
|
def generate_speaker_embeddings(config_file, model_dir, epoch, zip_corpus, name, default_parameter_dict=None):
    """Forward a trained TTS model to extract per-sequence speaker embeddings.

    :param config_file: TTS config
    :param model_dir: model directory of the TTS training job
    :param int epoch: epoch to load
    :param zip_corpus: zip corpus to forward
    :param str name: alias/output name
    :param dict|None default_parameter_dict: extra config parameters
    :return: speaker embeddings HDF output
    """
    from recipe.returnn.forward import RETURNNForwardFromFile
    parameter_dict = {'ext_gen_speakers': True, 'ext_model': model_dir, 'ext_load_epoch': epoch, 'ext_eval_zip': zip_corpus}
    if default_parameter_dict:
        # Fix: the declared default None previously crashed in dict.update(None).
        parameter_dict.update(default_parameter_dict)
    job = RETURNNForwardFromFile(config_file, parameter_dict=parameter_dict, hdf_outputs=['speaker_embeddings'], mem_rqmt=8)
    job.add_alias('tts_speaker_generation/' + name)
    tk.register_output('tts_speaker_generation/' + name + '_speakers.hdf', job.outputs['speaker_embeddings'])
    return job.outputs['speaker_embeddings']
|
def decode_with_speaker_embeddings(config_file, model_dir, epoch, zip_corpus, speaker_hdf, name, default_parameter_dict=None, segment_file=None):
    """Forward the TTS model with given speaker embeddings and unstack the features.

    :param config_file: TTS config
    :param model_dir: model directory of the TTS training job
    :param int epoch: epoch to load
    :param zip_corpus: zip corpus to decode
    :param speaker_hdf: speaker embeddings HDF
    :param str name: alias name
    :param dict|None default_parameter_dict: extra config parameters
    :param segment_file: optional segment whitelist for partial decoding
    :return: (unstacked features, forward job, convert job)
    """
    from recipe.returnn.forward import RETURNNForwardFromFile
    from recipe.tts.toolchain import ConvertFeatures
    parameter_dict = {'ext_forward': True, 'ext_model': model_dir, 'ext_load_epoch': epoch, 'ext_eval_zip': zip_corpus, 'ext_speaker_embeddings': speaker_hdf, 'ext_forward_segment_file': segment_file}
    if segment_file is None:  # fix: identity check instead of `== None` (PEP 8)
        parameter_dict.pop('ext_forward_segment_file')
    if default_parameter_dict:
        # Fix: the declared default None previously crashed in dict.update(None).
        parameter_dict.update(default_parameter_dict)
    decode_with_speakers_job = RETURNNForwardFromFile(config_file, parameter_dict=parameter_dict, hdf_outputs=['stacked_features'], mem_rqmt=8)
    decode_with_speakers_job.rqmt['qsub_args'] = '-l h_fsize=200G'
    decode_with_speakers_job.add_alias('tts_decode_with_speakers/' + name)
    convert_features_job = ConvertFeatures(decode_with_speakers_job.outputs['stacked_features'], 3)
    convert_features_job.add_alias('tts_convert_features/' + name)
    return (convert_features_job.out, decode_with_speakers_job, convert_features_job)
|
def evaluate_tts(ttf_config_file, ttf_model_dir, ttf_epoch, f2l_config_file, f2l_model_dir, f2l_epoch, train_zip_corpus, test_zip_corpus, test_bliss_corpus, test_text, default_parameter_dict, name):
    """Synthesize the test corpus with the TTS + f2l models and score it with a
    pretrained ASR model (WER of synthesized speech as TTS quality proxy)."""
    from recipe.tts.corpus import DistributeSpeakerEmbeddings
    from recipe.corpus import BlissToZipDataset
    from config.asr import decode_and_evaluate_asr_config
    # Speaker embeddings come from the training corpus, distributed onto test.
    speaker_hdf = generate_speaker_embeddings(ttf_config_file, ttf_model_dir, ttf_epoch, train_zip_corpus, name=name, default_parameter_dict=default_parameter_dict)
    distributed_hdf = DistributeSpeakerEmbeddings(test_bliss_corpus, speaker_hdf, use_full_seq_name=False, options=None).out
    (unstacked_hdf, _, _) = decode_with_speaker_embeddings(ttf_config_file, ttf_model_dir, ttf_epoch, test_zip_corpus, distributed_hdf, name, default_parameter_dict)
    (linear_hdf, _) = convert_with_f2l(f2l_config_file, name, f2l_model_dir, f2l_epoch, unstacked_hdf)
    (audio_bliss, _) = griffin_lim_ogg(linear_hdf, name)
    asr_bliss = BlissAddTextFromBliss(audio_bliss, test_bliss_corpus).out
    asr_zip = BlissToZipDataset('test', asr_bliss, use_full_seq_name=False).out
    trafo_specaug = Path('/u/rossenbach/experiments/switchboard_test/config/alberts_configs/trafo.specaug4.12l.ffdim4.pretrain3.natctc_recognize_pretrained.config')
    decode_and_evaluate_asr_config('test-%s' % name, trafo_specaug, None, 0, asr_zip, test_text, {})
|
class BlissToZipDataset(Job):
    """
    Convert a bliss corpus with single-segment recordings into the zip format
    for RETURNN training: the audio files plus a ``<name>.txt`` python-literal
    list describing each sequence.
    """
    def __init__(self, name, corpus_file, segment_file=None, use_full_seq_name=False, no_audio=False):
        """
        :param str name: dataset name, used as zip file name and internal folder
        :param str|Path corpus_file: bliss corpus with one segment per recording
        :param str|Path|None segment_file: optional (possibly gzipped) segment whitelist
        :param bool use_full_seq_name: keep the full "corpus/recording/segment"
            name; otherwise only the plain segment name is used (old behavior)
        :param bool no_audio: write only the meta entries, do not pack audio
        """
        self.name = name
        self.corpus_file = corpus_file
        self.segment_file_path = segment_file
        self.use_full_seq_name = use_full_seq_name
        self.no_audio = no_audio
        self.out = self.output_path('%s.zip' % name)

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import zipfile
        zip_file = zipfile.ZipFile(tk.uncached_path(self.out), mode='w', compression=zipfile.ZIP_STORED)
        dict_file_path = self.name + '.txt'
        dict_file = open(dict_file_path, 'wt')
        dict_file.write('[\n')
        c = corpus.Corpus()
        assert len(c.subcorpora) == 0
        c.load(tk.uncached_path(self.corpus_file))
        segments = None
        if self.segment_file_path:
            # open in binary mode in both cases so the lines can be decoded
            # uniformly below.
            # bugfix: the non-gz branch used the non-existing attribute
            # ``self.segment_file`` and text mode, which made ``line.decode()``
            # fail for plain-text segment files.
            if tk.uncached_path(self.segment_file_path).endswith('gz'):
                segment_file = gzip.open(tk.uncached_path(self.segment_file_path), 'rb')
            else:
                segment_file = open(tk.uncached_path(self.segment_file_path), 'rb')
            segments = [line.decode().strip() for line in segment_file]
            segment_file.close()
        for recording in c.recordings:
            if not recording.segments:
                continue
            assert len(recording.segments) == 1
            segment = recording.segments[0]
            segment_name = '/'.join([c.name, recording.name, segment.name])
            # skip sequences that are not in the whitelist (matched on full name)
            if self.segment_file_path and segment_name not in segments:
                continue
            if not self.use_full_seq_name:
                segment_name = segment.name
            if self.no_audio:
                dict_file.write('{"duration": %f, "text": "%s", "seq_name": "%s"},\n' % (segment.end, segment.orth.replace('"', '\\"'), segment_name))
            else:
                audio_path = recording.audio
                arc_path = os.path.join(self.name, os.path.basename(audio_path))
                zip_file.write(audio_path, arcname=arc_path)
                dict_file.write('{"file": "%s", "duration": %f, "text": "%s", "seq_name": "%s"},\n' % (os.path.basename(audio_path), segment.end, segment.orth.replace('"', '\\"'), segment_name))
        dict_file.write(']\n')
        dict_file.close()
        # the meta file is packed into the zip as well
        zip_file.write(dict_file_path, dict_file_path)
        zip_file.close()
|
class MergeCorpora(Job):
    """
    Merge bliss corpora into a single file, either as subcorpora or as a flat
    list of recordings. This is preferably done after using corpus compression.

    :param Iterable[Path] corpora: bliss corpus file paths to merge
    :param str name: name of the new corpus (subcorpora keep the original names)
    :param bool subcorpora: attach each corpus as a subcorpus instead of
        merging all recordings into one flat corpus
    """
    def __init__(self, corpora, name, subcorpora=True):
        self.corpora = corpora
        self.name = name
        self.subcorpora = subcorpora
        self.merged_corpus = self.output_path('merged.xml.gz')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        target = corpus.Corpus()
        target.name = self.name
        for source_path in self.corpora:
            source = corpus.Corpus()
            source.load(str(source_path))
            if self.subcorpora:
                target.add_subcorpus(source)
            else:
                for recording in source.all_recordings():
                    target.add_recording(recording)
        target.dump(tk.uncached_path(self.merged_corpus))
|
class SegmentCorpus(Job):
    """Split the segment names of a bliss corpus into ``num_segments`` files."""
    def __init__(self, corpus_path, num_segments, use_fullname=False):
        """
        :param corpus_path: bliss corpus to split
        :param int num_segments: number of output segment files
        :param bool use_fullname: write full "corpus/recording/segment" names
        """
        self.set_vis_name('Segment Corpus')
        self.corpus_path = corpus_path
        self.num_segments = num_segments
        self.use_fullname = use_fullname
        self.segment_files = [self.output_path('segments.%d' % i) for i in range(num_segments)]

    def tasks(self):
        yield Task('run', resume='run', mini_task=True)

    def run(self):
        bliss = corpus.Corpus()
        bliss.load(tk.uncached_path(self.corpus_path))
        segment_list = list(bliss.segments())
        for chunk_idx, chunk in enumerate(chunks(segment_list, self.num_segments)):
            with open(self.segment_files[chunk_idx].get_path(), 'wt') as out:
                for seg in chunk:
                    seg_name = seg.fullname() if self.use_fullname else seg.name
                    out.write(seg_name + '\n')
|
class BlissAddTextFromBliss(Job):
    """
    Add transcriptions to a bliss corpus that contains only audio, taking them
    from another bliss corpus containing the same (single-segment) sequences.
    """
    def __init__(self, empty_bliss_corpus, bliss_corpus):
        """
        :param empty_bliss_corpus: corpus without orth entries
        :param bliss_corpus: corpus providing the orth for each segment name
        """
        self.empty_bliss_corpus = empty_bliss_corpus
        self.bliss_corpus = bliss_corpus
        self.out = self.output_path('corpus.xml.gz')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        reference = corpus.Corpus()
        reference.load(tk.uncached_path(self.bliss_corpus))
        # segment name -> transcription, collected from the reference corpus
        orth_by_tag = {}
        for rec in reference.all_recordings():
            assert len(rec.segments) == 1, 'needs to be a single segment recording'
            orth_by_tag[rec.segments[0].name] = rec.segments[0].orth
        target = corpus.Corpus()
        target.load(tk.uncached_path(self.empty_bliss_corpus))
        for rec in target.all_recordings():
            assert len(rec.segments) == 1, 'needs to be a single segment recording'
            # raises KeyError if the reference corpus misses a sequence
            rec.segments[0].orth = orth_by_tag[rec.segments[0].name]
        target.dump(tk.uncached_path(self.out))
|
class BlissFFMPEGJob(Job):
    """
    Apply an arbitrary ffmpeg option string to every audio file of a bliss
    corpus and write a new corpus pointing to the processed audio files
    (e.g. change the speed of all audio files, shifting time AND frequency).
    """
    def __init__(self, corpus_file, ffmpeg_option_string, ffmpeg_binary=None, output_format=None):
        """
        :param corpus_file: bliss corpus
        :param str ffmpeg_option_string: options placed between input and output file
        :param str|None ffmpeg_binary: ffmpeg binary, defaults to "ffmpeg" from PATH
        :param str|None output_format: new audio extension (without dot); keeps
            the original extension when None
        """
        self.corpus_file = corpus_file
        self.ffmpeg_option_string = ffmpeg_option_string
        self.audio_folder = self.output_path('audio/', directory=True)
        self.out = self.output_path('corpus.xml.gz')
        self.rqmt = {'time': 4, 'cpu': 4, 'mem': 8}
        self.ffmpeg_binary = ffmpeg_binary if ffmpeg_binary else 'ffmpeg'
        self.output_format = output_format

    def tasks(self):
        yield Task('run', rqmt=self.rqmt)

    def perform_ffmpeg(self, r):
        """
        Process a single recording; skips files already present in the output
        folder so the task can be resumed.

        :param r: a corpus recording
        :return: always None (signature kept for Pool.map compatibility)
        """
        audio_name = r.audio.split('/')[-1]
        if self.output_format is not None:
            name, ext = os.path.splitext(audio_name)
            audio_name = name + '.' + self.output_format
        target = tk.uncached_path(self.audio_folder) + '/' + audio_name
        seconds = None
        if not os.path.exists(target):
            # NOTE(review): r.audio and the option string are interpolated
            # unquoted into a shell command; paths containing spaces or shell
            # metacharacters will break here
            self.sh('%s -hide_banner -y -i %s %s {audio_folder}/%s' % (self.ffmpeg_binary, r.audio, self.ffmpeg_option_string, audio_name), include_stderr=True)
        else:
            print('found %s' % target)
        return seconds

    def run(self):
        c = corpus.Corpus()
        nc = corpus.Corpus()
        c.load(tk.uncached_path(self.corpus_file))
        nc.name = c.name
        nc.speakers = c.speakers
        nc.default_speaker = c.default_speaker
        nc.speaker_name = c.speaker_name
        # build the new corpus pointing into the output audio folder
        for r in c.recordings:
            nr = corpus.Recording()
            nr.name = r.name
            nr.segments = r.segments
            nr.speaker_name = r.speaker_name
            nr.speakers = r.speakers
            nr.default_speaker = r.default_speaker
            audio_name = r.audio.split('/')[-1]
            if self.output_format is not None:
                name, ext = os.path.splitext(audio_name)
                audio_name = name + '.' + self.output_format
            nr.audio = os.path.join(tk.uncached_path(self.audio_folder), audio_name)
            nc.add_recording(nr)
        from multiprocessing import pool
        # bugfix: close and join the worker pool again (it was leaked before)
        with pool.Pool(4) as p:
            p.map(self.perform_ffmpeg, c.recordings)
        nc.dump(tk.uncached_path(self.out))
|
class BlissRecoverDuration(Job):
    """
    Re-read the audio of each (single-segment) recording and overwrite the
    segment end time with the true audio duration.
    """
    def __init__(self, bliss_corpus):
        """
        :param bliss_corpus: bliss corpus with one segment per recording
        """
        self.bliss_corpus = bliss_corpus
        self.out = self.output_path('corpus.xml.gz')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import soundfile
        c = corpus.Corpus()
        c.load(tk.uncached_path(self.bliss_corpus))
        for r in c.all_recordings():
            assert len(r.segments) == 1, 'needs to be a single segment recording'
            old_duration = r.segments[0].end
            # bugfix: close the audio file handle again (was leaked before)
            with open(r.audio, 'rb') as audio_file:
                data, sample_rate = soundfile.read(audio_file)
            new_duration = len(data) / sample_rate
            print('%s: %f vs. %f' % (r.segments[0].name, old_duration, new_duration))
            r.segments[0].end = new_duration
        c.dump(tk.uncached_path(self.out))
|
class LibriSpeechToBliss(Job):
    """Generate a bliss xml corpus from a LibriSpeech-layout corpus folder."""
    def __init__(self, corpus_path, name):
        """
        :param Path corpus_path: root of a LibriSpeech subset
            (``<root>/<speaker>/<chapter>/`` layout with .flac and .trans.txt files)
        :param str name: name of the resulting corpus
        """
        self.corpus_path = corpus_path
        self.name = name
        self.out = self.output_path('out.xml.gz')

    def run(self):
        from recipe.lib.corpus import Corpus, Speaker, Recording, Segment
        corpus = Corpus()
        corpus.name = self.name
        corpus_path = tk.uncached_path(self.corpus_path)
        assert os.path.isdir(corpus_path)
        for speaker_folder in glob(corpus_path + '/*/'):
            speaker = Speaker()
            speaker.name = os.path.basename(os.path.dirname(speaker_folder))
            print('Add speaker %s' % speaker.name)
            corpus.add_speaker(speaker)
            if os.path.isdir(speaker_folder):
                for subfolder in glob(speaker_folder + '/*/'):
                    # collect audio files and transcriptions of one chapter
                    audio_file_dict = {}
                    text_dict = {}
                    for audio_file in glob(subfolder + '/*.flac'):
                        file_id = audio_file.split('/')[-1].split('.')[0]
                        audio_file_dict[file_id] = audio_file
                    text_file = glob(subfolder + '/*.trans.txt')[0]
                    with open(text_file) as f:
                        for line in f:
                            file_id, text = line.split(' ', 1)
                            text_dict[file_id] = text.strip()
                    for file_id in audio_file_dict.keys():
                        r = Recording()
                        r.name = file_id
                        r.audio = audio_file_dict[file_id]
                        s = Segment()
                        s.name = file_id
                        s.speaker_name = speaker.name
                        s.orth = text_dict[file_id].strip()
                        # bugfix: use the public soundfile API (frames/samplerate)
                        # instead of the private _prepare_read(), and close the
                        # file handle again via the context manager
                        with soundfile.SoundFile(audio_file_dict[file_id]) as sf_handle:
                            audio_duration = sf_handle.frames / sf_handle.samplerate
                        s.start = 0
                        s.end = audio_duration
                        r.add_segment(s)
                        corpus.add_recording(r)
            print('finished: ' + speaker_folder.split('/')[-2])
        corpus.dump(tk.uncached_path(self.out))

    def tasks(self):
        yield Task('run', rqmt={'time': 16})
|
class NamedEntity:
    """Mixin that provides the single ``name`` attribute shared by all corpus elements."""
    def __init__(self):
        super().__init__()
        # str|None, set while parsing or by the user
        self.name = None
|
class CorpusSection:
    """Mixin with speaker-related state shared by corpus sections (corpus/recording)."""
    def __init__(self):
        super().__init__()
        # name of the default speaker of this section
        self.speaker_name = None
        # Speaker object used when no named speaker matches
        self.default_speaker = None
        # speaker name -> Speaker, insertion-ordered
        self.speakers = collections.OrderedDict()
|
class CorpusParser(sax.handler.ContentHandler):
    """
    SAX content handler that fills a Corpus object while parsing a bliss xml
    file. Its methods are called by the sax parser whenever it encounters an
    event in the xml file (tags/characters/namespaces/...). It uses a stack of
    elements to remember the part of the corpus that is currently being read.
    """
    def __init__(self, corpus, path):
        """
        :param Corpus corpus: root corpus object that is filled during parsing
        :param str path: path of the parsed file, used to resolve <include> files
        """
        super().__init__()
        # stack of open corpus objects; [-1] is the innermost open element
        self.elements = [corpus]
        self.path = path
        # character data accumulated since the last start tag
        self.chars = ''
    def startElement(self, name, attrs):
        # innermost open element; new children get attached to it
        e = self.elements[(- 1)]
        if (name == 'corpus'):
            assert (len(self.elements) == 1), '<corpus> may only occur as the root element'
            e.name = attrs['name']
        elif (name == 'subcorpus'):
            assert isinstance(e, Corpus), '<subcorpus> may only occur within a <corpus> or <subcorpus> element'
            subcorpus = Corpus()
            subcorpus.name = attrs['name']
            subcorpus.parent_corpus = e
            e.subcorpora.append(subcorpus)
            self.elements.append(subcorpus)
        elif (name == 'include'):
            assert isinstance(e, Corpus), '<include> may only occur within a <corpus> or <subcorpus> element'
            # includes are resolved relative to the including file and merged
            # into the current (sub)corpus; they are not written back on dump
            path = os.path.join(os.path.dirname(self.path), attrs['file'])
            c = Corpus()
            c.load(path)
            if (c.name != e.name):
                print(('Warning: included corpus (%s) has a different name than the current corpus (%s)' % (c.name, e.name)))
            e.subcorpora.extend(c.subcorpora)
            e.recordings.extend(c.recordings)
            e.speakers.update(c.speakers)
        elif (name == 'recording'):
            assert isinstance(e, Corpus), '<recording> may only occur within a <corpus> or <subcorpus> element'
            rec = Recording()
            rec.name = attrs['name']
            rec.audio = attrs['audio']
            rec.corpus = e
            e.recordings.append(rec)
            self.elements.append(rec)
        elif (name == 'segment'):
            assert isinstance(e, Recording), '<segment> may only occur within a <recording> element'
            seg = Segment()
            # unnamed segments get their 1-based position as name
            seg.name = attrs.get('name', str((len(e.segments) + 1)))
            seg.start = float(attrs.get('start', '0.0'))
            seg.end = float(attrs.get('end', '0.0'))
            seg.track = (int(attrs['track']) if ('track' in attrs) else None)
            seg.recording = e
            e.segments.append(seg)
            self.elements.append(seg)
        elif (name == 'speaker-description'):
            assert isinstance(e, CorpusSection), '<speaker-description> may only occur within a <corpus>, <subcorpus> or <recording>'
            speaker = Speaker()
            speaker.name = attrs.get('name', None)
            # a named speaker goes into the section's speaker dict, an unnamed
            # one becomes the section's default speaker
            if (speaker.name is not None):
                e.speakers[speaker.name] = speaker
            else:
                e.default_speaker = speaker
            self.elements.append(speaker)
        elif (name == 'speaker'):
            assert isinstance(e, (CorpusSection, Segment)), '<speaker> may only occur within a <corpus>, <subcorpus>, <recording> or <segment>'
            e.speaker_name = attrs['name']
        # reset the character buffer at every start tag
        self.chars = ''
    def endElement(self, name):
        e = self.elements[(- 1)]
        if (name == 'orth'):
            assert isinstance(e, Segment)
            # normalize whitespace of the transcription
            text = self.chars.strip()
            text = re.sub(' +', ' ', text)
            text = re.sub('\n', '', text)
            e.orth = text
        elif (isinstance(e, Speaker) and (name != 'speaker-description')):
            # any other tag inside <speaker-description> becomes a speaker attribute
            e.attribs[name] = self.chars.strip()
        if (name in ['corpus', 'subcorpus', 'recording', 'segment', 'speaker-description']):
            self.elements.pop()
    def characters(self, characters):
        # may be called multiple times per text node, therefore accumulate
        self.chars += characters
|
class Corpus(NamedEntity, CorpusSection):
    """
    This class represents a corpus in the Bliss format. It is also used to
    represent subcorpora when the parent_corpus attribute is set. Corpora with
    include statements can be read but are written back as a single file.
    """
    def __init__(self):
        super().__init__()
        # Corpus|None, set when this corpus is a subcorpus
        self.parent_corpus = None
        # list[Corpus]
        self.subcorpora = []
        # list[Recording]
        self.recordings = []
    def segments(self):
        """
        :return: an iterator over all segments within the corpus (incl. subcorpora)
        """
        for r in self.recordings:
            yield from r.segments
        for sc in self.subcorpora:
            yield from sc.segments()
    def all_recordings(self):
        """
        :rtype: Iterator[Recording]
        """
        yield from self.recordings
        for sc in self.subcorpora:
            yield from sc.all_recordings()
    def remove_recording(self, recording):
        """
        Remove a recording (given as identical object, equal object or name)
        from this corpus and all subcorpora.
        """
        to_delete = []
        for idx, r in enumerate(self.recordings):
            if r is recording or r == recording or r.name == recording:
                to_delete.append(idx)
        # delete back-to-front so the remaining indices stay valid
        for idx in reversed(to_delete):
            del self.recordings[idx]
        for sc in self.subcorpora:
            sc.remove_recording(recording)
    def add_recording(self, recording):
        assert isinstance(recording, Recording)
        recording.corpus = self
        self.recordings.append(recording)
    def add_subcorpus(self, corpus):
        assert isinstance(corpus, Corpus)
        corpus.parent_corpus = self
        self.subcorpora.append(corpus)
    def add_speaker(self, speaker):
        assert isinstance(speaker, Speaker)
        self.speakers[speaker.name] = speaker
    def fullname(self):
        """:return: slash-joined names from the root corpus down to this one"""
        if self.parent_corpus is not None:
            return self.parent_corpus.fullname() + '/' + self.name
        else:
            return self.name
    def speaker(self, speaker_name, default_speaker):
        """
        Resolve a speaker by name, walking up the corpus hierarchy; falls back
        to the innermost default speaker found on the way.
        """
        if speaker_name is None:
            speaker_name = self.speaker_name
        if speaker_name in self.speakers:
            return self.speakers[speaker_name]
        else:
            if default_speaker is None:
                default_speaker = self.default_speaker
            if self.parent_corpus is not None:
                return self.parent_corpus.speaker(speaker_name, default_speaker)
            else:
                return default_speaker
    def load(self, path):
        """
        :param str path: corpus file, optionally gzipped
        """
        open_fun = gzip.open if path.endswith('.gz') else open
        # bugfix: read explicitly as utf-8; the files carry an utf8 xml header,
        # but text mode without an encoding used the platform default
        with open_fun(path, 'rt', encoding='utf-8') as f:
            handler = CorpusParser(self, path)
            sax.parse(f, handler)
    def dump(self, path):
        """
        :param str path: target file, gzipped when ending in .gz
        """
        open_fun = gzip.open if path.endswith('.gz') else open
        # bugfix: write explicitly as utf-8 to match the declared xml header
        with open_fun(path, 'wt', encoding='utf-8') as f:
            f.write('<?xml version="1.0" encoding="utf8"?>\n')
            self._dump_internal(f)
    def _dump_internal(self, out, indentation=''):
        # the root corpus writes <corpus>, nested ones write <subcorpus>
        if self.parent_corpus is None:
            out.write('<corpus name="%s">\n' % self.name)
        else:
            out.write('%s<subcorpus name="%s">\n' % (indentation, self.name))
        for s in self.speakers.values():
            s.dump(out, indentation + ' ')
        for r in self.recordings:
            r.dump(out, indentation + ' ')
        for sc in self.subcorpora:
            sc._dump_internal(out, indentation + ' ')
        if self.parent_corpus is None:
            out.write('</corpus>\n')
        else:
            out.write('%s</subcorpus>\n' % (indentation,))
|
class Recording(NamedEntity, CorpusSection):
    """A single audio file of a corpus together with its segments and speakers."""
    def __init__(self):
        super().__init__()
        # path to the audio file
        self.audio = None
        # parent Corpus
        self.corpus = None
        # list[Segment]
        self.segments = []

    def fullname(self):
        """:return: "<corpus fullname>/<recording name>" """
        return self.corpus.fullname() + '/' + self.name

    def speaker(self, speaker_name=None):
        """Resolve a speaker by name, falling back to the parent corpus."""
        if speaker_name is None:
            speaker_name = self.speaker_name
        if speaker_name in self.speakers:
            return self.speakers[speaker_name]
        return self.corpus.speaker(speaker_name, self.default_speaker)

    def dump(self, out, indentation=''):
        out.write('%s<recording name="%s" audio="%s">\n' % (indentation, self.name, self.audio))
        child_indent = indentation + ' '
        for speaker in self.speakers.values():
            speaker.dump(out, child_indent)
        for segment in self.segments:
            segment.dump(out, child_indent)
        out.write('%s</recording>\n' % indentation)

    def add_segment(self, segment):
        assert isinstance(segment, Segment)
        segment.recording = self
        self.segments.append(segment)
|
class Segment(NamedEntity):
    """A time interval of a recording together with its transcription."""
    def __init__(self):
        super().__init__()
        self.start = 0.0        # segment start time in seconds
        self.end = 0.0          # segment end time in seconds
        self.track = None       # int|None audio track/channel
        self.orth = None        # str|None transcription
        self.speaker_name = None
        self.recording = None   # parent Recording

    def fullname(self):
        """:return: "<recording fullname>/<segment name>" """
        return self.recording.fullname() + '/' + self.name

    def speaker(self):
        """Resolve this segment's speaker via the parent recording."""
        return self.recording.speaker(self.speaker_name)

    def dump(self, out, indentation=''):
        track_attr = (' track="%d"' % self.track) if self.track is not None else ''
        has_children = self.orth is not None or self.speaker_name is not None
        out.write('%s<segment name="%s" start="%.4f" end="%.4f"%s>%s' % (
            indentation, self.name, self.start, self.end, track_attr,
            '\n' if has_children else ''))
        if self.speaker_name is not None:
            out.write('%s <speaker name="%s"/>\n' % (indentation, self.speaker_name))
        if self.orth is not None:
            out.write('%s <orth> %s </orth>\n' % (indentation, saxutils.escape(self.orth)))
        if has_children:
            out.write('%s</segment>\n' % indentation)
        else:
            out.write('</segment>\n')
|
class Speaker(NamedEntity):
    """Speaker description; arbitrary child tags are kept in ``attribs``."""
    def __init__(self):
        super().__init__()
        # tag name -> text content of the <speaker-description> children
        self.attribs = {}

    def dump(self, out, indentation=''):
        name_attr = (' name="%s"' % self.name) if self.name is not None else ''
        out.write('%s<speaker-description%s>' % (indentation, name_attr))
        has_attribs = len(self.attribs) > 0
        if has_attribs:
            out.write('\n')
        for key, value in self.attribs.items():
            out.write('%s <%s>%s</%s>\n' % (indentation, key, value, key))
        # the closing tag is only indented when attributes were written
        out.write('%s</speaker-description>\n' % (indentation if has_attribs else ''))
|
class SegmentMap(object):
    """A list of key/value segment mappings, (de)serialized as xml."""
    def __init__(self):
        # list[SegmentMapItem]
        self.map_entries = []

    def load(self, path):
        """
        :param str path: xml file with <map-item> entries, optionally gzipped
        """
        open_fun = gzip.open if path.endswith('.gz') else open
        with open_fun(path, 'rb') as f:
            for _, node in ET.iterparse(f, events=('start',)):
                if node.tag != 'map-item':
                    continue
                entry = SegmentMapItem()
                entry.key = node.attrib['key']
                entry.value = node.attrib['value']
                self.map_entries.append(entry)

    def dump(self, path):
        """
        :param str path: output xml file, gzipped when ending in .gz
        """
        open_fun = gzip.open if path.endswith('.gz') else open
        with open_fun(path, 'wt') as f:
            f.write('<?xml version="1.0" encoding="utf8"?>\n')
            f.write('<segment-key-map>\n')
            for entry in self.map_entries:
                entry.dump(f)
            f.write('</segment-key-map>\n')
|
class SegmentMapItem(object):
    """One key/value entry of a SegmentMap."""
    def __init__(self):
        self.key = None    # str|None
        self.value = None  # str|None

    def dump(self, out):
        """Write this entry as a single xml line to the file-like ``out``."""
        out.write('<map-item key="%s" value="%s" />\n' % (self.key, self.value))
|
def instanciate_vars(o):
    """
    Recursively replace sisyphus ``Variable`` objects by their current values.
    Lists and dicts are updated in place; tuples are rebuilt.

    :param o: arbitrarily nested structure of lists/tuples/dicts/Variables
    :return: the structure with all Variables resolved
    """
    if isinstance(o, Variable):
        return o.get()
    if isinstance(o, list):
        for idx in range(len(o)):
            o[idx] = instanciate_vars(o[idx])
        return o
    if isinstance(o, tuple):
        return tuple(instanciate_vars(item) for item in o)
    if isinstance(o, dict):
        for key in o:
            o[key] = instanciate_vars(o[key])
        return o
    return o
|
class RETURNNConfig():
    """
    In-memory representation of a RETURNN config file. ``config`` contains the
    settings that influence the computation (and the job hash), ``post_config``
    settings that do not (e.g. device/logging); both are merged when writing.
    """
    PYTHON_CODE = textwrap.dedent(' #!rnn.py\n\n ${REGULAR_CONFIG}\n\n locals().update(**config)\n\n ${EXTRA_PYTHON_CODE}\n ')

    def __init__(self, config, post_config, extra_python_code='', extra_python_hash=None):
        """
        :param dict config: hashed settings
        :param dict post_config: non-hashed settings, override config on get()
        :param str extra_python_code: verbatim python appended to the written config
        :param extra_python_hash: override used for hashing the extra python code
        """
        self.config = config
        self.post_config = post_config
        self.extra_python_code = extra_python_code
        self.extra_python_hash = extra_python_hash if extra_python_hash is not None else extra_python_code

    def get(self, key, default=None):
        """Look up a key; post_config takes precedence over config."""
        if key in self.post_config:
            return self.post_config[key]
        return self.config.get(key, default)

    def write(self, path):
        """Serialize the merged config to ``path`` as an executable RETURNN config."""
        # bugfix: merge into a copy -- previously ``self.config`` itself was
        # updated with post_config here, so writing a config leaked the
        # non-hashed settings into later get()/hash() calls.
        # (note: instanciate_vars still resolves nested containers in place)
        config = dict(self.config)
        config.update(self.post_config)
        config = instanciate_vars(config)
        config_lines = []
        unreadable_data = {}
        pp = pprint.PrettyPrinter(indent=2, width=150)
        for k, v in sorted(config.items()):
            if pprint.isreadable(v):
                config_lines.append('%s = %s' % (k, pp.pformat(v)))
            elif isinstance(v, tk.Path):
                # paths are not repr-readable; they go through json below
                unreadable_data[k] = v
        if len(unreadable_data) > 0:
            config_lines.append('import json')
            json_data = json.dumps(unreadable_data).replace('"', '\\"')
            config_lines.append('config = json.loads("%s")' % json_data)
        else:
            config_lines.append('config = {}')
        python_code = string.Template(self.PYTHON_CODE).substitute({'REGULAR_CONFIG': '\n'.join(config_lines), 'EXTRA_PYTHON_CODE': self.extra_python_code})
        with open(path, 'wt', encoding='utf-8') as f:
            f.write(python_code)

    def hash(self):
        """:return: dict of the hash-relevant parts (config + extra python)"""
        return {'returnn_config': self.config, 'extra_python_hash': self.extra_python_hash}
|
class WriteRETURNNConfigJob(Job):
    """Write a RETURNNConfig object to a config file as a sisyphus job."""
    def __init__(self, returnn_config):
        """
        :param RETURNNConfig returnn_config:
        """
        assert isinstance(returnn_config, RETURNNConfig)
        self.returnn_config = returnn_config
        self.returnn_config_file = self.output_path('returnn.config')

    def tasks(self):
        yield Task('run', resume='run', mini_task=True)

    def run(self):
        self.returnn_config.write(self.returnn_config_file.get_path())

    @classmethod
    def hash(cls, kwargs):
        # the job hash depends only on the config content
        return super().hash(kwargs['returnn_config'].hash())
|
class ExtractDatasetStats(Job):
    """
    Run RETURNN's tools/dump-dataset.py with --stats on the dataset described
    by the given config, then reduce the per-dimension mean/std-dev files to
    scalar statistics.
    """
    def __init__(self, config, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        """
        :param dict config: RETURNN config dict describing the dataset
        :param returnn_python_exe: python interpreter used to run RETURNN
        :param returnn_root: RETURNN source root (containing tools/)
        """
        self.config = RETURNNConfig(config, {})
        self.crnn_python_exe = returnn_python_exe
        self.crnn_root = returnn_root
        # scalar outputs (pooled over all feature dimensions)
        self.mean = self.output_var('mean_var')
        self.std_dev = self.output_var('std_dev_var')
        # per-dimension statistics as produced by dump-dataset.py
        self.mean_file = self.output_path('mean')
        self.std_dev_file = self.output_path('std_dev')
    def tasks(self):
        # NOTE(review): rqmt is given although mini_task=True; mini tasks
        # usually run locally -- confirm this combination is intentional
        (yield Task('run', rqmt={'cpu': 1, 'mem': 4, 'time': 4}, mini_task=True))
    def run(self):
        self.config.write('crnn.config')
        with open('rnn.sh', 'wt') as f:
            f.write(('#!/usr/bin/env bash\n%s' % ' '.join([tk.uncached_path(self.crnn_python_exe), os.path.join(tk.uncached_path(self.crnn_root), 'tools/dump-dataset.py'), 'crnn.config', '--endseq -1', '--stats', '--dump_stats stats'])))
        # make the wrapper script executable for everyone
        os.chmod('rnn.sh', ((((((stat.S_IRUSR | stat.S_IRGRP) | stat.S_IROTH) | stat.S_IWUSR) | stat.S_IXUSR) | stat.S_IXGRP) | stat.S_IXOTH))
        env = os.environ.copy()
        env['PYTHONIOENCODING'] = 'UTF-8'
        subprocess.check_call(['./rnn.sh'], env=env)
        self.sh('cp stats.mean.txt {mean_file}')
        self.sh('cp stats.std_dev.txt {std_dev_file}')
        total_mean = 0
        total_var = 0
        # one line per feature dimension in both files
        mean_file = open('stats.mean.txt')
        std_dev_file = open('stats.std_dev.txt')
        # incrementally pool the per-dimension statistics into scalars:
        # running mean, and running variance that also accounts for the spread
        # of the per-dimension means (law-of-total-variance style update).
        # NOTE(review): the variance update uses the already-updated running
        # mean; assumed intentional -- verify against the derivation.
        for (i, (mean, std_dev)) in enumerate(zip(mean_file, std_dev_file)):
            mean = float(mean)
            var = (float(std_dev.strip()) ** 2)
            print(var)
            total_mean = (((total_mean * i) + mean) / (i + 1))
            total_var = ((((total_var * i) + var) + ((((total_mean - mean) ** 2) * i) / (i + 1))) / (i + 1))
            print(total_var)
        self.mean.set(total_mean)
        self.std_dev.set(numpy.sqrt(total_var))
        print(numpy.sqrt(total_var))
|
class RETURNNForwardFromFile(RETURNNJob):
    """
    Run RETURNN in forward mode from a prepared config file and collect the
    produced HDF outputs as job outputs.
    """
    def __init__(self, returnn_config_file, parameter_dict, hdf_outputs, time_rqmt=4, mem_rqmt=4, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        """
        :param returnn_config_file: prepared RETURNN config
        :param dict parameter_dict: parameters passed on the command line
        :param list[str] hdf_outputs: base names of the HDF files to collect
        :param int time_rqmt: time requirement in hours
        :param int mem_rqmt: memory requirement in GB
        """
        super().__init__(parameter_dict, returnn_config_file, returnn_python_exe, returnn_root)
        self.rqmt = {'gpu': 1, 'cpu': 2, 'mem': mem_rqmt, 'time': time_rqmt}
        self.parameter_dict['forward_override_hdf_output'] = True
        self.outputs = {}
        for output_name in hdf_outputs:
            self.outputs[output_name] = self.output_path(output_name + '.hdf')

    def update(self):
        # register the model checkpoint as job input once model dir and epoch
        # are both present in the parameter dict
        if 'ext_model' not in self.parameter_dict.keys() or 'ext_load_epoch' not in self.parameter_dict.keys():
            return
        epoch = self.parameter_dict['ext_load_epoch']
        if isinstance(epoch, tk.Variable):
            epoch = epoch.get()
        model_dir = self.parameter_dict['ext_model']
        index_path = str(model_dir) + ('/epoch.%03d.index' % epoch)
        if isinstance(model_dir, tk.Path):
            self.add_input(Path(index_path, creator=model_dir.creator))
        else:
            self.add_input(Path(index_path))

    def tasks(self):
        yield Task('create_files', mini_task=True)
        yield Task('run', resume='run', rqmt=self.rqmt)

    def run(self):
        super().run()
        # move the requested HDFs to their output locations, drop the rest
        for hdf_name, hdf_target in self.outputs.items():
            self.sh('mv %s %s' % (hdf_name + '.hdf', hdf_target))
        self.sh('rm dump*', pipefail=False, except_return_codes=(0, 1))
        self.sh('rm *.hdf', pipefail=False, except_return_codes=(0, 1))

    @classmethod
    def hash(cls, kwargs):
        hash_relevant = {'returnn_config_file': kwargs['returnn_config_file'], 'parameter_dict': kwargs['parameter_dict'], 'returnn_python_exe': kwargs['returnn_python_exe'], 'returnn_root': kwargs['returnn_root']}
        return super().hash(hash_relevant)
|
class RETURNNJob(Job):
    """
    Common base for jobs that call RETURNN's rnn.py with a prepared config file
    plus ``++key value`` command line parameters.
    """
    def __init__(self, parameter_dict, returnn_config_file, returnn_python_exe, returnn_root):
        """
        :param dict parameter_dict: parameters passed as ++key value on the command line
        :param Path returnn_config_file: existing RETURNN config file
        :param Path|str returnn_python_exe: python interpreter used to run RETURNN
        :param Path|str returnn_root: RETURNN source root (containing rnn.py)
        """
        self.returnn_config_file_in = returnn_config_file
        self.returnn_config_file = self.output_path('returnn.config')
        self.parameter_dict = parameter_dict if parameter_dict is not None else {}
        self.returnn_python_exe = returnn_python_exe
        self.returnn_root = returnn_root

    def get_parameter_list(self):
        """Render parameter_dict into a sorted ``++key value`` argument list."""
        parameter_list = []
        for key, value in sorted(self.parameter_dict.items()):
            if isinstance(value, tk.Variable):
                rendered = str(value.get())
            elif isinstance(value, tk.Path):
                rendered = tk.uncached_path(value)
            elif isinstance(value, list):
                rendered = '"%s"' % str(value).replace(' ', '')
            else:
                rendered = str(value)
            # RETURNN expects the checkpoint prefix, not the model directory
            if key == 'ext_model' and not rendered.endswith('/epoch'):
                rendered = rendered + '/epoch'
            # protect values that look like options from the argument parser
            if rendered.startswith('-'):
                rendered = '-- ' + rendered
            parameter_list.append('++%s' % key)
            parameter_list.append(rendered)
        return parameter_list

    def create_files(self):
        """Copy the config into the job dir and write the rnn.sh wrapper."""
        shutil.copy(tk.uncached_path(self.returnn_config_file_in), tk.uncached_path(self.returnn_config_file))
        command = [tk.uncached_path(self.returnn_python_exe), os.path.join(tk.uncached_path(self.returnn_root), 'rnn.py'), self.returnn_config_file.get_path()] + self.get_parameter_list()
        with open('rnn.sh', 'wt') as f:
            f.write('#!/usr/bin/env bash\n%s' % ' '.join(command))
        executable_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        os.chmod('rnn.sh', executable_mode)

    def run(self):
        sp.check_call(['./rnn.sh'])
|
def main():
    """Convert RETURNN search output (BPE level, 'py' dict format) into a words dict file."""
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format")
    argparser.add_argument('--out', required=True, help='output filename')
    args = argparser.parse_args()
    # NOTE: eval of a results file -- only use on trusted search output
    d = eval(open(args.file, 'r').read())
    assert isinstance(d, dict)
    assert not os.path.exists(args.out)
    with open(args.out, 'w') as out:
        out.write('{\n')
        for seq_tag, raw_text in sorted(d.items()):
            if '#' in seq_tag:
                # tags like corpus/x/recording#segment are rewritten to
                # corpus/recording/segment
                print(seq_tag)
                tag_parts = seq_tag.split('/')
                recording_name, segment_name = tag_parts[2].split('#')
                seq_tag = tag_parts[0] + '/' + recording_name + '/' + segment_name
                print(seq_tag)
            # remove the BPE separators to get plain words
            out.write('%r: %r,\n' % (seq_tag, raw_text.replace('@@ ', '')))
        out.write('}\n')
    print('# Done.')
|
class SeqInfo():
    """Lightweight record for one corpus sequence; __slots__ keeps instances compact."""
    __slots__ = ('idx', 'tag', 'orth_raw', 'orth_seq', 'audio_path', 'audio_start', 'audio_end', 'rec_name')
|
def parse_bliss_xml(filename):
    """
    Stream-parse a bliss xml corpus and return the list of its segments.
    This takes e.g. around 5 seconds for the Switchboard 300h train corpus;
    it is kept as fast as possible so that all further parsing and loading
    can be done in parallel and lazily.

    :param str filename: bliss xml file, optionally gzipped
    :rtype: list[SeqInfo]
    """
    import gzip
    import xml.etree.ElementTree as etree
    raw_file = open(filename, 'rb')
    if filename.endswith('.gz'):
        raw_file = gzip.GzipFile(fileobj=raw_file)
    open_elems = []   # stack of currently open xml elements
    open_names = []   # parallel stack with the name of each open element
    cur_audio_path = None
    cur_rec_name = None
    seg_nr_in_rec = 0
    seq_idx = 0
    seqs = []
    for event, elem in etree.iterparse(raw_file, events=('start', 'end')):
        if event == 'start':
            open_elems.append(elem)
            if elem.tag == 'segment':
                seg_nr_in_rec += 1
                # unnamed segments get their 1-based position as name
                open_names.append(elem.attrib.get('name', str(seg_nr_in_rec)))
            else:
                open_names.append(elem.attrib.get('name', None))
            if elem.tag == 'recording':
                cur_audio_path = elem.attrib['audio']
                cur_rec_name = elem.attrib['name']
                seg_nr_in_rec = 0
        else:
            if elem.tag == 'segment':
                info = SeqInfo()
                info.idx = seq_idx
                # the segment itself is still on the stack here
                info.tag = '/'.join(open_names)
                info.orth_raw = elem.find('orth').text
                info.audio_path = cur_audio_path
                info.audio_start = float(elem.attrib['start'])
                info.audio_end = float(elem.attrib['end'])
                info.rec_name = cur_rec_name
                seqs.append(info)
                seq_idx += 1
                if open_elems:
                    # free memory of already-processed elements
                    open_elems[0].clear()
            assert open_elems[-1] is elem
            open_elems.pop()
            open_names.pop()
    return seqs
|
def main():
    """Convert word-level RETURNN search output ('py' dict format) into a CTM file."""
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format, words")
    argparser.add_argument('--corpus', required=True, help='Bliss XML corpus')
    argparser.add_argument('--out', required=True, help='output CTM filename')
    argparser.add_argument('--only-segment-name', action='store_true', default=False)
    args = argparser.parse_args()
    # NOTE: eval of a results file -- only use on trusted search output
    search_results = eval(open(args.file, 'r').read())
    assert isinstance(search_results, dict)
    segments = parse_bliss_xml(args.corpus)
    # example CTM:
    #   ;; <name> <track> <start> <duration> <word> <confidence> [<n-best>]
    #   ;; hub5_00/en_4156a/1 (301.85-302.48)
    #   en_4156a 1 301.850 0.110 oh 0.9505
    #   en_4156a 1 301.960 0.510 yeah 0.9966
    #   ;; hub5_00/en_4156a/2 (304.71-306.72)
    #   en_4156a 1 304.710 0.170 well 0.6720
    #   ...
    assert not os.path.exists(args.out)
    with open(args.out, 'w') as out:
        out.write(';; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n')
        for seq in segments:
            out.write(';; %s (%f-%f)\n' % (seq.tag, seq.audio_start, seq.audio_end))
            if args.only_segment_name:
                seq.tag = seq.tag.split('/')[-1]
            words = search_results[seq.tag].split()
            # no word timings available: spread the words evenly over 90% of
            # the segment duration with a fixed dummy confidence
            avg_dur = (seq.audio_end - seq.audio_start) * 0.9 / max(len(words), 1)
            for word_idx in range(len(words)):
                out.write('%s 1 %f %f %s 0.99\n' % (seq.rec_name, seq.audio_start + word_idx * avg_dur, avg_dur, words[word_idx]))
    print('# Done.')
|
class RETURNNSearchFromFile(RETURNNJob):
    """
    Run a RETURNN search directly from a prepared config file.

    Currently it requires "ext_model" and "ext_load_epoch" to be set in the
    parameter dict.
    """
    def __init__(self, returnn_config_file, parameter_dict, output_mode='py', time_rqmt=4, mem_rqmt=4, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURN_PYTHON if False else RETURNN_SRC_ROOT):
        super().__init__(parameter_dict, returnn_config_file, returnn_python_exe, returnn_root)
        self.rqmt = {'gpu': 1, 'cpu': 2, 'mem': mem_rqmt, 'time': time_rqmt}
        assert output_mode in ['py', 'txt']
        self.out = self.output_path('search_out.%s' % output_mode)
        self.parameter_dict['search_output_file'] = tk.uncached_path(self.out)
        self.parameter_dict['search_output_file_format'] = output_mode

    def update(self):
        # register the model checkpoint as job input once model dir and epoch
        # are both present in the parameter dict
        if 'ext_model' not in self.parameter_dict.keys() or 'ext_load_epoch' not in self.parameter_dict.keys():
            return
        epoch = self.parameter_dict['ext_load_epoch']
        if isinstance(epoch, tk.Variable):
            epoch = epoch.get()
        model_dir = self.parameter_dict['ext_model']
        index_path = str(model_dir) + ('/epoch.%03d.index' % epoch)
        if isinstance(model_dir, tk.Path):
            self.add_input(Path(index_path, creator=model_dir.creator))
        else:
            self.add_input(Path(index_path))

    def tasks(self):
        yield Task('create_files', mini_task=True)
        yield Task('run', resume='run', rqmt=self.rqmt)

    @classmethod
    def hash(cls, kwargs):
        hash_relevant = {'returnn_config_file': kwargs['returnn_config_file'], 'parameter_dict': kwargs['parameter_dict'], 'returnn_python_exe': kwargs['returnn_python_exe'], 'returnn_root': kwargs['returnn_root'], 'output_mode': kwargs['output_mode']}
        return super().hash(hash_relevant)
|
class GetBestEpoch(Job):
    """Pick the epoch with the index-th lowest dev score from a RETURNN learning-rates file."""
    def __init__(self, model_dir, learning_rates, index=0, key=None):
        """
        :param model_dir: model directory (kept for hashing/bookkeeping)
        :param learning_rates: RETURNN "learning_rates" file
        :param int index: 0 for the best epoch, 1 for the second best, ...
        :param str|None key: score key to sort by; the first "dev_score*" key
            of the last epoch is used when None
        """
        self.model_dir = model_dir
        self.learning_rates = learning_rates
        self.index = index
        self.out_var = self.output_var('epoch')
        self.key = key
        assert index >= 0 and isinstance(index, int)

    def run(self):
        import math

        def EpochData(learningRate, error):
            # constructor name used inside the learning-rates file
            return {'learning_rate': learningRate, 'error': error}

        with open(self.learning_rates.get_path(), 'rt') as f:
            data = eval(f.read(), {'inf': math.inf, 'nan': math.nan, 'EpochData': EpochData})
        epochs = sorted(data.keys())
        if self.key is None:
            dev_score_keys = [k for k in data[epochs[-1]]['error'] if k.startswith('dev_score')]
            score_key = dev_score_keys[0]
        else:
            score_key = self.key
        scored_epochs = [(epoch, data[epoch]['error'][score_key]) for epoch in epochs if score_key in data[epoch]['error']]
        # stable sort by score; ties keep epoch order
        scored_epochs.sort(key=lambda item: item[1])
        self.out_var.set(scored_epochs[self.index][0])

    def tasks(self):
        yield Task('run', mini_task=True)
|
class SearchBPEtoWords(Job):
    """
    Converts BPE search output from RETURNN into words.

    :param search_output_bpe: search output file containing BPE tokens
    :param script: conversion script to invoke
    """

    def __init__(self, search_output_bpe, script=Path('scripts/search-bpe-to-words.py')):
        self.search_output_bpe = search_output_bpe
        self.script = script
        self.out = self.output_path('search_output.words')

    def run(self):
        # self.sh formats the template with this job's attributes
        self.sh('python3 {script} {search_output_bpe} --out {out}')

    def tasks(self):
        yield Task('run', mini_task=True)
|
class SearchWordsToCTM(Job):
    """
    Converts search output (in words) from RETURNN into a ctm file.

    :param search_output_words: search output in word form
    :param corpus: bliss corpus providing segment information
    :param only_segment_name: pass --only-segment-name to the conversion script
    :param script: conversion script to invoke
    """
    __sis_hash_exclude__ = {'only_segment_name': False}

    def __init__(self, search_output_words, corpus, only_segment_name=False, script=Path('scripts/search-words-to-ctm.py')):
        self.search_output_words = search_output_words
        self.corpus = corpus
        self.script = script
        self.only_segment_name = only_segment_name
        self.out = self.output_path('search_output.ctm')

    def run(self):
        flag = '--only-segment-name' if self.only_segment_name else ''
        self.sh('python3 {script} {search_output_words} --corpus {corpus} %s --out {out}' % flag)

    def tasks(self):
        yield Task('run', mini_task=True)
|
class ReturnnScore(Job):
    """Compute the WER of a hypothesis file against a reference using RETURNN's scoring tool."""

    def __init__(self, hypothesis, reference, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        self.set_attrs(locals())
        self.out = self.output_path('wer')

    def run(self):
        tool = os.path.join(str(self.returnn_root), 'tools/calculate-word-error-rate.py')
        call = [
            str(self.returnn_python_exe), tool,
            '--expect_full',
            '--hyps', str(self.hypothesis),
            '--refs', str(self.reference),
            '--out', str(self.out),
        ]
        print('run %s' % ' '.join(call))
        sp.check_call(call)

    def tasks(self):
        yield Task('run', mini_task=True)
|
class RETURNNModel:
    """Lightweight record bundling a RETURNN config file with one trained checkpoint."""

    def __init__(self, crnn_config_file, model, epoch):
        # config that produced the model
        self.crnn_config_file = crnn_config_file
        # path to the checkpoint file
        self.model = model
        # epoch number of the checkpoint
        self.epoch = epoch
|
class RETURNNTrainingFromFile(RETURNNJob):
    """
    The Job allows to directly execute returnn config files. The config files have to have the line
    ext_model = config.value("ext_model", None) and set model = ext_model to correctly set the model path.

    If the learning rate file should be available, add
    ext_learning_rate_file = config.value("ext_learning_rate_file", None) and
    set learning_rate_file = ext_learning_rate_file.

    Other externally controllable parameters may also be defined in the same way, and can be set by
    providing the parameter value in the parameter_dict. The "ext_" prefix is used for naming
    convention only, and is not mandatory.

    Also make sure that task="train" is set.
    """

    def __init__(self, returnn_config_file, parameter_dict, time_rqmt=72, mem_rqmt=8, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        """
        :param tk.Path|str returnn_config_file: a returnn training config file
        :param dict parameter_dict: provide external parameters to the rnn.py call
        :param int|str time_rqmt:
        :param int|str mem_rqmt:
        :param tk.Path|str returnn_python_exe: the executable for running returnn
        :param tk.Path|str returnn_root: the path to the returnn source folder
        """
        super().__init__(parameter_dict, returnn_config_file, returnn_python_exe, returnn_root)
        self.rqmt = {'gpu': 1, 'cpu': 2, 'mem': mem_rqmt, 'time': time_rqmt}
        self.learning_rates = self.output_path('learning_rates')
        self.model_dir = self.output_path('models', directory=True)
        self.models = None
        if ('ext_num_epochs' in parameter_dict):
            # pre-register one RETURNNModel per epoch so downstream jobs can depend on checkpoints
            self.models = {k: RETURNNModel(self.returnn_config_file, self.output_path(('models/epoch.%.3d.meta' % k)), k) for k in range(1, (parameter_dict['ext_num_epochs'] + 1))}
        # inject model prefix and learning-rate file location into the config parameters
        self.parameter_dict['ext_model'] = (tk.uncached_path(self.model_dir) + '/epoch')
        self.parameter_dict['ext_learning_rate_file'] = tk.uncached_path(self.learning_rates)

    def tasks(self):
        (yield Task('create_files', mini_task=True))
        (yield Task('run', resume='run', rqmt=self.rqmt))

    def path_available(self, path):
        # generic sisyphus availability check first
        res = super().path_available(path)
        if res:
            return res
        # the learning-rate file only counts as available when sisyphus says so,
        # because it is rewritten continuously while training is still running
        if (path == self.learning_rates):
            return super().path_available(path)
        res = os.path.exists(path.get_path())
        if res:
            return res
        # an "epoch.<n>.*" checkpoint also counts as available if the corresponding
        # pretrain checkpoint ("epoch.pretrain.<n>.*") already exists on disk
        file = os.path.basename(path.get_path())
        directory = os.path.dirname(path.get_path())
        if file.startswith('epoch.'):
            segments = file.split('.')
            pretrain_file = '.'.join([segments[0], 'pretrain', segments[1]])
            pretrain_path = os.path.join(directory, pretrain_file)
            return os.path.exists(pretrain_path)
        return False

    @classmethod
    def hash(cls, kwargs):
        # exclude the resource requirements so changing them does not change the job hash
        d = {'returnn_config_file': kwargs['returnn_config_file'], 'parameter_dict': kwargs['parameter_dict'], 'returnn_python_exe': kwargs['returnn_python_exe'], 'returnn_root': kwargs['returnn_root']}
        return super().hash(d)
|
class BuildCharacterVocabulary(Job):
    """
    Build a character vocabulary (symbol -> index pickle) for RETURNN.
    """

    def __init__(self, languages=['en'], uppercase=False):
        """
        :param list[str] languages: language codes selecting additional character sets
        :param bool uppercase: emit the vocabulary in upper case
        """
        # NOTE: the mutable default is kept for sisyphus hash stability; it is never mutated here
        self.languages = languages
        self.uppercase = uppercase
        self.out = self.output_path('orth_vocab.pkl')
        self.vocab_size = self.output_var('vocab_length')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        _pad = '_'
        _eos = '~'
        _space = ' '
        # base characters plus punctuation; language-specific sets are appended below
        _characters = 'abcdefghijklmnñopqrstuvwxyzàèìòùáéíóúïü!\'"(),-.:;?'
        if 'es' in self.languages:
            _characters += '¿¡'
        if 'de' in self.languages:
            _characters += 'äöüß'
        if 'it' in self.languages:
            _characters += 'îû'
        if 'ca' in self.languages:
            _characters += 'ç'
        if 'fr' in self.languages:
            _characters += 'çæœÿêôë'
        symbols = [_pad, _eos, _space] + list(_characters)
        if self.uppercase:
            symbols = [s.upper() for s in symbols]
        # duplicate characters (e.g. 'ç' for both 'ca' and 'fr') keep the index
        # of their last occurrence, as in the original comprehension
        vocab = {k: v for (v, k) in enumerate(symbols)}
        # write via context manager so the handle is always closed (was leaked before)
        with open(tk.uncached_path(self.out), 'wb') as f:
            pickle.dump(vocab, f)
        print('Vocab Size: %i' % len(symbols))
        self.vocab_size.set(len(symbols))
|
class Concatenate(Job):
    """Concatenate all given input files into a single gzipped output."""

    def __init__(self, inputs):
        """
        :param list|set inputs: paths to concatenate; a set is sorted for deterministic order
        """
        assert inputs
        if isinstance(inputs, set):
            inputs = sorted(inputs, key=str)
        else:
            # copy so that popping below does not mutate the caller's list
            inputs = list(inputs)
        assert isinstance(inputs, list)
        if len(inputs) == 1:
            # single input: pass it through unchanged instead of re-compressing
            self.out = inputs.pop()
        else:
            self.out = self.output_path('out.gz')
        for path in inputs:
            assert isinstance(path, (Path, str)), 'input to Concatenate is not a valid path'
        self.inputs = inputs

    def run(self):
        # f_list is picked up by the sh template below
        self.f_list = ' '.join(str(i) for i in self.inputs)
        self.sh('zcat -f {f_list} | gzip > {out}')

    def tasks(self):
        yield Task('run', rqmt={'mem': 3, 'time': 3})
|
class PP_Module(object):
    """Base class for orth post-processing modules; the default is a no-op."""

    def __init__(self, **kwargs):
        # subclasses may accept configuration parameters; the base ignores them
        pass

    def process(self, orth: str):
        """Return the orth string unmodified."""
        return orth
|
class Lowercase(PP_Module):
    """Post-processing module that lower-cases the orth."""

    def process(self, orth: str):
        return orth.lower()
|
class Uppercase(PP_Module):
    """Post-processing module that upper-cases the orth."""

    def process(self, orth: str):
        return orth.upper()
|
class End_Token(PP_Module):
    """Appends a single end-of-sequence character to the orth."""

    def __init__(self, token):
        super().__init__()
        self.token = token
        # single printable character expected
        assert len(token) == 1 and isinstance(token, str)

    def process(self, orth: str):
        return orth + self.token
|
class RemoveSymbol(PP_Module):
    """Deletes every occurrence of one character from the orth."""

    def __init__(self, symbol):
        super().__init__()
        self.symbol = symbol
        assert len(symbol) == 1

    def process(self, orth: str):
        return orth.replace(self.symbol, '')
|
class RemovePunctuation(PP_Module):
    """Strips all ASCII punctuation (string.punctuation) from the orth."""

    def __init__(self):
        super().__init__()
        # translation table deleting every punctuation character in one C-level pass
        self.converter = str.maketrans('', '', string.punctuation)

    def process(self, orth: str):
        return orth.translate(self.converter)
|
class RegexReplace(PP_Module):
    """Applies re.sub(search, replace, orth) to every orth."""

    def __init__(self, search, replace):
        super().__init__()
        self.regex_search = search
        self.regex_replace = replace

    def process(self, orth: str):
        return re.sub(self.regex_search, self.regex_replace, orth)
|
class ProcessBlissText(Job):
    """
    Provides a set of processing modules to process the orth tags in a bliss corpus file.
    """

    def __init__(self, corpus, process_list, vocabulary=None):
        """
        :param Path corpus: path to the corpus file
        :param list[(str, dict)] process_list: list of module tuples with (module_name, parameter_dict)
        :param Path vocabulary: a character vocabulary to be used as whitelist
        """
        self.corpus = corpus
        self.process_list = process_list
        self.out = self.output_path('pp_corpus.xml.gz')
        self.vocabulary = vocabulary
        self.module_instances = []
        for (module, params) in process_list:
            # fail early on unknown module names
            assert module in module_dict, ('module %s was not in the list of available modules %s' % (module, sorted(module_dict.keys())))
            self.module_instances.append(module_dict[module](**params))

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        corpus = Corpus()
        corpus.load(tk.uncached_path(self.corpus))
        vocab = None
        if self.vocabulary:
            # close the pickle handle deterministically (the original leaked it)
            with open(tk.uncached_path(self.vocabulary), 'rb') as f:
                vocab = pickle.load(f)
        for recording in corpus.all_recordings():
            for segment in recording.segments:
                orth = segment.orth.strip()
                # apply the configured modules in order
                for module_instance in self.module_instances:
                    orth = module_instance.process(orth)
                if vocab is not None:
                    # drop characters not covered by the vocabulary whitelist
                    orth = ''.join(c for c in orth if c in vocab)
                segment.orth = orth
        corpus.dump(tk.uncached_path(self.out))
|
class BlissExtractRawText(Job):
    """
    Extract the text from a Bliss corpus into a raw gzipped text file.
    """

    def __init__(self, corpus, segments=None, segment_key_only=True):
        """
        :param corpus: bliss corpus file
        :param segments: optional segment whitelist file (plain or gzipped)
        :param segment_key_only: unused in this job; kept for interface compatibility
        """
        self.corpus_path = corpus
        self.out = self.output_path('text.gz')
        self.segments_file_path = segments
        self.segment_key_only = segment_key_only

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import gzip
        corpus = Corpus()
        corpus.load(tk.uncached_path(self.corpus_path))
        segments = None
        if self.segments_file_path:
            segments_path = tk.uncached_path(self.segments_file_path)
            # open both variants in text mode; the original called .decode() on
            # every line and crashed on the plain-text branch (str has no .decode)
            opener = gzip.open if segments_path.endswith('gz') else open
            with opener(segments_path, 'rt') as segment_file:
                segments = set(line.strip() for line in segment_file)
        with gzip.open(tk.uncached_path(self.out), 'wt') as outfile:
            for recording in corpus.all_recordings():
                for segment in recording.segments:
                    full_segment_key = '/'.join([corpus.name, recording.name, segment.name])
                    if segments and full_segment_key not in segments:
                        continue
                    outfile.write(segment.orth.strip() + '\n')
|
class BlissExtractTextDictionary(Job):
    """
    Extract the text from a Bliss corpus to fit the "{key: text}" structure.
    """

    def __init__(self, corpus, segments=None, segment_key_only=True):
        """
        :param corpus: bliss corpus file
        :param segments: a segment file as optional whitelist (plain or gzipped)
        :param segment_key_only: if true, only the segment name is used as key, instead of corpus/recording/segment
        """
        self.corpus_path = corpus
        self.out = self.output_path('text_dictionary.py')
        self.segments_file_path = segments
        self.segment_key_only = segment_key_only

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import gzip
        import pprint
        corpus = Corpus()
        corpus.load(tk.uncached_path(self.corpus_path))
        dictionary = {}
        segments = None
        if self.segments_file_path:
            segments_path = tk.uncached_path(self.segments_file_path)
            # open both variants in text mode; the original called .decode() on
            # every line and crashed on the plain-text branch (str has no .decode)
            opener = gzip.open if segments_path.endswith('gz') else open
            with opener(segments_path, 'rt') as segment_file:
                segments = set(line.strip() for line in segment_file)
        for recording in corpus.all_recordings():
            for segment in recording.segments:
                orth = segment.orth.strip()
                full_segment_key = '/'.join([corpus.name, recording.name, segment.name])
                key = segment.name if self.segment_key_only else full_segment_key
                # the whitelist always matches against the full key
                if segments and full_segment_key not in segments:
                    continue
                dictionary[key] = orth
        with open(tk.uncached_path(self.out), 'wt') as outfile:
            outfile.write(pprint.pformat(dictionary, width=1000))
|
class CreateSubwordsAndVocab(Job):
    """
    Learn a BPE code file and the corresponding vocabulary from a (gzipped) text file
    using subword-nmt.
    """

    def __init__(self, text, num_segments, subword_nmt=SUBWORD_NMT_DIR):
        """
        :param text: training text file (may be gzipped; resolved via cf)
        :param int num_segments: number of BPE merge operations
        :param subword_nmt: path to the subword-nmt checkout
        """
        self.text = text
        self.num_segments = num_segments
        self.subword_nmt = subword_nmt
        self.out_bpe = self.output_path('bpe.codes')
        self.out_vocab = self.output_path('bpe.vocab')
        self.out_vocab_size = self.output_var('bpe_vocab_size')

    def run(self):
        with tk.mktemp() as self.tmp:
            self.sh('zcat -f `cf {text}` > {tmp}')
            self.sh('python3 {subword_nmt}/learn_bpe.py -s {num_segments} --input {tmp} --output {out_bpe}')
            self.sh('python3 {subword_nmt}/create-py-vocab.py --txt {tmp} --bpe {out_bpe} --unk "<unk>" --out {out_vocab}')
            self.sh('rm {tmp}')
        # NOTE: eval of a file generated by our own script above (trusted input);
        # read via context manager so the handle is closed (was leaked before)
        with open(str(self.out_vocab)) as f:
            vocab = eval(f.read())
        # count distinct indices, not entries (several symbols may share an index)
        self.out_vocab_size.set(len(set(vocab.values())))

    def tasks(self):
        rqmt = {'cpu': 1, 'mem': 16, 'time': 8}
        yield Task('run', rqmt=rqmt)
|
class DistributeSpeakerEmbeddings(Job):
    """
    Distribute speaker embeddings contained in an hdf file to a new hdf file
    with mappings to the given bliss corpus.
    """

    def __init__(self, bliss_corpus, speaker_embedding_hdf, options=None, use_full_seq_name=False):
        """
        :param bliss_corpus: corpus whose segments receive speaker embeddings
        :param speaker_embedding_hdf: hdf file with the embeddings (RETURNN dump format)
        :param dict|None options: {'mode': 'random'} (default, optional 'seed') or
            {'mode': 'length_buckets', 'corpus': ..., 'bucket_size': ...}
        :param bool use_full_seq_name: use "<corpus>/<recording>/<segment>" as sequence
            name instead of the bare segment name
        """
        self.bliss_corpus = bliss_corpus
        self.speaker_embedding_hdf = speaker_embedding_hdf
        self.use_full_seq_name = use_full_seq_name
        self.options = options
        if (self.options is None):
            self.options = {'mode': 'random'}
        assert (self.options['mode'] in ['random', 'length_buckets']), ('invalid mode %s' % options['mode'])
        self.out = self.output_path('speaker_embeddings.hdf')

    def tasks(self):
        (yield Task('run', mini_task=True))

    def _random(self):
        # assign embeddings round-robin after an (optionally seeded) shuffle
        if ('seed' in self.options):
            random.seed(self.options['seed'])
        random.shuffle(self.speaker_embedding_features)
        embedding_index = 0
        for recording in self.c.recordings:
            assert (len(recording.segments) == 1)
            segment = recording.segments[0]
            segment_name = '/'.join([self.c.name, recording.name, segment.name])
            if (not self.use_full_seq_name):
                segment_name = segment.name
            self.hdf_writer.insert_batch(numpy.asarray([self.speaker_embedding_features[embedding_index]]), [1], [segment_name])
            embedding_index += 1
            # wrap around when there are fewer embeddings than segments
            if (embedding_index >= len(self.speaker_embedding_features)):
                embedding_index = 0

    def _random_matching_length(self):
        # bucket the embeddings by the orth length of a reference text corpus,
        # then draw for each target segment from the bucket matching its orth length
        text_corpus = corpus.Corpus()
        # NOTE(review): this assert runs before load(), so it is trivially true — confirm intent
        assert (len(text_corpus.subcorpora) == 0)
        text_corpus.load(tk.uncached_path(self.options['corpus']))
        text_durations = {}
        max_duration = 0
        for recording in text_corpus.recordings:
            assert (len(recording.segments) == 1)
            segment = recording.segments[0]
            # NOTE(review): the key uses self.c.name (target corpus) rather than
            # text_corpus.name — only relevant when use_full_seq_name is set; confirm
            segment_name = '/'.join([self.c.name, recording.name, segment.name])
            if (not self.use_full_seq_name):
                segment_name = segment.name
            seg_len = len(segment.orth)
            text_durations[segment_name] = seg_len
            if (seg_len > max_duration):
                max_duration = seg_len
        bucket_size = int(self.options['bucket_size'])
        buckets = [[] for i in range(0, (max_duration + bucket_size), bucket_size)]
        bucket_indices = ([0] * len(buckets))
        # sort each embedding into the bucket of its reference orth length
        for (tag, feature) in zip(self.speaker_embedding_tags, self.speaker_embedding_features):
            buckets[(text_durations[tag] // bucket_size)].append(feature)
        for bucket in buckets:
            random.shuffle(bucket)
        for recording in self.c.recordings:
            assert (len(recording.segments) == 1)
            segment = recording.segments[0]
            segment_name = '/'.join([self.c.name, recording.name, segment.name])
            if (not self.use_full_seq_name):
                segment_name = segment.name
            target_bucket = (len(segment.orth) // bucket_size)
            # search outwards (up to +/-1000 buckets) for the nearest non-empty bucket
            for i in range(1000):
                if ((0 <= (target_bucket + i) < len(buckets)) and (len(buckets[(target_bucket + i)]) > 0)):
                    target_bucket = (target_bucket + i)
                    break
                if ((0 <= (target_bucket - i) < len(buckets)) and (len(buckets[(target_bucket - i)]) > 0)):
                    target_bucket = (target_bucket - i)
                    break
            speaker_embedding = buckets[target_bucket][bucket_indices[target_bucket]]
            self.hdf_writer.insert_batch(numpy.asarray([speaker_embedding]), [1], [segment_name])
            # cycle within the bucket when it is exhausted
            bucket_indices[target_bucket] += 1
            if (bucket_indices[target_bucket] >= len(buckets[target_bucket])):
                bucket_indices[target_bucket] = 0

    def run(self):
        # read all embeddings (one per sequence) from the RETURNN hdf dump
        speaker_embedding_data = h5py.File(tk.uncached_path(self.speaker_embedding_hdf), 'r')
        speaker_embedding_inputs = speaker_embedding_data['inputs']
        speaker_embedding_raw_tags = speaker_embedding_data['seqTags']
        speaker_embedding_lengths = speaker_embedding_data['seqLengths']
        self.speaker_embedding_features = []
        self.speaker_embedding_tags = []
        offset = 0
        for (tag, length) in zip(speaker_embedding_raw_tags, speaker_embedding_lengths):
            self.speaker_embedding_features.append(speaker_embedding_inputs[offset:(offset + length[0])])
            self.speaker_embedding_tags.append((tag.decode() if isinstance(tag, bytes) else tag))
            offset += length[0]
        self.hdf_writer = SimpleHDFWriter(tk.uncached_path(self.out), dim=self.speaker_embedding_features[0].shape[(- 1)])
        self.c = corpus.Corpus()
        self.c.load(tk.uncached_path(self.bliss_corpus))
        # only flat corpora are supported (each recording must hold exactly one segment)
        assert (len(self.c.subcorpora) == 0)
        mode = self.options.get('mode')
        if (mode == 'random'):
            self._random()
        elif (mode == 'length_buckets'):
            self._random_matching_length()
        else:
            assert False
        self.hdf_writer.close()
|
class VerifyCorpus(Job):
    """
    Verifies the audio files of a bliss corpus by loading them with the soundfile library.
    """

    def __init__(self, bliss_corpus, channels=1, sample_rate=16000):
        """
        :param bliss_corpus: corpus file to check
        :param int channels: expected number of audio channels
        :param int sample_rate: expected sample rate
        """
        self.bliss_corpus = bliss_corpus
        self.channels = channels
        self.sample_rate = sample_rate
        self.out = self.output_path('errors.log')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import soundfile
        c = corpus.Corpus()
        c.load(tk.uncached_path(self.bliss_corpus))
        success = True
        # context managers ensure the handles are closed even when the final assert fires
        with open(tk.uncached_path(self.out), 'wt') as out_file:
            for r in c.all_recordings():
                try:
                    with open(r.audio, 'rb') as audio_file:
                        (audio, sr) = soundfile.read(audio_file)
                    if self.channels == 1:
                        assert len(audio.shape) == 1
                    else:
                        assert audio.shape[1] == self.channels
                    assert sr == self.sample_rate
                except Exception as e:
                    # log and continue so all broken files are reported in one run
                    print('error in file %s: %s' % (r.audio, str(e)))
                    out_file.write('error in file %s: %s\n' % (r.audio, str(e)))
                    success = False
        # message previously pointed to "error.log", but the output file is "errors.log"
        assert success, 'there was an error, please see errors.log'
|
class ConvertFeatures(Job):
    """
    Convert output features of a decoding job that have merged (stacked) frames
    back to single frames.
    """

    def __init__(self, stacked_hdf_features, conversion_factor):
        """
        :param Path stacked_hdf_features: hdf features with stacked frames
        :param int conversion_factor: the number of frames per stack
        """
        self.stacked_hdf_features = stacked_hdf_features
        self.conversion_factor = conversion_factor
        self.out = self.output_path('features.hdf')

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        # close the input hdf deterministically via context manager (was leaked before)
        with h5py.File(tk.uncached_path(self.stacked_hdf_features), 'r') as input_data:
            inputs = input_data['inputs']
            tags = input_data['seqTags']
            lengths = input_data['seqLengths']
            print('loaded arrays')
            dim = inputs[0].shape[0] // self.conversion_factor
            print('out dim: %d' % dim)
            writer = SimpleHDFWriter(tk.uncached_path(self.out), dim=dim, ndim=2)
            offset = 0
            for (tag, length) in zip(tags, lengths):
                in_data = inputs[offset:(offset + length[0])]
                # per-sequence dim equals the global dim; recomputed defensively
                # (renamed so it no longer shadows the writer's dim)
                seq_dim = int(in_data.shape[1] / int(self.conversion_factor))
                # unstack: each row of n*dim values becomes n rows of dim values
                out_data = numpy.reshape(in_data, (-1, seq_dim), order='C')
                offset += length[0]
                writer.insert_batch(numpy.asarray([out_data]), [out_data.shape[0]], [tag])
            writer.close()
|
class GriffinLim(Job):
    """
    Run Griffin & Lim algorithm on linear spectogram features with specified audio settings.
    """

    def __init__(self, linear_features, iterations, sample_rate, window_shift, window_size, preemphasis, file_format='ogg', corpus_format='bliss'):
        """
        :param linear_features: hdf file with linear spectrograms (RETURNN dump format)
        :param iterations: number of Griffin-Lim iterations
        :param sample_rate: output audio sample rate
        :param window_shift: STFT window shift
        :param window_size: STFT window size
        :param preemphasis: preemphasis factor to undo
        :param file_format: 'wav' or 'ogg'
        :param corpus_format: 'bliss', 'json' or None
        """
        self.set_attrs(locals())
        self.out_folder = self.output_path('audio', directory=True)
        if (corpus_format == 'bliss'):
            self.out_corpus = self.output_path('audio/corpus.xml.gz')
        else:
            # NOTE(review): out_corpus=None is later passed through tk.uncached_path()
            # in run(); confirm that this is supported for non-bliss formats
            self.out_corpus = None
        self.rqmt = {'cpu': 4, 'mem': 8, 'time': 12}

    def tasks(self):
        (yield Task('run', rqmt=self.rqmt))

    def run(self):
        assert os.path.isdir(tk.uncached_path(self.out_folder))
        assert (self.file_format in ['wav', 'ogg'])
        assert (self.corpus_format in ['bliss', 'json', None])
        ref_linear_data = h5py.File(tk.uncached_path(self.linear_features), 'r')
        rl_inputs = ref_linear_data['inputs']
        rl_tags = ref_linear_data['seqTags']
        rl_lengths = ref_linear_data['seqLengths']
        # the hdf stores the positive half of the spectrum: n_fft = 2 * feature dim
        n_fft = (rl_inputs[0].shape[0] * 2)
        print(('N_FFT from HDF: % i' % n_fft))
        converter = GLConverter(out_folder=tk.uncached_path(self.out_folder), out_corpus=tk.uncached_path(self.out_corpus), sample_rate=self.sample_rate, window_shift=self.window_shift, window_size=self.window_size, n_fft=n_fft, iterations=self.iterations, preemphasis=self.preemphasis, file_format=self.file_format, corpus_format=self.corpus_format)
        # convert in groups of ~512 spectrograms with a 4-way process pool to bound memory
        p = pool.Pool(4)
        loaded_spectograms = []
        offset = 0
        group = 0
        for (i, (tag, length)) in enumerate(zip(rl_tags, rl_lengths)):
            tag = (tag if isinstance(tag, str) else tag.decode())
            loaded_spectograms.append((tag, numpy.asarray(rl_inputs[offset:(offset + length[0])])))
            offset += length[0]
            if (len(loaded_spectograms) > 512):
                print(('decode group %i' % group))
                group += 1
                recordings = p.map(converter.convert, loaded_spectograms)
                if (self.corpus_format == 'bliss'):
                    for recording in recordings:
                        converter.corpus.add_recording(recording)
                del loaded_spectograms
                loaded_spectograms = []
        # flush the remaining spectrograms of the last (partial) group
        if (len(loaded_spectograms) > 0):
            recordings = p.map(converter.convert, loaded_spectograms)
            if (self.corpus_format == 'bliss'):
                for recording in recordings:
                    converter.corpus.add_recording(recording)
        converter.save_corpus()
|
class MultiPath:
    """A path template standing for a set of hidden paths (e.g. per-task outputs)."""

    def __init__(self, path_template, hidden_paths, cached=False, path_root=None, hash_overwrite=None):
        self.path_template = path_template
        self.hidden_paths = hidden_paths
        self.cached = cached
        self.path_root = path_root
        self.hash_overwrite = hash_overwrite

    def __str__(self):
        path = self.path_template if self.path_root is None else os.path.join(self.path_root, self.path_template)
        # wrap in `cf` so the cluster cache manager resolves the file
        return '`cf %s`' % path if self.cached else path

    def __sis_state__(self):
        # hash_overwrite replaces the template in the sisyphus hash only
        template = self.path_template if self.hash_overwrite is None else self.hash_overwrite
        return {'path_template': template, 'hidden_paths': self.hidden_paths, 'cached': self.cached}
|
class MultiOutputPath(MultiPath):
    """MultiPath anchored inside a job's output directory, relative to the sisyphus base dir."""

    def __init__(self, creator, path_template, hidden_paths, cached=False):
        template = os.path.join(creator._sis_path(gs.JOB_OUTPUT), path_template)
        super().__init__(template, hidden_paths, cached, gs.BASE_DIR)
|
def write_paths_to_file(file, paths):
    """Write one resolved path per line into *file* (resolution via tk.uncached_path)."""
    lines = [tk.uncached_path(p) + '\n' for p in paths]
    with open(tk.uncached_path(file), 'w') as f:
        f.writelines(lines)
|
def zmove(src, target):
    """Move *src* to *target*, gzip-compressing the source first when it is not a .gz yet."""
    src = tk.uncached_path(src)
    target = tk.uncached_path(target)
    if not src.endswith('.gz'):
        # gzip refuses to overwrite: drop a stale compressed copy first
        stale = src + '.gz'
        if os.path.exists(stale):
            os.unlink(stale)
        sp.check_call(['gzip', src])
        src += '.gz'
    if not target.endswith('.gz'):
        target += '.gz'
    shutil.move(src, target)
|
def delete_if_exists(file):
    """Remove *file* if it is present on disk; silently do nothing otherwise."""
    if os.path.exists(file):
        os.remove(file)
|
def delete_if_zero(file):
    """Remove *file* only when it exists and is empty (size zero)."""
    if os.path.exists(file) and os.stat(file).st_size == 0:
        os.remove(file)
|
def backup_if_exists(file):
    """Move *file* aside to a gzipped 'backup.NNNN.<name>' using the next free index."""
    if not os.path.exists(file):
        return
    directory, base = os.path.split(file)
    base = add_suffix(base, '.gz')
    idx = 1
    # find the first unused backup index
    while os.path.exists(os.path.join(directory, 'backup.%.4d.%s' % (idx, base))):
        idx += 1
    zmove(file, os.path.join(directory, 'backup.%.4d.%s' % (idx, base)))
|
def remove_suffix(string, suffix):
    """
    Return *string* without a trailing *suffix*; unchanged if the suffix is absent.

    An empty suffix is a no-op: the previous ``string[:-len(suffix)]`` slice
    evaluated to ``string[:0]`` for ``suffix == ''`` and wrongly returned ``''``.
    """
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
|
# NOTE: the following non-Python text is a dataset-viewer scrape artifact, commented
# out to keep the module importable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.