code stringlengths 17 6.64M |
|---|
def add_suffix(string, suffix):
    """Return `string` with `suffix` appended, unless it already ends with it."""
    return string if string.endswith(suffix) else string + suffix
|
def partition_into_tree(l, m):
    """Transforms the list l into a nested list where each sub-list has at most length m + 1."""
    current = next_level = l
    while len(next_level) > 1:
        current = next_level
        next_level = []
        full_groups, remainder = divmod(len(current), m)
        pos = 0
        if remainder <= full_groups:
            # Spread the remainder: `remainder` groups of size m+1, rest of size m.
            for _ in range(remainder):
                next_level.append(current[pos:pos + m + 1])
                pos += m + 1
            for _ in range(full_groups - remainder):
                next_level.append(current[pos:pos + m])
                pos += m
            assert pos == len(current)
        else:
            # Too few full groups to absorb the remainder: keep it as a final short group.
            for _ in range(full_groups):
                next_level.append(current[pos:pos + m])
                pos += m
            next_level.append(current[pos:pos + remainder])
            assert pos + remainder == len(current)
    return current
|
def reduce_tree(func, tree):
    """Recursively apply `func` bottom-up over a nested list structure."""
    reduced = []
    for element in tree:
        if type(element) == list:
            reduced.append(reduce_tree(func, element))
        else:
            reduced.append(element)
    return func(reduced)
|
def uopen(path, *args, **kwargs):
    """Open a (possibly sisyphus) path, transparently using gzip for ``.gz`` files."""
    resolved = tk.uncached_path(path)
    opener = gzip.open if resolved.endswith('.gz') else open
    return opener(resolved, *args, **kwargs)
|
def get_val(var):
    """Return the resolved value of a sisyphus Variable, or `var` itself otherwise."""
    return var.get() if isinstance(var, Variable) else var
|
def chunks(l, n):
    """Split `l` into `n` contiguous chunks whose sizes differ by at most one.

    The first ``len(l) % n`` chunks carry one extra element.

    :param list[T] l: list which should be split into chunks
    :param int n: number of chunks
    :return: yields n chunks
    :rtype: list[list[T]]
    """
    extra = len(l) % n
    base = len(l) // n
    lower = 0
    for idx in range(n):
        upper = lower + base + (1 if idx < extra else 0)
        yield l[lower:upper]
        lower = upper
|
class AutoCleanup(Job):
    """Sisyphus job that deletes the work directories of a list of finished jobs."""

    def __init__(self, job_list, trigger):
        """
        :param list[Job] job_list: jobs whose work directories will be removed
        :param tk.Path trigger: path whose existence gates the cleanup
        """
        self.job_list = job_list
        self.trigger = trigger
        self.out = self.output_path('cleanup_complete')
        # Removing the job that produces the trigger would delete the trigger itself.
        assert (trigger.creator not in job_list)

    def tasks(self):
        # Cleanup is cheap; run as a mini task.
        (yield Task('run', mini_task=True))

    def run(self):
        import shutil
        for job in self.job_list:
            print(('remove %s' % str(job)))
            # Irreversibly removes the job's whole work directory.
            shutil.rmtree(job._sis_path(abspath=True))
        # NOTE(review): assumes Job.sh substitutes '{out}' from the job's attributes
        # to create the completion marker -- confirm against the sisyphus API.
        self.sh('touch {out}')
|
def engine():
    """Create the sisyphus local engine used to run jobs (4 CPUs, 1 GPU)."""
    from sisyphus.localengine import LocalEngine
    local_engine = LocalEngine(cpus=4, gpus=1)
    return local_engine
|
def number_convert(word):
    """Spell out `word` via num2words if it parses as a number, else return it unchanged.

    :param str word: a single token
    :rtype: str
    """
    try:
        value = float(word)
        return num2words(value)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Keep the best-effort fallback for real errors only.
        return word
|
def main():
    """End-to-end TTS decoding: read text lines from stdin, tokenize via the Moses
    tokenizer, map words to phonemes with a CMU-style lexicon, synthesize mel
    features with a RETURNN TTS model, vocode with an MB-MelGAN generator and
    write one ``out_<i>.wav`` (16 kHz, 16-bit mono) per input line.
    """
    parser = argparse.ArgumentParser(description='TTS decoder running RETURNN TTS and an MB-MelGAN vocoder')
    parser.add_argument('--returnn_config', type=str, help='RETURNN config file (.config)')
    parser.add_argument('--vocab_file', type=str, help='RETURNN vocab file (.pkl)')
    parser.add_argument('--pronunciation_lexicon', type=str, help='CMU style pronuncation lexicon')
    parser.add_argument('--pwg_config', type=str, help='ParallelWaveGAN config (.yaml)')
    parser.add_argument('--pwg_checkpoint', type=str, help='ParallelWaveGAN checkpoint (.pkl)')
    args = parser.parse_args()
    # Initialize RETURNN in search mode and build the TTS network.
    rnn.init(args.returnn_config)
    rnn.engine.use_search_flag = True
    rnn.engine.init_network_from_config(rnn.config)
    returnn_vocab = Vocabulary(vocab_file=args.vocab_file, unknown_label=None)
    # Output tensor of the TTS network (the generated feature frames).
    returnn_output_dict = {'output': rnn.engine.network.get_default_output_layer().output.placeholder}
    # Build the vocoder on CPU: MelGAN generator + PQMF synthesis filter bank.
    pwg_config = yaml.load(open(args.pwg_config), Loader=yaml.Loader)
    pyt_device = torch.device('cpu')
    generator = pwg_models.MelGANGenerator(**pwg_config['generator_params'])
    generator.load_state_dict(torch.load(args.pwg_checkpoint, map_location='cpu')['model']['generator'])
    generator.remove_weight_norm()
    pwg_model = generator.eval().to(pyt_device)
    pwg_pad_fn = torch.nn.ReplicationPad1d(pwg_config['generator_params'].get('aux_context_window', 0))
    pwg_pqmf = PQMF(pwg_config['generator_params']['out_channels']).to(pyt_device)
    # word -> list of phonemes, read from the CMU-style lexicon (word SP phonemes...).
    pronunciation_dictionary = {}
    with open(args.pronunciation_lexicon, 'rt') as lexicon:
        for lexicon_entry in lexicon.readlines():
            (word, phonemes) = lexicon_entry.strip().split(' ', maxsplit=1)
            pronunciation_dictionary[word] = phonemes.split(' ')
    tokenizer = ['perl', './scripts/tokenizer/tokenizer.perl', '-l', 'en', '-no-escape']
    audios = []
    for line in sys.stdin.readlines():
        line = line.strip().lower()
        # Tokenize through the external Moses tokenizer process.
        p = subprocess.Popen(tokenizer, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        line = p.communicate(input=line.encode('UTF-8'))[0].decode('UTF-8').strip()
        p.terminate()
        print(line)
        words = list(map(number_convert, line.split(' ')))
        print(words)
        # '_' separates words, '~' ends the sequence; out-of-lexicon words are
        # silently dropped.
        phoneme_sequence = ' _ '.join([' '.join(pronunciation_dictionary[w]) for w in words if (w in pronunciation_dictionary.keys())])
        phoneme_sequence += ' _ ~'
        try:
            classes = numpy.asarray(returnn_vocab.get_seq(phoneme_sequence), dtype='int32')
            feed_dict = {'classes': classes}
            # NOTE(review): 77 appears to be the hard-coded phoneme vocab size -- confirm.
            dataset = StaticDataset([feed_dict], output_dim={'classes': (77, 1)})
            result = rnn.engine.run_single(dataset, 0, returnn_output_dict)
        except Exception as e:
            print(e)
            raise e
        # Drop the batch axis and transpose; assumed (time, features) -> (features, time)
        # as expected by the vocoder -- TODO confirm against the model config.
        feature_data = numpy.squeeze(result['output']).T
        print(feature_data.shape)
        with torch.no_grad():
            input_features = pwg_pad_fn(torch.from_numpy(feature_data).unsqueeze(0)).to(pyt_device)
            audio_waveform = pwg_pqmf.synthesis(pwg_model(input_features)).view((- 1)).cpu().numpy()
        # Scale the float waveform to 16-bit PCM bytes.
        audios.append(numpy.asarray((audio_waveform * ((2 ** 15) - 1)), dtype='int16').tobytes())
    for (i, audio) in enumerate(audios):
        wave_writer = wave.open(('out_%i.wav' % i), 'wb')
        wave_writer.setnchannels(1)
        wave_writer.setframerate(16000)
        wave_writer.setsampwidth(2)
        wave_writer.writeframes(audio)
        wave_writer.close()
|
class PWGTrain(Job):
    """Sisyphus job training a ParallelWaveGAN vocoder via
    ``python -m parallel_wavegan.bin.train``, resuming automatically from the
    newest checkpoint when restarted.
    """

    def __init__(self, pwg_config, pwg_train_dataset, pwg_dev_dataset, pwg_exe=PWG_EXE, pwg_src_root=PWG_ROOT):
        """
        :param dict pwg_config: ParallelWaveGAN training configuration
        :param Path pwg_train_dataset:
        :param Path pwg_dev_dataset:
        :param Path|str pwg_exe: python executable used to launch training
        :param Path|str pwg_src_root: ParallelWaveGAN checkout, exported as PYTHONPATH
        """
        self.pwg_config = pwg_config
        self.pwg_train_dataset = pwg_train_dataset
        self.pwg_dev_dataset = pwg_dev_dataset
        self.pwg_exe = pwg_exe
        self.pwg_src_root = pwg_src_root
        self.config_file = self.output_path('pwg_config.py')
        self.model_folder = self.output_path('model', directory=True)
        max_steps = pwg_config['train_max_steps']
        interval = pwg_config['save_interval_steps']
        # One registered output per periodic checkpoint the trainer will write.
        self.models = {i: self.output_path(('model/checkpoint-%isteps.pkl' % i)) for i in range(interval, (max_steps + interval), interval)}
        self.rqmt = {'cpu': 4, 'gpu': 1, 'time': 120, 'mem': 16}

    def tasks(self):
        (yield Task('create_files', mini_task=True))
        (yield Task('run', 'run', rqmt=self.rqmt))

    def path_available(self, path):
        """Treat an output as available as soon as the file exists on disk, so
        downstream jobs can already consume intermediate checkpoints."""
        res = super().path_available(path)
        if res:
            return res
        res = os.path.exists(path.get_path())
        if res:
            return res
        return False

    def build_train_call(self, checkpoint=None):
        """Assemble the training command line; optionally resume from `checkpoint`."""
        train_call = [tk.uncached_path(self.pwg_exe), '-m', 'parallel_wavegan.bin.train']
        train_call += ['--config', tk.uncached_path(self.config_file)]
        train_call += ['--train-dumpdir', tk.uncached_path(self.pwg_train_dataset)]
        train_call += ['--dev-dumpdir', tk.uncached_path(self.pwg_dev_dataset)]
        train_call += ['--outdir', tk.uncached_path(self.model_folder)]
        if checkpoint:
            train_call += ['--resume', str(checkpoint)]
        return train_call

    def create_files(self):
        """Write the config file (python-dict text) and an executable run.sh."""
        config_lines = []
        unreadable_data = {}
        pp = pprint.PrettyPrinter(indent=2, width=150)
        for (k, v) in sorted(self.pwg_config.items()):
            if pprint.isreadable(v):
                config_lines.append(("'%s': %s," % (k, pp.pformat(v))))
            else:
                unreadable_data[k] = v
        # A value pprint cannot round-trip would silently corrupt the config file.
        if (len(unreadable_data) > 0):
            assert False, 'unreadable data'
        with open(tk.uncached_path(self.config_file), 'wt', encoding='utf-8') as f:
            f.write('{\n')
            f.write('\n'.join(config_lines))
            f.write('}\n')
        with open('run.sh', 'wt') as f:
            f.write('#!/bin/bash\n')
            f.write(('export PYTHONPATH=%s\n' % self.pwg_src_root))
            train_call = self.build_train_call()
            f.write((' '.join(train_call) + '\n'))
        # chmod 755: read/execute for all, write for owner.
        os.chmod('run.sh', ((((((stat.S_IRUSR | stat.S_IRGRP) | stat.S_IROTH) | stat.S_IWUSR) | stat.S_IXUSR) | stat.S_IXGRP) | stat.S_IXOTH))

    def run(self):
        """Start training; if any checkpoint exists, resume from the newest one."""
        checkpoints = glob.glob(os.path.join(tk.uncached_path(self.model_folder), '*.pkl'))
        if (len(checkpoints) > 0):
            print('resume training')
            latest_checkpoint = max(checkpoints, key=os.path.getctime)
            with open('resume.sh', 'wt') as f:
                f.write('#!/bin/bash\n')
                f.write(('export PYTHONPATH=%s\n' % self.pwg_src_root))
                train_call = self.build_train_call(checkpoint=latest_checkpoint)
                f.write((' '.join(train_call) + '\n'))
            os.chmod('resume.sh', ((((((stat.S_IRUSR | stat.S_IRGRP) | stat.S_IROTH) | stat.S_IWUSR) | stat.S_IXUSR) | stat.S_IXGRP) | stat.S_IXOTH))
            subprocess.check_call(['./resume.sh'])
        else:
            subprocess.check_call(['./run.sh'])
|
class PWGBuildDataset(Job):
    """
    This Job converts a dataset in zip_format and an HDF output file generated into the appropriate format for
    the PWG training. The frame hop needs to be specified to adjust for the possible length mismatch between
    features and audio.
    """

    def __init__(self, zip_dataset, returnn_hdf_mels, frame_hop, segment_file=None, compression=None):
        """
        :param Path zip_dataset: ogg-zip dataset (audio files + '<name>.txt' metadata)
        :param Path returnn_hdf_mels: HDF with 'inputs'/'seqTags'/'seqLengths' datasets
        :param int frame_hop: samples per feature frame, used to align audio and features
        :param Path segment_file: optional whitelist of segment names
        :param str|None compression: h5py compression for the per-sequence files
        """
        self.zip_dataset = zip_dataset
        self.returnn_hdf_mels = returnn_hdf_mels
        self.frame_hop = frame_hop
        self.segment_file = segment_file
        self.compression = compression
        assert (compression in [None, 'gzip', 'lzf'])
        self.out = self.output_path('pwg_dataset', directory=True)

    def tasks(self):
        (yield Task('run', mini_task=True))

    def _read_segment_list(self, segment_file):
        """
        read a list of segment names in either plain text or gzip

        :param str segment_file:
        :rtype: set[str]
        """
        if segment_file.endswith('.gz'):
            import gzip
            segment_file_handle = gzip.open(segment_file)
            return set([s.decode() for s in segment_file_handle.read().splitlines()])
        else:
            segment_file_handle = open(segment_file, 'rt')
            return set(segment_file_handle.read().splitlines())

    def run(self):
        import zipfile
        import soundfile
        import h5py
        import numpy
        import io
        zip_dataset_path = tk.uncached_path(self.zip_dataset)
        zip_data = zipfile.ZipFile(zip_dataset_path)
        (zip_name, _) = os.path.splitext(os.path.basename(zip_dataset_path))
        # '<zip_name>.txt' inside the archive holds a python list of entry dicts.
        # NOTE(review): eval on archive contents -- only for trusted datasets.
        data = eval(zip_data.read(('%s.txt' % zip_name)))
        assert (data and isinstance(data, list))
        first_entry = data[0]
        assert isinstance(first_entry, dict)
        assert isinstance(first_entry['text'], str)
        assert isinstance(first_entry['file'], str)
        assert isinstance(first_entry['seq_name'], str)
        assert isinstance(first_entry['duration'], float)
        segments = (self._read_segment_list(tk.uncached_path(self.segment_file)) if self.segment_file else None)
        feature_hdf = h5py.File(tk.uncached_path(self.returnn_hdf_mels), 'r')
        feature_data = feature_hdf['inputs']
        feature_seq_names = feature_hdf['seqTags']
        feature_seq_lengths = feature_hdf['seqLengths']
        # 'inputs' concatenates all sequences; compute the [start, end) slice per tag.
        seq_positions = {}
        offset = 0
        for (name, length) in zip(feature_seq_names, feature_seq_lengths):
            name = (name.decode('UTF-8') if isinstance(name, bytes) else name)
            seq_positions[name] = (offset, (offset + length[0]))
            offset += length[0]
        for entry in data:
            name = entry['seq_name']
            if (segments and (name not in segments)):
                continue
            if (name not in seq_positions.keys()):
                print(('%s was not in the features, skipping' % name))
                continue
            raw_audio = io.BytesIO(zip_data.read(('%s/%s' % (zip_name, entry['file']))))
            (audio, samplerate) = soundfile.read(raw_audio, dtype='float32')
            audio_len = audio.shape[0]
            start_pos = seq_positions[name][0]
            end_pos = seq_positions[name][1]
            features = feature_data[start_pos:end_pos]
            # Trim so the audio length is an exact multiple of the frame hop and
            # matches the number of kept feature frames.
            matching_num_features = (audio_len // self.frame_hop)
            features = features[:matching_num_features]
            audio_len = (matching_num_features * self.frame_hop)
            audio = audio[:audio_len]
            path = (os.path.join(tk.uncached_path(self.out), name) + '.h5')
            os.makedirs(os.path.dirname(path), exist_ok=True)
            # NOTE(review): h5py.File without an explicit mode -- relies on the
            # legacy default; confirm against the installed h5py version.
            pwg_seq_h5_file = h5py.File(path)
            wavs = pwg_seq_h5_file.create_dataset('wave', (audio_len,), compression=self.compression, dtype='float32')
            feats = pwg_seq_h5_file.create_dataset('feats', shape=features.shape, compression=self.compression, dtype='float32')
            wavs[:] = audio
            feats[:] = numpy.asarray(features, dtype='float32')
            pwg_seq_h5_file.close()
|
def generic_open(filename, mode='r'):
    """
    Wrapper around :func:`open`.
    Automatically wraps :func:`gzip.open` if filename ends with ``".gz"``.

    :param str filename:
    :param str mode: text mode by default
    :rtype: typing.TextIO|typing.BinaryIO
    """
    if not filename.endswith('.gz'):
        return open(filename, mode)
    import gzip
    # gzip.open defaults to binary; force text mode unless binary was requested.
    return gzip.open(filename, mode if 'b' in mode else mode + 't')
|
def sh(*args):
    """Echo and run a command; raises CalledProcessError on non-zero exit."""
    command_line = ' '.join(args)
    print('$ %s' % command_line)
    subprocess.check_call(args)
|
@contextlib.contextmanager
def pushd(d):
    """
    Context manager: temporarily chdir into `d`, restoring the previous
    working directory on exit.

    :param str d: directory
    """
    assert os.path.isdir(d)
    old_working_dir = os.getcwd()
    os.chdir(d)
    try:
        (yield)
    finally:
        # Bug fix: previously the old cwd was not restored if the body raised.
        os.chdir(old_working_dir)
|
def create_librispeech_txt(dataset_dir):
    """
    Create separate txt files to be used with :class:`returnn.OggZipDataset`.
    Example:
    https://github.com/rwth-i6/returnn-experiments/blob/master/2019-asr-e2e-trafo-vs-lstm/tedlium2/full-setup/03_convert_to_ogg.py

    For each part, writes '<part>.txt' as a python list of
    {'text', 'file', 'seq_name', 'duration'} dicts and gzips it.

    :param str dataset_dir:
    """
    output_dir = dataset_dir
    with pushd(output_dir):
        for part in Parts:
            dest_meta_filename_gz = ('%s.txt.gz' % part)
            if os.path.exists(dest_meta_filename_gz):
                print('File exists:', dest_meta_filename_gz)
                continue
            dest_meta_filename = ('%s.txt' % part)
            dest_meta_file = open(dest_meta_filename, 'w')
            dest_meta_file.write('[\n')
            zip_filename = ('%s/%s.zip' % (dataset_dir, part))
            assert os.path.exists(zip_filename)
            zip_file = ZipFile(zip_filename)
            assert zip_file.filelist
            count_lines = 0
            for info in zip_file.filelist:
                assert isinstance(info, ZipInfo)
                path = info.filename.split('/')
                if path[0].startswith(part):
                    subdir = path[0]
                    assert (subdir == part)
                # Each '.trans.txt' lists "<seq_name> <transcription>" lines.
                if path[(- 1)].endswith('.trans.txt'):
                    print('read', part, path[(- 1)])
                    for line in zip_file.read(info).decode('utf8').splitlines():
                        (seq_name, txt) = line.split(' ', 1)
                        count_lines += 1
                        ogg_filename = ('%s/%s.flac.ogg' % ('/'.join(path[:(- 1)]), seq_name))
                        ogg_bytes = zip_file.read(ogg_filename)
                        assert (len(ogg_bytes) > 0)
                        # ffprobe needs a real file, so dump the ogg to a temp file
                        # to read its duration.
                        with tempfile.NamedTemporaryFile(suffix='.ogg') as temp_file:
                            temp_file.write(ogg_bytes)
                            temp_file.flush()
                            duration_str = subprocess.check_output(['ffprobe', temp_file.name, '-show_entries', 'format=duration', '-v', 'quiet', '-of', 'compact'], stderr=subprocess.STDOUT).decode('utf8').strip()
                        # ffprobe compact output looks like 'format|duration=1.23'.
                        duration_str = duration_str.split('=')[(- 1)]
                        assert (float(duration_str) > 0)
                        dest_meta_file.write(("{'text': %r, 'file': %r, 'seq_name': '%s', 'duration': %s},\n" % (txt, ogg_filename, ('%s-%s' % (part, seq_name)), duration_str)))
            assert (count_lines > 0)
            dest_meta_file.write(']\n')
            dest_meta_file.close()
            sh('gzip', dest_meta_filename)
            assert os.path.exists(dest_meta_filename_gz)
|
def extract_raw_strings_py(part):
    """
    Dump the raw transcription strings of one dataset part via RETURNN's
    dump-dataset-raw-strings tool, producing '<part>.py.txt.gz'. Skips the
    work if the output already exists.

    :param str part:
    :rtype: str
    :return: path of the generated (or pre-existing) output file
    """
    dataset_dir = '%s/data/dataset-ogg' % my_dir
    dataset_path_prefix = '%s/%s' % (dataset_dir, part)
    py_txt_output_path = '%s/data/dataset/%s.py.txt.gz' % (my_dir, part)
    if os.path.exists(py_txt_output_path):
        print('File exists, skipping:', py_txt_output_path)
        return py_txt_output_path
    dataset_opts = {
        'class': 'OggZipDataset',
        'path': [('%s.zip' % dataset_path_prefix), ('%s.txt.gz' % dataset_path_prefix)],
        'use_cache_manager': True,
        'audio': None,
        'targets': None}
    cmd = [
        '%s/returnn/tools/dump-dataset-raw-strings.py' % my_dir,
        '--dataset', repr(dataset_opts),
        '--out', py_txt_output_path]
    sh(*cmd)
    assert os.path.exists(py_txt_output_path)
    return py_txt_output_path
|
def main():
    """Create the per-part txt metadata, then dump all train-part transcriptions
    into one combined 'train-trans-all.txt' (one raw text line per sequence)."""
    os.makedirs(('%s/data/dataset' % my_dir), exist_ok=True)
    create_librispeech_txt(dataset_dir=('%s/data/dataset-ogg' % my_dir))
    trans_file = open(('%s/data/dataset/train-trans-all.txt' % my_dir), 'w')
    for part in Parts:
        py_txt_output_path = extract_raw_strings_py(part)
        if part.startswith('train'):
            # NOTE(review): eval of generated file contents -- trusted pipeline output only.
            py_txt = eval(generic_open(py_txt_output_path).read())
            assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
            # Sanity-check the expected seq_tag -> raw-text mapping shape.
            (example_key, example_value) = next(iter(py_txt.items()))
            assert (isinstance(example_key, str) and isinstance(example_value, str))
            for (seq_tag, raw_txt) in sorted(py_txt.items()):
                trans_file.write(('%s\n' % raw_txt))
    trans_file.close()
|
def get_filename(config):
    """Locate '<base_dir>/config-train/<config>.config' among the known base dirs."""
    for base_dir in base_dirs:
        candidate = '%s/config-train/%s.config' % (base_dir, config)
        print(candidate)
        if os.path.exists(candidate):
            return candidate
    raise Exception('not found: %s' % config)
|
def main():
    """Copy every requested train config next to this script, unless already present."""
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    for base_dir in base_dirs:
        assert os.path.exists(base_dir)
    for config in configs:
        source_fn = get_filename(config)
        target_fn = '%s.config' % config
        if not os.path.exists(target_fn):
            shutil.copy(source_fn, target_fn)
|
def save_corpus_segments_to_file(output_filename, input_dict):
    """
    Write seq_tag -> text pairs as a Python-dict-style text file.

    :param str output_filename:
    :param dict[str,str] input_dict:
    """
    with open(output_filename, 'w') as out_file:
        out_file.write('{\n')
        for seg_tag, text in input_dict.items():
            # Only leading whitespace is stripped from the value.
            out_file.write('"{}": "{}",\n'.format(seg_tag, text.lstrip()))
        out_file.write('}\n')
|
class PhoneMapper():
    """Maps between words and their phoneme (or unicode-symbol) representations,
    based on a lexicon file, with optional disambiguation suffixes for
    homophones. All loading happens lazily on first use."""

    def __init__(self, lexicon_filename, phone_unicode_map_filename=None):
        """
        :param str lexicon_filename:
        :param str phone_unicode_map_filename: optional file with a python dict
            phone -> unicode symbol (evaluated with eval)
        """
        self._lexicon_filename = lexicon_filename
        self._phone_unicode_map = phone_unicode_map_filename
        self._lex_dict = {}
        self.lexicon = None
        self.phone_unicode_map = None
        self._loaded = False

    def _lazy_load(self):
        """Load the lexicon and build word -> phoneme-string list, appending a
        '#<n>' disambiguation suffix when the same phoneme string occurs for
        multiple words."""
        if self._loaded:
            return
        self._loaded = True
        self.lexicon = Lexicon(self._lexicon_filename)
        if self._phone_unicode_map:
            # NOTE(review): eval of file contents -- only use with trusted map files.
            self.phone_unicode_map = eval(open(self._phone_unicode_map).read())
        for word in self.lexicon.lemmas:
            list_phones = []
            for item in self.lexicon.lemmas[word]['phons']:
                list_phones.append(item['phon'])
            if self.phone_unicode_map:
                self._lex_dict[word] = self._to_unicode_list(list_phones)
            else:
                self._lex_dict[word] = list_phones
        duplicates = {}
        for (word, phones) in sorted(self._lex_dict.items()):
            # NOTE(review): `phones` IS self._lex_dict[word]; the remove/insert
            # below mutates the list while it is being iterated, which can skip
            # elements -- verify this is the intended behavior.
            for phone in phones:
                if (phone in duplicates):
                    self._lex_dict[word].remove(phone)
                    self._lex_dict[word].insert(0, ('%s#%s' % (phone, duplicates[phone])))
                    duplicates[phone] += 1
                else:
                    duplicates[phone] = 1

    def map_words_to_phones(self, seq):
        """
        :param str seq: white-space separated string, seq of words
        :rtype: str
        """
        self._lazy_load()
        words = seq.split()
        result = []
        for word in words:
            # Out-of-lexicon words are silently dropped.
            if (word in self._lex_dict):
                result.append(''.join(self._lex_dict[word][0].split()))
        return ' '.join(result)

    def _to_unicode(self, phones):
        """
        :param str phones: white-space separated phonemes
        :rtype: str
        """
        phone_list = phones.split()
        result = ''
        for k in phone_list:
            result += self.phone_unicode_map[k]
        return result

    def _to_unicode_list(self, list_of_phones):
        """
        :param list[str] list_of_phones:
        :rtype: list[str]
        """
        res = []
        for item in list_of_phones:
            res.append(self._to_unicode(item))
        return res

    def get_phones_to_word_mapping(self, disamb=True):
        """
        :return: phones->word
        :rtype: dict[str,str]
        """
        self._lazy_load()
        reverse_lex_dict = {}
        for (word, phone_list) in self._lex_dict.items():
            for phone_seq in phone_list:
                reverse_lex_dict[phone_seq] = word
        return reverse_lex_dict

    def map_phones_to_word_on_corpus(self, segments):
        """
        :param dict[str,str] segments: seq_tag->seq of unicode phones
        :return: seq_tag->seq of words
        :rtype: dict[str,str]
        """
        reverse_lex_dict = self.get_phones_to_word_mapping()
        result_disamb = {}
        for (seg_tag, word_seq) in segments.items():
            transform = []
            for unicode in word_seq.split():
                if (unicode in reverse_lex_dict):
                    transform.append(reverse_lex_dict[unicode])
                else:
                    transform.append('[UNKNOWN]')
            result_disamb[seg_tag] = ' '.join(transform)
        return result_disamb
|
def convert(string_num):
    """Spell out an integer (given as int or decimal string) as English words.

    Leading zeros of a string input are each spelled as 'zero '. Every word
    carries a trailing space, so callers typically .strip() the result.
    """
    if isinstance(string_num, str) and string_num.startswith('0'):
        return 'zero ' + convert(string_num[1:])
    num = int(string_num)
    units = ('', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine ', 'ten ', 'eleven ', 'twelve ', 'thirteen ', 'fourteen ', 'fifteen ', 'sixteen ', 'seventeen ', 'eighteen ', 'nineteen ')
    tens = ('', '', 'twenty ', 'thirty ', 'forty ', 'fifty ', 'sixty ', 'seventy ', 'eighty ', 'ninety ')
    if num < 0:
        return 'minus ' + convert(-num)
    if num < 20:
        return units[num]
    if num < 100:
        return tens[num // 10] + units[num % 10]
    if num < 1000:
        return units[num // 100] + 'hundred ' + convert(num % 100)
    if num < 1000000:
        return convert(num // 1000) + 'thousand ' + convert(num % 1000)
    if num < 1000000000:
        return convert(num // 1000000) + 'million ' + convert(num % 1000000)
    return convert(num // 1000000000) + 'billion ' + convert(num % 1000000000)
|
def hasNumber(inputString):
    """Return True if any character of `inputString` is a digit."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
|
def separate(iString):
    """Split `iString` into maximal runs of letters and maximal runs of digits;
    any other character becomes its own run."""
    runs = []
    current = iString[0]
    previous = iString[0]
    for pos, ch in enumerate(iString[1:]):
        continues_alpha = ch.isalpha() and previous.isalpha()
        continues_digit = ch.isnumeric() and previous.isnumeric()
        if continues_alpha or continues_digit:
            current += ch
        else:
            runs.append(current)
            current = ch
        previous = ch
        # Flush the final run once the last character was consumed.
        if pos == (len(iString) - 2):
            runs.append(current)
            current = ''
    if len(iString) > 1:
        return runs
    return [iString]
|
def to_unicode_list(input_l):
    """Map :func:`to_unicode` over a list of phoneme strings."""
    return [to_unicode(entry) for entry in input_l]
|
def to_unicode(input):
    """Concatenate the unicode symbol of every whitespace-separated phone in `input`."""
    symbols = [phone_to_unicode[phone] for phone in input.split()]
    return ''.join(symbols)
|
def main():
    """Build an XML lexicon (phoneme inventory + lemmas) from a BPE vocab,
    a source lexicon and a phones-BPE file, supporting char / bpe / charbpe
    modes with optional homophone disambiguation, and write it with --output.

    NOTE(review): this block's indentation was reconstructed; branch nesting
    around temp_lemmas and the lemma-emission loops should be double-checked
    against the original.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--bpe_vocab', required=True)
    arg_parser.add_argument('--lexicon', required=True)
    arg_parser.add_argument('--phones_bpe', required=True)
    arg_parser.add_argument('--bpe', action='store_true')
    arg_parser.add_argument('--char', action='store_true')
    arg_parser.add_argument('--charbpe', action='store_true')
    arg_parser.add_argument('--disamb', action='store_true')
    arg_parser.add_argument('--output', required=True)
    args = arg_parser.parse_args()
    bpe1k_file = args.bpe_vocab
    lexicon_file = args.lexicon
    phones_bpe_file = args.phones_bpe

    def create_specialTree(input):
        # Emit the special sentence-begin/end/unknown lemma elements.
        if (input == '</s>'):
            lemma = ET.SubElement(lex_root, 'lemma', special='sentence-end')
            orth = ET.SubElement(lemma, 'orth')
            synt = ET.SubElement(lemma, 'synt')
            tok = ET.SubElement(synt, 'tok')
            orth.text = '[SENTENCE-END]'
            tok.text = input
            eval = ET.SubElement(lemma, 'eval')  # shadows builtin eval; kept as-is
        elif (input == '<s>'):
            lemma = ET.SubElement(lex_root, 'lemma', special='sentence-begin')
            orth = ET.SubElement(lemma, 'orth')
            synt = ET.SubElement(lemma, 'synt')
            tok = ET.SubElement(synt, 'tok')
            orth.text = '[SENTENCE-BEGIN]'
            tok.text = input
            eval = ET.SubElement(lemma, 'eval')
        elif (input == '<unk>'):
            lemma = ET.SubElement(lex_root, 'lemma', special='unknown')
            orth = ET.SubElement(lemma, 'orth')
            synt = ET.SubElement(lemma, 'synt')
            tok = ET.SubElement(synt, 'tok')
            orth.text = '[UNKNOWN]'
            tok.text = input
            eval = ET.SubElement(lemma, 'eval')

    # Parse the BPE vocab file (python-dict-like text) into symbol -> index.
    with codecs.open(bpe1k_file, 'rU', 'utf-8') as file:
        seq = {}
        for line in file:
            if line.startswith(('{', '}')):
                continue
            line = line.replace(',', '')
            line = line.replace("'", '')
            (key, value) = line.strip().split(':')
            value = value.strip()
            seq[key] = value
    special_sign = ['L', 'N', 'S', 'V']
    extra_sign = ['</s>', '<s>', '<unk>']
    lex = Lexicon(lexicon_file)
    count = 0
    temp_lemmas = []
    # Collect orthographies (skipping the first 9 lexicon entries); in char mode,
    # digits inside an orthography are spelled out as words.
    for word in lex.lemmas:
        count += 1
        if (count > 9):
            if args.char:
                if hasNumber(lex.lemmas[word]['orth']):
                    word_ = ''
                    list_ = separate(lex.lemmas[word]['orth'])
                    for item in list_:
                        if item.isdigit():
                            word_ += convert(item)
                    # NOTE(review): non-digit runs are dropped here -- confirm intended.
                    temp_lemmas.append(word_.strip())
                else:
                    temp_lemmas.append(lex.lemmas[word]['orth'])
    # NOTE(review): temp_lemmas stays empty when --char is not set, so the
    # non-char lemma loop below would emit nothing -- verify.
    lex_root = ET.Element('lexicon')
    phone_inventory = ET.SubElement(lex_root, 'phoneme-inventory')
    # Build the phoneme inventory from the vocab symbols.
    for (key, v) in sorted(seq.items()):
        if (key not in extra_sign):
            phone = ET.SubElement(phone_inventory, 'phoneme')
            p_sym = ET.SubElement(phone, 'symbol')
            p_var = ET.SubElement(phone, 'variation')
            if (key in special_sign):
                p_var.text = 'none'
                if (key == 'L'):
                    p_sym.text = '[LAUGHTER]'
                elif (key == 'N'):
                    p_sym.text = '[NOISE]'
                elif (key == 'V'):
                    p_sym.text = '[VOCALIZEDNOISE]'
                else:
                    p_sym.text = '[SILENCE]'
            else:
                p_var.text = 'context'
                p_sym.text = key
        elif (key == '<s>'):
            create_specialTree(key)
        elif (key == '</s>'):
            create_specialTree(key)
        elif (key == '<unk>'):
            create_specialTree(key)
    # Noise-like lemmas map to themselves.
    for item in ['[NOISE]', '[VOCALIZEDNOISE]', '[LAUGHTER]']:
        lemma = ET.SubElement(lex_root, 'lemma')
        orth = ET.SubElement(lemma, 'orth')
        phon = ET.SubElement(lemma, 'phon', score='0.0')
        phon.text = item
        orth.text = item
        synt = ET.SubElement(lemma, 'synt')
        eval = ET.SubElement(lemma, 'eval')
    # Build word -> pronunciation(s): chars in char mode, unicode-mapped phones
    # in bpe mode, plain phone strings otherwise.
    phon_dict = {}
    if args.char:
        for word in lex.lemmas:
            if hasNumber(word):
                word_ = ''
                list_ = separate(word)
                for item in list_:
                    if item.isdigit():
                        word_ += convert(item)
                phon_dict[word] = word_
            else:
                phon_dict[word] = word
    else:
        for word in lex.lemmas:
            len_phons = len(lex.lemmas[word]['phons'])
            list_of_phons = []
            for x in range(len_phons):
                list_of_phons.append(lex.lemmas[word]['phons'][x]['phon'])
            if args.bpe:
                phon_dict[word] = to_unicode_list(list_of_phons)
            else:
                phon_dict[word] = list_of_phons
    if args.disamb:
        # Add ' #<n>' suffixes to repeated pronunciations (homophone disambiguation).
        duplicates = {}
        for (word, phones) in sorted(phon_dict.items()):
            # NOTE(review): `phones` is the same list being mutated below
            # (remove/insert while iterating) -- can skip elements; verify.
            for phone in phones:
                if (phone in duplicates):
                    phon_dict[word].remove(phone)
                    phon_dict[word].insert(0, ('%s #%s' % (phone, duplicates[phone])))
                    duplicates[phone] += 1
                else:
                    duplicates[phone] = 1
    with open('word_phone.txt', 'w') as f:
        print(phon_dict, file=f)
    # Dump phon_dict in a crude dict-text form, then re-read it to produce one
    # pronunciation list per line (first 6 entries skipped).
    with open('file_to_map.txt', 'w') as file:
        file.write('{\n')
        for (key, value) in phon_dict.items():
            file.write('{}:{},\n'.format(key, value))
        file.write('}\n')
    with open('file_to_map.txt', 'r') as inp:
        with open('file_output.txt', 'w') as out:
            for i in range(6):
                inp.readline()
            for line in inp:
                if line.startswith('}'):
                    break
                line = line.replace(',', '')
                (_, right) = line.split(':')
                lst = right[1:(- 2)].split(',')
                lst = [x.replace("'", '') for x in lst]
                output = ' '.join(lst)
                out.write('{}\n'.format(output))
    # Re-segment the BPE-merged phone sequences: '@@' marks a continued subword.
    with open(phones_bpe_file, 'r') as file_r:
        res_ = []
        for line in file_r:
            ls = line.strip().split()
            phon_seq = []
            merge = []
            for item in ls:
                if ('@@' in item):
                    merge.append(item)
                else:
                    merge.append(item)
                    phon_seq.append(' '.join(merge))
                    merge = []
            res_.append(phon_seq)
    # Overwrite the pronunciations of entries 4.. with the BPE phone sequences,
    # relying on dict insertion order.
    dict_tmp = list(phon_dict.items())
    for (idx, x) in enumerate(res_):
        dict_tmp[(4 + idx)] = (dict_tmp[(4 + idx)][0], x)
    phon_dict = dict(dict_tmp)
    with open('unicode_phone.txt', 'w') as f:
        print(phon_dict, file=f)
    # Emit one lemma per distinct lower-cased orthography; extra spellings of the
    # same word only add an <orth> to the existing lemma.
    if args.char:
        orth_to_lemma = {}
        for (idx, elem) in enumerate(temp_lemmas):
            elem_lower = elem.lower()
            if (elem_lower in orth_to_lemma):
                lemma = orth_to_lemma[elem_lower]
            else:
                lemma = ET.SubElement(lex_root, 'lemma')
                orth_to_lemma[elem_lower] = lemma
                # Pronunciation is the space-separated character sequence.
                res = ''
                for char in list(elem):
                    res += char
                    res += ' '
                phon = ET.SubElement(lemma, 'phon')
                phon.text = res.strip()
            orth = ET.SubElement(lemma, 'orth')
            orth.text = elem
    else:
        orth_to_lemma = {}
        for (idx, elem) in enumerate(temp_lemmas):
            elem_lower = elem.lower()
            if (elem_lower in orth_to_lemma):
                lemma = orth_to_lemma[elem_lower]
            else:
                lemma = ET.SubElement(lex_root, 'lemma')
                orth_to_lemma[elem_lower] = lemma
                assert (elem_lower in phon_dict)
                for p in phon_dict[elem_lower]:
                    phon = ET.SubElement(lemma, 'phon')
                    phon.text = p
            orth = ET.SubElement(lemma, 'orth')
            orth.text = elem
    if args.output:
        my_data = minidom.parseString(ET.tostring(lex_root)).toprettyxml(indent=' ')
        with open(args.output, 'w') as f:
            f.write(my_data)
|
def run(args, **kwargs):
    """Echo and execute a command (subprocess.run, check=True); exit(1) on Ctrl-C."""
    import subprocess
    kwargs = kwargs.copy()
    # Don't dump huge stdin payloads when echoing the call.
    shown_kwargs = {k: (v if (k != 'input') else '...') for (k, v) in kwargs.items()}
    print(('$ %s' % ' '.join(args)), shown_kwargs)
    try:
        subprocess.run(args, **kwargs, check=True)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
        sys.exit(1)
|
def run(args, **kwargs):
    """Echo and execute a command via subprocess.run(check=True); exit(1) on Ctrl-C."""
    import subprocess
    kwargs = kwargs.copy()
    print(('$ %s' % ' '.join(args)),
          {key: ('...' if key == 'input' else value) for key, value in kwargs.items()})
    try:
        subprocess.run(args, **kwargs, check=True)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
        sys.exit(1)
|
def main():
    """Convert a RETURNN search output file ('py' dict format) of subword/phone
    sequences into whole-word sequences and write them to --out."""
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format")
    argparser.add_argument('--out', required=True, help='output filename')
    args = argparser.parse_args()
    d = eval(open(args.file, 'r').read())
    assert isinstance(d, dict)
    assert not os.path.exists(args.out)
    map_special = {'L': '[LAUGHTER]', 'N': '[NOISE]', 'V': '[VOCALIZED-NOISE]', 'S': '[SILENCE]'}
    with open(args.out, 'w') as out:
        out.write('{\n')
        for seq_tag, txt in sorted(d.items()):
            # '<eow>' delimits words if present, otherwise plain spaces.
            separator = ' ' if '<eow>' not in txt else '<eow>'
            tokens = [w.strip() for w in txt.split(separator)]
            mapped = [map_special[t] if t in map_special else t.replace(' ', '') for t in tokens]
            seq = ' '.join(mapped).strip()
            out.write('%r: %r,\n' % (seq_tag, seq))
        out.write('}\n')
    print('# Done.')
|
def main():
    """Convert a RETURNN search output of phone sequences back into words using a
    reverse lexicon lookup (with '#<n>' disambiguation suffixes) and write the
    result to --out."""
    argparser = ArgumentParser()
    argparser.add_argument('file', help="by Returnn search, in 'py' format")
    argparser.add_argument('--out', required=True, help='output filename')
    args = argparser.parse_args()
    # NOTE(review): eval of the input file -- trusted search output only.
    d = eval(open(args.file, 'r').read())
    assert isinstance(d, dict)
    assert (not os.path.exists(args.out))
    lex_out = {}
    # Hard-coded site-specific lexicon path.
    lexicon_file = '/work/asr4/zeyer/backup/switchboard/tuske-train.lex.v1_0_3.ci.gz'
    lexicon = Lexicon(lexicon_file)
    for word in lexicon.lemmas:
        list_phones = []
        for item in lexicon.lemmas[word]['phons']:
            list_phones.append(item['phon'])
        lex_out[word] = list_phones
    # Add ' #<n>' suffixes to repeated pronunciations (homophone disambiguation).
    duplicates = {}
    for (word, phones) in sorted(lex_out.items()):
        # NOTE(review): `phones` is the same list being mutated below
        # (remove/insert while iterating) -- can skip elements; verify.
        for phone in phones:
            if (phone in duplicates):
                lex_out[word].remove(phone)
                lex_out[word].insert(0, ('%s #%s' % (phone, duplicates[phone])))
                duplicates[phone] += 1
            else:
                duplicates[phone] = 1
    # Reverse map: first pronunciation -> word.
    rev_lex = {v[0]: k for (k, v) in lex_out.items() if (len(v) > 0)}
    with open(args.out, 'w') as out:
        out.write('{\n')
        for (seq_tag, txt) in sorted(d.items()):
            # '<eow>' separates the per-word phone groups in the search output.
            seq = [w.strip() for w in txt.split('<eow>')]
            seq = ' '.join([(rev_lex[x] if (x in rev_lex) else '[UNKNOWN]') for x in seq if (len(x) > 0)]).strip()
            out.write(('%r: %r,\n' % (seq_tag, seq)))
        out.write('}\n')
    print('# Done.')
|
def parse_vocab(filename):
    """
    :param str filename:
    :rtype: dict[str,str]
    :return: phone->unicode
    """
    # NOTE(review): eval executes arbitrary code from the file; only use on
    # trusted vocab files (ast.literal_eval would be the safe alternative).
    return eval(open(filename, 'r').read())
|
def main():
    """Map recognized unicode-phone sequences back to words: either via
    PhoneMapper disambiguation (--disamb) or via a direct reverse lookup
    built from the lexicon's first pronunciations."""
    args_parser = ArgumentParser()
    args_parser.add_argument('--lexicon', required=True)
    args_parser.add_argument('--input', required=True)
    args_parser.add_argument('--disamb_map', required=True)
    args_parser.add_argument('--disamb', action='store_true')
    args_parser.add_argument('--output', required=True)
    args = args_parser.parse_args()
    lexicon_fn = args.lexicon
    disamb_map_fn = args.disamb_map
    scoring_words = args.input
    map_dict = parse_vocab(disamb_map_fn)
    # NOTE(review): eval of the input file -- trusted recognizer output only.
    segments = eval(open(scoring_words, 'r').read())
    unicode_to_phone = {value: key for (key, value) in map_dict.items()}
    phone_mapper = PhoneUtils.PhoneMapper(lexicon_filename=lexicon_fn, phone_unicode_map_filename=disamb_map_fn)
    if args.disamb:
        result_disamb = phone_mapper.map_phones_to_word_on_corpus(segments)
        PhoneUtils.save_corpus_segments_to_file(args.output, result_disamb)
    else:
        # Translate each unicode symbol back to its phone string, per segment.
        result = {}
        for (seg_tag, word_seq) in segments.items():
            phones_seq = ''
            for unicode in word_seq:
                phones_seq += unicode_to_phone[unicode]
            result[seg_tag] = phones_seq
        lex = Lexicon(lexicon_fn)
        # Build concatenated-phones -> candidate words from the first pronunciation.
        vocab_dict = {}
        words_map = {}
        for word in lex.lemmas:
            if (len(lex.lemmas[word]['phons']) > 0):
                phonemes = lex.lemmas[word]['phons'][0]['phon']
                phone_concat = ''.join(phonemes.split())
                vocab_dict[word] = phone_concat
                words_map.setdefault(phone_concat, []).append(word)
        # Replace each phone group by the first matching word, or '[UNK]'.
        for (k, v) in result.items():
            list_phones = v.split()
            new_value = ''
            for phones in list_phones:
                if (phones in words_map.keys()):
                    new_value += (' ' + words_map[phones][0])
                else:
                    new_value += (' ' + '[UNK]')
            result[k] = new_value
        PhoneUtils.save_corpus_segments_to_file(args.output, result)
|
def get_filename(config):
    """Find '<base_dir>/config-train/<config>.config' among the known base dirs."""
    for directory in base_dirs:
        path = '%s/config-train/%s.config' % (directory, config)
        print(path)
        if os.path.exists(path):
            return path
    raise Exception('not found: %s' % config)
|
def main():
    """Fetch each configured train config into the script's directory if missing."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_dir)
    for base_dir in base_dirs:
        assert os.path.exists(base_dir)
    for config in configs:
        found = get_filename(config)
        destination = '%s.config' % config
        if not os.path.exists(destination):
            shutil.copy(found, destination)
|
class ConcatSwitchboard(Job):
'\n Based on a STM file, create concatenated dataset.\n '
    @classmethod
    def create_all_for_num(cls, num, register_output_prefix=None, experiments=None):
        """
        Via ``ScliteHubScoreJob.RefsStmFiles``.

        Creates one job per original scoring corpus and registers the
        concatenated references (and optionally a ConcatSeqsDataset) under the
        '<corpus>_concat<num>' name.

        :param int num:
        :param str|None register_output_prefix: if set, will register output
        :param recipe.returnn.experiments.MultipleExperimentsFromConfigs|None experiments:
        """
        from .scoring import ScliteHubScoreJob
        assert ScliteHubScoreJob.RefsStmFiles
        for corpus_name in ScliteHubScoreJob.OrigCorpusNames:
            stm_path = ScliteHubScoreJob.RefsStmFiles[corpus_name]
            job = cls(corpus_name=corpus_name, stm=stm_path, num=num)
            if register_output_prefix:
                tk.register_output(('%s_%s_concat%i.stm' % (register_output_prefix, corpus_name, num)), job.out_stm)
            # Make the concatenated variant scorable like the original corpus.
            ScliteHubScoreJob.RefsStmFiles[('%s_concat%i' % (corpus_name, num))] = job.out_stm
            ScliteHubScoreJob.ResultsSubsets[('%s_concat%i' % (corpus_name, num))] = ScliteHubScoreJob.ResultsSubsets[corpus_name]
            if experiments:
                experiments.register_dataset(('%s_concat%i' % (corpus_name, num)), ExplicitDataset(returnn_opts={'class': 'ConcatSeqsDataset', 'dataset': None, 'seq_ordering': 'sorted_reverse', 'seq_list_file': job.out_concat_seq_tags, 'seq_len_file': job.out_orig_seq_lens_py}))
def __init__(self, corpus_name, stm, num):
'\n :param str corpus_name: e.g. "hub5_00"\n :param Path stm:\n :param int num: Concatenate `num` consecutive seqs within a recording\n '
corpus_name_map = {'hub5e_00': 'hub5_00', 'hub5e_01': 'hub5_01'}
self.corpus_name = corpus_name_map.get(corpus_name, corpus_name)
self.stm = stm
self.num = num
self.out_orig_seq_tags = self.output_path('orig_seq_tags.txt')
self.out_orig_seq_lens = self.output_path('orig_seq_lens.txt')
self.out_orig_seq_lens_py = self.output_path('orig_seq_lens.py.txt')
self.out_concat_seq_tags = self.output_path('concat_seq_tags.txt')
self.out_concat_seq_lens = self.output_path('concat_seq_lens.txt')
self.out_concat_seq_lens_py = self.output_path('concat_seq_lens.py.txt')
self.out_stm = self.output_path('concat_ref.stm')
def run(self):
'\n ;; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n ;; hub5_00/en_4156a/1 (301.850000-302.480000)\n en_4156a 1 301.850000 0.283500 oh 0.99\n en_4156a 1 302.133500 0.283500 yeah 0.99\n ;; hub5_00/en_4156a/2 (304.710000-306.720000)\n en_4156a 1 304.710000 0.201000 well 0.99\n '
import re
from decimal import Decimal
self_job = self
orig_seqs = []
concatenated_seqs = []
print('Corpus:', self.corpus_name)
print('Input ref STM:', self.stm)
print(('Concatenate up to %i seqs.' % self.num))
class ConcatenatedSeq():
def __init__(self):
self.rec_tag = tag
self.rec_tag2 = tag2
self.seq_tags = [full_seq_tag]
self.flags = flags
self.txt = txt
self.start = start
self.end = end
@property
def seq_tag(self):
return ';'.join(self.seq_tags)
@property
def num_seqs(self):
return len(self.seq_tags)
@property
def duration(self):
return (self.end - self.start)
def can_add(self):
if (self.num_seqs >= self_job.num):
return False
return (tag == self.rec_tag)
def add(self):
assert (tag == self.rec_tag)
assert (flags.lower() == self.flags.lower())
assert (full_seq_tag not in self.seq_tags)
self.seq_tags.append(full_seq_tag)
assert (end > self.end)
self.end = end
self.txt = ('%s %s' % (self.txt, txt))
def write_to_stm(self):
out_stm_file.write((';; _full_seq_tag "%s"\n' % self.seq_tag))
out_stm_file.write(('%s 1 %s %s %s <%s> %s\n' % (self.rec_tag, self.rec_tag2, self.start, self.end, self.flags, self.txt)))
with generic_open(self.out_stm.get_path(), 'w') as out_stm_file:
seq_idx_in_tag = None
last_tag = None
last_end = None
first_seq = True
have_extended = False
extended_seq_tag = None
for line in generic_open(self.stm.get_path()).read().splitlines():
line = line.strip()
if (not line):
continue
if line.startswith(';; _full_seq_tag '):
if first_seq:
have_extended = True
else:
assert have_extended
assert (not extended_seq_tag)
m = re.match('^;; _full_seq_tag "(.*)"$', line)
assert m, ('unexpected line: %r' % line)
(extended_seq_tag,) = m.groups()
continue
if line.startswith(';;'):
out_stm_file.write(('%s\n' % line))
continue
m = re.match('^([a-zA-Z0-9_]+)\\s+1\\s+([a-zA-Z0-9_]+)\\s+([0-9.]+)\\s+([0-9.]+)\\s+<([a-zA-Z0-9,\\-]+)>(.*)$', line)
assert m, ('unexpected line: %r' % line)
(tag, tag2, start_s, end_s, flags, txt) = m.groups()
txt = txt.strip()
first_seq = False
if (txt == 'ignore_time_segment_in_scoring'):
continue
if (not txt):
continue
start = Decimal(start_s)
end = Decimal(end_s)
assert (start < end), ('line: %r' % line)
if (tag != last_tag):
seq_idx_in_tag = 1
last_tag = tag
else:
assert (start >= (last_end - Decimal('0.01'))), ('line: %r' % line)
assert (end > last_end), ('line: %r' % line)
seq_idx_in_tag += 1
last_end = end
if extended_seq_tag:
full_seq_tag = extended_seq_tag
extended_seq_tag = None
else:
full_seq_tag = ('%s/%s/%i' % (self.corpus_name, tag, seq_idx_in_tag))
orig_seqs.append(ConcatenatedSeq())
if ((not concatenated_seqs) or (not concatenated_seqs[(- 1)].can_add())):
if concatenated_seqs:
concatenated_seqs[(- 1)].write_to_stm()
concatenated_seqs.append(ConcatenatedSeq())
else:
concatenated_seqs[(- 1)].add()
assert concatenated_seqs
concatenated_seqs[(- 1)].write_to_stm()
def write_seq_tags(seqs, output_filename):
'\n :param list[ConcatenatedSeq] seqs:\n :param str output_filename:\n '
with generic_open(output_filename, 'w') as f:
for seq in seqs:
assert isinstance(seq, ConcatenatedSeq)
f.write(('%s\n' % seq.seq_tag))
def write_seq_lens(seqs, output_filename):
'\n :param list[ConcatenatedSeq] seqs:\n :param str output_filename:\n '
with generic_open(output_filename, 'w') as f:
for seq in seqs:
assert isinstance(seq, ConcatenatedSeq)
f.write(('%s\n' % seq.duration))
def write_seq_lens_py(seqs, output_filename):
'\n :param list[ConcatenatedSeq] seqs:\n :param str output_filename:\n '
with generic_open(output_filename, 'w') as f:
f.write('{\n')
for seq in seqs:
assert isinstance(seq, ConcatenatedSeq)
f.write(('%r: %s,\n' % (seq.seq_tag, seq.duration)))
f.write('}\n')
def get_seq_lens_numpy(seqs):
'\n :param list[ConcatenatedSeq] seqs:\n :rtype: numpy.ndarray\n '
return numpy.array([float(seq.duration) for seq in seqs])
def get_vector_stats(v):
'\n :param numpy.ndarray v:\n :rtype: str\n '
assert (len(v.shape) == 1)
v = v.astype(numpy.float)
return ('#num %i, min-max %s-%s, mean %s, std %s' % (len(v), numpy.min(v), numpy.max(v), numpy.mean(v), numpy.std(v)))
orig_seq_lens_np = get_seq_lens_numpy(orig_seqs)
concatenated_seq_lens_np = get_seq_lens_numpy(concatenated_seqs)
print('Original seq lens:', get_vector_stats(orig_seq_lens_np))
print('Concatenated seq lens:', get_vector_stats(concatenated_seq_lens_np))
write_seq_tags(orig_seqs, self.out_orig_seq_tags.get_path())
write_seq_lens(orig_seqs, self.out_orig_seq_lens.get_path())
write_seq_lens_py(orig_seqs, self.out_orig_seq_lens_py.get_path())
write_seq_tags(concatenated_seqs, self.out_concat_seq_tags.get_path())
write_seq_lens(concatenated_seqs, self.out_concat_seq_lens.get_path())
write_seq_lens_py(concatenated_seqs, self.out_concat_seq_lens_py.get_path())
def tasks(self):
(yield Task('run', rqmt={'cpu': 1, 'mem': 1, 'time': 0.1}, mini_task=True))
|
def score_hyps(experiment, dataset, hyps):
    """
    Creates a sclite scoring job for the given hypotheses.

    :param returnn.experiments.ExperimentFromConfig|None experiment: (unused)
    :param str dataset: from experiments.dataset_inference_keys
    :param Path hyps:
    :rtype: list[Path]
    """
    job = scoring.ScliteHubScoreJob.create_by_corpus_name(name=dataset, hyps=hyps)
    return job.output_results_txts_list
|
class CalculateWordErrorRateJob(Job):
    """
    Computes the word error rate (WER) between two Python-txt-format files
    (dict seq-tag -> text) via RETURNN's calculate-word-error-rate.py tool.
    """
    def __init__(self, refs, hyps):
        """
        :param Path refs: Python txt format, seq->txt, whole words
        :param Path hyps: Python txt format, seq->txt, whole words
        """
        self.refs = refs
        self.hyps = hyps
        self.output_wer = self.output_path('wer.txt')
    def run(self):
        import subprocess
        args = [('%s/returnn/tools/calculate-word-error-rate.py' % tk.gs.BASE_DIR), '--expect_full', '--hyps', self.hyps.get_path(), '--refs', self.refs.get_path(), '--out', self.output_wer.get_path()]
        print(('$ %s' % ' '.join(args)))
        subprocess.check_call(args)
        # Bugfix: close the output file after reading (the handle was
        # previously left open).
        with open(self.output_wer.get_path()) as f:
            wer = float(f.read())
        print(('WER: %f %%' % wer))
    def tasks(self):
        (yield Task('run', rqmt={'cpu': 1, 'mem': 1, 'time': 0.1}, mini_task=True))
|
class ScliteJob(Job):
    """
    Run sclite.

    Converts refs/hyps from Python-txt dict format (seq-tag -> text) into
    STM/CTM files with synthetic timestamps (one second per seq) and runs the
    SCTK ``sclite`` tool on them.
    NOTE(review): ``run`` ends with ``raise NotImplementedError`` -- parsing
    of the sclite report files is not implemented yet.
    """
    def __init__(self, name, refs, hyps):
        """
        :param str name: e.g. dataset name (test-clean or so), just for the output reporting, not used otherwise
        :param Path refs: Python txt format, seq->txt, whole words
        :param Path hyps: Python txt format, seq->txt, whole words
        """
        self.name = name
        self.refs = refs
        self.hyps = hyps
        self.output_sclite_dir = self.output_path('sclite-out', directory=True)
    @staticmethod
    def create_stm(name, source_filename, target_filename):
        """
        Writes an STM reference file with synthetic timing: all seqs sorted by
        tag, seq i covering the interval (i + 0.01, i + 0.99).

        :param str name:
        :param str source_filename: Python txt format
        :param str target_filename:
        :return: target_filename
        :rtype: str
        """
        '\n ;; Reference file for english , generated Wed Apr 5 09:29:10 EDT 2000\n ;; CATEGORY "0" "" ""\n ;; LABEL "O" "Overall" "The Complete Test Set"\n ;; CATEGORY "1" "Corpus" ""\n ;; LABEL "EN" "Callhome" "Callhome conversations"\n ;; LABEL "SW" "Switchboard" "Switchboard conversations"\n ;; CATEGORY "2" "Sex" ""\n ;; LABEL "F" "Female" "Female Caller"\n ;; LABEL "M" "Male" "Male Caller"\n ;; CATEGORY "3" "Corpus/Sex" ""\n ;; LABEL "EN-F" "Callhome Female" "Callhome Female Caller"\n ;; LABEL "EN-M" "Callhome Male" "Callhome Male Caller"\n ;; LABEL "SW-F" "Switchboard Female" "Switchboard Female Caller"\n ;; LABEL "SW-M" "Switchboard Male" "Switchboard Male Caller"\n en_4156a 1 en_4156_A 301.85 302.48 <O,en,F,en-F> oh yeah \n en_4156a 1 en_4156_A 304.71 306.72 <O,en,F,en-F> well i am going to have mine in two more classes \n en_4156a 1 en_4156_A 307.63 311.16 <O,en,F,en-F> no i am not well then i have to take my exams my orals but \n en_4156a 1 en_4156_A 313.34 315.37 <O,en,F,en-F> that is kind of what i would like to do \n en_4156a 1 en_4156_A 316.83 319.20 <O,en,F,en-F> i might even want to go on and get my p h d \n en_4156a 1 en_4156_A 321.55 322.16 <O,en,F,en-F> it is just that \n '
        # NOTE(review): eval() on the refs file -- assumes trusted input.
        py_txt = eval(generic_open(source_filename).read())
        assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
        (example_key, example_value) = next(iter(py_txt.items()))
        assert (isinstance(example_key, str) and isinstance(example_value, str))
        with generic_open(target_filename, 'w') as f:
            f.write(';; CATEGORY "0" "" ""\n')
            f.write((';; LABEL "O" "%s" ""\n' % name))
            start = 0.0
            for (seq_tag, raw_txt) in sorted(py_txt.items()):
                f.write(('%s 1 rec %f %f <O> %s\n' % (seq_tag, (start + 0.01), (start + 0.99), raw_txt)))
                start += 1
        return target_filename
    @staticmethod
    def create_ctm(source_filename, target_filename):
        """
        Writes a CTM hypotheses file with synthetic timing that matches
        :func:`create_stm` (words spread evenly within each seq's second).

        :param str source_filename: Python txt format
        :param str target_filename:
        :return: target_filename
        :rtype: str
        """
        '\n ;; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n ;; hub5_00/en_4156a/1 (301.850000-302.480000)\n en_4156a 1 301.850000 0.283500 oh 0.99\n en_4156a 1 302.133500 0.283500 yeah 0.99\n ;; hub5_00/en_4156a/2 (304.710000-306.720000)\n en_4156a 1 304.710000 0.201000 well 0.99\n '
        py_txt = eval(generic_open(source_filename).read())
        assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
        (example_key, example_value) = next(iter(py_txt.items()))
        assert (isinstance(example_key, str) and isinstance(example_value, str))
        with generic_open(target_filename, 'w') as f:
            f.write(';; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n')
            start = 0.0
            for (seq_tag, raw_txt) in sorted(py_txt.items()):
                f.write((';; %s (%f-%f)\n' % (seq_tag, (start + 0.01), (start + 0.99))))
                if raw_txt:
                    # Distribute the words evenly over 0.9s of the seq interval.
                    words = raw_txt.split()
                    word_duration = (0.9 / len(words))
                    for i in range(len(words)):
                        f.write(('%s 1 %f %f %s\n' % (seq_tag, ((start + 0.01) + (i * word_duration)), word_duration, words[i])))
                start += 1
        return target_filename
    def run(self):
        # Creates refs.stm / hyps.ctm in the cwd, then runs sclite on them.
        stm_filename = self.create_stm(self.name, self.refs.get_path(), 'refs.stm')
        ctm_filename = self.create_ctm(self.hyps.get_path(), 'hyps.ctm')
        args = [('%s/SCTK/bin/sclite' % tk.gs.BASE_DIR), '-r', stm_filename, 'stm', '-h', ctm_filename, 'ctm', '-e', 'utf-8', '-o', 'all', '-o', 'dtl', '-o', 'lur', '-n', 'sclite', '-O', self.output_sclite_dir.get_path()]
        print(('$ %s' % ' '.join(args)))
        # Filter the very verbose per-file progress lines from sclite stdout.
        for sclite_stdout_line in subprocess.check_output(args).splitlines():
            if (not sclite_stdout_line.strip()):
                continue
            if (b'Performing alignments for file' in sclite_stdout_line):
                continue
            if (b'Segments For Channel' in sclite_stdout_line):
                continue
            print(sclite_stdout_line.decode('utf8'))
        # TODO: parse the sclite reports and provide job outputs.
        raise NotImplementedError
    def tasks(self):
        (yield Task('run', rqmt={'cpu': 1, 'mem': 1, 'time': 0.1}, mini_task=True))
|
class ScliteHubScoreJob(Job):
    """
    Wraps the SCTK hubscr.pl script, which is used to calculate the WER for Switchboard, Hub 5'00, Hub 5'01, rts03.
    """
    # Maps alias names (e.g. "dev") to the canonical corpus names.
    CorpusNameMap = {'dev': 'hub5e_00'}
    OrigCorpusNames = ['hub5e_00', 'hub5e_01', 'rt03s']
    # Per corpus: which subsets appear in the scoring report.
    ResultsSubsets = {'hub5e_00': ['Callhome', 'Switchboard', 'Overall'], 'hub5e_01': ['Switchboard', 'Switchboard-2 Phase III', 'Switchboard-Cell', 'Overall'], 'rt03s': ['Swbd', 'Fisher', 'Overall']}
    # Both of these must be filled/set by setup code before usage.
    RefsStmFiles = {}
    GlmFile = None
    @classmethod
    def create_by_corpus_name(cls, name, hyps):
        """
        :param str name: "hub5e_00", "hub5e_01" or "rt03s"
        :param Path hyps: Python txt format, seq->txt, whole words
        """
        name = cls.CorpusNameMap.get(name, name)
        assert ((name in cls.OrigCorpusNames) or (name in cls.RefsStmFiles))
        assert (name in cls.RefsStmFiles), 'make sure you fill this dict before usage'
        return cls(name=name, stm=cls.RefsStmFiles[name], hyps=hyps)
    def __init__(self, name, stm, hyps):
        """
        :param str name: "hub5e_00", "hub5e_01" or "rt03s"
        :param Path stm: reference file (STM format)
        :param Path hyps: Python txt format, seq->txt, whole words
        """
        assert self.GlmFile, 'make sure you set this before usage'
        self.name = name
        self.hyps = hyps
        self._ref_stm = stm
        self._glm = self.GlmFile
        # Snapshot the class-level dict onto the instance (so later changes to
        # the class attribute do not affect this job).
        self.ResultsSubsets = self.ResultsSubsets
        self.output_dir = self.output_path('sclite-out', directory=True)
        self.output_results_txt = self.output_path('results.txt')
        self.output_results_txts = {subset: self.output_path(('result-%s.txt' % subset.replace(' ', '-'))) for subset in self.ResultsSubsets[name]}
        self.output_results_txts_list = [self.output_results_txts[subset] for subset in self.ResultsSubsets[name]]
        self.output_wer = self.output_path('wer.txt')
    @staticmethod
    def create_ctm(name, ref_stm_filename, source_filename, target_filename):
        """
        Creates a CTM hypotheses file by walking the reference STM to recover
        the per-seq timing, and spreading each seq's hypothesis words evenly
        over the reference segment. Every hyp seq must be matched (popped);
        at the end the hyp dict must be empty (up to a known exception list).

        :param str name: e.g. "hub5_00"
        :param str ref_stm_filename:
        :param str source_filename: Python txt format
        :param str target_filename: ctm file
        :return: target_filename
        :rtype: str
        """
        # Seq tags use slightly different corpus names.
        corpus_name_map = {'hub5e_00': 'hub5_00', 'hub5e_01': 'hub5_01'}
        '\n ;; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n ;; hub5_00/en_4156a/1 (301.850000-302.480000)\n en_4156a 1 301.850000 0.283500 oh 0.99\n en_4156a 1 302.133500 0.283500 yeah 0.99\n ;; hub5_00/en_4156a/2 (304.710000-306.720000)\n en_4156a 1 304.710000 0.201000 well 0.99\n '
        import re
        import decimal
        from decimal import Decimal
        py_txt = eval(generic_open(source_filename).read())
        assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
        (example_key, example_value) = next(iter(py_txt.items()))
        assert (isinstance(example_key, str) and isinstance(example_value, str))
        with generic_open(target_filename, 'w') as f:
            f.write(';; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n')
            seq_idx_in_tag = None
            last_tag = None
            last_end = None
            first_seq = True
            have_extended = False  # whether the ref STM carries _full_seq_tag comments
            extended_seq_tag = None  # pending _full_seq_tag for the next entry
            for line in generic_open(ref_stm_filename).read().splitlines():
                line = line.strip()
                if (not line):
                    continue
                if line.startswith(';; _full_seq_tag '):
                    # Either all entries have such a tag, or none does.
                    if first_seq:
                        have_extended = True
                    else:
                        assert have_extended
                    assert (not extended_seq_tag)
                    m = re.match('^;; _full_seq_tag "(.*)"$', line)
                    assert m, ('unexpected line: %r' % line)
                    (extended_seq_tag,) = m.groups()
                    continue
                if line.startswith(';;'):
                    continue
                m = re.match('^([a-zA-Z0-9_]+)\\s+1\\s+([a-zA-Z0-9_]+)\\s+([0-9.]+)\\s+([0-9.]+)\\s+<([a-zA-Z0-9,\\-]+)>(.*)$', line)
                assert m, ('unexpected line: %r' % line)
                (tag, tag2, start_s, end_s, flags, txt) = m.groups()
                txt = txt.strip()
                first_seq = False
                if (txt == 'ignore_time_segment_in_scoring'):
                    continue
                if (not txt):
                    continue
                # Decimal keeps the exact textual timestamps.
                start = Decimal(start_s)
                end = Decimal(end_s)
                duration = (end - start)
                assert (duration > 0.0)
                if (tag != last_tag):
                    seq_idx_in_tag = 1
                    last_tag = tag
                else:
                    assert (start >= (last_end - Decimal('0.01'))), ('line: %r' % line)
                    seq_idx_in_tag += 1
                last_end = end
                if extended_seq_tag:
                    full_seq_tag = extended_seq_tag
                    extended_seq_tag = None
                else:
                    full_seq_tag = ('%s/%s/%i' % (corpus_name_map.get(name, name), tag, seq_idx_in_tag))
                # Pop the matching hypothesis; every ref seq must have one.
                assert (full_seq_tag in py_txt), ('line: %r' % line)
                hyp_raw_txt = py_txt.pop(full_seq_tag)
                words = (hyp_raw_txt.split() if hyp_raw_txt else [])
                # Drop bracketed tokens like [NOISE] -- not scored.
                words = [w for w in words if ((w[:1] != '[') and (w[(- 1):] != ']'))]
                f.write((';; %s (%s-%s)\n' % (tag, start, end)))
                f.write((';; full tag: %s\n' % full_seq_tag))
                f.write((';; ref: %s\n' % txt))
                if words:
                    # Spread words evenly over the reference segment.
                    word_duration = (duration / len(words))
                    for i in range(len(words)):
                        f.write(('%s 1 %.3f %.3f %s\n' % (tag, ((start + Decimal('0.01')) + (i * word_duration)), (word_duration * Decimal('0.9')), words[i])))
            assert (not extended_seq_tag)
            if (not have_extended):
                # Known segs which are not in the standard ref STM.
                allowed_remaining = {'hub5_00/sw_4601a/28', 'hub5_00/sw_4601a/29'}
                for tag in allowed_remaining:
                    if (tag in py_txt):
                        py_txt.pop(tag)
            # All hypotheses must have been consumed.
            assert (not py_txt)
        return target_filename
    @staticmethod
    def parse_lur_file(filename):
        """
        Parses the sclite LUR report (an ASCII table) and extracts the WER
        per subset, plus "Overall".

        :param str filename:
        :rtype: dict[str,float]
        """
        import re
        # Small state machine over the table lines:
        # 0: before the table, 1: header row (determine column positions),
        # 2: header separator, 3: sub-header row with the corpus/subset names,
        # 4: skip until the '|=' separator, 5: the single results row, 6: done.
        state = 0
        results = {}
        corpora = None
        # Column boundary positions: p0 | Overall | p1 | subsets | p2
        (p0, p1, p2) = (None, None, None)
        for line in generic_open(filename).read().splitlines():
            line = line.strip()
            if (state == 0):
                if (line[:2] == '|-'):
                    state = 1
            elif (state == 1):
                assert (line[:2] == '| ')
                p = line.find('Corpus')
                assert (p > 0)
                (p1, p2) = (line.rfind('|', 0, p), line.find('|', p))
                assert (0 < p1 < p < p2)
                assert (line[(p1 - 1):(p1 + 1)] == '||')
                p0 = line.rfind('|', 0, (p1 - 1))
                assert (0 < p0 < p1)
                state = 2
            elif (state == 2):
                assert (line[:2] == '|-')
                state = 3
            elif (state == 3):
                assert (line[(p0 + 1):(p1 - 1)].strip() == 'Overall')
                corpora = line[(p1 + 1):p2].split('|')
                corpora = [c.strip() for c in corpora]
                state = 4
            elif (state == 4):
                if (line[:2] == '|='):
                    state = 5
            elif (state == 5):
                results_ = line[(p1 + 1):p2].split('|')
                assert (len(results_) == len(corpora))
                results_ = {key: value.strip() for (key, value) in zip(corpora, results_)}
                results_['Overall'] = line[(p0 + 1):(p1 - 1)].strip()
                for (key, value) in list(results_.items()):
                    # Cell format: "[<num-seqs>] <wer>".
                    m = re.match('^\\[([0-9]+)\\]\\s+([0-9.]+)$', value)
                    assert m, ('line %r, key %r, value %r' % (line, key, value))
                    (_, wer) = m.groups()
                    float(wer)
                    results[key] = float(wer)
                state = 6
        assert (state == 6)
        return results
    def run(self):
        stm_filename = 'refs.stm'
        shutil.copy(self._ref_stm.get_path(), stm_filename)
        sclite_out_dir = self.output_dir.get_path()
        # Symlink the input hyps next to the outputs, for easier debugging.
        hyps_symlink_name = ('%s/input_words.txt' % os.path.dirname(sclite_out_dir))
        if self.hyps.path.endswith('.gz'):
            hyps_symlink_name += '.gz'
        if (not os.path.exists(hyps_symlink_name)):
            os.symlink(self.hyps.get_path(), hyps_symlink_name)
        ctm_filename = self.create_ctm(name=self.name, ref_stm_filename=stm_filename, source_filename=self.hyps.get_path(), target_filename=('%s/%s' % (sclite_out_dir, self.name)))
        args = [('%s/SCTK/bin/hubscr.pl' % tk.gs.BASE_DIR), '-p', ('%s/SCTK/bin' % tk.gs.BASE_DIR), '-V', '-l', 'english', '-h', 'hub5', '-g', self._glm, '-r', stm_filename, ctm_filename]
        print(('$ %s' % ' '.join(args)))
        # Filter the very verbose per-file progress lines.
        for sclite_stdout_line in subprocess.check_output(args).splitlines():
            if (not sclite_stdout_line.strip()):
                continue
            if (b'Performing alignments for file' in sclite_stdout_line):
                continue
            if (b'Segments For Channel' in sclite_stdout_line):
                continue
            print(sclite_stdout_line.decode('utf8'))
        results = self.parse_lur_file(('%s.filt.lur' % ctm_filename))
        print('Results:', results)
        with generic_open(self.output_results_txt.get_path(), 'w') as f:
            f.write(('%r\n' % (results,)))
        with generic_open(self.output_wer.get_path(), 'w') as f:
            f.write(('%f\n' % results['Overall']))
        for subset in self.ResultsSubsets[self.name]:
            with generic_open(self.output_results_txts[subset].get_path(), 'w') as f:
                dataset_name = (('%s: %s' % (self.name, subset)) if (subset != 'Overall') else self.name)
                wer = results[subset]
                f.write(("{'dataset': %r, 'keys': ['wer'], 'wer': %f}\n" % (dataset_name, wer)))
        # Compress all report files to save space.
        for fn in os.listdir(sclite_out_dir):
            self.sh(('gzip %s/%s' % (sclite_out_dir, fn)))
    def tasks(self):
        (yield Task('run', rqmt={'cpu': 1, 'mem': 1, 'time': 0.1}, mini_task=True))
|
def generic_open(filename, mode='r'):
'\n Wrapper around :func:`open`.\n Automatically wraps :func:`gzip.open` if filename ends with ``".gz"``.\n\n :param str filename:\n :param str mode: text mode by default\n :rtype: typing.TextIO|typing.BinaryIO\n '
if filename.endswith('.gz'):
import gzip
if ('b' not in mode):
mode += 't'
return gzip.open(filename, mode)
return open(filename, mode)
|
def hash_limited_len_name(name, limit=200):
    """
    :param str name:
    :param int limit:
    :return: name, maybe truncated (by hash) such that its len (in bytes) is <=200
    :rtype: str
    """
    encoded = name.encode('utf8')
    if len(encoded) < limit:
        return name  # short enough, keep unchanged
    # Truncation below counts characters, so only ASCII names are supported.
    assert len(encoded) == len(name)
    digest = sis_hash(encoded)
    shortened = '%s...%s' % (name[:limit - 3 - len(digest)], digest)
    assert len(shortened) == limit
    return shortened
|
def get_config_filename(config):
    """
    Searches all base dirs for the given train config and returns its path.
    Raises if no base dir contains it.
    """
    candidates = (('%s/config-train/%s.config' % (base_dir, config)) for base_dir in base_dirs)
    for fn in candidates:
        if os.path.exists(fn):
            return fn
    raise Exception(('not found: %s' % config))
|
def main():
    """
    Copies every referenced train config into this script's directory,
    unless a local copy already exists.
    """
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    for base_dir in base_dirs:
        assert os.path.exists(base_dir)
    for config in configs:
        source = get_config_filename(config)
        target = ('%s.config' % config)
        if os.path.exists(target):
            continue  # keep the existing local copy
        shutil.copy(source, target)
|
def logsumexp(*args):
    """
    Stable log sum exp: log(sum(exp(a) for a in args)),
    computed via the max-shift trick. Returns NEG_INF for all-NEG_INF input.
    """
    if all((a == NEG_INF) for a in args):
        return NEG_INF
    shift = max(args)
    total = sum(np.exp(a - shift) for a in args)
    return shift + np.log(total)
|
def log_softmax(acts, axis):
    """
    Numerically stable log-softmax of `acts` along the given axis
    (max is subtracted before exponentiation).
    """
    shifted = acts - np.max(acts, axis=axis, keepdims=True)
    log_norm = np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
    return shifted - log_norm
|
def forward_pass(log_probs, labels, blank, label_rep=False):
    """
    Forward (alpha) recursion over a monotonic transducer lattice where each
    time frame emits exactly one symbol (a label or blank).

    :param log_probs: shape (T, U, V); T input frames, U = len(labels) + 1
        output positions, V vocab size (incl. blank)
    :param labels: int sequence of length U - 1 (no blanks)
    :param int blank: blank index
    :param bool label_rep: additionally allow repeating the last label
        (consumes a time frame like blank does)
    :return: (alphas, log_likelihood); alphas has shape (S, U) where
        S = T - U + 2 (t counts emitted blanks, u emitted labels)
    """
    (T, U, _) = log_probs.shape
    S = ((T - U) + 2)
    alphas = np.zeros((S, U))
    # First row: only label emissions so far (no blanks).
    for u in range(1, U):
        alphas[(0, u)] = (alphas[(0, (u - 1))] + log_probs[((u - 1), (u - 1), labels[(u - 1)])])
    # First column: only blank emissions so far (no labels).
    for t in range(1, S):
        alphas[(t, 0)] = (alphas[((t - 1), 0)] + log_probs[((t - 1), 0, blank)])
    for t in range(1, S):
        for u in range(1, U):
            # Time frame t + u - 1 either emitted blank ("skip") coming from
            # (t-1, u), or emitted labels[u-1] ("emit") coming from (t, u-1).
            skip = (alphas[((t - 1), u)] + log_probs[(((t + u) - 1), u, blank)])
            emit = (alphas[(t, (u - 1))] + log_probs[(((t + u) - 1), (u - 1), labels[(u - 1)])])
            alphas[(t, u)] = logsumexp(emit, skip)
            if label_rep:
                # Bugfix: this previously indexed the 2D `alphas` with a
                # 3-tuple (`alphas[t - 1, u, log_probs[...]]`), raising
                # IndexError whenever label_rep=True; the `] +` was garbled
                # into `,`. A label repetition advances time but not the
                # label position, like the blank transition.
                # NOTE(review): whether the repeat score should be read at
                # output position u or u - 1 depends on the model -- confirm.
                same = (alphas[((t - 1), u)] + log_probs[(((t + u) - 1), (u - 1), labels[(u - 1)])])
                alphas[(t, u)] = logsumexp(alphas[(t, u)], same)
    return (alphas, alphas[((S - 1), (U - 1))])
|
def backward_pass(log_probs, labels, blank):
    """
    Backward (beta) recursion, the mirror image of :func:`forward_pass`.

    :param log_probs: shape (T, U, V); U = len(labels) + 1
    :param labels: int sequence of length U - 1
    :param int blank: blank index
    :return: (betas, log_likelihood); betas has shape (S, U) with
        S = T - U + 2, and betas[0, 0] equals the total log-likelihood
    """
    (T, U, _) = log_probs.shape
    S = ((T - U) + 2)
    S1 = (S - 1)  # last blank-count index
    U1 = (U - 1)  # last label-position index
    betas = np.zeros((S, U))
    # Last row: only label emissions remain.
    for i in range(1, U):
        u = (U1 - i)
        betas[(S1, u)] = (betas[(S1, (u + 1))] + log_probs[((T - i), u, labels[u])])
    # Last column: only blank emissions remain.
    for i in range(1, S):
        t = (S1 - i)
        betas[(t, U1)] = (betas[((t + 1), U1)] + log_probs[((T - i), U1, blank)])
    # Interior: combine the blank ("skip") and label ("emit") continuations.
    for i in range(1, S):
        t = (S1 - i)
        for j in range(1, U):
            u = (U1 - j)
            skip = (betas[((t + 1), u)] + log_probs[(((T - i) - j), u, blank)])
            emit = (betas[(t, (u + 1))] + log_probs[(((T - i) - j), u, labels[u])])
            betas[(t, u)] = logsumexp(emit, skip)
    return (betas, betas[(0, 0)])
|
def analytical_gradient(log_probs, alphas, betas, labels, blank):
    """
    Analytical gradient of the negative log-likelihood w.r.t. log_probs,
    computed from the forward/backward lattices (transition occupation
    probabilities).

    :param log_probs: shape (T, U, V)
    :param alphas: forward lattice, shape (S, U), S = T - U + 2
    :param betas: backward lattice, shape (S, U)
    :param labels: int sequence of length U - 1
    :param int blank: blank index
    :return: gradient array with the same shape as log_probs
    """
    (T, U, _) = log_probs.shape
    S = ((T - U) + 2)
    log_like = betas[(0, 0)]
    # Entries of log_probs never used in any alignment keep NEG_INF here,
    # i.e. -exp(NEG_INF) = 0 gradient.
    grads = np.full(log_probs.shape, NEG_INF)
    # Blank transitions: (t, u) -> (t + 1, u), at time frame t + u.
    for t in range((S - 1)):
        for u in range(U):
            grads[((t + u), u, blank)] = (((alphas[(t, u)] + betas[((t + 1), u)]) + log_probs[((t + u), u, blank)]) - log_like)
    # Label transitions: (t, u) -> (t, u + 1), at time frame t + u.
    for t in range(S):
        for (u, l) in enumerate(labels):
            grads[((t + u), u, l)] = (((alphas[(t, u)] + betas[(t, (u + 1))]) + log_probs[((t + u), u, l)]) - log_like)
    # d(-log P)/d(log_probs) = -(occupation probability).
    return (- np.exp(grads))
|
def numerical_gradient(log_probs, labels, neg_loglike, blank):
    """
    Finite-difference gradient of the negative log-likelihood w.r.t.
    log_probs, for checking :func:`analytical_gradient`.
    Note: `log_probs` is perturbed in place and restored entry by entry.
    """
    eps = 1e-05
    (T, U, V) = log_probs.shape
    grads = np.zeros_like(log_probs)
    for idx in np.ndindex(T, U, V):
        log_probs[idx] += eps
        (_, ll_forward) = forward_pass(log_probs, labels, blank)
        # One-sided difference of -log P around the unperturbed point.
        grads[idx] = ((- ll_forward) - neg_loglike) / eps
        log_probs[idx] -= eps
    return grads
|
def test():
    """
    Self-test: checks that the forward and backward recursions yield the same
    log-likelihood, and that the analytical gradient matches a
    finite-difference (numerical) gradient.
    """
    np.random.seed(0)  # deterministic inputs
    blank = 0
    vocab_size = 4
    input_len = 5
    output_len = 3
    print(('T=%d, U=%d, V=%d' % (input_len, (output_len + 1), vocab_size)))
    inputs = np.random.rand(input_len, (output_len + 1), vocab_size)
    labels = np.random.randint(1, vocab_size, output_len)  # labels exclude blank (index 0)
    log_probs = log_softmax(inputs, axis=2)
    print('log-probs:', log_probs.shape)
    print(log_probs[(..., 0)])
    (alphas, ll_forward) = forward_pass(log_probs, labels, blank)
    print('alphas')
    print(alphas)
    print('LL forward')
    print(ll_forward)
    (betas, ll_backward) = backward_pass(log_probs, labels, blank)
    assert np.allclose(ll_forward, ll_backward, atol=1e-12, rtol=1e-12), 'Log-likelihood from forward and backward pass mismatch.'
    print(('LL forward == LL backward: %s' % colored('MATCH', 'green')))
    neg_loglike = (- ll_forward)
    analytical_grads = analytical_gradient(log_probs, alphas, betas, labels, blank)
    numerical_grads = numerical_gradient(log_probs, labels, neg_loglike, blank)
    assert np.allclose(analytical_grads, numerical_grads, atol=1e-06, rtol=1e-06), 'Analytical and numerical computation of gradient mismatch.'
    print(('analytical == numerical grad: %s' % colored('MATCH', 'green')))
|
def main():
    """
    Dumps alignments of a trained RETURNN setup (default layer: "ctc_align")
    for the train and dev datasets into HDF files, via an added "hdf_dump"
    layer. The prior used in the alignment scores can optionally be rescaled
    (--prior-scale) or replaced by an external one (--extern-prior).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('setup')
    arg_parser.add_argument('--align-layer', default='ctc_align')
    arg_parser.add_argument('--prior-scale', default=None)
    arg_parser.add_argument('--extern-prior')
    args = arg_parser.parse_args()
    config_filename = ('%s/config-train/%s.config' % (setup_base_dir, args.setup))
    setup_dir = ('%s/data-train/%s' % (setup_base_dir, args.setup))
    assert (os.path.exists(config_filename) and os.path.isdir(setup_dir))
    if (args.extern_prior and (not args.extern_prior.startswith('/'))):
        # Make the path absolute before we chdir below.
        args.extern_prior = ('%s/%s' % (os.getcwd(), args.extern_prior))
    os.chdir(setup_dir)
    init(config_filename=config_filename, extra_greeting='dump-align', config_updates={'need_data': False})
    config = get_global_config()
    datasets_dict = {'train': config.typed_dict['train'], 'dev': config.typed_dict['dev']}
    for (dataset_name, dataset_dict) in datasets_dict.items():
        assert isinstance(dataset_dict, dict)
        assert (dataset_dict['class'] == 'ExternSprintDataset')
        assert (('partition_epoch' in dataset_dict) and ('estimated_num_seqs' in dataset_dict))
        # Undo epoch partitioning: iterate the full dataset in one pass.
        dataset_dict['estimated_num_seqs'] *= dataset_dict['partition_epoch']
        dataset_dict['partition_epoch'] = 1
        sprint_args = dataset_dict['sprintConfigStr']
        assert isinstance(sprint_args, list)
        # Remove the length-bucketed shuffling option from the Sprint args.
        shuffle_chunk_size_opt = [arg for arg in sprint_args if (isinstance(arg, str) and ('segment-order-sort-by-time-length-chunk-size=' in arg))]
        assert (len(shuffle_chunk_size_opt) == 1)
        sprint_args.remove(shuffle_chunk_size_opt[0])
        dataset_dict['name'] = dataset_name
    dump_layer_name = ('%s_dump' % args.align_layer)
    def net_dict_post_proc(net_dict):
        """
        Adds the HDF dump layer and optionally patches the prior;
        drops search/decoder layers which are not needed for dumping.

        :param dict[str] net_dict:
        :rtype: dict[str]
        """
        assert (args.align_layer in net_dict)
        # The actual filename is assigned per dataset below.
        net_dict[dump_layer_name] = {'class': 'hdf_dump', 'from': args.align_layer, 'extra': {'scores': ('%s/scores' % args.align_layer)}, 'filename': None, 'is_output_layer': True}
        if (args.prior_scale is not None):
            align_scores_layer_name = net_dict[args.align_layer]['from']
            assert isinstance(align_scores_layer_name, str)
            align_scores_layer_dict = net_dict[align_scores_layer_name]
            assert ('eval_locals' in align_scores_layer_dict)
            align_scores_eval_locals = align_scores_layer_dict['eval_locals']
            assert ('prior_scale' in align_scores_eval_locals)
            align_scores_eval_locals['prior_scale'] = float(args.prior_scale)
        if args.extern_prior:
            # Replace the prior layer by a constant from the external file.
            log_prior = numpy.array(load_txt_vector(args.extern_prior), dtype='float32')
            align_scores_layer_name = net_dict[args.align_layer]['from']
            assert isinstance(align_scores_layer_name, str)
            align_scores_layer_dict = net_dict[align_scores_layer_name]
            assert ('eval_locals' in align_scores_layer_dict)
            align_scores_eval_locals = align_scores_layer_dict['eval_locals']
            assert ('prior_scale' in align_scores_eval_locals)
            # The scores layer is expected to combine posterior and prior.
            assert ('safe_log(source(1))' in align_scores_layer_dict['eval'])
            assert (len(align_scores_layer_dict['from']) == 2)
            (align_posterior_layer_name, align_prior_layer_name) = align_scores_layer_dict['from']
            align_posterior_layer_dict = net_dict[align_posterior_layer_name]
            dim = align_posterior_layer_dict['n_out']
            assert (log_prior.shape == (dim,))
            assert (align_prior_layer_name in net_dict)
            net_dict[align_prior_layer_name] = {'class': 'eval', 'from': [], 'out_type': {'shape': (dim,), 'batch_dim_axis': None, 'time_dim_axis': None}, 'eval': (lambda **kwargs: tf.exp(tf.constant(log_prior)))}
        # Not needed for dumping: decision layer and the recurrent decoder.
        if ('decision' in net_dict):
            net_dict.pop('decision')
        if (('output' in net_dict) and (net_dict['output']['class'] == 'rec') and isinstance(net_dict['output']['unit'], dict)):
            net_dict.pop('output')
        return net_dict
    engine = get_global_engine()
    engine.init_network_from_config(net_dict_post_proc=net_dict_post_proc)
    print('Initialized network, epoch:', engine.epoch)
    dump_layer = engine.network.layers[dump_layer_name]
    assert isinstance(dump_layer, HDFDumpLayer)
    for (dataset_name, dataset_dict) in datasets_dict.items():
        print('Load data', dataset_name, '...')
        dataset = init_dataset(dataset_dict)
        print(dataset)
        # Output filename encodes setup, epoch and the prior options.
        out_filename_parts = [args.setup, ('epoch-%i' % engine.epoch)]
        if args.extern_prior:
            out_filename_parts += ['extern_prior']
        if (args.prior_scale is not None):
            out_filename_parts += [('prior-%s' % args.prior_scale.replace('.', '_'))]
        out_filename_parts += [('data-%s' % dataset_name), 'hdf']
        output_hdf_filename = ('%s/%s' % (data_dir, '.'.join(out_filename_parts)))
        print('Store HDF as:', output_hdf_filename)
        assert (not os.path.exists(output_hdf_filename))
        dump_layer.filename = output_hdf_filename
        dataset_batches = dataset.generate_batches(recurrent_net=engine.network.recurrent, batch_size=config.typed_value('batch_size', 1), max_seqs=config.int('max_seqs', (- 1)), used_data_keys=engine.network.get_used_data_keys())
        runner = Runner(engine=engine, dataset=dataset, batches=dataset_batches, train=False, eval=False)
        runner.run(report_prefix=(engine.get_epoch_str() + (' %r dump align' % dataset_name)))
        if (not runner.finalized):
            print('Runner not finalized, quitting.')
            sys.exit(1)
        assert dump_layer.hdf_writer
        # Triggers closing/flushing of the HDF writer.
        engine.network.call_graph_reset_callbacks()
        assert os.path.exists(output_hdf_filename)
        assert (not dump_layer.hdf_writer)
    print('Finished.')
|
def main():
    """
    Computes the softmax prior (default layer: "ctc_out") of a trained
    RETURNN setup over the train dataset and stores it as a text vector file,
    via ``engine.compute_priors``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('setup')
    arg_parser.add_argument('--softmax-layer', default='ctc_out')
    args = arg_parser.parse_args()
    config_filename = ('%s/config-train/%s.config' % (setup_base_dir, args.setup))
    setup_dir = ('%s/data-train/%s' % (setup_base_dir, args.setup))
    assert (os.path.exists(config_filename) and os.path.isdir(setup_dir))
    os.chdir(setup_dir)
    init(config_filename=config_filename, extra_greeting='extract softmax prior', config_updates={'need_data': False})
    config = get_global_config()
    datasets_dict = {'train': config.typed_dict['train']}
    for (dataset_name, dataset_dict) in datasets_dict.items():
        assert isinstance(dataset_dict, dict)
        assert (dataset_dict['class'] == 'ExternSprintDataset')
        assert (('partition_epoch' in dataset_dict) and ('estimated_num_seqs' in dataset_dict))
        # Undo epoch partitioning: iterate the full dataset in one pass.
        dataset_dict['estimated_num_seqs'] *= dataset_dict['partition_epoch']
        dataset_dict['partition_epoch'] = 1
        sprint_args = dataset_dict['sprintConfigStr']
        assert isinstance(sprint_args, list)
        # Remove the length-bucketed shuffling option from the Sprint args.
        shuffle_chunk_size_opt = [arg for arg in sprint_args if (isinstance(arg, str) and ('segment-order-sort-by-time-length-chunk-size=' in arg))]
        assert (len(shuffle_chunk_size_opt) == 1)
        sprint_args.remove(shuffle_chunk_size_opt[0])
        dataset_dict['name'] = dataset_name
    engine = get_global_engine()
    engine.init_network_from_config()
    print('Initialized network, epoch:', engine.epoch)
    for (dataset_name, dataset_dict) in datasets_dict.items():
        print('Load data', dataset_name, '...')
        dataset = init_dataset(dataset_dict)
        print(dataset)
        out_filename_parts = [args.setup, ('epoch-%i' % engine.epoch), 'smprior', 'txt']
        output_filename = ('%s/%s' % (data_dir, '.'.join(out_filename_parts)))
        print('Store prior as:', output_filename)
        assert (not os.path.exists(output_filename))
        # compute_priors reads these config entries.
        config.set('forward_output_layer', args.softmax_layer)
        config.set('output_file', output_filename)
        config.set('max_seq_length', sys.maxsize)
        engine.compute_priors(dataset=dataset, config=config)
        assert os.path.exists(output_filename)
    print('Finished.')
|
class AddOneHotToTime(CopyLayer):
    """
    Appends ``repeat`` frames of a constant one-hot vector (hot at ``position``)
    at the end of the time axis of the input, and extends the sequence lengths
    accordingly.
    """
    # RETURNN layer name under which this class gets registered
    layer_class = 'addonehot'

    def __init__(self, position=0, repeat=1, vocab_size=30000, **kwargs):
        """
        :param int position: index which is set hot (1.0) in the appended one-hot vector
        :param int repeat: how many frames of the one-hot vector to append
        :param int vocab_size: depth of the one-hot vector;
            assumed to equal the feature dim of the input, so that the broadcast
            multiply below works -- TODO confirm against callers
        """
        super(AddOneHotToTime, self).__init__(**kwargs)
        assert (self.output.time_dim_axis is not None)
        assert isinstance(position, int), 'Idice needs to be Integer'
        # constant one-hot vector, shape (vocab_size,)
        c = tf.one_hot(position, depth=vocab_size, dtype=self.output.dtype)
        # same shape as the output, but with the time axis replaced by `repeat`;
        # use dynamic dims where the static shape is unknown
        shape = [((self.output.batch_shape[i] or tf.shape(self.output.placeholder)[i]) if (i != self.output.time_dim_axis) else repeat) for i in range(self.output.batch_ndim)]
        x = tf.ones(shape, dtype=self.output.dtype)
        # broadcast the one-hot vector over the new frames and append in time
        self.output.placeholder = tf.concat([self.output.placeholder, (x * c)], axis=self.output.time_dim_axis)
        # NOTE(review): assumes a size_placeholder entry exists for the time axis
        self.output.size_placeholder[self.output.time_dim_axis_excluding_batch] += repeat
|
def gen_model_1label():
    r"""
    Symbolically analyzes \sum_{s:y} p(x|s) for a toy model (uses SageMath
    globals: ``var``, ``min_symbolic``, ``max_symbolic``, symbolic ``sum``,
    ``solve``, ``ZZ``, ``RR`` -- run under Sage).

    Two possible inputs x1 (1,0) and x2 (0,1),
    two possible labels "a" and (blank) "B".
    Define p(x1|s=a) = theta_a, p(x2|s=a) = 1 - theta_a,
    p(x2|s=B) = theta_B, p(x1|s=B) = 1 - theta_B.

    For simplicity, fsa ^= a*B*, and the input be x1^{na},x2^{nB}, T = na + nB.
    Then we can just count. All alignments can be iterated through by t=0...T
    (t = number of frames labeled "a"). Symmetric case...
    """
    na = var('na', domain=ZZ)
    nb = var('nb', domain=ZZ)
    theta_a = var('theta_a', domain=RR)
    theta_b = var('theta_b', domain=RR)
    t = var('t', domain=ZZ)
    # probability of the alignment with t frames of "a" followed by T-t frames of "B"
    p1 = (theta_a ** min_symbolic(t, na))
    p2 = ((1 - theta_a) ** max_symbolic((t - na), 0))
    p3 = (theta_b ** min_symbolic(((na + nb) - t), nb))
    p4 = ((1 - theta_b) ** max_symbolic((na - t), 0))
    # full sum over all alignments
    sum_ = sum((((p1 * p2) * p3) * p4), t, 0, (na + nb))
    for _ in range(6):  # repeated simplify passes; one pass is often not enough
        sum_ = sum_.simplify()
    print(sum_)
    syms = (theta_a,)
    sum_diff = sum_.diff(*syms)
    sum_diff = sum_diff.simplify()
    print('diff:', sum_diff)
    # stationary points of the full sum w.r.t. theta_a
    opts = solve((sum_diff == 0), *syms, domain=RR)
    print('num opts:', len(opts))
    for opt in opts:
        print('opt:', opt)
|
def main():
    """Command-line dispatcher: run the module-level function named by argv[1], or print usage."""
    if len(sys.argv) < 2:
        print(('Usage: %s <func>' % __file__))
        sys.exit(1)
    # dispatch to the requested function by name
    globals()[sys.argv[1]]()
|
class Arc():
    """
    One labeled transition of an FSA.

    Instances compare/hash by identity (no __eq__/__hash__), so duplicate
    arcs with identical fields are distinct objects.
    """
    def __init__(self, source_state: int, target_state: int, label: str):
        self.source_state = source_state
        self.target_state = target_state
        self.label = label

    def short_str(self, target_is_final_mark: bool = False):
        """Compact rendering like ``0 -a-> 1``; a trailing ``.`` marks a final target state."""
        final_mark = '.' if target_is_final_mark else ''
        return '%i -%s-> %i%s' % (self.source_state, self.label, self.target_state, final_mark)

    def __repr__(self):
        return 'Arc{%s}' % self.short_str()
|
class Fsa():
    """
    Finite state automaton.

    States are ints; state 0 always exists and is the start state.
    Arcs are :class:`Arc` objects, compared by identity, so parallel
    duplicate arcs are possible.
    """
    def __init__(self):
        self.states = {0}  # all state ids; start state 0 always present
        self.start_state = 0
        self.final_states = set()  # subset of states
        self.arcs = set()  # all Arc objects
        self.arcs_by_source_state = {}  # state -> set of outgoing Arc
    def add_arc(self, source_state: int, target_state: int, label: str):
        """Adds an arc; implicitly registers both endpoint states."""
        self.states.add(source_state)
        self.states.add(target_state)
        arc = Arc(source_state=source_state, target_state=target_state, label=label)
        self.arcs.add(arc)
        self.arcs_by_source_state.setdefault(source_state, set()).add(arc)
    def add_final_state(self, state: int):
        """Marks a state as final. Note: does not add it to self.states."""
        self.final_states.add(state)
    def get_labels(self):
        """:return: sorted list of all distinct arc labels"""
        labels = set()
        for arc in self.arcs:
            labels.add(arc.label)
        return sorted(labels)
    def is_deterministic_by_label(self):
        """:return: True iff no state has two outgoing arcs with the same label"""
        for (_, arcs) in self.arcs_by_source_state.items():
            labels = set()
            for arc in arcs:
                if (arc.label in labels):
                    return False
                labels.add(arc.label)
        return True
    def str(self):
        """Human-readable arc listing, in BFS order from the start state."""
        collected_arcs_set = set()
        collected_arcs_list = []
        visited_states = set()
        state_queue = [self.start_state]
        while state_queue:
            state = state_queue.pop(0)  # FIFO pop -> breadth-first traversal
            if (state in visited_states):
                continue
            visited_states.add(state)
            # NOTE(review): assumes every reachable state has at least one
            # outgoing arc (KeyError otherwise); holds for the FSAs built here.
            for arc in self.arcs_by_source_state[state]:
                if (arc in collected_arcs_set):
                    continue
                collected_arcs_set.add(arc)
                collected_arcs_list.append(arc.short_str(target_is_final_mark=(arc.target_state in self.final_states)))
                if (arc.target_state not in visited_states):
                    state_queue.append(arc.target_state)
        return ', '.join(collected_arcs_list)
    def __repr__(self):
        return ('Fsa{%s}' % self.str())
    def copy_ref_new_final_states(self, final_states: typing.Set[int]):
        """
        :return: shallow copy which *shares* states/arcs with self,
            but uses the given set as final states
        :rtype: Fsa
        """
        fsa = Fsa()
        fsa.states = self.states
        fsa.start_state = self.start_state
        fsa.arcs = self.arcs
        fsa.arcs_by_source_state = self.arcs_by_source_state
        fsa.final_states = final_states
        return fsa
    def get_deterministic_source_to_target_arc(self, source_state, target_state):
        """
        :param int source_state:
        :param int target_state:
        :return: the unique arc source->target, or None if there is none
        :rtype: Arc|None
        """
        assert ((source_state in self.states) and (target_state in self.states))
        possible_arcs = []
        for arc in self.arcs_by_source_state[source_state]:
            if (arc.target_state == target_state):
                possible_arcs.append(arc)
        # "deterministic" here: at most one arc per (source, target) pair
        assert (len(possible_arcs) <= 1)
        if (not possible_arcs):
            return None
        return possible_arcs[0]
    def get_deterministic_source_to_target_prob(self, source_state, target_state, probs_by_label):
        """
        :param int source_state:
        :param int target_state:
        :param dict[str,T] probs_by_label:
        :return: prob of the unique arc source->target (looked up by label), or 0 if none
        :rtype: T|int
        """
        arc = self.get_deterministic_source_to_target_arc(source_state=source_state, target_state=target_state)
        if arc:
            return probs_by_label[arc.label]
        return 0
    def get_edges_weights_start_end_states(self):
        """
        :return: edges, weights, start_end_states;
            edges is (4,num_edges), int32, edges of the graph (from,to,emission_idx,sequence_idx).
            weights is (num_edges,), float32. all zero.
            start_end_states is (2,batch), int32, (start,end) state idx in FSA.
        :rtype: (numpy.ndarray,numpy.ndarray,numpy.ndarray)
        """
        # NOTE(review): unfinished -- only the precondition is checked; returns None.
        assert (len(self.final_states) == 1)
    def tf_get_full_sum(self, logits, idx_by_label=None, unroll=False, max_approx=False):
        """
        Dynamic-programming full-sum (Baum-Welch) or Viterbi score over the FSA.

        :param tf.Tensor logits: [T,B,D], normalized as you want
        :param dict[str,int]|None idx_by_label: label -> index into the D axis;
            defaults to sorted labels with BlankLabel last
        :param bool unroll: unroll the time loop in the graph (requires static T)
        :param bool max_approx: max instead of sum (Viterbi instead of Baum-Welch)
        :return: [B], scores in +log space
        :rtype: tf.Tensor
        """
        import tensorflow as tf
        tf = tf.compat.v1  # uses v1 APIs (while_loop shape_invariants, batch_gather, is_finite)
        assert isinstance(logits, tf.Tensor)
        num_states = (max(self.states) + 1)
        states = range(num_states)
        labels = self.get_labels()
        num_labels = len(labels)
        logits.set_shape((None, None, num_labels))
        if (idx_by_label is None):
            # default label indexing: non-blank labels in sorted order, blank last
            idx_by_label = {}
            idx = 0
            for label in labels:
                if (label != BlankLabel):
                    idx_by_label[label] = idx
                    idx += 1
            if (BlankLabel in labels):
                idx_by_label[BlankLabel] = idx
        assert all([(0 <= idx_by_label[label] < num_labels) for label in labels])
        (num_frames, num_batch, _) = logits.shape.as_list()
        if (num_frames is None):
            num_frames = tf.shape(logits)[0]
        if (num_batch is None):
            num_batch = tf.shape(logits)[1]
        # per-frame tensor array of shape (D,B) entries for cheap per-label unstacking
        logits_ta = tf.TensorArray(tf.float32, size=num_frames, element_shape=(num_labels, None))
        logits_ta = logits_ta.unstack(tf.transpose(logits, [0, 2, 1]))
        # forward scores per state, -inf except 0.0 at the start state
        initial_scores = ([float('-inf')] * num_states)
        initial_scores[self.start_state] = 0.0
        scores = tf.tile(tf.expand_dims(tf.constant(initial_scores), axis=1), [1, num_batch])
        def combine_scores(ss_, axis):
            # sum (logsumexp) or max (Viterbi) over incoming partial scores
            if max_approx:
                arg_ = tf.argmax(ss_, axis=axis)
                if (axis == 0):
                    ss_ = tf.transpose(ss_, [1, 0])
                assert (arg_.shape.as_list() == [None])
                ss_ = tf.squeeze(tf.batch_gather(ss_, indices=tf.expand_dims(arg_, axis=(- 1))), axis=(- 1))
                assert (ss_.shape.as_list() == [None])
                return ss_
            return tf.reduce_logsumexp(ss_, axis=axis)
        def add_scores(s_, y_):
            # keep -inf as -inf (avoids -inf + x producing NaN paths)
            return tf.where(tf.is_finite(s_), (s_ + y_), s_)
        def body(t_, scores_):
            """
            One DP step over frame t_.

            :param tf.Tensor t_: scalar
            :param tf.Tensor scores_: (states,B)
            """
            logits_cur = logits_ta.read(t_)
            logits_cur_by_label_idx = tf.unstack(logits_cur, axis=0)
            assert (len(logits_cur_by_label_idx) == num_labels)
            scores_by_state_ = tf.unstack(scores_, axis=0)
            assert (len(scores_by_state_) == num_states)
            next_scores_parts = [[] for _ in range(num_states)]
            for src_state in states:
                # deterministic sort for a reproducible graph
                arcs = sorted(self.arcs_by_source_state[src_state], key=(lambda arc_: (arc_.target_state, idx_by_label[arc_.label])))
                for arc in arcs:
                    next_scores_parts[arc.target_state].append(add_scores(scores_by_state_[src_state], logits_cur_by_label_idx[idx_by_label[arc.label]]))
            next_scores = ([float('-inf')] * num_states)
            for (i, parts) in enumerate(next_scores_parts):
                if parts:
                    if (len(parts) == 1):
                        next_scores[i] = parts[0]
                    else:
                        next_scores[i] = combine_scores(parts, axis=0)
            return ((t_ + 1), tf.convert_to_tensor(next_scores))
        if unroll:
            assert isinstance(num_frames, int)
            for t in range(num_frames):
                with tf.name_scope(('frame_t%i' % t)):
                    (_, scores) = body(t, scores)
        else:
            (_, scores) = tf.while_loop(cond=(lambda t_, scores_: tf.less(t_, num_frames)), body=body, loop_vars=(0, scores), shape_invariants=(tf.TensorShape(()), tf.TensorShape((num_states, None))))
        scores_by_state = tf.unstack(scores, axis=0)
        assert (len(scores_by_state) == num_states)
        # total score: combine over all final states
        final_scores = combine_scores([scores_by_state[i] for i in self.final_states], axis=0)
        return final_scores
    def tf_get_best_alignment(self, logits):
        """
        :param tf.Tensor logits: [T,B,D], before softmax
        :return: (alignment, scores), alignment is (time, batch), scores is (batch,), in +log space
        :rtype: (tf.Tensor, tf.Tensor)
        """
        import tensorflow as tf
        # Viterbi scores; the gradient of the max-score w.r.t. the logits is
        # nonzero exactly on the best path, so argmax recovers the alignment
        scores = self.tf_get_full_sum(logits=logits, max_approx=True)
        (logits_grad,) = tf.gradients(tf.reduce_sum(scores), logits)
        alignment = tf.argmax(logits_grad, axis=(- 1))
        return (alignment, scores)
|
def iterate_all_paths(fsa: Fsa, num_frames: int, state: typing.Union[(None, int)]=None) -> typing.Generator[(typing.List[Arc], None, None)]:
    """
    Yields every path of exactly `num_frames` arcs from `state`
    (default: the start state) to any final state.
    """
    cur_state = fsa.start_state if state is None else state
    if num_frames == 0:
        # a zero-length path exists iff we already sit on a final state
        if cur_state in fsa.final_states:
            yield []
        return
    assert num_frames > 0
    for first_arc in fsa.arcs_by_source_state[cur_state]:
        for tail in iterate_all_paths(fsa=fsa, state=first_arc.target_state, num_frames=num_frames - 1):
            yield [first_arc] + tail
|
def count_all_paths_inefficient(fsa: Fsa, num_frames: int) -> int:
    """Brute-force count of all length-`num_frames` paths (enumerates them all)."""
    return sum(1 for _ in iterate_all_paths(fsa=fsa, num_frames=num_frames))
|
def count_all_paths_with_label_in_frame_inefficient(fsa: Fsa, num_frames: int, frame_idx: int, label: str) -> int:
    """Brute-force count of paths whose arc at `frame_idx` carries `label`."""
    return sum(
        1
        for path in iterate_all_paths(fsa=fsa, num_frames=num_frames)
        if path[frame_idx].label == label)
|
@sympy.cacheit
def count_all_paths(fsa: Fsa, state: typing.Union[(None, int)]=None) -> (sympy.Symbol, sympy.Expr):
    """
    Symbolic path counting via recursion over the FSA.

    :return: (num_frames, count).
        num_frames is a symbolic var,
        count is the count of all unique paths from the given state (or start state)
        to any final state, as an expression in num_frames.
    """
    if (state is None):
        return count_all_paths(fsa=fsa, state=fsa.start_state)
    # each state gets its own frame-count symbol so recursive results compose via subs()
    num_frames = sympy.Symbol(('num_frames_%i' % state), integer=True, nonnegative=True)
    if (state in fsa.final_states):
        # the empty path counts iff we can stop here with 0 frames left
        count = sympy.Piecewise((1, sympy.Equality(num_frames, 0)), (0, True))
    else:
        count = sympy.sympify(0)
    have_loop = False
    for arc in fsa.arcs_by_source_state[state]:
        if (arc.target_state == state):
            # self-loops are handled separately below (closed-form over loop length)
            have_loop = True
        else:
            (num_frames_, count_) = count_all_paths(fsa=fsa, state=arc.target_state)
            # taking this arc consumes one frame
            count += sympy.Piecewise((count_.subs(num_frames_, (num_frames - 1)), sympy.Ge(num_frames, 1)), (0, True))
    count = count.simplify()
    assert isinstance(count, sympy.Expr)
    # fold the self-loop in: sum the loop-free count over all possible numbers of
    # loop iterations. Only a few Piecewise shapes are recognized; each branch
    # rewrites `count` into a closed form and breaks.
    while have_loop:
        if (count == sympy.sympify(0)):
            break
        true_value_wild = sympy.Wild('true_value')
        false_value_wild = sympy.Wild('false_value')
        cond_wild = sympy.Wild('cond')
        m = count.match(sympy.Piecewise((true_value_wild, cond_wild), (false_value_wild, True)))
        if m:
            true_value = m[true_value_wild]
            false_value = m[false_value_wild]
            n_wild = sympy.Wild('n')
            cond = m[cond_wild]
            assert isinstance(cond, (sympy.Expr, sympy.Basic))
            # (num_frames == 0) | (num_frames >= 1) is always true for nonneg ints
            if (cond.match((sympy.Eq(num_frames, 0) | sympy.Ge(num_frames, 1))) == {}):
                cond = sympy.Ge(num_frames, 0)
            cond_match = cond.match(sympy.Eq(num_frames, n_wild))
            if cond_match:
                # shape: Piecewise((tv, Eq(num_frames, 0)), (fv, True))
                assert (isinstance(true_value, sympy.Integer) and isinstance(false_value, sympy.Integer))
                assert (cond_match[n_wild] == sympy.sympify(0))
                count = (true_value + (false_value * num_frames))
                break
            if (cond.match(True) == {}):
                # condition is trivially true -> constant per frame count
                assert isinstance(true_value, sympy.Integer)
                count = (true_value * (num_frames + 1))
                break
            cond_match = cond.match(sympy.Ge(num_frames, n_wild))
            if cond_match:
                # shape: Piecewise((tv, num_frames >= n), (fv, True))
                n = cond_match[n_wild]
                assert isinstance(n, sympy.Integer)
                num_frames_loop = sympy.Symbol(('num_frames_%i_looped' % state), integer=True, nonnegative=True)
                count = sympy.Sum(true_value, (num_frames, n, num_frames_loop))
                if (n > 0):
                    count += sympy.Sum(false_value, (num_frames, 0, (n - 1)))
                num_frames = num_frames_loop
                count = count.simplify().simplify()
                break
            raise Exception(('no simplify rule for piecewise %r' % m))
        # generic fallback: sum the expression over all loop counts symbolically
        num_frames_loop = sympy.Symbol(('num_frames_%i_looped' % state), integer=True, nonnegative=True)
        count = sympy.Sum(count, (num_frames, 0, num_frames_loop))
        num_frames = num_frames_loop
        break
    return (num_frames, count)
|
@sympy.cacheit
def count_all_paths_with_label_in_frame(fsa: Fsa, label: str) -> (sympy.Symbol, sympy.Symbol, sympy.Expr):
    """
    Symbolic count of all paths which emit `label` in a given frame.

    :return: (num_frames, frame_idx, count); num_frames and frame_idx are symbolic,
        count is the number of full-length paths whose arc at frame_idx has `label`.
    """
    num_frames = sympy.Symbol('num_frames', integer=True, nonnegative=True)
    frame_idx = sympy.Symbol('frame_idx', integer=True, nonnegative=True)
    count = 0
    for arc in fsa.arcs:
        if (arc.label != label):
            continue
        # paths through this arc at frame_idx =
        #   (prefixes reaching arc.source_state in frame_idx frames)
        # * (suffixes from arc.target_state in the remaining frames)
        (before_num, before_count) = count_all_paths(fsa=fsa.copy_ref_new_final_states({arc.source_state}))
        before_count = before_count.subs(before_num, frame_idx)
        (after_num, after_count) = count_all_paths(fsa=fsa, state=arc.target_state)
        after_count = after_count.subs(after_num, ((num_frames - frame_idx) - 1))
        count += (before_count * after_count)
    count = sympy.sympify(count).simplify().simplify()
    return (num_frames, frame_idx, count)
|
def count_all_paths_with_label_avg(fsa: Fsa, label: str, num_frames: typing.Optional[int]=None):
    """
    Prints the average fraction of frames carrying `label` over all paths
    (symbolically, and numerically for T=1..num_frames if given). Returns None.

    :param Fsa fsa:
    :param str label:
    :param int|None num_frames: if given, also evaluate for concrete T values
    """
    (num_frames_, frame_idx, count) = count_all_paths_with_label_in_frame(fsa=fsa, label=label)
    # total count of (path, frame) pairs where the frame has `label`
    count_sum = sympy.Sum(count, (frame_idx, 0, (num_frames_ - 1)))
    for _ in range(4):  # repeated simplify passes; sympy often needs several
        count_sum = count_sum.simplify()
    print(('sum over counts with l=%s in any frame:' % label), count_sum)
    (n, count_all) = count_all_paths(fsa=fsa)
    count_all = count_all.subs(n, num_frames_)
    # normalize by (num frames * num paths)
    avg = (count_sum / (num_frames_ * count_all))
    for _ in range(4):
        avg = avg.simplify()
    print(('avg counts with l=%s in any frame:' % label), avg)
    if (num_frames is not None):
        for t_ in range(1, (num_frames + 1)):
            print((' with T=%i ->' % t_), avg.subs(num_frames_, t_), ('(%f)' % avg.subs(num_frames_, t_)))
    print()
|
def count_all_paths_with_label_seq(fsa: Fsa, label_seq_template: str):
    """
    Prints per-(input label, output label) average path counts. Returns None.

    :param Fsa fsa:
    :param str label_seq_template: example "baab". this will get upsampled for
        num_frames, e.g. "bbaaaabb" -- each template position covers n frames
        (n symbolic).
    """
    n = sympy.Symbol('n', integer=True, nonnegative=True)
    num_frames = (len(label_seq_template) * n)
    frame_idx = sympy.Symbol('frame_idx', integer=True, nonnegative=True)
    labels = set(label_seq_template)
    # per output label: symbolic count of paths emitting it at frame_idx
    count_by_label = {}
    for output_label in sorted(labels):
        (num_frames_, frame_idx_, count) = count_all_paths_with_label_in_frame(fsa=fsa, label=output_label)
        count = count.subs(num_frames_, num_frames)
        count = count.subs(frame_idx_, frame_idx)
        count_by_label[output_label] = count
    (num_frames_, count_all) = count_all_paths(fsa=fsa)
    count_all = count_all.subs(num_frames_, num_frames)
    for output_label in sorted(labels):
        sum_for_output = 0
        sum_by_input = {}
        for input_label in sorted(labels):
            # sum the per-frame counts over all frames whose template position
            # carries this input label
            count_sum = 0
            count_label_frames = 0
            for i in range(len(label_seq_template)):
                if (label_seq_template[i] == input_label):
                    count_sum += sympy.Sum(count_by_label[output_label], (frame_idx, (n * i), (((n * i) + n) - 1)))
                    count_label_frames += n
            for _ in range(4):  # repeated simplify passes
                count_sum = count_sum.simplify()
            sum_by_input[input_label] = count_sum
            sum_for_output += count_sum
            print(('sum over counts with (frame)l=%s, (output)l=%s for seq template %r:' % (input_label, output_label, label_seq_template)), count_sum)
            avg = (count_sum / (count_label_frames * count_all))
            for _ in range(5):
                avg = avg.simplify()
            print(('avg counts with l=%s,%s:' % (input_label, output_label)), avg)
            for n_ in range(1, 11):
                avg_ = avg.subs(n, n_)
                print(('avg counts with (frame)l=%s, (output)l=%s, n=%i, T=%i:' % (input_label, output_label, n_, num_frames.subs(n, n_))), avg_, ('(%f)' % avg_))
        # second pass: normalize each input label's share against the total for this output
        for input_label in sorted(labels):
            avg_per_input = (sum_by_input[input_label] / sum_for_output)
            for _ in range(5):
                avg_per_input = avg_per_input.simplify()
            print(('avg (per input) with (frame)l=%s, (output)l=%s:' % (input_label, output_label)), avg_per_input)
            for n_ in range(1, 11):
                avg_ = avg_per_input.subs(n, n_)
                print(('avg (per input) with (frame)l=%s, (output)l=%s, n=%i, T=%i:' % (input_label, output_label, n_, num_frames.subs(n, n_))), avg_, ('(%f)' % avg_))
    print()
|
def count_all_paths_with_label_seq_partly_dominated(fsa: Fsa, label_seq_template: str, dom_label: str, n: typing.Union[(int, sympy.Symbol)], factor: typing.Union[(int, float, sympy.Symbol)], fixed_factor_power: typing.Optional[typing.Union[(sympy.Symbol, sympy.Expr)]]=None) -> typing.Dict[(typing.Tuple[(str, str)], typing.Dict[(str, sympy.Expr)])]:
    """
    Example label_seq_template = "BaaB".
    Case 1: l=B dominating in x=a. Count how much l=B on avg in x=B.
    Case 2: l=B dominating in x=B. Count how much l=B on avg in x=a.

    :param Fsa fsa:
    :param str label_seq_template: currently only "BaaB" is supported (asserted below)
    :param str dom_label:
    :param n: should be integer. positive.
    :param factor: float>1.0 or just 1. in any case positive.
    :param fixed_factor_power: see code...
    :return: dict input label (with prob_dom), other input label (uniform)
        -> label -> float expr (normalized or not)
    """
    labels = fsa.get_labels()
    assert (dom_label in labels)
    input_labels = set(label_seq_template)
    res = {}
    for input_label in input_labels:
        # number of template positions carrying this input label
        count_frames_in_template = 0
        for (i, input_label_) in enumerate(label_seq_template):
            if (input_label_ == input_label):
                count_frames_in_template += 1
        # partition the FSA arcs: contiguous groups of dom-label arcs become
        # "parts"; everything else goes into one remainder part
        parts = []
        parts_by_state = {}
        class Part():
            # contiguous run of states connected by arcs with one label
            def __init__(self, state: int):
                self.start_state = state
                self.end_state = state
                self.arcs = set()
                self.loops_in_states = set()
            def add_arc(self, arc_: Arc):
                assert (self.start_state <= arc_.source_state <= self.end_state)
                self.arcs.add(arc_)
                self.end_state = max(self.end_state, arc_.target_state)
                if (arc_.source_state == arc_.target_state):
                    self.loops_in_states.add(arc_.source_state)
            def have_loop(self):
                return bool(self.loops_in_states)
            def __repr__(self):
                return ('FsaPart{%s}' % self.arcs)
        rem_part = None
        # deterministic arc order so part construction is reproducible
        for arc in sorted(fsa.arcs, key=(lambda arc_: (arc_.source_state, arc_.target_state, arc_.label))):
            if (arc.label == dom_label):
                if (arc.source_state not in parts_by_state):
                    part = Part(state=arc.source_state)
                    parts.append(part)
                    parts_by_state[arc.source_state] = part
                else:
                    part = parts_by_state[arc.source_state]
                part.add_arc(arc)
                parts_by_state[arc.target_state] = part
            else:
                if (rem_part is None):
                    rem_part = Part(state=arc.source_state)
                rem_part.add_arc(arc)
        # hard structural assumptions: exactly two dom-label parts (leading and
        # trailing), both with self-loops, covering start and final states
        assert (len(parts) == 2)
        assert all([part.have_loop() for part in parts])
        assert (parts[0].start_state == 0)
        assert ((parts[(- 1)].start_state in fsa.final_states) and (parts[(- 1)].end_state in fsa.final_states))
        # the closed-form case split below is specific to this template
        assert (label_seq_template == 'BaaB')
        for input_label_ in input_labels:
            if (input_label_ != input_label):
                res_ = {label: 0 for label in labels}
                res[(input_label, input_label_)] = res_
                def _add():
                    # Accumulates one case into res_.
                    # NOTE: closure over label1_start_frame_range, label1_end_frame_range
                    # and the blank/label1_num_frames_p* variables which are
                    # (re-)assigned below before each call -- the late binding
                    # of Python closures is used intentionally here.
                    # p1/p23/p4 refer to the template segments of "BaaB" upsampled
                    # by n: frames [0,n) = first B, [n,3n) = aa, [3n,4n) = last B.
                    if (input_label == BlankLabel):
                        blank_num_frames_input = (blank_num_frames_p1 + blank_num_frames_p4)
                    else:
                        blank_num_frames_input = blank_num_frames_p23
                    if (input_label_ == BlankLabel):
                        blank_num_frames_input_ = (blank_num_frames_p1 + blank_num_frames_p4)
                    else:
                        blank_num_frames_input_ = blank_num_frames_p23
                    label1_num_frames_input_ = ((2 * n) - blank_num_frames_input_)
                    if (fixed_factor_power is not None):
                        # only add contributions where the dominance weight
                        # factor**blank_num_frames_input has exactly the fixed power
                        blank_num_frames_input = sympy.sympify(blank_num_frames_input)
                        syms = set(blank_num_frames_input.free_symbols)
                        if syms.issubset({n}):
                            # power is constant in the summation vars -> simple equality gate
                            eq = sympy.Eq(fixed_factor_power, blank_num_frames_input)
                            sum_blank = sympy.Sum(sympy.Sum(blank_num_frames_input_, label1_end_frame_range), label1_start_frame_range)
                            sum_label = sympy.Sum(sympy.Sum(label1_num_frames_input_, label1_end_frame_range), label1_start_frame_range)
                            if (eq == sympy.sympify(True)):
                                res_[BlankLabel] += sum_blank
                                res_[Label1] += sum_label
                            else:
                                res_[BlankLabel] += sympy.Piecewise((sum_blank, eq), (0, True))
                                res_[Label1] += sympy.Piecewise((sum_label, eq), (0, True))
                        elif syms.issubset({n, label1_start_frame}):
                            # solve the power constraint for the start frame and
                            # collapse the outer sum to that single value
                            (label1_start_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_start_frame)
                            in_range = sympy.And(sympy.Ge(label1_start_frame_, label1_start_frame_range[1]), sympy.Le(label1_start_frame_, label1_start_frame_range[2])).simplify()
                            sum_blank = sympy.Sum(blank_num_frames_input_, label1_end_frame_range)
                            sum_label = sympy.Sum(label1_num_frames_input_, label1_end_frame_range)
                            sum_blank = sum_blank.subs(label1_start_frame, label1_start_frame_)
                            sum_label = sum_label.subs(label1_start_frame, label1_start_frame_)
                            res_[BlankLabel] += sympy.Piecewise((sum_blank, in_range), (0, True))
                            res_[Label1] += sympy.Piecewise((sum_label, in_range), (0, True))
                        elif syms.issubset({n, label1_end_frame}):
                            # same idea, solved for the end frame instead
                            (label1_end_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_end_frame)
                            assert set(label1_end_frame_.free_symbols).issubset({n, fixed_factor_power})
                            in_range = sympy.And(sympy.Ge(label1_end_frame_, label1_end_frame_range[1]), sympy.Le(label1_end_frame_, label1_end_frame_range[2])).simplify()
                            assert set(in_range.free_symbols).issubset({n, fixed_factor_power})
                            sum_blank = sympy.Sum(blank_num_frames_input_.subs(label1_end_frame, label1_end_frame_), label1_start_frame_range)
                            sum_label = sympy.Sum(label1_num_frames_input_.subs(label1_end_frame, label1_end_frame_), label1_start_frame_range)
                            res_[BlankLabel] += sympy.Piecewise((sum_blank, in_range), (0, True))
                            res_[Label1] += sympy.Piecewise((sum_label, in_range), (0, True))
                        else:
                            # power depends on both summation vars: solve for the
                            # end frame, then count the start frames for which the
                            # solution stays inside both ranges
                            assert syms.issubset({n, label1_start_frame, label1_end_frame})
                            assert set(sympy.sympify(blank_num_frames_input_).free_symbols).issubset({n})
                            assert set(sympy.sympify(label1_num_frames_input_).free_symbols).issubset({n})
                            (label1_end_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_end_frame)
                            assert set(label1_end_frame_.free_symbols).issubset({n, fixed_factor_power, label1_start_frame})
                            total_cond = True
                            rs = []
                            r1 = sympy.Ge(label1_end_frame_, label1_end_frame_range[1]).simplify()
                            r2 = sympy.Le(label1_end_frame_, label1_end_frame_range[2]).simplify()
                            if (label1_start_frame not in r1.free_symbols):
                                total_cond = sympy.And(total_cond, r1)
                            else:
                                rs.append(sympy.solve_univariate_inequality(r1, label1_start_frame, relational=False))
                            rs.append(sympy.solve_univariate_inequality(r2, label1_start_frame, relational=False))
                            rs.append(sympy.Interval(label1_start_frame_range[1], label1_start_frame_range[2]))
                            # intersect all intervals: r3 = max of lower bounds
                            # (written via Min on negated values), r4 = min of uppers
                            r3 = 0
                            r4 = ((4 * n) - 1)
                            for r_interval in rs:
                                assert isinstance(r_interval, sympy.Interval)
                                r3 = (- sympy.Min((- r3), (- r_interval.start)))
                                r4 = sympy.Min(r4, r_interval.end)
                            _c = fixed_factor_power
                            c = ((r4 - r3) + 1)  # number of valid start frames
                            # manual rewrite so only a single Min remains below
                            c = c.replace(sympy.Min(0, (((- _c) + n) - 1)), (sympy.Min(_c, (n - 1)) - _c))
                            assert isinstance(c, sympy.Expr)
                            assert ((c.count(sympy.Min) == 1) and (c.count(sympy.Max) == 0))
                            (q,) = list(c.find(sympy.Min))
                            assert isinstance(q, sympy.Min)
                            assert (len(q.args) == 2)
                            min_args = list(q.args)
                            # split the remaining Min into its two linear cases
                            case_cond = [sympy.Le(min_args[0], min_args[1]), sympy.Ge(min_args[0], min_args[1])]
                            cases_blank = []
                            cases_label = []
                            for i_ in range(2):
                                c_ = c.replace(q, min_args[i_])
                                assert (c_.count(sympy.Min) == 0)
                                sum_blank = (blank_num_frames_input_ * c_)
                                sum_label = (label1_num_frames_input_ * c_)
                                cond = sympy.And(total_cond, case_cond[i_], sympy.Ge(c_, 0)).simplify()
                                # hand-rolled simplifications sympy does not find itself
                                cond = cond.replace((((_c - (2 * n)) >= (- 1)) & ((_c - (2 * n)) <= (- 1))), sympy.Eq(_c, ((2 * n) - 1)))
                                cond = cond.replace((sympy.Eq(_c, (2 * n)) & sympy.Eq((_c - (2 * n)), (- 1))), False)
                                cond = cond.simplify()
                                if (cond != sympy.sympify(False)):
                                    cases_blank.append((sum_blank, cond))
                                    cases_label.append((sum_label, cond))
                            cases_blank.append((0, True))
                            cases_label.append((0, True))
                            sum_blank = sympy.Piecewise(*cases_blank)
                            sum_label = sympy.Piecewise(*cases_label)
                            res_[BlankLabel] += sum_blank
                            res_[Label1] += sum_label
                    else:
                        # generic weighted sum: weight each alignment by
                        # factor**(num dominated frames)
                        factor_ = sympy.Pow(factor, blank_num_frames_input)
                        res_[BlankLabel] += sympy.Sum(sympy.Sum((blank_num_frames_input_ * factor_), label1_end_frame_range), label1_start_frame_range)
                        res_[Label1] += sympy.Sum(sympy.Sum((label1_num_frames_input_ * factor_), label1_end_frame_range), label1_start_frame_range)
                # alignments of B* a+ B* are parameterized by the first and last
                # frame of the label1 segment; enumerate the case split of where
                # those frames fall relative to the template parts p1/p23/p4
                label1_start_frame = sympy.Symbol('label1_start_frame', integer=True)
                label1_end_frame = sympy.Symbol('label1_end_frame', integer=True)
                if True:
                    # label1 starts in p1 (first B block, frames [0, n))
                    label1_start_frame_range = (label1_start_frame, 0, (n - 1))
                    # ... and ends in p1 as well
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, (n - 1))
                    label1_num_frames_p1 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    blank_num_frames_p23 = (2 * n)
                    blank_num_frames_p4 = n
                    _add()
                    # ... ends in p23 (the "aa" middle, frames [n, 3n))
                    label1_end_frame_range = (label1_end_frame, n, ((3 * n) - 1))
                    label1_num_frames_p1 = (n - label1_start_frame)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    label1_num_frames_p23 = ((label1_end_frame - n) + 1)
                    blank_num_frames_p23 = ((2 * n) - label1_num_frames_p23)
                    blank_num_frames_p4 = n
                    _add()
                    # ... ends in p4 (last B block, frames [3n, 4n))
                    label1_end_frame_range = (label1_end_frame, (3 * n), ((4 * n) - 1))
                    label1_num_frames_p1 = (n - label1_start_frame)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    blank_num_frames_p23 = 0
                    label1_num_frames_p4 = ((label1_end_frame - (3 * n)) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                if True:
                    # label1 starts in p23
                    label1_start_frame_range = (label1_start_frame, n, ((3 * n) - 1))
                    blank_num_frames_p1 = n
                    # ... and ends in p23
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, ((3 * n) - 1))
                    label1_num_frames_p23 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p23 = ((2 * n) - label1_num_frames_p23)
                    blank_num_frames_p4 = n
                    _add()
                    # ... ends in p4
                    label1_end_frame_range = (label1_end_frame, (3 * n), ((4 * n) - 1))
                    blank_num_frames_p23 = (label1_start_frame - n)
                    label1_num_frames_p4 = ((label1_end_frame - (3 * n)) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                if True:
                    # label1 starts (and thus ends) in p4
                    label1_start_frame_range = (label1_start_frame, (3 * n), ((4 * n) - 1))
                    blank_num_frames_p1 = n
                    blank_num_frames_p23 = (2 * n)
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, ((4 * n) - 1))
                    label1_num_frames_p4 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                if isinstance(factor, int):
                    # concrete integer factor: simplify the accumulated expressions
                    for _ in range(5):
                        for label in labels:
                            x = res_[label]
                            if (fixed_factor_power is not None):
                                # project-local helper -- simplifies And-conditions
                                x = sympy_utils.simplify_and(x)
                            x = x.simplify()
                            res_[label] = x
    return res
|
def count_all_paths_with_label_seq_partly_dominated_inefficient(fsa: Fsa, label_seq_template: str, dom_label: str, n: int, prob_dom: float, normalized: bool=True, verbosity: int=0) -> typing.Dict[(typing.Tuple[(str, str)], typing.Dict[(str, float)])]:
    """
    Same as :func:`count_all_paths_with_label_seq_partly_dominated`,
    but brute force: enumerate all paths and count.
    For each input label, assume prob_dom and uniform in the other input labels,
    and then count.

    :param Fsa fsa: e.g. for 1 label B*a+B*
    :param str label_seq_template: e.g. "BaaB"
    :param str dom_label: e.g. B
    :param int n: multiplicator for seq template
    :param float prob_dom:
    :param bool normalized: return value normalized or not
    :param int verbosity: 0 is no output
    :return: input label (with prob_dom), other input label (uniform)
        -> label -> float (normalized or not)
    """
    labels = fsa.get_labels()
    assert (dom_label in labels)
    input_labels = set(label_seq_template)
    num_frames = (n * len(label_seq_template))
    res = {}
    # counts[(input_label, k)]: num paths with k dom-label frames at that input
    counts = defaultdict(int)
    # counts_by_t[(input_label, k, other_input, label)]: frame-level label counts
    counts_by_t = defaultdict(int)
    seqs = defaultdict(list)  # example label seqs per (input_label, k), for debugging
    for path in iterate_all_paths(fsa=fsa, num_frames=num_frames):
        assert (len(path) == num_frames)
        for input_label in input_labels:
            # count emitted labels within the frames of this input label
            count_frames_by_label = {label: 0 for label in labels}
            for (i, input_label_) in enumerate(label_seq_template):
                if (input_label_ == input_label):
                    for j in range((i * n), ((i * n) + n)):
                        count_frames_by_label[path[j].label] += 1
            count_frames_dom_label = count_frames_by_label[dom_label]
            counts[(input_label, count_frames_dom_label)] += 1
            seqs[(input_label, count_frames_dom_label)].append(''.join([arc.label for arc in path]))
            # count emitted labels in the frames of all *other* input labels
            for (i, input_label_) in enumerate(label_seq_template):
                if (input_label_ != input_label):
                    for j in range((i * n), ((i * n) + n)):
                        counts_by_t[(input_label, count_frames_dom_label, input_label_, path[j].label)] += 1
    # verbosity-gated output streams: level i prints to stdout iff i < verbosity.
    # NOTE(review): the os.devnull handles are never closed (minor fd leak).
    v = {i: (sys.stdout if (i < verbosity) else open(os.devnull, 'w')) for i in range(3)}
    print(('Dom label: %s' % dom_label), file=v[1])
    for input_label in input_labels:
        print(('Input %s in %s, n=%i, T=%i.' % (input_label, label_seq_template, n, num_frames)), file=v[0])
        rel_total_counts_by_label = defaultdict(int)
        counts_frames_dom_label = [c for (input_label_, c) in counts.keys() if (input_label_ == input_label)]
        max_count_frames_dom_label = max(counts_frames_dom_label)
        print((' Max count of dom label %s in label seq at this input:' % dom_label), max_count_frames_dom_label, file=v[0])
        for count_frames_dom_label in range((max_count_frames_dom_label + 1)):
            print((' For count of dom label %i in input %s:' % (count_frames_dom_label, input_label)), file=v[1])
            print(' Num seqs:', counts[(input_label, count_frames_dom_label)], file=v[1])
            print(' Seqs:', seqs[(input_label, count_frames_dom_label)], file=v[2])
            for input_label_ in input_labels:
                if (input_label_ != input_label):
                    count_all_frames = 0
                    print((' Look at other input %s:' % input_label_), file=v[1])
                    for (i, input_label__) in enumerate(label_seq_template):
                        if (input_label__ == input_label_):
                            count_all_frames += n
                    for label in labels:
                        a = counts_by_t[(input_label, count_frames_dom_label, input_label_, label)]
                        b = (counts[(input_label, count_frames_dom_label)] * count_all_frames)
                        print((' Count label %s: %i/%i (%f)' % (label, a, b, (float(a) / b))), file=v[1])
                        if (prob_dom == 0.5):
                            # uniform case: every path has equal weight
                            rel_total_counts_by_label[(input_label_, label)] += a
                        else:
                            # reweight each path by the odds ratio per dom-label frame
                            rel_total_counts_by_label[(input_label_, label)] += (a * ((prob_dom / (1.0 - prob_dom)) ** count_frames_dom_label))
        for input_label_ in input_labels:
            if (input_label_ != input_label):
                res_by_label = {}
                z = sum([rel_total_counts_by_label[(input_label_, label)] for label in labels])
                for label in labels:
                    print((' (avg q(%s|x=%s)) Relative count for input %s, label %s, p(%s|x=%s)=%f: %f' % (label, input_label_, input_label_, label, dom_label, input_label, prob_dom, (float(rel_total_counts_by_label[(input_label_, label)]) / z))), file=v[0])
                    res_by_label[label] = rel_total_counts_by_label[(input_label_, label)]
                    if normalized:
                        res_by_label[label] /= z
                res[(input_label, input_label_)] = res_by_label
    return res
|
def full_sum(fsa: Fsa, label_seq_template: str):
    """
    Symbolic full-sum score over the FSA as a matrix product of per-segment
    transition matrices, then optimizes one free probability parameter.
    Prints the results; returns None.

    :param Fsa fsa: must be deterministic per (source, target) state pair
        (uses :func:`Fsa.get_deterministic_source_to_target_prob`)
    :param str label_seq_template: input template, e.g. "BaaB"; each position
        is repeated n times (n symbolic)
    """
    n = sympy.Symbol('n', integer=True, nonnegative=True)
    states = sorted(fsa.states)
    input_labels = sorted(set(label_seq_template))
    labels = fsa.get_labels()
    # one free probability symbol per (label, input) pair, except the last label,
    # which is fixed to 1 - sum(others) so each distribution is normalized
    probs_by_label_by_input = {input_label: {} for input_label in input_labels}
    prob_vars = []
    for input_label in input_labels:
        s = 0
        for label in labels[:(- 1)]:
            probs_by_label_by_input[input_label][label] = sympy.Symbol(('prob_%s_in_%s' % (label, input_label)))
            s += probs_by_label_by_input[input_label][label]
            prob_vars.append(probs_by_label_by_input[input_label][label])
        probs_by_label_by_input[input_label][labels[(- 1)]] = (1 - s)
    # bugfix: was hard-coded to [[1, 0, 0]], which only works for FSAs with
    # exactly 3 states and start state 0; build a one-hot row over all states
    # at the start state instead (identical result for the 3-state case)
    initial_vec = sympy.Matrix([[(1 if (state == fsa.start_state) else 0) for state in states]])
    final_vec = sympy.Matrix([([1] if (state in fsa.final_states) else [0]) for state in states])
    v = initial_vec
    trans_mat_product = None
    for i in range(len(label_seq_template)):
        probs_by_label = probs_by_label_by_input[label_seq_template[i]]
        # transition matrix for one frame of this template position
        trans_mat = sympy.Matrix([[fsa.get_deterministic_source_to_target_prob(source_state=src_state, target_state=tgt_state, probs_by_label=probs_by_label) for tgt_state in states] for src_state in states])
        # n repeated frames of the same input -> symbolic matrix power
        trans_mat = sympy.Pow(trans_mat, n)
        v *= trans_mat
        if (trans_mat_product is None):
            trans_mat_product = trans_mat
        else:
            trans_mat_product *= trans_mat
    res = (v * final_vec)  # row * column -> 1x1 matrix with the total score
    assert (res.shape == (1, 1))
    res = res[(0, 0)]
    print('params:', prob_vars)
    for subs in [[(prob_vars[0], 1), (n, 4)]]:
        print('calc:')
        res_ = res
        for (sub_var, sub_val) in subs:
            res_ = res_.subs(sub_var, sub_val)
            print('sub', sub_var, 'by', sub_val)
        for _ in range(4):  # repeated simplify passes; sympy often needs several
            res_ = res_.simplify()
        # NOTE: v is re-bound here from the state vector to the free parameter
        # we optimize over
        v = prob_vars[1]
        d = sympy.diff(res_, v)
        for _ in range(2):
            d = d.simplify()
        opts = sympy.solve(d, v)
        print('optima:', opts)
        print('max:', sympy.maximum(res_, v, sympy.Interval(0, 1)))
        for opt in opts:
            opt = opt.simplify()
            print(res_.subs(v, opt).simplify().doit())
|
def get_std_fsa_1label():
    """
    Build the standard CTC-style FSA for a single label "a":
    optional leading blanks, a run of the label, optional trailing blanks.
    Both the label state (1) and the trailing-blank state (2) are final.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (1, 2):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_1label_2times():
    """
    Build the standard CTC-style FSA for the label sequence "a a"
    (the same label twice). A blank is mandatory between the two label
    runs (there is no direct arc from state 1 to state 3), as required
    to separate repeated labels. Final states: 3 and 4.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
        (2, 3, Label1),
        (3, 3, Label1),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (3, 4):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_2label():
    """
    Build the standard CTC-style FSA for the label sequence "a b":
    optional blanks before, between (arc 1->3 skips the blank) and after
    the two label runs. Final states: 3 and 4.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
        (1, 3, Label2),
        (2, 3, Label2),
        (3, 3, Label2),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (3, 4):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_3label_blank():
    """
    Build the standard CTC-style FSA for the label sequence "a b c" with
    optional blanks before, between (skip arcs 1->3 and 3->5) and after
    the label runs. Final states: 5 and 6.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
        (1, 3, Label2),
        (2, 3, Label2),
        (3, 3, Label2),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
        (3, 5, Label3),
        (4, 5, Label3),
        (5, 5, Label3),
        (5, 6, BlankLabel),
        (6, 6, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (5, 6):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_3label_sil():
    """
    Build the silence-style FSA for the label sequence "a b c": blanks are
    allowed only at the start and the end, the three label runs follow each
    other directly with no blank in between. Final states: 3 and 4.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, Label2),
        (2, 2, Label2),
        (2, 3, Label3),
        (3, 3, Label3),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (3, 4):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_4label_2words_blank():
    """
    Build the standard CTC-style FSA for the label sequence "a b c d"
    (interpreted as two words) with optional blanks around and between all
    label runs (skip arcs 1->3, 3->5, 5->7). Final states: 7 and 8.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
        (1, 3, Label2),
        (2, 3, Label2),
        (3, 3, Label2),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
        (3, 5, Label3),
        (4, 5, Label3),
        (5, 5, Label3),
        (5, 6, BlankLabel),
        (6, 6, BlankLabel),
        (5, 7, Label4),
        (6, 7, Label4),
        (7, 7, Label4),
        (7, 8, BlankLabel),
        (8, 8, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (7, 8):
        fsa.add_final_state(final_state)
    return fsa
|
def get_std_fsa_4label_2words_sil():
    """
    Build the silence-style FSA for the label sequence "a b c d"
    (two words): "a b c" runs directly without blanks, then an optional
    blank (word boundary, states 3/4) before "d", and optional trailing
    blanks. Final states: 5 and 6.
    """
    fsa = Fsa()
    for src, tgt, arc_label in [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, Label2),
        (2, 2, Label2),
        (2, 3, Label3),
        (3, 3, Label3),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
        (3, 5, Label4),
        (4, 5, Label4),
        (5, 5, Label4),
        (5, 6, BlankLabel),
        (6, 6, BlankLabel),
    ]:
        fsa.add_arc(src, tgt, arc_label)
    for final_state in (5, 6):
        fsa.add_final_state(final_state)
    return fsa
|
def test_count_all_paths(fsa: Fsa, num_frames: int):
    """
    Check that the symbolic total path count matches explicit enumeration
    for the given number of frames, and print the uniform-distribution loss.
    """
    explicit_count = count_all_paths_inefficient(fsa=fsa, num_frames=num_frames)
    print(('count all paths for T=%i explicit:' % num_frames), explicit_count)
    (n, symbolic_count) = count_all_paths(fsa=fsa)
    print('count all paths symbolic:', n, '->', symbolic_count)
    evaluated_count = symbolic_count.subs(n, num_frames).doit()
    print(('count all paths for T=%i via symbolic:' % num_frames), evaluated_count)
    assert (explicit_count == evaluated_count)
    num_labels = len(fsa.get_labels())
    # -log of: count * (1/num_labels)^T, computed in log space first,
    # then (numerically less stable) directly.
    print('L with uniform distribution:', ((numpy.log(num_labels) * num_frames) - numpy.log(explicit_count)))
    print('L with uniform distribution (inexact):', (- numpy.log((((1.0 / num_labels) ** num_frames) * explicit_count))))
    print()
|
def test_count_all_paths_with_label_in_frame(fsa: Fsa, num_frames: int, frame_idx: int, label: str):
    """
    Check the symbolic count of paths having `label` at `frame_idx`
    against explicit enumeration, and print the fraction of all paths.
    """
    explicit_count = count_all_paths_with_label_in_frame_inefficient(fsa=fsa, num_frames=num_frames, frame_idx=frame_idx, label=label)
    print(('count all paths with t=%i, T=%i, l=%s explicit:' % (frame_idx, num_frames, label)), explicit_count)
    (n, t, symbolic_count) = count_all_paths_with_label_in_frame(fsa=fsa, label=label)
    print(('count all paths with l=%s symbolic:' % label), ('(%s, %s) -> %s' % (n, t, symbolic_count)))
    # Rewrite with T as the length symbol and a 1-based frame index t.
    sym_total = sympy.Symbol('T', integer=True)
    sym_t1 = sympy.Symbol('t', integer=True)
    print(('count all paths with l=%s symbolic (t index 1):' % label), ('(%s, %s) -> %s' % (n, t, symbolic_count.subs(n, sym_total).subs(t, (sym_t1 - 1)).simplify())))
    evaluated_count = symbolic_count.subs(n, num_frames).subs(t, frame_idx).doit()
    print('count via symbolic:', evaluated_count)
    assert (explicit_count == evaluated_count)
    (n, total_count) = count_all_paths(fsa=fsa)
    total_count = total_count.subs(n, num_frames)
    print((' / %i, fraction %f' % (total_count, (evaluated_count / total_count))))
    print()
|
def count_paths_with_label(fsa: Fsa, num_frames: int, label: str):
    """
    For each sequence length T up to num_frames, count in how many frames
    the given label dominates (more than half of all paths have it in that
    frame), first numerically, then via a closed-form symbolic expression.
    """
    (_n, _t, count_blank_sym) = count_all_paths_with_label_in_frame(fsa=fsa, label=label)
    n_t = sympy.Symbol('T', integer=True)
    t1 = sympy.Symbol('t', integer=True)
    # Rename the length symbol to T and shift to a 1-based frame index t.
    count_blank_sym = count_blank_sym.subs(_n, n_t).subs(_t, (t1 - 1)).simplify()
    (_n, count_all_sym) = count_all_paths(fsa=fsa)
    count_all_sym = count_all_sym.subs(_n, n_t)
    # Numeric pass: count the dominated frames for each T explicitly.
    for n_ in range(1, (num_frames + 1)):
        count_all = int(count_all_sym.subs(n_t, n_).doit())
        count_blank_dominating_frames = 0
        for t in range(1, (n_ + 1)):
            count_blank = int(count_blank_sym.subs(n_t, n_).subs(t1, t).doit())
            if ((count_blank * 2) > count_all):
                count_blank_dominating_frames += 1
        print(('T=%i, num frames dominated by label=%s:' % (n_, label)), count_blank_dominating_frames, ('(%f)' % (float(count_blank_dominating_frames) / n_)))
    # Dominance condition as an inequality in t: 2*count(t) - total > 0.
    expr_lhs = ((count_blank_sym * 2) - count_all_sym)
    expr_lhs = expr_lhs.simplify()
    expr = sympy.GreaterThan(expr_lhs, 0)
    print(expr.simplify())
    # Must be polynomial in t so that sympy.solve yields its real zeros.
    assert expr_lhs.is_polynomial(t1)
    t_real = sympy.Symbol('t_')
    expr_lhs = expr_lhs.subs(t1, t_real)
    # Expects exactly two real zeros; presumably the dominated frames are
    # those before zero1 and after zero2 -- matches the ceiling terms below.
    (zero1, zero2) = sympy.solve(expr_lhs, t_real)
    print('zeros:', [zero1, zero2])
    count_blank_dominating_frames_sym = (sympy.ceiling((zero1 - 1)) + sympy.ceiling((n_t - zero2)))
    print(('num frames dominated by label=%s symbolic:' % label), count_blank_dominating_frames_sym)
    # Pessimistic (lower-bound) variant: same expression without ceilings.
    count_blank_dominating_frames_small_lim_sym = (((zero1 - 1) + n_t) - zero2)
    rel_count_blank_dominating_frames_sym = (count_blank_dominating_frames_small_lim_sym / n_t)
    print('pessimistic relative count:', rel_count_blank_dominating_frames_sym)
    for n_ in range(1, (num_frames + 1)):
        print(('T=%i symbolic:' % n_), count_blank_dominating_frames_sym.subs(n_t, n_), float(count_blank_dominating_frames_small_lim_sym.subs(n_t, n_)), 'relative:', float(rel_count_blank_dominating_frames_sym.subs(n_t, n_)))
|
def match(fsa: Fsa, input_seq: str) -> typing.Optional[typing.List[Arc]]:
    """
    Match input_seq against the FSA, following the first arc per label.
    Assumes that the FSA is deterministic by label.

    :return: the list of traversed arcs if input_seq is accepted
        (ends in a final state), otherwise None
    """
    state = fsa.start_state
    traversed = []
    for label in input_seq:
        # Deterministic by label: at most one arc per (state, label).
        arc = next((a for a in fsa.arcs_by_source_state[state] if (a.label == label)), None)
        if arc is None:
            return None
        traversed.append(arc)
        state = arc.target_state
    return traversed if (state in fsa.final_states) else None
|
def bias_model(fsa: Fsa, num_frames: int):
    """
    Bias model: a frame-independent label distribution. Sum the probability
    of all length-num_frames FSA paths under it, evaluate at several fixed
    probabilities, then solve for the stationary points symbolically.
    """
    print(('Bias model with T=%i:' % num_frames))
    labels = fsa.get_labels()
    label_probs = {}
    # One free symbol per label except the last, which is fixed to
    # 1 - sum(others) by normalization.
    for label in labels[:(- 1)]:
        label_probs[label] = sympy.Symbol(('prob_%s' % label), real=True, nonnegative=True)
    label_probs[labels[(- 1)]] = (1 - sum([label_probs[label] for label in labels[:(- 1)]]))
    prob_sum = 0
    for path in iterate_all_paths(fsa=fsa, num_frames=num_frames):
        # Path probability: product over labels of p(label)^(#frames with that label).
        prob_path = 1
        for label in labels:
            label_arcs = [arc for arc in path if (arc.label == label)]
            if label_arcs:
                prob_path *= sympy.Pow(label_probs[label], len(label_arcs))
        prob_sum += prob_path
    assert isinstance(prob_sum, sympy.Expr)
    print(prob_sum)
    # Evaluate the sum at fixed probabilities of the first label.
    # NOTE(review): only the first label's probability is varied/solved for,
    # which assumes all other probabilities are determined by it (i.e. a
    # two-label setting) -- confirm for FSAs with more labels.
    label_prob0 = label_probs[labels[0]]
    for p in [0, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        x = float(prob_sum.subs(label_prob0, p))
        print(('p(%s)=%f ->' % (labels[0], p)), x, (- numpy.log(x)))
    # Stationary points of the path sum w.r.t. the first label probability.
    opts = sympy.solve(prob_sum.diff(label_prob0), label_prob0)
    opts = [opt.simplify() for opt in opts]
    print(opts)
    for p in opts:
        x = float(prob_sum.subs(label_prob0, p))
        print(('p(%s)=%f ->' % (labels[0], p)), x, (- numpy.log(x)))
    print()
|
def bias_model_1label(num_frames: int):
    """
    Bias model with a hard-coded closed-form path sum for the single-label
    FSA: evaluate the sum at several blank probabilities and solve for
    the stationary points of the blank probability.
    """
    print('Bias model with fixed FSA.')
    labels = ['B', 'a']
    prob_blank = sympy.Symbol('prob_B', real=True)
    path_idx = sympy.Symbol('i', integer=True, positive=True)
    seq_len = sympy.Symbol('T', integer=True, positive=True)
    # Closed-form sum over paths, indexed by i, plus the all-blank path term.
    per_path_term = ((path_idx * sympy.Pow(prob_blank, (path_idx - 1))) * sympy.Pow((1 - prob_blank), ((seq_len - path_idx) + 1)))
    prob_sum = (sympy.Sum(per_path_term, (path_idx, 2, seq_len)) + sympy.Pow((1 - prob_blank), seq_len))
    assert isinstance(prob_sum, sympy.Expr)
    print(prob_sum)
    for p in [0, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        print(('p(%s)=%f ->' % (labels[0], p)), prob_sum.subs(prob_blank, p), ('with T=%i:' % num_frames), prob_sum.subs(prob_blank, p).subs(seq_len, num_frames).doit())
    derivative = prob_sum.diff(prob_blank)
    opts = sympy.solve(derivative, prob_blank)
    print(opts)
    print()
|
def test_count_all_paths_with_label_seq_partly_dominated(recalc=False, check=False, check_with_factor=False):
    """
    Compare explicit (inefficient) counting of partly-dominated label-seq
    paths against symbolic formulas (hard-coded below, or recomputed).

    :param bool recalc: recompute the symbolic results instead of using the
        precomputed expressions stored inline
    :param bool check: also verify the per-fixed-power counts sum to the totals
    :param bool check_with_factor: additionally run the symbolic computation
        with a free dominance factor
    """
    fsa = get_std_fsa_1label()
    n_ = 4
    n = sympy.Symbol('n', integer=True, positive=True)
    factor = sympy.Symbol('fact', real=True, positive=True)
    # Ground truth via explicit enumeration (prob_dom=0.5, unnormalized).
    res = count_all_paths_with_label_seq_partly_dominated_inefficient(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, prob_dom=0.5, normalized=False, verbosity=2)
    print(res)
    if recalc:
        res_ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, factor=1)
    else:
        # Precomputed symbolic counts, keyed by (input_label, other_label).
        res_ = {('B', 'a'): {'B': (((2 * n) * ((13 * (n ** 2)) - 1)) / 3), 'a': (((2 * n) * (((11 * (n ** 2)) + (6 * n)) + 1)) / 3)}, ('a', 'B'): {'B': (((2 * n) * ((19 * (n ** 2)) - 1)) / 3), 'a': (((2 * n) * (((5 * (n ** 2)) + (6 * n)) + 1)) / 3)}}
    print(res_)
    # Check each symbolic count against the explicit one at n = n_.
    for (key, counts) in res_.items():
        assert (set(counts.keys()) == {BlankLabel, Label1})
        for (label, c) in counts.items():
            c__ = c.subs(n, n_).doit()
            print(('%s, label %s ->' % (key, label)), c, '=', c__)
            assert (c__ == res[key][label])
    # Same, but additionally resolved by the fixed factor power c.
    c = fixed_factor_power = sympy.Symbol('c', integer=True, nonnegative=True)
    if recalc:
        res__ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, factor=1, fixed_factor_power=fixed_factor_power)
    else:
        # Precomputed piecewise counts in (n, c).
        res__ = {('B', 'a'): {'B': sympy.Piecewise(((((2 * n) * ((4 * (n ** 2)) - 1)) / 3), sympy.Eq(c, (2 * n))), (((2 * n) * (c + (2 * n))), sympy.Eq((c - (2 * n)), (- 1))), (((2 * n) * ((2 * c) + 1)), ((c >= n) & ((c - (2 * n)) <= (- 1)))), (((4 * n) * ((c - n) + 1)), (((c - n) >= (- 1)) & ((c - (2 * n)) <= (- 1)))), (0, True)), 'a': sympy.Piecewise(((((2 * n) * (((2 * (n ** 2)) + (3 * n)) + 1)) / 3), sympy.Eq(c, (2 * n))), (((2 * n) * ((- c) + (4 * n))), ((c >= n) & ((c - (2 * n)) <= (- 1)))), (((2 * n) * (c + 1)), ((c - n) <= (- 1))), (((2 * n) * (((- c) + (2 * n)) - 1)), (((c - n) >= (- 1)) & ((c - (2 * n)) <= (- 1)))), (0, True))}, ('a', 'B'): {'B': sympy.Piecewise(((n * (((n ** 2) + (2 * n)) + 1)), sympy.Eq(c, 0)), (((n * (((5 * (n ** 2)) + (3 * n)) - 2)) / 3), sympy.Eq(c, (2 * n))), ((n * (((2 * c) + (3 * n)) + 1)), ((c - (2 * n)) <= (- 1))), ((4 * (n ** 2)), sympy.Eq((c - (2 * n)), (- 1))), (0, True)), 'a': sympy.Piecewise(((n * (((n ** 2) + (2 * n)) + 1)), sympy.Eq(c, 0)), (((n * (((n ** 2) + (3 * n)) + 2)) / 3), sympy.Eq(c, (2 * n))), ((n * (n + 1)), ((c - (2 * n)) <= (- 1))), (0, True))}}
    print(res__)
    for (key, counts) in res__.items():
        assert (set(counts.keys()) == {BlankLabel, Label1})
        for (label, c) in counts.items():
            print(('%s, label %s ->' % (key, label)), c)
            if check:
                # Summing over all fixed powers must recover the total count.
                c_sum = 0
                for fixed_count in range(0, ((2 * n_) + 1)):
                    c__ = c.subs({n: n_, fixed_factor_power: fixed_count}).doit()
                    print((' c %i -> %i' % (fixed_count, c__)))
                    c_sum += c__
                assert (c_sum == res[key][label])
        # Study the sign of the blank-vs-label count difference as a
        # function of the fixed factor power c.
        d = (counts[BlankLabel] - counts[Label1])
        d = d.simplify()
        d = sympy_utils.simplify_and(d)
        print('diff:', d)
        # The sign change is around c = (4n - 1 + i) / 3; try the three
        # residues i so that one of them makes it a natural number.
        for i in range(3):
            print(('assume ((4 * n - 1 + i) / 3) natural number, i = %i.' % i))
            print((' first pos i%i' % i), d.subs(fixed_factor_power, ((((4 * n) - 1) + i) / 3)).simplify())
            print((' last neg i%i' % i), d.subs(fixed_factor_power, (((((4 * n) - 1) + i) / 3) - 1)).simplify())
            s_pos = sympy_utils.sum_over_piecewise(d, fixed_factor_power, ((((4 * n) - 1) + i) / 3), (2 * n), extra_condition=sympy.Ge(n, 4))
            print((' diff sum c={(4n-1+%i)/3}^2n:' % i), s_pos)
            s_neg = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (((((4 * n) - 1) + i) / 3) - 1), extra_condition=sympy.Ge(n, 4))
            print((' diff sum c=0^{(4n-1+%i)/3-1}:' % i), s_neg)
            print(' tot:', (s_pos + s_neg).simplify())
        # Partial sums of the difference and of the blank count itself.
        s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (2 * n))
        print('diff sum c=0^2n:', s)
        s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, n, (2 * n))
        print('diff sum c=n^2n:', s)
        s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (n - 1))
        print('diff sum c=0^{n-1}:', s)
        s = sympy_utils.sum_over_piecewise(counts[BlankLabel], fixed_factor_power, 0, (2 * n))
        print('blank sum c=0^2n:', s)
    if check_with_factor:
        # Repeat with a dominance factor: explicit at prob_dom=0.6, symbolic
        # with free n and factor.
        res = count_all_paths_with_label_seq_partly_dominated_inefficient(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, prob_dom=0.6, verbosity=1)
        print(res)
        res_ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n, factor=factor)
        print(res_)
        for (key, counts) in res_.items():
            print('key:', key)
            assert (set(counts.keys()) == {BlankLabel, Label1})
            d = (counts[BlankLabel] - counts[Label1])
            print(d)
|
def gen_model_1label():
    r"""
    \sum_{s:y} p(x|s),
    two possible inputs x1 (1,0) and x2 (0,1),
    two possible labels "a" and (blank) "B".
    Define p(x1|s=a) = theta_a, p(x2|s=a) = 1 - theta_a,
    p(x2|s=B) = theta_B, p(x1|s=B) = 1 - theta_B.

    For simplicity, fsa ^= a*B*, and the input be x1^{na},x2^{nB}, T = na + nB.
    Then we can just count. All alignments can be iterated through by t=0...T.
    Symmetric case...
    """
    na = sympy.Symbol('na', integer=True, positive=True)
    nb = sympy.Symbol('nb', integer=True, positive=True)
    theta_a = sympy.Symbol('theta_a', real=True, nonnegative=True)
    theta_b = sympy.Symbol('theta_b', real=True, nonnegative=True)
    t = sympy.Symbol('t', integer=True, nonnegative=True)
    # Alignment with boundary t: t frames of "a", then T - t frames of "B";
    # Min/Max clamp the exponents to the correct/wrong input segments.
    p1 = sympy.Pow(theta_a, sympy.Min(t, na))
    p2 = sympy.Pow((1 - theta_a), sympy.Max((t - na), 0))
    p3 = sympy.Pow(theta_b, sympy.Min(((na + nb) - t), nb))
    p4 = sympy.Pow((1 - theta_b), sympy.Max((na - t), 0))
    sum_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, 0, (na + nb)))
    # NOTE(review): everything from here down to s2 (s0, sum1_, s1, sum2_, s2)
    # re-derives the same sum split into segments t=0, t=0..na, t=na,
    # t=na+1..na+nb, t=na+nb, but none of these are used afterwards.
    p1 = 1
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = sympy_utils.polynomial_exp(1, (- theta_b), na, expand=False)
    s0 = (((p1 * p2) * p3) * p4)
    p1 = sympy.Pow(theta_a, t)
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = sympy_utils.polynomial_exp(1, (- theta_b), (na - t), expand=False)
    sum1_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, 0, na))
    p1 = sympy.Pow(theta_a, na)
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = 1
    s1 = (((p1 * p2) * p3) * p4)
    p1 = sympy.Pow(theta_a, na)
    p2 = sympy_utils.polynomial_exp(1, (- theta_a), (t - na), expand=False)
    p3 = sympy.Pow(theta_b, ((na + nb) - t))
    p4 = 1
    sum2_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, (na + 1), (na + nb)))
    p1 = sympy.Pow(theta_a, na)
    p2 = sympy_utils.polynomial_exp(1, (- theta_a), nb, expand=False)
    p3 = 1
    p4 = 1
    s2 = (((p1 * p2) * p3) * p4)
    # Repeated simplification of the closed-form sum.
    for _ in range(6):
        sum_ = sum_.simplify()
        print(sum_)
    # Evaluate on a grid theta in [0, 1]^2 with na = nb = 10.
    # NOTE(review): `values` is allocated but never written or read.
    sum__ = sum_.subs(na, 10).subs(nb, 10)
    xs = ys = numpy.linspace(0, 1.0, num=11)
    values = numpy.zeros((len(xs), len(ys)))
    for (ix, x) in enumerate(xs):
        for (iy, y) in enumerate(ys):
            value = sum__.subs(theta_a, x).subs(theta_b, y).doit()
            print(('theta = (%f, %f) -> sum = %s' % (x, y, value)))
    # The second assignment overrides the first: only theta_a is varied.
    syms = (theta_a, theta_b)
    syms = (theta_a,)
    sum_diff = sum_.diff(*syms)
    print('diff:', sum_diff)
    for _ in range(5):
        sum_diff = sum_diff.simplify()
        print(sum_diff)
    opts = sympy.solve(sum_diff, *syms)
    print('num opts:', len(opts))
    print('opts:', opts)
|
def gen_model_1label_bab():
    """
    Generative model for the label sequence B a B (blank, label, blank):
    sum the path probability over the two segment boundaries t1 < t2,
    then look for stationary points w.r.t. theta_a.
    """
    n = sympy.Symbol('n', integer=True, positive=True)
    t_end = (n * 4)
    theta_a = sympy.Symbol('theta_a', real=True, nonnegative=True)
    theta_b = sympy.Symbol('theta_b', real=True, nonnegative=True)
    t1 = sympy.Symbol('t1', integer=True, nonnegative=True)
    t2 = sympy.Symbol('t2', integer=True, nonnegative=True)
    num_correct_b = sympy.Symbol('num_correct_b', integer=True, nonnegative=True)
    num_wrong_a = ((2 * n) - num_correct_b)
    # NOTE(review): num_correct_b_left is defined but never used below.
    num_correct_b_left = sympy.Symbol('num_correct_b_left', integer=True, nonnegative=True)
    num_correct_a = sympy.Symbol('num_correct_a', integer=True, nonnegative=True)
    num_wrong_b = 0
    p1 = sympy.Pow(theta_b, num_correct_b)
    p2 = sympy_utils.polynomial_exp(1, (- theta_b), num_wrong_b)
    p3 = sympy.Pow(theta_a, num_correct_a)
    # NOTE(review): this term uses theta_b for the wrong-"a" frames, not
    # theta_a -- possibly intentional, but verify against the model definition.
    p4 = sympy_utils.polynomial_exp(1, (- theta_b), num_wrong_a)
    p = (((p1 * p2) * p3) * p4)
    # Sum over all boundary pairs 0 <= t1 < t2 <= t_end.
    p = sympy.Sum(p, (t2, (t1 + 1), t_end))
    p = sympy.Sum(p, (t1, 0, (t_end - 1)))
    print(p)
    for _ in range(4):
        p = p.simplify()
        print(p)
    diff = p.diff(theta_a)
    print('diff:', diff)
    print(sympy.solve(diff, theta_a))
|
def gen_model_fsa_template_via_matrix(fsa: Fsa, label_seq_template: str):
    """
    Generative model via symbolic transition-matrix powers: full sum over
    all FSA paths for an input repeating each template symbol n times,
    then study the optimum w.r.t. theta_b.

    :param Fsa fsa: the alignment FSA
    :param str label_seq_template: input template; each symbol spans n frames
    """
    n = sympy.Symbol('n', integer=True, nonnegative=True)
    # NOTE(review): num_frames is computed but never used below.
    num_frames = (len(label_seq_template) * n)
    states = sorted(fsa.states)
    input_labels = sorted(set(label_seq_template))
    labels = fsa.get_labels()
    # Generative direction: per label, one free probability per input symbol
    # except the last input, which is fixed to 1 - sum(others)
    # (distribution over inputs given the label).
    probs_by_label_by_input = {input_label: {} for input_label in input_labels}
    prob_vars = []
    for label in labels:
        s = 0
        for input_label in input_labels[:(- 1)]:
            probs_by_label_by_input[input_label][label] = sympy.Symbol(('prob_%s_in_%s' % (label, input_label)))
            s += probs_by_label_by_input[input_label][label]
            prob_vars.append(probs_by_label_by_input[input_label][label])
        probs_by_label_by_input[input_labels[(- 1)]][label] = (1 - s)
    print(prob_vars)
    # Assumes exactly two free variables (two labels, two inputs).
    (theta_a, theta_b) = prob_vars
    # NOTE(review): hard-coded for exactly 3 states although `states` is
    # computed above -- confirm the FSA passed in always has 3 states.
    initial_vec = sympy.Matrix([[1, 0, 0]])
    final_vec = sympy.Matrix([([1] if (state in fsa.final_states) else [0]) for state in states])
    v = initial_vec
    trans_mat_product = None
    for i in range(len(label_seq_template)):
        probs_by_label = probs_by_label_by_input[label_seq_template[i]]
        trans_mat = sympy.Matrix([[fsa.get_deterministic_source_to_target_prob(source_state=src_state, target_state=tgt_state, probs_by_label=probs_by_label) for tgt_state in states] for src_state in states])
        # Symbolic n-th matrix power: n repeated frames of this input symbol.
        trans_mat = sympy.Pow(trans_mat, n)
        v *= trans_mat
        # NOTE(review): trans_mat_product is accumulated but never used below.
        if (trans_mat_product is None):
            trans_mat_product = trans_mat
        else:
            trans_mat_product *= trans_mat
    res = (v * final_vec)
    assert (res.shape == (1, 1))
    sum_ = res[(0, 0)]
    print('sum:', sum_)
    # range(0): simplification deliberately disabled (too slow?).
    for _ in range(0):
        sum_ = sum_.simplify()
        print('sum simplified:', sum_)
    print('max:', sympy.maximum(sum_, theta_a, sympy.Interval(0, 1)))
    # The second assignment overrides the first: only theta_b is varied.
    syms = (theta_a, theta_b)
    syms = (theta_b,)
    sum_diff = sum_.diff(*syms)
    print('diff:', sum_diff)
    # range(0): simplification deliberately disabled here as well.
    for _ in range(0):
        sum_diff = sum_diff.simplify()
        print('diff simplified:', sum_diff)
    opts = sympy.solve(sum_diff, *syms)
    print('num opts:', len(opts))
    print('opts:', opts)
|
def test_tf_grad_log_sm():
    """
    Inspect TF gradients of log-softmax through a small 3-state forward
    recursion in log space (-inf marks an unreachable state).

    NOTE(review): uses TF1-only APIs (tf.Session, tf.is_finite);
    will not run on TF2 without compat shims.
    """
    import tensorflow as tf
    print('TF version:', tf.__version__)
    with tf.Session() as session:
        # 2 time frames x 3 classes, all-zero logits.
        x = tf.constant([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        y = tf.nn.log_softmax(x)
        # Initially only state 0 is reachable (score 0 = log 1).
        scores = [0.0, float('-inf'), float('-inf')]
        def combine(s_, y_):
            # Add y_ to s_, but keep -inf (unreachable stays unreachable).
            return tf.where(tf.is_finite(s_), (s_ + y_), s_)
        # Forward recursion: state i at time t reachable from states i-1 and i.
        for t in range(2):
            ys = y[t]
            scores = [combine(scores[0], ys[0]), tf.reduce_logsumexp([combine(scores[0], ys[1]), combine(scores[1], ys[1])]), tf.reduce_logsumexp([combine(scores[1], ys[2]), combine(scores[2], ys[2])])]
        z = scores[(- 1)]
        (dx,) = tf.gradients(z, x)
        print(session.run(dx))
|
def test_ctc():
    """
    Compare TF's builtin ctc_loss (value and gradient) against the
    FSA-based full-sum implementation on random logits; both must agree
    numerically.

    NOTE(review): uses TF1-only APIs (tf.Session, tf.random_normal,
    the tf.nn.ctc_loss signature with inputs=/sequence_length=).
    """
    import tensorflow as tf
    print('TF version:', tf.__version__)
    fsa = get_std_fsa_3label_blank()
    num_batch = 1
    num_frames = 100
    num_labels = 4
    with tf.Session() as session:
        # Single batch entry with the label sequence 0, 1, 2 (sparse format).
        labels = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2]], values=[0, 1, 2], dense_shape=[num_batch, 3])
        logits = tf.random_normal((num_frames, num_batch, num_labels), seed=42)
        logits_normalized = tf.nn.log_softmax(logits)
        score1 = tf.nn.ctc_loss(labels=labels, inputs=logits, sequence_length=([num_frames] * num_batch))
        # The FSA full sum expects normalized log-probs; negate to get a loss.
        score2 = (- fsa.tf_get_full_sum(logits=logits_normalized))
        (dscore1,) = tf.gradients(score1, logits)
        (dscore2,) = tf.gradients(score2, logits)
        res = session.run({'score1': score1, 'score2': score2, 'dscore1': dscore1, 'dscore2': dscore2})
        pprint(res)
        numpy.testing.assert_allclose(res['score1'], res['score2'], rtol=1e-05)
        numpy.testing.assert_allclose(res['dscore1'], res['dscore2'], atol=0.0001)
|
def main():
    """
    Entry point: with a command-line argument, dispatch to the module-level
    function named by argv[1]; otherwise run the default experiment suite
    on the single-label FSA.

    NOTE(review): a second ``def main`` further down in this file shadows
    this definition at import time -- confirm which one is intended.
    """
    if (len(sys.argv) >= 2):
        # argv[1] names a function in this module; call it and stop.
        globals()[sys.argv[1]]()
        return
    label_seq_template = Label1StrTemplate
    fsa = get_std_fsa_1label()
    print('fsa:', fsa)
    assert fsa.is_deterministic_by_label()
    assert match(fsa=fsa, input_seq=label_seq_template)
    num_frames = 16
    # Upper bound: every frame could take any label independently.
    print(('T=%i, labels=%r count of all paths:' % (num_frames, len(fsa.get_labels()))), (len(fsa.get_labels()) ** num_frames))
    test_count_all_paths(fsa=fsa, num_frames=num_frames)
    test_count_all_paths_with_label_in_frame(fsa=fsa, num_frames=num_frames, frame_idx=0, label=Label1)
    test_count_all_paths_with_label_in_frame(fsa=fsa, num_frames=num_frames, frame_idx=(num_frames // 2), label=Label1)
    for t in range(num_frames):
        test_count_all_paths_with_label_in_frame(fsa=fsa, num_frames=num_frames, frame_idx=t, label=BlankLabel)
    count_paths_with_label(fsa=fsa, num_frames=(num_frames * 2), label=BlankLabel)
    print('Relevant for bias model:')
    count_all_paths_with_label_avg(fsa=fsa, label=BlankLabel, num_frames=num_frames)
    count_all_paths_with_label_avg(fsa=fsa, label=Label1)
    bias_model(fsa=fsa, num_frames=5)
    bias_model_1label(num_frames=5)
    print('Relevant for FFNN / generative model:')
    count_all_paths_with_label_seq(fsa=fsa, label_seq_template=label_seq_template)
|
def plot_alignment(alignment, labels, filename=None):
    """
    Plot a label alignment over time, one curve per label.

    :param list[int]|list[list[float]] alignment: either a hard alignment
        (one 1-based label index per frame) or soft posteriors
        (one probability vector of len(labels) per frame)
    :param list[str] labels: label names; the last one is drawn light gray
        and dotted (blank/silence style) and gets no text annotation
    :param str|None filename: if given, save to target_dir/filename,
        otherwise show the plot interactively
    """
    num_labels = len(labels)
    num_frames = len(alignment)
    ts = range(num_frames)
    if isinstance(alignment[0], list):
        # Soft alignment: (frames x labels) -> (labels x frames).
        assert (len(alignment[0]) == num_labels)
        ss = numpy.array(alignment).transpose()
        assert (ss.shape == (num_labels, num_frames))
    else:
        # Hard alignment with 1-based label indices -> one-hot curves.
        # NOTE(review): requires the last label index to actually occur.
        assert (max(alignment) == num_labels)
        ss = [([0.0] * num_frames) for _ in range(num_labels)]
        for (t, a) in enumerate(alignment):
            ss[(a - 1)][t] = 1.0
    fig = plt.figure(frameon=False, figsize=(5, 0.8))
    extra_height = 0.2
    ax = fig.add_axes([0, extra_height, 1, (1.0 - (2 * extra_height))])
    ax.axis('off')
    assert isinstance(fig, plt.Figure)
    assert isinstance(ax, plt.Axes)
    for (i, s) in enumerate(ss):
        kwargs = {}
        if (i == (len(ss) - 1)):
            # Last label (blank/silence): dotted light gray.
            kwargs.update(dict(color='lightgray', linestyle='dotted'))
        ax.plot(ts, s, **kwargs)
        if (i < (len(ss) - 1)):
            # Annotate at the center of the first maximal plateau.
            # (numpy.argmax returns the first occurrence of the maximum.)
            xmax = numpy.argmax(s)
            ymax = s[xmax]
            xmax2 = xmax
            # Bug fix: bound the scan so it cannot run past the end of s
            # (previously raised IndexError when the maximal plateau
            # extended to the last frame).
            while (xmax2 < len(s)) and (s[xmax2] == ymax):
                xmax2 += 1
            xmax = (((xmax2 - 1) + xmax) / 2.0)
            ax.annotate(labels[i], xy=(xmax, ymax), ha='center', fontsize=16)
    if filename:
        filename = ('%s/%s' % (target_dir, filename))
        print('save figure:', filename)
        fig.savefig(filename)
    else:
        plt.show()
|
def main():
labels = ['p', 'ih', 'ng', 'sil']
labels_blank = (labels[:(- 1)] + ['blank'])
n_ = 10
align_opt_sil = (((((([4] * 2) * n_) + ([1] * n_)) + (([2] * 3) * n_)) + (([3] * 2) * n_)) + (([4] * 2) * n_))
align_peaky_sil = [(x + 1) for x in [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
align_peaky_blank = [(x + 1) for x in [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
assert (len(align_peaky_sil) == len(align_peaky_blank) == len(align_opt_sil))
posteriors_blank = [[1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 
5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [0.11329136788845062, 7.64282859222476e-08, 5.0091447434397196e-08, 0.8867084980010986], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 
0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [4.815770449084766e-09, 0.03436011075973511, 1.1281462874990211e-08, 0.9656398296356201], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 
0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [8.512160754037268e-09, 1.6339818387223204e-08, 0.051136527210474014, 0.9488635063171387], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 
4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0], [1.2303150165315913e-10, 4.677016551823954e-10, 2.2791392384480247e-10, 1.0]]
posteriors_sil = [[5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], 
[5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [5.60422863671306e-09, 6.19781515606016e-13, 2.275723880174052e-11, 1.0], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 
0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [0.034249208867549896, 0.03456207364797592, 0.03506220132112503, 0.8961265087127686], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], 
[4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [4.967092709362575e-11, 3.9665035700409135e-09, 6.305710121523589e-05, 0.9999369382858276], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 
8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0], [5.429683178764799e-12, 1.506674125739682e-11, 8.984882282625506e-11, 1.0]]
# Render the example alignments / posteriors defined above as PDF plots.
# NOTE(review): the last call pairs posteriors_blank with labels (not labels_blank),
# unlike the align_peaky_blank call above -- looks inconsistent; confirm intended.
plot_alignment(alignment=align_opt_sil, labels=labels, filename='ex-ping-opt-sil.pdf')
plot_alignment(alignment=align_peaky_sil, labels=labels, filename='ex-ping-peaky-sil.pdf')
plot_alignment(alignment=align_peaky_blank, labels=labels_blank, filename='ex-ping-peaky-blank.pdf')
plot_alignment(alignment=posteriors_sil, labels=labels, filename='ex-ping-posteriors-sil.pdf')
plot_alignment(alignment=posteriors_blank, labels=labels, filename='ex-ping-posteriors-blank.pdf')
|
def range_from_relationals(and_expr: typing.Union[(sympy.And, sympy.Rel)], gen: sympy.Symbol) -> typing.Tuple[typing.Optional[sympy.Basic], typing.Optional[sympy.Basic]]:
    """
    Extracts the inclusive range that a conjunction of relationals imposes
    on the symbol `gen`.

    Assumes the expression is already simplified such that every relational
    solves to the canonical form ``gen >= rhs``, ``gen <= rhs`` or
    ``gen == rhs``, with each operator occurring at most once, and that
    `gen` is integer-valued.

    :param and_expr: a single relational, or an And of relationals, each of
        which must have `gen` among its free symbols
    :param gen: the symbol whose range is extracted
    :return: (start, end) inclusive bounds; either can be None, meaning
        unbounded on that side. For an equality constraint, start == end.
    """
    if isinstance(and_expr, sympy.Rel):
        args = [and_expr]
    else:
        assert isinstance(and_expr, sympy.And)
        args = and_expr.args
    assert all([(isinstance(part, sympy.Rel) and (gen in part.free_symbols)) for part in args])
    rel_ops = ['>=', '<=', '==']
    rhs_by_c = {}
    for part in args:
        assert isinstance(part, sympy.Rel)
        # Bring the relational into canonical form `gen <op> rhs`.
        part = _solve_inequality(part, gen)
        assert isinstance(part, sympy.Rel)
        assert (part.lhs == gen)
        (rel_op, rhs) = (part.rel_op, part.rhs)
        assert (rel_op in rel_ops)
        assert (rel_op not in rhs_by_c)  # each operator may occur only once (assumed simplified)
        rhs_by_c[rel_op] = rhs
    if ('==' in rhs_by_c):
        # An equality must be the only constraint; it pins both bounds.
        assert (set(rhs_by_c.keys()) == {'=='})
        return (rhs_by_c['=='], rhs_by_c['=='])
    return (rhs_by_c.get('>=', None), rhs_by_c.get('<=', None))
|
def simplify_and(x: sympy.Basic, gen: typing.Optional[sympy.Symbol]=None, extra_conditions: typing.Optional[sympy.Basic]=True) -> sympy.Basic:
    """
    Simplifies And-expressions using rules that SymPy currently does not
    apply automatically.

    :param x: expression to simplify (And sub-expressions inside are rewritten in-place via replace)
    :param gen: preferred main symbol; if None, symbols are discovered from `x`
    :param extra_conditions: extra assumptions used when deciding whether a bound is redundant
    :return: the (possibly) simplified expression
    """
    assert isinstance(x, sympy.Basic), ('type x: %r' % type(x))
    from sympy.solvers.inequalities import reduce_rational_inequalities
    from sympy.core.relational import Relational
    # Collect candidate symbols: `gen` first, then symbols appearing in
    # equalities, then all remaining free symbols (order matters below).
    syms = []
    if (gen is not None):
        syms.append(gen)
    w1 = sympy.Wild('w1')
    w2 = sympy.Wild('w2')
    for sub_expr in x.find(sympy.Eq(w1, w2)):
        m = sub_expr.match(sympy.Eq(w1, w2))
        ws_ = (m[w1], m[w2])
        for w_ in ws_:
            if (isinstance(w_, sympy.Symbol) and (w_ not in syms)):
                syms.append(w_)
    for w_ in x.free_symbols:
        if (w_ not in syms):
            syms.append(w_)
    if (len(syms) >= 1):
        _c = syms[0]  # primary symbol the rules below are phrased in
        if (len(syms) >= 2):
            n = syms[1]  # secondary symbol; wildcard if there is none
        else:
            n = sympy.Wild('n')
    else:
        # No symbols at all -> nothing to do.
        return x
    # Rule: (c - 2n >= -1) & (c - 2n <= -1)  ==>  c == 2n - 1.
    x = x.replace((((_c - (2 * n)) >= (- 1)) & ((_c - (2 * n)) <= (- 1))), sympy.Eq(_c, ((2 * n) - 1)))
    apply_rules = True
    while apply_rules:
        apply_rules = False
        for and_expr in x.find(sympy.And):
            assert isinstance(and_expr, sympy.And)
            # Let SymPy normalize the rational inequalities first.
            and_expr_ = reduce_rational_inequalities([and_expr.args], _c)
            if (and_expr_ != and_expr):
                x = x.replace(and_expr, and_expr_)
                and_expr = and_expr_
            if (and_expr == sympy.sympify(False)):
                continue
            if isinstance(and_expr, sympy.Rel):
                continue
            assert isinstance(and_expr, sympy.And)
            and_expr_args = list(and_expr.args)
            if all([(isinstance(part, Relational) and (_c in part.free_symbols)) for part in and_expr_args]):
                # Every conjunct constrains _c: group the bounds by operator
                # and drop redundant ones.
                rel_ops = ['==', '>=', '<=']
                if (not (_c.is_Integer or _c.assumptions0['integer'])):
                    # Strict comparisons can only be rewritten away for integer _c.
                    rel_ops.extend(['<', '>'])
                rhs_by_c = {op: [] for op in rel_ops}
                for part in and_expr_args:
                    assert isinstance(part, Relational)
                    part = _solve_inequality(part, _c)
                    assert isinstance(part, Relational)
                    assert (part.lhs == _c)
                    (rel_op, rhs) = (part.rel_op, part.rhs)
                    if (_c.is_Integer or _c.assumptions0['integer']):
                        # For integer _c, turn strict into non-strict bounds.
                        if (rel_op == '<'):
                            rhs = (rhs - 1)
                            rel_op = '<='
                        elif (rel_op == '>'):
                            rhs = (rhs + 1)
                            rel_op = '>='
                    assert (rel_op in rhs_by_c), ('x: %r, _c: %r, and expr: %r, part %r' % (x, _c, and_expr, part))
                    other_rhs = rhs_by_c[rel_op]
                    assert isinstance(other_rhs, list)
                    need_to_add = True
                    for rhs_ in other_rhs:
                        # Under extra_conditions, keep only the tighter of two
                        # bounds with the same operator (recursive self-call).
                        cmp = Relational.ValidRelationOperator[rel_op](rhs, rhs_)
                        if (simplify_and(sympy.And(sympy.Not(cmp), extra_conditions)) == sympy.sympify(False)):
                            other_rhs.remove(rhs_)
                            break
                        elif (simplify_and(sympy.And(cmp, extra_conditions)) == sympy.sympify(False)):
                            need_to_add = False
                            break
                    if need_to_add:
                        other_rhs.append(rhs)
                if (rhs_by_c['>='] and rhs_by_c['<=']):
                    # Cross-check lower vs upper bounds: contradiction -> False,
                    # equal bounds -> forced equality.
                    all_false = False
                    for lhs in rhs_by_c['>=']:
                        for rhs in rhs_by_c['<=']:
                            if (sympy.Lt(lhs, rhs) == sympy.sympify(False)):
                                all_false = True
                            if (sympy.Eq(lhs, rhs) == sympy.sympify(True)):
                                rhs_by_c['=='].append(lhs)
                    if all_false:
                        x = x.replace(and_expr, False)
                        continue
                if rhs_by_c['==']:
                    # Multiple equalities must agree; otherwise the And is False.
                    all_false = False
                    while (len(rhs_by_c['==']) >= 2):
                        (lhs, rhs) = rhs_by_c['=='][:2]
                        if (sympy.Eq(lhs, rhs) == sympy.sympify(False)):
                            all_false = True
                            break
                        elif (sympy.Eq(lhs, rhs) == sympy.sympify(True)):
                            rhs_by_c['=='].pop(1)
                        else:
                            raise NotImplementedError(('cannot cmp %r == %r. rhs_by_c %r' % (lhs, rhs, rhs_by_c)))
                    if all_false:
                        x = x.replace(and_expr, False)
                        continue
                    # Substitute the equality value into the remaining bounds.
                    new_parts = [sympy.Eq(_c, rhs_by_c['=='][0])]
                    for op in rel_ops:
                        for part in rhs_by_c[op]:
                            new_parts.append(Relational.ValidRelationOperator[op](rhs_by_c['=='][0], part).simplify())
                else:
                    new_parts = []
                    for op in rel_ops:
                        for part in rhs_by_c[op]:
                            new_parts.append(Relational.ValidRelationOperator[op](_c, part))
                assert new_parts
                and_expr_ = sympy.And(*new_parts)
                x = x.replace(and_expr, and_expr_)
                and_expr = and_expr_
            # Hand-written structural rules in terms of _c and n below.
            # NOTE(review): several of these (e.g. c == 2n contradicting
            # c <= n - 1) implicitly assume something like n > 0; this is not
            # visible from the code here -- confirm against the callers.
            if (sympy.Eq(_c, (2 * n)) in and_expr.args):
                if (((_c - (2 * n)) <= (- 1)) in and_expr.args):
                    x = x.replace(and_expr, False)
                    continue
                if (sympy.Eq((_c - (2 * n)), (- 1)) in and_expr.args):
                    x = x.replace(and_expr, False)
                    continue
                if (((_c - n) <= (- 1)) in and_expr.args):
                    x = x.replace(and_expr, False)
                    continue
                if (((_c >= n) in and_expr.args) and (((_c - n) <= (- 1)) in and_expr.args)):
                    x = x.replace(and_expr, False)
                    continue
            if (sympy.Eq((_c - (2 * n)), (- 1)) in and_expr.args):
                # c == 2n - 1 is treated as making c >= n / c >= n - 1 redundant.
                if ((_c >= n) in and_expr.args):
                    x = x.replace(and_expr, sympy.And(*[arg for arg in and_expr.args if (arg != (_c >= n))]))
                    apply_rules = True
                    break
                if (((_c - n) >= (- 1)) in and_expr.args):
                    x = x.replace(and_expr, sympy.And(*[arg for arg in and_expr.args if (arg != ((_c - n) >= (- 1)))]))
                    apply_rules = True
                    break
            if ((_c >= n) in and_expr.args):
                # c >= n subsumes c >= n - 1.
                if (((_c - n) >= (- 1)) in and_expr.args):
                    x = x.replace(and_expr, sympy.And(*[arg for arg in and_expr.args if (arg != ((_c - n) >= (- 1)))]))
                    apply_rules = True
                    break
            if ((((_c - n) >= (- 1)) in and_expr.args) and (((_c - n) <= (- 1)) in and_expr.args)):
                # (c - n >= -1) & (c - n <= -1)  ==>  c - n == -1.
                args = list(and_expr.args)
                args.remove(((_c - n) >= (- 1)))
                args.remove(((_c - n) <= (- 1)))
                args.append(sympy.Eq((_c - n), (- 1)))
                if (((_c - (2 * n)) <= (- 1)) in args):
                    args.remove(((_c - (2 * n)) <= (- 1)))
                x = x.replace(and_expr, sympy.And(*args))
                apply_rules = True
                break
    return x
|
def sum_over_piecewise(expr: sympy.Piecewise, sum_var: sympy.Symbol, sum_start: typing.Union[(sympy.Basic, int)], sum_end: sympy.Basic, extra_condition: sympy.Basic=True) -> sympy.Expr:
    """
    Equivalent to ``Sum(expr, (sum_var, sum_start, sum_end))``, but tries to
    eliminate the Piecewise by splitting the summation range per condition.

    We assume that the piecewise conditions also depend on `sum_var`, and that
    `sum_var` is an integer symbol.

    :param expr: Piecewise expression to sum over
    :param sum_var: integer summation variable
    :param sum_start: first summation index (inclusive)
    :param sum_end: last summation index (inclusive)
    :param extra_condition: extra assumptions used while simplifying conditions
    :return: the evaluated sum, with the Piecewise removed where possible
    """
    assert (sum_var.is_Integer or sum_var.assumptions0['integer'])
    assert isinstance(expr, sympy.Piecewise)
    res = sympy.sympify(0)
    cond_start = sympy.Ge(sum_var, sum_start)
    cond_end = sympy.Le(sum_var, sum_end)
    # Sorted list of already-covered (start, end) ranges. The two sentinel
    # entries mark everything outside [sum_start, sum_end] as covered.
    prev_ranges = [(None, (sum_start - 1)), ((sum_end + 1), None)]
    def check(cond__):
        # True if cond__ is implied by extra_condition, False if it
        # contradicts it, None if undecidable.
        false_cond = simplify_and(sympy.And(sympy.Not(cond__), extra_condition))
        if (false_cond == sympy.sympify(False)):
            return True
        true_cond = simplify_and(sympy.And(cond__, extra_condition))
        if (true_cond == sympy.sympify(False)):
            return False
        return None
    for (value, cond) in expr.args:
        j = 0
        while (j < (len(prev_ranges) - 1)):
            # Restrict cond to the gap between covered ranges j and j+1.
            cond_r_start = sympy.Ge(sum_var, (prev_ranges[j][1] + 1))
            cond_r_end = sympy.Le(sum_var, (prev_ranges[(j + 1)][0] - 1))
            cond = sympy.And(cond, cond_start, cond_end, cond_r_start, cond_r_end)
            cond_ = simplify_and(cond, sum_var, extra_conditions=extra_condition)
            if (cond_ == sympy.sympify(False)):
                j += 1
                continue
            if isinstance(cond_, sympy.And):
                # Split off conjuncts not involving sum_var; they must hold
                # globally under extra_condition, otherwise skip this gap.
                if any([(sum_var not in part.free_symbols) for part in cond_.args]):
                    new_extra_conditions = [part for part in cond_.args if (sum_var not in part.free_symbols)]
                    new_extra_condition = sympy.And(*new_extra_conditions)
                    if (check(new_extra_condition) is False):
                        j += 1
                        continue
                    assert check(new_extra_condition)
                    cond_ = sympy.And(*[part for part in cond_.args if (sum_var in part.free_symbols)])
            r = range_from_relationals(cond_, sum_var)
            if (r[0] is None):
                r = (sum_start, r[1])
            if (sympy.Eq(r[0], r[1]) == sympy.sympify(True)):
                # Single-element range: substitute directly instead of summing.
                res += value.subs(sum_var, r[0])
            else:
                res += sympy.Sum(value, (sum_var, r[0], r[1])).doit()
            # Insert r into prev_ranges, merging with adjacent ranges.
            for i in range(1, (len(prev_ranges) + 1)):
                assert (i < len(prev_ranges)), ('where to insert %r?' % (r,))
                assert check(sympy.Gt(r[0], prev_ranges[(i - 1)][1]))
                if check(sympy.Eq((r[0] - 1), prev_ranges[(i - 1)][1])):
                    # Adjacent to the previous range: extend it.
                    prev_ranges[(i - 1)] = (prev_ranges[(i - 1)][0], r[1])
                    break
                if (check(sympy.Lt(r[0], prev_ranges[i][0])) or check(sympy.Lt(r[1], prev_ranges[i][0]))):
                    if check(sympy.Eq((r[1] + 1), prev_ranges[i][0])):
                        # Adjacent to the next range: extend it.
                        prev_ranges[i] = (r[0], prev_ranges[i][1])
                    else:
                        prev_ranges.insert(i, r)
                    break
            j = 0  # prev_ranges changed; rescan the gaps from the start
        if ((len(prev_ranges) == 2) and (sympy.Eq(prev_ranges[0][1], prev_ranges[1][0]) == sympy.sympify(True))):
            # The whole [sum_start, sum_end] range is covered; done.
            break
    return res.simplify()
|
def binomial_expansion(a, b, exp):
    """
    Applies the binomial expansion (https://en.wikipedia.org/wiki/Binomial_theorem):
    ``(a + b)**exp == Sum(binomial(exp, i) * a**(exp-i) * b**i, (i, 0, exp))``.

    :param sympy.Expr|int a:
    :param sympy.Expr|int b:
    :param sympy.Expr|int exp: assumed to be a nonnegative integer
    :rtype: sympy.Expr
    """
    idx = sympy.Symbol('i', integer=True, nonnegative=True)
    term = sympy.binomial(exp, idx) * sympy.Pow(a, exp - idx) * sympy.Pow(b, idx)
    return sympy.Sum(term, (idx, 0, exp))
|
def polynomial_exp(a, b, exp, expand=True, flip=True):
    """
    Computes ``(a + b)**exp``, optionally as an expanded binomial sum.

    :param sympy.Expr|int a:
    :param sympy.Expr|int b:
    :param sympy.Expr|int exp: assumed to be a nonnegative integer
    :param bool expand: if True, return the binomial expansion instead of a plain Pow
    :param bool flip: if True, may rewrite via (-a - b)**exp * (-1)**exp when one
        summand is 1 and the other is non-positive
    :rtype: sympy.Expr
    """
    if not expand:
        return sympy.Pow((a + b), exp)
    a = sympy.sympify(a)
    b = sympy.sympify(b)
    flipped = False
    if flip:
        # Flipping only when the comparison is decidable; symbolic
        # comparisons raise TypeError and then we keep the original signs.
        try:
            flipped = bool(((b <= 0) and (a == 1)) or ((a <= 0) and (b == 1)))
        except TypeError:
            flipped = False
        if flipped:
            a, b = -a, -b
    result = binomial_expansion(a, b, exp)
    if flipped:
        result *= sympy.Pow(-1, exp)
    return result
|
class NameAxisLayer(_ConcatInputLayer):
    """
    Adds a DimensionTag to an axis s.t. it will be unique.
    """
    layer_class = 'name_axis'
    def __init__(self, axis, description, **kwargs):
        """
        :param str|int|list[str|int]|tuple[str|int] axis: axis/axes to rename (consumed in get_out_data_from_opts)
        :param str|None|list[str|None]|tuple[str|None] description: new tag description(s)
        """
        super(NameAxisLayer, self).__init__(**kwargs)
        from returnn.tf.layers.base import LayerBase
        batch_dim = LayerBase.get_recent_layer().get_batch_info().dim
        # Broadcast scalar (or length-1) dynamic sizes to the full batch and
        # re-attach the dim tag to the broadcasted tensor. Replacing values of
        # existing keys while iterating items() is safe (no dict resize).
        for (i, dyn_size) in self.output.size_placeholder.items():
            if ((len(dyn_size.shape) == 0) or (dyn_size.shape[0] == 1)):
                dim_tag = DimensionTag.get_tag_from_size_tensor(dyn_size)
                new_dyn_size = tf.broadcast_to(dyn_size, [batch_dim])
                dim_tag.set_tag_on_size_tensor(new_dyn_size)
                dim_tag.dyn_size = new_dyn_size
                self.output.size_placeholder[i] = new_dyn_size
    @classmethod
    def get_out_data_from_opts(cls, name, axis, description, sources, **kwargs):
        """
        :param str name:
        :param str|int|list[str|int]|tuple[str|int] axis:
        :param str|None|list[str|None]|tuple[str|None] description: None removes
            the dynamic size of that axis; otherwise a new DimensionTag with this
            description is attached
        :param list[LayerBase] sources:
        :rtype: Data
        """
        data = Data.get_common_data([s.output for s in sources])
        data = data.copy(name=('%s_output' % name))
        if (not isinstance(axis, (list, tuple))):
            axis = [axis]
        if (not isinstance(description, (list, tuple))):
            description = [description]
        assert (len(axis) == len(description))
        for (ax, descr) in zip(axis, description):
            if isinstance(ax, int):
                # Integer axis indices are interpreted in batch-major layout.
                data = data.copy_as_batch_major()
            if (isinstance(ax, str) and ('|' in ax)):
                # 'a|b|c': use the first axis description that resolves.
                possible_axes = ax.split('|')
                found_ax = None
                for possible_ax in possible_axes:
                    try:
                        found_ax = data.get_axis_from_description(possible_ax)
                        break
                    except:  # noqa -- get_axis_from_description may raise; try the next candidate
                        continue
                assert (found_ax is not None), ('%r: axis %r not found in %r' % (cls, ax, data))
                ax = found_ax
            if (isinstance(ax, str) and (len(ax) >= 3) and (ax[(- 2)] == '+')):
                # Trailing '+<digit>' is an offset relative to the named axis
                # (single digit only, as written).
                ax_offset = int(ax[(- 1)])
                ax = ax[:(- 2)]
            else:
                ax_offset = 0
            ax = (data.get_axis_from_description(ax, allow_int=True) + ax_offset)
            ax_wo_batch = data.get_batch_axis_excluding_batch(ax)
            if (descr is None):
                # description=None: drop the dynamic size for this axis.
                del data.size_placeholder[ax_wo_batch]
            else:
                if (ax_wo_batch in data.size_placeholder):
                    dyn_size = tf.identity(data.size_placeholder[ax_wo_batch])
                else:
                    # Static axis: fabricate a length-1 size tensor from the static dim.
                    assert (data.batch_shape[ax] is not None)
                    dyn_size = tf.constant(data.batch_shape[ax], shape=(1,))
                from returnn.tf.util.basic import DimensionTag
                tag = DimensionTag(description=descr, kind=DimensionTag.Types.Time)
                data.size_placeholder[ax_wo_batch] = dyn_size
                tag.set_tag_on_size_tensor(dyn_size)
        return data
|
def _query_key_time_default(query_time_axis, key_time_axis):
'\n :param None|str query_time_axis:\n :param None|str key_time_axis:\n :rtype: tuple[str,str]\n '
assert ((query_time_axis is None) == (key_time_axis is None))
if (query_time_axis is None):
query_time_axis = 'stag:extern_data:classes'
key_time_axis = 'stag:extern_data:data'
assert query_time_axis.startswith('stag:')
assert key_time_axis.startswith('stag:')
return (query_time_axis, key_time_axis)
|
def make_lsh_hash_gen(d, output, key_dim, num_hashes, num_heads, num_rounds, hash_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0)):
    """
    Adds the layers for a random LSH hash-generator matrix to the network dict.
    The final generator is written into ``d[output]``; intermediate layers use
    the ``output + '_*'`` prefix.

    :param dict[str,dict] d: the network dict to write into
    :param str output: prefix of all layers generated
    :param int key_dim:
    :param int num_hashes: must be even; the matrix holds num_hashes // 2
        columns, the other half is obtained by negation
    :param int num_heads:
    :param int num_rounds:
    :param str hash_init: initializer for the hash generator matrix
    """
    assert num_hashes % 2 == 0
    unnamed_layer = output + '_top_unnamed'
    top_layer = output + '_top'
    bottom_layer = output + '_bottom'
    d[unnamed_layer] = {
        'class': 'variable',
        'shape': (num_heads, num_rounds, key_dim, num_hashes // 2),
        'trainable': False,
        'init': hash_init,
        'add_batch_axis': True,
    }
    d[top_layer] = {
        'class': 'name_axis',
        'axis': ['static:0', 'static:1'],
        'description': ['att-heads', 'att-rounds'],
        'from': [unnamed_layer],
    }
    # The second half of the hash directions is the negation of the first half.
    d[bottom_layer] = {'class': 'eval', 'eval': '-source(0)', 'from': [top_layer]}
    d[output] = {'class': 'copy', 'from': [top_layer, bottom_layer]}
|
def apply_lsh_hash_gen(d, input, hash_gen_input, output, num_hashes, time_axis, hash_mask_value=((2 ** 31) - 1), hash_dropin=0.0):
    """
    Adds layers computing LSH hash classes of ``input`` under the hash
    generator ``hash_gen_input``; the result is written into ``d[output]``.

    :param dict[str,dict] d: the network dict to write into
    :param str input: layer name of the input sequence
    :param str hash_gen_input: layer name of the hash generator (see make_lsh_hash_gen)
    :param str output: prefix for all generated layers
    :param int num_hashes:
    :param str time_axis:
    :param int|None hash_mask_value: value written at padded positions, or None for no masking
    :param float hash_dropin: probability of replacing a hash with a uniformly random one
    """
    linear = output + '_linear'
    sparse = output + '_sparse'
    actual = output + '_actual'
    unmasked = output + '_unmasked'
    # Project the input onto the hash directions and take the argmax class.
    d[linear] = {
        'class': 'dot',
        'from': [hash_gen_input, input],
        'debug': True,
        'red1': 'static:-2',
        'red2': 'F',
        'var1': ['stag:att-rounds', 'static:-1'],
        'var2': time_axis + '?',
        'add_var2_if_empty': False,
    }
    d[sparse] = {'class': 'reduce', 'mode': 'argmax', 'axes': 'static:-1', 'from': [linear]}
    d[actual] = {'class': 'reinterpret_data', 'from': [sparse], 'set_sparse': False, 'set_axes': {'F': None}}
    # Random drop-in: with probability hash_dropin, replace the computed hash
    # by a uniformly random hash class (decided via a dropout mask).
    d[output + '_dropin_decision_ones'] = {
        'class': 'eval',
        'from': [actual],
        'eval': 'tf.ones_like(source(0), dtype="float32")',
        'out_type': {'dtype': 'float32'},
    }
    d[output + '_dropin_decision_float'] = {
        'class': 'dropout',
        'dropout': hash_dropin,
        'dropout_noise_shape': {'B': (- 1), 'except_time': (- 1), 'T': 1},
        'from': [output + '_dropin_decision_ones'],
    }
    d[output + '_dropin_decision'] = {
        'class': 'compare',
        'from': [output + '_dropin_decision_float'],
        'kind': 'greater',
        'value': 0.5,
    }
    d[output + '_dropin_hashes'] = {
        'class': 'eval',
        'eval': 'tf.random.uniform(tf.shape(source(0)), minval=0, maxval=%s, dtype="int32")' % num_hashes,
        'from': [actual],
        'out_type': {'dtype': 'int32'},
    }
    d[unmasked] = {
        'class': 'switch',
        'condition': output + '_dropin_decision',
        'true_from': actual,
        'false_from': output + '_dropin_hashes',
    }
    if hash_mask_value is None:
        d[output] = {'class': 'copy', 'from': [unmasked]}
    else:
        # Mark padded time steps with the (out-of-range) mask value.
        d[output] = {'class': 'seq_len_mask', 'from': [unmasked], 'axis': time_axis, 'mask_value': hash_mask_value}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.