code
stringlengths
17
6.64M
class Configuration():
    """Bundles the corpus, cache directories and training settings for one
    language setup (English, German, or a mix), and provides convenience
    entry points for training, testing and corpus maintenance."""

    def __init__(self,
                 name: str,
                 corpus_from_directory: Callable[([Path], Corpus)],
                 allowed_characters: List[chr] = english_frequent_characters,
                 directories: DataDirectories = default_data_directories,
                 mel_frequency_count: int = 128,
                 training_batches_per_epoch: int = 100,
                 batch_size: int = 64):
        self.training_batches_per_epoch = training_batches_per_epoch
        self.mel_frequency_count = mel_frequency_count
        self.name = name
        self.directories = directories
        # Per-configuration subdirectories are keyed by the configuration name.
        self.spectrogram_cache_directory = directories.spectrogram_cache_base_directory / name
        self.corpus_directory = directories.corpus_base_directory / name
        self.corpus_from_directory = corpus_from_directory
        self.allowed_characters = allowed_characters
        self.batch_size = batch_size

    @lazy
    def corpus(self) -> Corpus:
        """Parsed corpus; computed once on first access."""
        return self.corpus_from_directory(self.corpus_directory)

    @lazy
    def batch_generator(self) -> LabeledSpectrogramBatchGenerator:
        return self.batch_generator_for_corpus(self.corpus)

    def batch_generator_for_corpus(self, corpus: Corpus) -> LabeledSpectrogramBatchGenerator:
        return LabeledSpectrogramBatchGenerator(
            corpus=corpus,
            spectrogram_cache_directory=self.spectrogram_cache_directory,
            batch_size=self.batch_size)

    @staticmethod
    def english() -> 'Configuration':
        return Configuration(name='English', corpus_from_directory=english_corpus)

    @staticmethod
    def minimal_english() -> 'Configuration':
        # NOTE(review): uses the same name as english(), so it shares that
        # configuration's corpus and cache directories — presumably intentional.
        return Configuration(name='English', corpus_from_directory=minimal_english_corpus)

    @staticmethod
    def german(from_cached: bool = True,
               sampled_training_example_count_when_loading_from_cached: Optional[int] = None) -> 'Configuration':
        def load_cached_corpus(corpus_directory: Path) -> Corpus:
            return Corpus.load(
                corpus_directory / 'corpus.csv',
                sampled_training_example_count=sampled_training_example_count_when_loading_from_cached)

        return Configuration(
            name='German',
            allowed_characters=german_frequent_characters,
            corpus_from_directory=load_cached_corpus if from_cached else german_corpus)

    @staticmethod
    def mixed_german_english():
        return Configuration(
            name='mixed-English-German',
            allowed_characters=german_frequent_characters,
            corpus_from_directory=lambda _: ComposedCorpus(
                [Configuration.english().corpus, Configuration.german().corpus]))

    def train(self, wav2letter, run_name: str) -> None:
        """Run training with logs and nets stored under run_name."""
        wav2letter.train(
            self.batch_generator.training_batches(),
            tensor_board_log_directory=self.directories.tensorboard_log_base_directory / run_name,
            net_directory=self.directories.nets_base_directory / run_name,
            preview_labeled_spectrogram_batch=self.batch_generator.preview_batch(),
            batches_per_epoch=self.training_batches_per_epoch)

    def train_from_beginning(self):
        # Imported locally, presumably to avoid a circular import with speechless.net.
        from speechless.net import Wav2Letter

        wav2letter = Wav2Letter(self.mel_frequency_count, allowed_characters=self.allowed_characters)
        self.train(wav2letter,
                   run_name=timestamp() + '-adam-small-learning-rate-complete-training-{}{}'.format(
                       self.name, self.sampled_training_example_count_extension()))

    def summarize_and_save_corpus(self):
        log(self.corpus.summary())
        self.corpus.summarize_to_csv(self.corpus_directory / 'summary.csv')
        self.save_corpus()

    def save_corpus(self):
        self.corpus.save(self.corpus_directory / 'corpus.csv')

    def fill_cache(self, repair_incorrect: bool = False):
        self.batch_generator.fill_cache(repair_incorrect=repair_incorrect)

    def test_model(self, wav2letter):
        log(wav2letter.test_and_predict_batch(self.batch_generator.preview_batch()))
        log(wav2letter.test_and_predict_batches(self.batch_generator.test_batches()))

    def test_model_grouped_by_loaded_corpus_name(self, wav2letter) -> ExpectationsVsPredictionsInGroupedBatches:
        """Test the model per component corpus (grouped by the first path part
        of each example's audio directory relative to the corpus directory)."""

        def corpus_name(example: LabeledExampleFromFile) -> str:
            return example.audio_directory.relative_to(self.corpus_directory).parts[0]

        corpus_by_name = self.corpus.grouped_by(corpus_name)
        log([(name, len(corpus.test_examples)) for name, corpus in corpus_by_name.items()])

        result = wav2letter.test_and_predict_grouped_batches(OrderedDict(
            (corpus_name, self.batch_generator_for_corpus(corpus).test_batches())
            for corpus_name, corpus in corpus_by_name.items()))
        log(result)
        return result

    def train_transfer_from_best_english_model(self, frozen_layer_count: int,
                                               reinitialize_trainable_loaded_layers: bool = False):
        run_name = timestamp() + '-adam-small-learning-rate-transfer-to-{}-freeze-{}{}{}'.format(
            self.name, frozen_layer_count,
            '-reinitialize' if reinitialize_trainable_loaded_layers else '',
            self.sampled_training_example_count_extension())
        log('Run: ' + run_name)

        wav2letter = self.load_best_english_model(
            frozen_layer_count=frozen_layer_count,
            reinitialize_trainable_loaded_layers=reinitialize_trainable_loaded_layers)
        self.train(wav2letter, run_name=run_name)

    def sampled_training_example_count_extension(self):
        # Suffix for run names that records subsampling, e.g. "-500examples".
        return ('-{}examples'.format(self.corpus.sampled_training_example_count)
                if self.corpus.sampled_training_example_count is not None else '')

    def load_model(self,
                   load_name: str,
                   load_epoch: int,
                   frozen_layer_count: int = 0,
                   allowed_characters_for_loaded_model: List[chr] = english_frequent_characters,
                   use_kenlm: bool = False,
                   reinitialize_trainable_loaded_layers: bool = False,
                   language_model_name_extension: str = ''):
        """Restore a Wav2Letter net from a stored epoch checkpoint, optionally
        freezing layers and attaching a KenLM language model."""
        from speechless.net import Wav2Letter

        return Wav2Letter(
            allowed_characters=self.allowed_characters,
            input_size_per_time_step=self.mel_frequency_count,
            load_model_from_directory=self.directories.nets_base_directory / load_name,
            load_epoch=load_epoch,
            allowed_characters_for_loaded_model=allowed_characters_for_loaded_model,
            frozen_layer_count=frozen_layer_count,
            kenlm_directory=(self.directories.kenlm_base_directory /
                             (self.name.lower() + language_model_name_extension)) if use_kenlm else None,
            reinitialize_trainable_loaded_layers=reinitialize_trainable_loaded_layers)

    def load_best_english_model(self, frozen_layer_count: int = 0, use_ken_lm: bool = False,
                                reinitialize_trainable_loaded_layers: bool = False):
        return self.load_model(
            load_name=Configuration.english_baseline[0],
            load_epoch=Configuration.english_baseline[1],
            frozen_layer_count=frozen_layer_count,
            use_kenlm=use_ken_lm,
            reinitialize_trainable_loaded_layers=reinitialize_trainable_loaded_layers)

    def test_best_english_model(self, use_kenlm: bool = False):
        self.test_model_grouped_by_loaded_corpus_name(self.load_best_english_model(use_ken_lm=use_kenlm))

    # (run name, epoch) of the best stored English model.
    english_baseline = ('20170314-134351-adam-small-learning-rate-complete-95', 1689)

    def test_german_model(self, load_name: str, load_epoch: int, use_ken_lm=False,
                          language_model_name_extension: str = ''):
        self.test_model_grouped_by_loaded_corpus_name(
            self.load_german_model(load_name, load_epoch, use_ken_lm=use_ken_lm,
                                   language_model_name_extension=language_model_name_extension))

    def load_german_model(self, load_name: str, load_epoch: int, use_ken_lm=False,
                          language_model_name_extension: str = '') -> Wav2Letter:
        return self.load_model(
            load_name=load_name,
            load_epoch=load_epoch,
            allowed_characters_for_loaded_model=german_frequent_characters,
            use_kenlm=use_ken_lm,
            language_model_name_extension=language_model_name_extension)

    # (run name, epoch) of the best stored German transfer model.
    freeze0day4hour7 = ('20170420-001258-adam-small-learning-rate-transfer-to-German-freeze-0', 2066)

    def load_best_german_model(self, use_ken_lm=False, language_model_name_extension: str = '') -> Wav2Letter:
        return self.load_german_model(
            Configuration.freeze0day4hour7[0], Configuration.freeze0day4hour7[1],
            use_ken_lm=use_ken_lm, language_model_name_extension=language_model_name_extension)
class LoggedRun():
    """Callable that runs an action while mirroring all INFO-level log output
    into a per-run result file under results_directory."""

    def __init__(self, action: Callable[([], None)], name: str,
                 results_directory: Path = default_data_directories.test_results_directory):
        self.action = action
        self.name = name
        self.results_directory = results_directory
        self.result_file = self.results_directory / self.name

    def __call__(self):
        mkdir(self.results_directory)
        # Truncate any previous result before logging into it.
        write_text(self.result_file, '')
        handler = logging.FileHandler(str(self.result_file))
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
        try:
            self.action()
        finally:
            logger.removeHandler(handler)
            # Fix: removeHandler does not release the file; without close() the
            # FileHandler leaks one file descriptor per run.
            handler.close()
class ParsingException(Exception):
    """Raised when a corpus annotation file cannot be parsed."""
class Phase(Enum):
    """Which data split an example belongs to; values double as CSV markers."""
    training = 'training'
    test = 'test'
class Corpus():
    """A set of labeled speech examples partitioned into training and test,
    with CSV (de)serialization and grouping support."""

    # NOTE(review): `__metaclass__` is Python-2 syntax and has no effect in
    # Python 3, so the @abstractmethod markers below are not actually enforced.
    # Corpus.load below instantiates Corpus directly, so switching to
    # `metaclass=ABCMeta` would break it — left as-is deliberately.
    __metaclass__ = ABCMeta

    def __init__(self,
                 training_examples: List[LabeledExample],
                 test_examples: List[LabeledExample],
                 sampled_training_example_count: Optional[int] = None):
        # Optionally subsample the training set with a fixed seed for reproducibility.
        self.training_examples = (
            training_examples if sampled_training_example_count is None
            else random.Random(42).sample(training_examples, sampled_training_example_count))
        self.sampled_training_example_count = sampled_training_example_count
        self.test_examples = test_examples
        # NOTE(review): built from the full (unsampled) training list.
        self.examples = training_examples + test_examples
        log('Training on {} examples, testing on {} examples.'.format(
            len(self.training_examples), len(self.test_examples)))

        # Reject corpora with duplicate ids or train/test leakage.
        duplicate_training_ids = duplicates(e.id for e in training_examples)
        if len(duplicate_training_ids) > 0:
            raise ValueError('Duplicate ids in training examples: {}'.format(duplicate_training_ids))

        duplicate_test_ids = duplicates(e.id for e in test_examples)
        if len(duplicate_test_ids) > 0:
            raise ValueError('Duplicate ids in test examples: {}'.format(duplicate_test_ids))

        overlapping_ids = duplicates(e.id for e in self.examples)
        if len(overlapping_ids) > 0:
            raise ValueError('Overlapping training and test set: {}'.format(overlapping_ids))

    @abstractmethod
    def csv_rows(self) -> List[str]:
        raise NotImplementedError

    @abstractmethod
    def summary(self) -> str:
        raise NotImplementedError

    def summarize_to_csv(self, summary_csv_file: Path) -> None:
        import csv
        with summary_csv_file.open('w', encoding='utf8') as csv_summary_file:
            writer = csv.writer(csv_summary_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for row in self.csv_rows():
                writer.writerow(row)

    def save(self, corpus_csv_file: Path, use_relative_audio_file_paths: bool = True):
        """Write all examples as CSV rows: id, audio path, label, phase, positions."""
        import csv
        with corpus_csv_file.open('w', encoding='utf8') as opened_csv:
            writer = csv.writer(opened_csv, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            examples_and_phase = ([(e, Phase.training) for e in self.training_examples] +
                                  [(e, Phase.test) for e in self.test_examples])
            for e, phase in examples_and_phase:
                writer.writerow((
                    e.id,
                    str(e.audio_file.relative_to(corpus_csv_file.parent)
                        if use_relative_audio_file_paths else e.audio_file),
                    e.label,
                    phase.value,
                    e.positional_label.serialize() if e.positional_label else ''))

    @staticmethod
    def load(corpus_csv_file: Path, sampled_training_example_count: Optional[int] = None) -> 'Corpus':
        """Inverse of save(): rebuild a Corpus from its CSV representation."""
        import csv
        with corpus_csv_file.open(encoding='utf8') as opened_csv:
            reader = csv.reader(opened_csv, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

            def to_absolute(audio_file_path: Path) -> Path:
                # Relative paths are interpreted relative to the CSV file's directory.
                return (audio_file_path if audio_file_path.is_absolute()
                        else Path(corpus_csv_file.parent) / audio_file_path)

            examples = [
                (LabeledExampleFromFile(
                    audio_file=to_absolute(Path(audio_file_path)), id=id, label=label,
                    positional_label=(None if positional_label == ''
                                      else PositionalLabel.deserialize(positional_label))),
                 Phase[phase])
                for id, audio_file_path, label, phase, positional_label in reader]

            return Corpus(
                training_examples=[e for e, phase in examples if phase == Phase.training],
                test_examples=[e for e, phase in examples if phase == Phase.test],
                sampled_training_example_count=sampled_training_example_count)

    K = TypeVar('Key')

    def grouped_by(self, key: Callable[([LabeledExample], K)]) -> Dict[(K, 'Corpus')]:
        """Partition into sub-corpora keyed by `key`, preserving key order of
        first appearance across all examples."""
        examples_by_key = group(self.examples, key=key)
        training_examples_by_key = group(self.training_examples, key=key)
        test_examples_by_key = group(self.test_examples, key=key)
        keys = examples_by_key.keys()
        return OrderedDict(
            (key, Corpus(
                training_examples=(list(training_examples_by_key[key])
                                   if key in training_examples_by_key else []),
                test_examples=(list(test_examples_by_key[key])
                               if key in test_examples_by_key else [])))
            for key in keys)
class ComposedCorpus(Corpus):
    """Concatenation of several corpora into a single corpus."""

    def __init__(self, corpora: List[Corpus]):
        self.corpora = corpora
        training_examples = [example
                             for corpus in corpora
                             for example in corpus.training_examples]
        super().__init__(
            training_examples=training_examples,
            test_examples=[example
                           for corpus in corpora
                           for example in corpus.test_examples])

    def csv_rows(self) -> List[str]:
        # Flatten the component rows in component order.
        return [row for corpus in self.corpora for row in corpus.csv_rows()]

    def summary(self) -> str:
        component_summaries = '\n\n'.join([corpus.summary() for corpus in self.corpora])
        totals = '\n\n {} total, {} training, {} test'.format(
            len(self.examples), len(self.training_examples), len(self.test_examples))
        return component_summaries + totals
class TrainingTestSplit():
    """Factories for split functions that map a list of examples to a
    (training_examples, test_examples) pair."""

    # Put everything into training / test respectively.
    training_only = (lambda examples: (examples, []))
    test_only = (lambda examples: ([], examples))

    @staticmethod
    def randomly_grouped_by(key_from_example: Callable[([LabeledExample], Any)], training_share: float = 0.9) -> \
            Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])]:
        """Split so that all examples sharing a key land on the same side;
        a training_share fraction of the keys goes to training."""

        def split(examples: List[LabeledExample]) -> Tuple[(List[LabeledExample], List[LabeledExample])]:
            examples_by_directory = group(examples, key=key_from_example)
            directories = examples_by_directory.keys()

            # Fixed seed keeps the split reproducible across runs.
            random.seed(42)

            # Fix: random.sample() requires a sequence — passing a dict keys
            # view raises TypeError on Python 3.11+. list() preserves insertion
            # order, so the sampled split is unchanged on older versions.
            keys = set(random.sample(list(directories), int(training_share * len(directories))))

            training_examples = [example for example in examples if key_from_example(example) in keys]
            test_examples = [example for example in examples if key_from_example(example) not in keys]
            return (training_examples, test_examples)

        return split

    @staticmethod
    def randomly(training_share: float = 0.9) -> \
            Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])]:
        # Each example is its own group (keyed by id), i.e. a plain random split.
        return TrainingTestSplit.randomly_grouped_by(lambda e: e.id, training_share=training_share)

    @staticmethod
    def randomly_grouped_by_directory(training_share: float = 0.9) -> \
            Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])]:
        return TrainingTestSplit.randomly_grouped_by(lambda e: e.audio_directory, training_share=training_share)

    @staticmethod
    def overfit(training_example_count: int) -> \
            Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])]:
        # First n examples for training, remainder for test (deliberate overfitting setup).
        return (lambda examples: (examples[:training_example_count], examples[training_example_count:]))

    @staticmethod
    def by_directory(test_directory_name: str = 'test') -> \
            Callable[([List[LabeledExampleFromFile]], Tuple[(List[LabeledExampleFromFile], List[LabeledExampleFromFile])])]:
        """Split by directory name: examples under `test_directory_name` become the test set."""

        def split(examples: List[LabeledExampleFromFile]) -> \
                Tuple[(List[LabeledExampleFromFile], List[LabeledExampleFromFile])]:
            training_examples = [example for example in examples
                                 if example.audio_directory.name != test_directory_name]
            test_examples = [example for example in examples
                             if example.audio_directory.name == test_directory_name]
            return (training_examples, test_examples)

        return split
def _cache_spectrogram(labeled_spectrogram: CachedLabeledSpectrogram) -> None:
    """Compute the spectrogram so it ends up in the on-disk cache.

    Module-level (not a lambda/closure) so it is picklable for multiprocessing."""
    labeled_spectrogram.z_normalized_transposed_spectrogram()
def _repair_cached_spectrogram_if_incorrect(labeled_spectrogram: CachedLabeledSpectrogram) -> None:
    """Validate the cached spectrogram file and recompute it if broken.

    Module-level (not a lambda/closure) so it is picklable for multiprocessing."""
    labeled_spectrogram.repair_cached_file_if_incorrect()
class LabeledSpectrogramBatchGenerator():
    """Serves batches of cached labeled spectrograms for training, preview and test."""

    def __init__(self, corpus: Corpus, spectrogram_cache_directory: Path, batch_size: int = 64):
        mkdir(spectrogram_cache_directory)
        self.batch_size = batch_size
        self.spectrogram_cache_directory = spectrogram_cache_directory
        self.labeled_training_spectrograms = [
            CachedLabeledSpectrogram(example, spectrogram_cache_directory=spectrogram_cache_directory)
            for example in corpus.training_examples]
        self.labeled_test_spectrograms = [
            CachedLabeledSpectrogram(example, spectrogram_cache_directory=spectrogram_cache_directory)
            for example in corpus.test_examples]
        self.labeled_spectrograms = self.labeled_training_spectrograms + self.labeled_test_spectrograms

    def preview_batch(self) -> List[LabeledSpectrogram]:
        # Fixed first batch (training examples first) for progress previews.
        return self.labeled_spectrograms[:self.batch_size]

    def training_batches(self) -> Iterable[List[LabeledSpectrogram]]:
        # Endless stream of random batches, sampled without replacement within a batch.
        while True:
            yield random.sample(self.labeled_training_spectrograms, self.batch_size)

    def test_batches(self) -> Iterable[List[LabeledSpectrogram]]:
        return paginate(self.labeled_test_spectrograms, self.batch_size)

    def fill_cache(self, repair_incorrect: bool = False) -> None:
        """Precompute (or, with repair_incorrect, validate and fix) cached
        spectrograms in parallel across all CPU cores."""
        with Pool(processes=multiprocessing.cpu_count()) as pool:
            total = len(self.labeled_spectrograms)
            not_yet_cached = [s for s in self.labeled_spectrograms if not s.is_cached()]
            to_calculate = self.labeled_spectrograms if repair_incorrect else not_yet_cached
            log('Filling cache with {} spectrograms: {} already cached, {} to calculate.'.format(
                total, total - len(not_yet_cached), len(to_calculate)))

            task = _repair_cached_spectrogram_if_incorrect if repair_incorrect else _cache_spectrogram
            for index, labeled_spectrogram in enumerate(to_calculate):
                # NOTE(review): apply_async results are never inspected, so
                # worker errors are silently dropped — presumably best-effort
                # by design; confirm before relying on the cache being complete.
                pool.apply_async(task, (labeled_spectrogram,))

            pool.close()
            pool.join()
class LibriSpeechCorpus(Corpus):
    """Corpus downloaded and unpacked from LibriSpeech-style .tar.gz archives:
    audio files (.flac/.wav) at a fixed directory depth plus .txt label files
    mapping example ids to transcriptions."""

    def __init__(self,
                 base_directory: Path,
                 corpus_name: str,
                 base_source_url_or_directory: str = 'http://www.openslr.org/resources/12/',
                 tar_gz_extension: str = '.tar.gz',
                 mel_frequency_count: int = 128,
                 root_compressed_directory_name_to_skip: Optional[str] = 'LibriSpeech/',
                 subdirectory_depth: int = 3,
                 allowed_characters: List[chr] = english_frequent_characters,
                 tags_to_ignore: Iterable[str] = list(),
                 id_filter_regex=re.compile('[\\s\\S]*'),
                 training_test_split: Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])] = TrainingTestSplit.randomly(),
                 maximum_example_duration_in_s: Optional[int] = None,
                 minimum_duration_per_character: Optional[float] = None):
        self.minimum_duration_per_character_in_s = minimum_duration_per_character
        self.maximum_example_duration_in_s = maximum_example_duration_in_s
        self.training_test_split = training_test_split
        self.id_filter_regex = id_filter_regex
        self.tags_to_ignore = tags_to_ignore
        self.allowed_characters = allowed_characters
        self.subdirectory_depth = subdirectory_depth
        self.root_compressed_directory_name_to_skip = root_compressed_directory_name_to_skip
        self.base_directory = base_directory
        self.base_url_or_directory = base_source_url_or_directory
        self.tar_gz_extension = tar_gz_extension
        self.mel_frequency_count = mel_frequency_count
        self.corpus_name = corpus_name
        mkdir(base_directory)

        self.corpus_directory = self._download_and_unpack_if_not_yet_done(corpus_name=corpus_name)

        # Descend a fixed number of directory levels to reach the example files.
        directories = [self.corpus_directory]
        for i in range(self.subdirectory_depth):
            directories = [subdirectory
                           for directory in directories
                           for subdirectory in directory.iterdir() if subdirectory.is_dir()]

        self.files = [file for directory in directories for file in directory.iterdir() if file.is_file()]
        self.unfiltered_audio_files = [file for file in self.files
                                       if (file.name.lower().endswith('.flac') or
                                           file.name.lower().endswith('.wav'))]
        audio_files = [file for file in self.unfiltered_audio_files
                       if self.id_filter_regex.match(name_without_extension(file))]
        self.filtered_out_count = len(self.unfiltered_audio_files) - len(audio_files)

        positional_label_by_id = self._extract_positional_label_by_id(self.files)
        found_audio_ids = set(name_without_extension(f) for f in audio_files)
        found_label_ids = positional_label_by_id.keys()
        # Track mismatches for the summary; these examples are excluded below.
        self.audio_ids_without_label = list(found_audio_ids - found_label_ids)
        self.label_ids_without_audio = list(found_label_ids - found_audio_ids)

        def example(audio_file: Path) -> LabeledExample:
            id = name_without_extension(audio_file)

            def correct_whitespace(text: str) -> str:
                return ' '.join(text.split()).strip()

            def correct(label: str) -> str:
                return correct_whitespace(self._remove_tags_to_ignore(label))

            original_positional_label = positional_label_by_id[id]
            # Labels are either plain strings or PositionalLabel objects with timing.
            has_positions = isinstance(original_positional_label, PositionalLabel)
            positional_label = (original_positional_label.with_corrected_labels(correct).convert_range_to_seconds(
                LabeledExampleFromFile.file_sample_rate(audio_file)) if has_positions else None)

            return LabeledExampleFromFile(
                audio_file,
                mel_frequency_count=self.mel_frequency_count,
                label=positional_label.label if has_positions else correct(original_positional_label),
                label_with_tags=original_positional_label.label if has_positions else original_positional_label,
                positional_label=positional_label)

        # Progressive filtering; intermediate lists are kept for the summary.
        self.examples_with_empty_and_too_long_or_short = [
            example(file) for file in audio_files
            if name_without_extension(file) in positional_label_by_id.keys()]
        self.examples_with_too_long_or_short = [
            e for e in self.examples_with_empty_and_too_long_or_short if e.label]
        self.examples_with_too_short = [
            e for e in self.examples_with_too_long_or_short if not self.is_too_long(e)]
        examples = [e for e in self.examples_with_too_short if not self.is_too_short(e)]

        training_examples, test_examples = self.training_test_split(sorted(examples, key=lambda x: x.id))
        super().__init__(training_examples=training_examples, test_examples=test_examples)

    def is_too_long(self, example: LabeledExample) -> bool:
        return ((self.maximum_example_duration_in_s is not None) and
                (example.duration_in_s > self.maximum_example_duration_in_s))

    def is_too_short(self, example: LabeledExample) -> bool:
        # Too short = audio cannot plausibly contain the label at the minimum speaking rate.
        return ((self.minimum_duration_per_character_in_s is not None) and
                (example.duration_in_s < len(example.label) * self.minimum_duration_per_character_in_s))

    def _remove_tags_to_ignore(self, text: str) -> str:
        return reduce(lambda text, tag: text.replace(tag, ''), self.tags_to_ignore, text)

    def _download_and_unpack_if_not_yet_done(self, corpus_name: str) -> Path:
        file_name = corpus_name + self.tar_gz_extension
        file_url_or_path = self.base_url_or_directory + file_name
        target_directory = self.base_directory / corpus_name

        if not target_directory.exists():
            tar_file = self._download_if_not_yet_done(file_url_or_path, self.base_directory / file_name)
            self._unpack_tar_if_not_yet_done(tar_file, target_directory=target_directory)

        return target_directory

    def _unpack_tar_if_not_yet_done(self, tar_file: Path, target_directory: Path):
        if not target_directory.is_dir():
            with tarfile.open(str(tar_file), 'r:gz') as tar:
                # NOTE(review): extractall without a member filter trusts the
                # archive; a malicious tar could write outside target_directory.
                tar.extractall(str(target_directory),
                               members=self._tar_members_root_directory_skipped_if_specified(tar))

    def _tar_members_root_directory_skipped_if_specified(self, tar: TarFile) -> List[TarInfo]:
        # Optionally strip a wrapping root directory (e.g. "LibriSpeech/") from member paths.
        members = tar.getmembers()
        if self.root_compressed_directory_name_to_skip is not None:
            for member in members:
                member.name = member.name.replace(self.root_compressed_directory_name_to_skip, '')
        return members

    def _download_if_not_yet_done(self, source_path_or_url: str, target_path: Path) -> Path:
        if not target_path.is_file():
            log('Downloading corpus {} to {}'.format(source_path_or_url, target_path))
            if self.base_url_or_directory.startswith('http'):
                request.urlretrieve(source_path_or_url, str(target_path))
            else:
                # Non-http sources are copied via scp.
                try:
                    subprocess.check_output(['scp', source_path_or_url, str(target_path)],
                                            stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    raise IOError('Copying failed: ' + str(e.output))
        return target_path

    def _extract_positional_label_by_id(self, files: Iterable[Path]) -> Dict[(str, Union[(PositionalLabel, str)])]:
        """Parse LibriSpeech .txt label files: each line is '<id> <label words...>'."""
        label_files = [file for file in files if file.name.endswith('.txt')]
        positional_label_by_id = OrderedDict()
        for label_file in label_files:
            with label_file.open() as f:
                for line in f.readlines():
                    parts = line.split()
                    id = parts[0]
                    label = ' '.join(parts[1:])
                    positional_label_by_id[id] = label.lower()
        return positional_label_by_id

    def is_allowed(self, label: str) -> bool:
        return all(c in self.allowed_characters for c in label)

    def csv_rows(self):
        # Single summary row matching the columns logged by summarize_to_csv.
        return [[self.corpus_name, self.file_type_summary,
                 len(self.unfiltered_audio_files), self.filtered_out_count, self.id_filter_regex,
                 len(self.audio_ids_without_label), str(self.audio_ids_without_label[:10]),
                 len(self.label_ids_without_audio), self.label_ids_without_audio[:10],
                 self.tag_summary,
                 len(self.examples), len(self.invalid_examples_texts), self.invalid_examples_summary,
                 len(self.empty_examples), [e.id for e in self.empty_examples[:10]],
                 self.duplicate_label_count, self.most_duplicated_labels,
                 len(self.training_examples), len(self.test_examples),
                 len(self.examples_without_positional_labels),
                 self.total_duration_in_h, self.total_training_duration_in_h, self.total_test_duration_in_h,
                 self.total_duration_of_too_long_examples_in_h,
                 len(self.too_long_examples), len(self.too_short_examples),
                 [e.id for e in self.too_short_examples]]]

    def summary(self) -> str:
        description = ('File types: {}\n{}{}{}{}{}{} extracted examples, of them {} invalid, '
                       '{} empty (will be excluded), {} too long, {} too short, {} duplicate, '
                       '{} without positions.\n{} training examples, {} test examples.').format(
            self.file_type_summary,
            ('Out of {} audio files, {} were excluded by regex {}\n'.format(
                len(self.unfiltered_audio_files), self.filtered_out_count, self.id_filter_regex)
             if self.filtered_out_count > 0 else ''),
            ('{} audio files without matching label; will be excluded, e. g. {}.\n'.format(
                len(self.audio_ids_without_label), self.audio_ids_without_label[:10])
             if len(self.audio_ids_without_label) > 0 else ''),
            ('{} labels without matching audio file; will be excluded, e. g. {}.\n'.format(
                len(self.label_ids_without_audio), self.label_ids_without_audio[:10])
             if len(self.label_ids_without_audio) > 0 else ''),
            ('Removed label tags: {}\n'.format(self.tag_summary) if self.tag_summary != '' else ''),
            self.invalid_examples_summary,
            len(self.examples), len(self.invalid_examples_texts), len(self.empty_examples),
            len(self.too_long_examples), len(self.too_short_examples),
            self.duplicate_label_count, len(self.examples_without_positional_labels),
            len(self.training_examples), len(self.test_examples))
        return (self.corpus_name + '\n') + '\n'.join('\t' + line for line in description.splitlines())

    @lazy
    def invalid_examples_summary(self):
        return ''.join([e + '\n' for e in self.invalid_examples_texts])

    @lazy
    def original_sample_rate_summary(self):
        return count_summary(self.some_original_sample_rates)

    @lazy
    def tag_summary(self):
        return count_summary(self.tags_from_all_examples)

    @lazy
    def file_type_summary(self):
        return count_summary(self.file_extensions)

    @lazy
    def invalid_examples_texts(self):
        return ['Invalid characters {} in {}'.format(
            distinct([c for c in e.label if c not in self.allowed_characters]), str(e))
            for e in self.examples if not self.is_allowed(e.label)]

    @lazy
    def some_original_sample_rates(self):
        # Sampling keeps the summary cheap for large corpora.
        return [e.original_sample_rate
                for e in random.sample(self.examples, min(50, len(self.examples)))]

    @lazy
    def file_extensions(self):
        return [extension(file) for file in self.corpus_directory.glob('**/*.*') if file.is_file()]

    @lazy
    def empty_examples(self):
        return [e for e in self.examples_with_empty_and_too_long_or_short if not e.label]

    @lazy
    def too_long_examples(self):
        return [e for e in self.examples_with_too_long_or_short if self.is_too_long(e)]

    @lazy
    def too_short_examples(self):
        return [e for e in self.examples_with_too_short if self.is_too_short(e)]

    @lazy
    def duplicate_label_count(self):
        return len(self.examples) - len(set(e.label for e in self.examples))

    @lazy
    def most_duplicated_labels(self):
        return Counter([e.label for e in self.examples]).most_common(10)

    @lazy
    def tags_from_all_examples(self):
        return [counted_tag
                for e in self.examples
                for tag in self.tags_to_ignore
                for counted_tag in [tag] * e.tag_count(tag)]

    @lazy
    def total_training_duration_in_h(self):
        return sum(e.duration_in_s for e in self.training_examples) / 3600

    @lazy
    def total_test_duration_in_h(self):
        return sum(e.duration_in_s for e in self.test_examples) / 3600

    @lazy
    def total_duration_in_h(self):
        return sum(e.duration_in_s for e in self.examples) / 3600

    @lazy
    def total_duration_of_too_long_examples_in_h(self):
        return sum(e.duration_in_s for e in self.too_long_examples) / 3600

    @lazy
    def examples_without_positional_labels(self):
        return [e for e in self.examples if not e.positional_label]
def dev_clean(base_directory: Path) -> LibriSpeechCorpus:
    """LibriSpeech dev-clean subset, used entirely for training."""
    return LibriSpeechCorpus(base_directory=base_directory,
                             corpus_name='dev-clean',
                             training_test_split=TrainingTestSplit.training_only)
def english_corpus(base_directory: Path) -> ComposedCorpus:
    """Full English corpus: dev-clean plus the other LibriSpeech training
    subsets, with test-clean held out as the test set."""
    corpora = [dev_clean(base_directory)]
    corpora.extend(
        LibriSpeechCorpus(base_directory=base_directory,
                          corpus_name=name,
                          training_test_split=TrainingTestSplit.training_only)
        for name in ['dev-other', 'train-clean-100', 'train-clean-360', 'train-other-500'])
    corpora.append(
        LibriSpeechCorpus(base_directory=base_directory,
                          corpus_name='test-clean',
                          training_test_split=TrainingTestSplit.test_only))
    return ComposedCorpus(corpora)
def minimal_english_corpus(base_directory: Path) -> ComposedCorpus:
    """Smallest useful English corpus: just the dev-clean subset."""
    return ComposedCorpus([dev_clean(base_directory)])
class UmlautDecoder():
    """Strategies for decoding German umlauts/ß that were ASCII-encoded with a
    quote character (optionally escaped), e.g. '"a' or 'a"' for 'ä'.

    Each attribute is a plain function str -> str, usable as a
    `umlaut_decoder` argument."""

    def none(text):
        """Leave the text unchanged."""
        return text

    def quote_before_umlaut(text):
        """Decode encodings where the quote precedes the vowel ('"a', '\\"a').
        Escaped forms are replaced first so the backslash is consumed too."""
        for encoded, decoded in (('\\"a', 'ä'), ('\\"o', 'ö'), ('\\"u', 'ü'), ('\\"s', 'ß'),
                                 ('"a', 'ä'), ('"o', 'ö'), ('"u', 'ü'), ('"s', 'ß')):
            text = text.replace(encoded, decoded)
        return text

    def quote_after_umlaut(text):
        """Decode encodings where the quote follows the vowel ('a"', 'a\\"')."""
        for encoded, decoded in (('a\\"', 'ä'), ('o\\"', 'ö'), ('u\\"', 'ü'), ('s\\"', 'ß'),
                                 ('a"', 'ä'), ('o"', 'ö'), ('u"', 'ü'), ('s"', 'ß')):
            text = text.replace(encoded, decoded)
        return text

    def try_quote_before_umlaut_then_after(text):
        """Apply the quote-before decoding first, then the quote-after decoding."""
        return UmlautDecoder.quote_after_umlaut(UmlautDecoder.quote_before_umlaut(text))
# Parser for German CLARIN/BAS speech corpora (.tgz archives): extracts
# per-utterance labels from *_annot.json and .par annotation files, merges the
# ORT/TR2 transcription tiers, decodes ASCII umlauts via the configured
# UmlautDecoder, and recovers per-word sample ranges (PHO/MAS/MAU segment
# tiers) as PositionalLabels.  For the ALC corpus, labels from '_h_'
# recordings are copied to their '_m_' counterparts, which ship unlabeled.
# NOTE(review): left byte-identical — the JSON tier selection and
# consecutive-range merging depend on exact statement order; documentation only.
class GermanClarinCorpus(LibriSpeechCorpus): '\n    Parses the labeled German speech data downloadable from https://clarin.phonetik.uni-muenchen.de/BASRepository/.\n    ' def __init__(self, corpus_name: str, base_directory: Path, base_source_url_or_directory: str='ketos:/projects/korpora/speech/', umlaut_decoder: Callable[([str], str)]=UmlautDecoder.quote_before_umlaut, tar_gz_extension: str='.tgz', mel_frequency_count: int=128, root_compressed_directory_name_to_skip: Optional[str]=None, subdirectory_depth: int=2, tags_to_ignore: Iterable[str]=_tags_to_ignore, id_filter_regex=re.compile('[\\s\\S]*'), training_test_split: Callable[([List[LabeledExample]], Tuple[(List[LabeledExample], List[LabeledExample])])]=TrainingTestSplit.randomly_grouped_by_directory()): self.umlaut_decoder = umlaut_decoder log('Parsing corpus {}...'.format(corpus_name)) super().__init__(base_directory=base_directory, base_source_url_or_directory=base_source_url_or_directory, corpus_name=corpus_name, tar_gz_extension=tar_gz_extension, root_compressed_directory_name_to_skip=root_compressed_directory_name_to_skip, subdirectory_depth=subdirectory_depth, allowed_characters=german_frequent_characters, tags_to_ignore=tags_to_ignore, id_filter_regex=id_filter_regex, mel_frequency_count=mel_frequency_count, training_test_split=training_test_split, maximum_example_duration_in_s=35, minimum_duration_per_character=(((2 * 2) * 128) / 16000)) def _extract_positional_label_by_id(self, files: Iterable[Path]) -> Dict[(str, Union[(PositionalLabel, str)])]: json_ending = '_annot.json' json_annotation_files = [file for file in files if (file.name.endswith(json_ending) and self.id_filter_regex.match(file.name[:(- len(json_ending))]))] json_extracted = OrderedDict(((file.name[:(- len(json_ending))], self._extract_positional_label_from_json(file)) for file in json_annotation_files)) par_annotation_files = [file for file in files if (file.name.lower().endswith('.par') and self.id_filter_regex.match(name_without_extension(file).lower()))] extracted = OrderedDict(((name_without_extension(file), self._extract_label_from_par(file)) for file in par_annotation_files)) for key in set(extracted.keys()).intersection(set(json_extracted.keys())): json = json_extracted[key] json_label = (json if isinstance(json, str) else json.label) if (extracted[key] != json_label): log('{}: "{}" extracted from par differ from json "{}"'.format(key, extracted[key], json_label)) extracted.update(json_extracted) if ('ALC' in self.corpus_name): correctly_labeled_id_marker = '_h_' empty_labeled_id_marker = '_m_' correct_ids = [id for id in extracted.keys() if (correctly_labeled_id_marker in id)] for correct_id in correct_ids: empty_labeled_id = correct_id.replace(correctly_labeled_id_marker, empty_labeled_id_marker) extracted[empty_labeled_id] = extracted[correct_id] return extracted def _extract_positional_label_from_json(self, json_file: Path) -> Union[(PositionalLabel, str)]: json_text = read_text(json_file, encoding='utf8') try: j = json.loads(json_text) levels = j['levels'] def words_with_id_for_labels(label_names: Set[str]) -> List[Tuple[(str, int)]]: def is_level_empty(level: json) -> bool: return (len(level['items']) == 0) def is_level_useful(level: json) -> bool: if is_level_empty(level): return False return any([label for label in level['items'][0]['labels'] if (label['name'] in label_names)]) def word_with_id(transcription: json) -> Tuple[(str, int)]: labels = transcription['labels'] matching_labels = [label for label in labels if (label['name'] in label_names)] if (len(matching_labels) == 0): raise Exception('No matching label names, found {} instead.'.format([label['name'] for label in labels])) matching_label = single(matching_labels) return (matching_label['value'], transcription['id']) words_with_id = single_or_none([[word_with_id(transcription) for transcription in level['items']] for level in levels if is_level_useful(level)]) if (words_with_id is None): return [] return words_with_id words_with_id = words_with_id_for_labels(label_names={'ORT', 'word'}) tr2_words_with_id = words_with_id_for_labels(label_names={'TR2'}) ids = [id for (word, id) in words_with_id] words = self._merge_transcriptions_and_decode(words=[word for (word, id) in words_with_id], tr2_words=[word for (word, id) in tr2_words_with_id]) segment_ids_by_word_id = group(j['links'], key=(lambda link: link['fromID']), value=(lambda link: link['toID'])) def sampel_range_by_segment_id(level_names: Iterable[str]) -> Dict[(int, Tuple[(int, int)])]: return OrderedDict(((segment['id'], (segment['sampleStart'], ((segment['sampleStart'] + segment['sampleDur']) + 1))) for level in levels if ((level['type'] == 'SEGMENT') and (level['name'] in level_names)) for segment in level['items'])) mas_sample_range_by_segment_id = sampel_range_by_segment_id(level_names=('MAS',)) mau_sample_range_by_segment_id = sampel_range_by_segment_id(level_names=('MAU',)) pho_sample_range_by_segment_id = sampel_range_by_segment_id(level_names=('PHO', 'phonetic')) def sampel_ranges_by_word_id(id: int) -> List[Tuple[(int, int)]]: segment_ids = (segment_ids_by_word_id[id] if (id in segment_ids_by_word_id) else []) def a(x): return [x[segment_id] for segment_id in segment_ids if (segment_id in x)] mas_sample_ranges = a(mas_sample_range_by_segment_id) mau_sample_ranges = a(mau_sample_range_by_segment_id) pho_sample_ranges = a(pho_sample_range_by_segment_id) return (pho_sample_ranges if pho_sample_ranges else (mas_sample_ranges if mas_sample_ranges else mau_sample_ranges)) def merge_consecutive_ranges(ranges: List[Tuple[(int, int)]]) -> Tuple[(int, int)]: def is_not_empty(range: Tuple[(int, int)]): return ((range[0] + 1) != range[1]) s = sorted((range for range in ranges if is_not_empty(range)), key=(lambda range: range[0]))[:(- 1)] for (index, range) in enumerate(s): next_range = ranges[(index + 1)] if (range[1] != next_range[0]): log('Ranges {} of a word are not consecutive.'.format(s)) return (ranges[0][0], ranges[(- 1)][1]) def sample_range_or_none_by_word_id(id: int): ranges = sampel_ranges_by_word_id(id) return (merge_consecutive_ranges(ranges) if ranges else None) words_with_ranges = [(word, sample_range_or_none_by_word_id(id)) for (word, id) in zip(words, ids)] if ((len(words_with_ranges) == 0) or any(((range is None) for (word, range) in words_with_ranges))): return ' '.join((word for (word, range) in words_with_ranges)) return PositionalLabel(words_with_ranges) except Exception: raise ParsingException('Error parsing annotation {}: {}'.format(json_file, json_text[:500])) def _extract_label_from_par(self, par_file: Path) -> str: par_text = '' try: par_text = read_text(par_file, encoding='utf8') def words_for_label(label_name: str): return [line.split('\t')[(- 1)] for line in par_text.splitlines() if line.startswith(label_name)] return ' '.join(self._merge_transcriptions_and_decode(words_for_label('ORT'), words_for_label('TR2'))) except Exception: raise ParsingException('Error parsing annotation {}: {}'.format(par_file, par_text[:500])) def _merge_transcriptions_and_decode(self, words: List[str], tr2_words: List[str]) -> List[str]: usb_tag = '<usb>' def clean_tr2(tr2_word): return tr2_word.replace('<Ger"ausch>', '').replace('<geräusch>', '').replace('<#>', '') if (len(words) > 0): if (words[0] == usb_tag): words[0] = clean_tr2(tr2_words[0]) if (words[(- 1)] == usb_tag): if (len(tr2_words) != len(words)): raise ParsingException('TR2 word count differs.') words[(- 1)] = clean_tr2(tr2_words[(- 1)]) return [self._correct_german(word) for word in words] def _correct_german(self, text: str) -> str: return self.umlaut_decoder(text.lower().replace('é', 'e').replace('xe4', 'ä').replace('.', ' ').replace('-', ' '))
def clarin_corpora_sorted_by_size(base_directory: Path) -> List[GermanClarinCorpus]:
    """All German CLARIN corpora, smallest first (cheap ones download/parse early)."""
    return [
        sc1(base_directory),
        pd2(base_directory),
        ziptel(base_directory),
        sc10(base_directory),
        GermanClarinCorpus('all.HEMPEL.4.cmdi.11610.1490680796', base_directory),
        GermanClarinCorpus('all.PD1.3.cmdi.16312.1490681066', base_directory),
        # Verbmobil corpora contain non-German recordings; filter by id and train only.
        GermanClarinCorpus('all.VM1.3.cmdi.1508.1490625070', base_directory,
                           id_filter_regex=vm1_id_german_filter_regex,
                           training_test_split=TrainingTestSplit.training_only),
        GermanClarinCorpus('all.RVG-J.1.cmdi.18181.1490681704', base_directory),
        # ALC: split by speaker prefix (first three id characters).
        GermanClarinCorpus('all.ALC.4.cmdi.16602.1490632862', base_directory,
                           training_test_split=TrainingTestSplit.randomly_grouped_by((lambda e: e.id[:3]))),
        GermanClarinCorpus('all.VM2.3.cmdi.4260.1490625316', base_directory,
                           id_filter_regex=vm2_id_german_filter_regex,
                           training_test_split=TrainingTestSplit.training_only)]
def sc1(base_directory: Path) -> GermanClarinCorpus:
    """SC1 corpus: umlauts encoded with a trailing quote; used only for testing."""
    return GermanClarinCorpus('all.SC1.3.cmdi.15010.1490631864', base_directory,
                              umlaut_decoder=UmlautDecoder.quote_after_umlaut,
                              training_test_split=TrainingTestSplit.test_only)
def pd2(base_directory: Path) -> GermanClarinCorpus:
    """PD2 corpus with default decoding and split."""
    return GermanClarinCorpus('all.PD2.4.cmdi.16693.1490681127', base_directory)
def ziptel(base_directory: Path) -> GermanClarinCorpus:
    """ZIPTEL corpus with default decoding and split."""
    return GermanClarinCorpus('all.ZIPTEL.3.cmdi.63058.1490624016', base_directory)
def sc10(base_directory: Path, training_test_split=TrainingTestSplit.test_only) -> GermanClarinCorpus:
    """SC10 corpus: mixed umlaut conventions, broken labels filtered by id."""
    return GermanClarinCorpus('all.SC10.4.cmdi.13781.1490631055', base_directory,
                              umlaut_decoder=UmlautDecoder.try_quote_before_umlaut_then_after,
                              training_test_split=training_test_split,
                              id_filter_regex=sc10_broken_label_filter_regex)
# Voxforge German corpus (german-speechdata-package-v2), downloaded over HTTP
# rather than from the CLARIN mirror.  Labels come from per-recording XML
# files ('cleaned_sentence'), expanded to one entry per microphone channel
# whose wav file actually exists on disk.  The id_filter_regex excludes a
# handful of recordings known to be broken; _correct_german additionally maps
# accented characters outside the German set to plain ASCII equivalents.
# NOTE(review): left byte-identical; the label-id expansion depends on the
# extracted filesystem layout and is not safely restylable from this chunk.
class GermanVoxforgeCorpus(GermanClarinCorpus): def __init__(self, base_directory: Path): super().__init__(corpus_name='german-speechdata-package-v2', base_directory=base_directory, base_source_url_or_directory='http://www.repository.voxforge1.org/downloads/de/', tar_gz_extension='.tar.gz', subdirectory_depth=1, umlaut_decoder=UmlautDecoder.none, training_test_split=TrainingTestSplit.by_directory(), tags_to_ignore=[], id_filter_regex=re.compile('(?!^2014-03-24-13-39-24_Kinect-RAW)(?!^2014-03-27-11-50-33_Kinect-RAW)(?!^2014-03-18-15-34-19_Realtek)(?!^2014-06-17-13-46-27_Kinect-RAW)(?!^2014-06-17-13-46-27_Realtek)(?!^2014-06-17-13-46-27_Samson)(?!^2014-06-17-13-46-27_Yamaha)(^.*$)')) def _extract_positional_label_by_id(self, files: Iterable[Path]) -> Dict[(str, Union[(PositionalLabel, str)])]: xml_ending = '.xml' microphone_endings = ['_Yamaha', '_Kinect-Beam', '_Kinect-RAW', '_Realtek', '_Samson', '_Microsoft-Kinect-Raw'] xml_files = [file for file in files if file.name.endswith(xml_ending) if self.id_filter_regex.match(name_without_extension(file))] return OrderedDict((((name_without_extension(file) + microphone_ending), self._extract_label_from_xml(file)) for file in xml_files for microphone_ending in microphone_endings if (Path(file.parent) / ((name_without_extension(file) + microphone_ending) + '.wav')).exists())) def _correct_german(self, text: str) -> str: return super()._correct_german(text).replace('co2', 'co zwei').replace('ț', 't').replace('š', 's').replace('č', 'c').replace('ę', 'e').replace('ō', 'o').replace('á', 'a').replace('í', 'i').replace('ł', 'l').replace('à', 'a').replace('ė', 'e').replace('ú', 'u') def _extract_label_from_xml(self, xml_file: Path) -> str: try: return self._correct_german(ElementTree.parse(str(xml_file)).getroot().find('.//cleaned_sentence').text.lower()) except Exception: raise ParsingException('Error parsing annotation {}'.format(xml_file))
def german_corpus(base_directory: Path) -> ComposedCorpus:
    """Combine all CLARIN corpora with the Voxforge corpus into one German corpus."""
    corpora = clarin_corpora_sorted_by_size(base_directory=base_directory)
    corpora = corpora + [GermanVoxforgeCorpus(base_directory=base_directory)]
    return ComposedCorpus(corpora)
class SpectrogramFrequencyScale(Enum):
    """Frequency-axis scaling of a spectrogram."""
    linear = 'linear'
    mel = 'mel'
class SpectrogramType(Enum):
    """Physical quantity shown in a spectrogram."""
    power = 'power'
    amplitude = 'amplitude'
    power_level = 'power level'
def z_normalize(array: ndarray) -> ndarray:
    """Shift `array` to zero mean and scale it to unit standard deviation."""
    centered = array - mean(array)
    return centered / std(array)
class PositionalLabel():
    """A transcription split into words, each paired with a (start, end) position.

    Positions are sample indices until convert_range_to_seconds is applied.
    """

    def __init__(self, labeled_sections: List[Tuple[str, Tuple[float, float]]]):
        if not labeled_sections:
            raise ValueError('Sections must be specified.')
        for _, section_range in labeled_sections:
            if section_range is None:
                raise ValueError('Range must be specified.')
        self.labeled_sections = labeled_sections
        # Convenience views: word list and the full space-joined transcription.
        self.labels = [word for word, _ in labeled_sections]
        self.label = ' '.join(self.labels)

    def convert_range_to_seconds(self, original_sample_rate: int) -> 'PositionalLabel':
        """Return a copy with sample positions divided by the sample rate."""
        converted = [(word, (start / original_sample_rate, end / original_sample_rate))
                     for word, (start, end) in self.labeled_sections]
        return PositionalLabel(converted)

    def with_corrected_labels(self, correction: Callable[[str], str]) -> 'PositionalLabel':
        """Return a copy with `correction` applied to every word, ranges unchanged."""
        corrected = [(correction(word), section_range)
                     for word, section_range in self.labeled_sections]
        return PositionalLabel(corrected)

    def serialize(self) -> str:
        """One 'word|start|end' line per section."""
        lines = ['{}|{}|{}'.format(word, start, end)
                 for word, (start, end) in self.labeled_sections]
        return '\n'.join(lines)

    @staticmethod
    def deserialize(serialized: str) -> 'PositionalLabel':
        """Inverse of serialize (positions come back as floats)."""
        sections = []
        for line in serialized.splitlines():
            word, start, end = line.split('|')
            sections.append((word, (float(start), float(end))))
        return PositionalLabel(sections)
class LabeledSpectrogram(metaclass=ABCMeta):
    """Abstract base: a labeled utterance that can produce its normalized spectrogram.

    Bug fix: the original set ``__metaclass__ = ABCMeta`` inside the class body,
    which is Python-2 syntax and has no effect in Python 3 — so ``@abstractmethod``
    was never enforced and the class could be instantiated.  Declaring the
    metaclass properly restores the intended abstractness; all subclasses in this
    project implement the abstract method, so callers are unaffected.
    """

    def __init__(self, id: str, label: str):
        # id: unique example identifier; label: transcription text.
        self.label = label
        self.id = id

    @abstractmethod
    def z_normalized_transposed_spectrogram(self) -> ndarray:
        """Return an array of shape (time, frequencies), z-normalized."""
        raise NotImplementedError
# Core LabeledExample: computes power/amplitude/power-level spectrograms from
# raw audio via librosa STFT, optionally mapped to the mel scale, plus the
# z-normalized transposed spectrogram used for training, audio reconstruction
# via inverse STFT, and duration/time-step bookkeeping.  Power levels are
# clamped at -150 dB to avoid -inf for zero-power bins.
# NOTE(review): left byte-identical — the spectrogram math and parameter
# plumbing are intricate; documentation only.
class LabeledExample(LabeledSpectrogram): def __init__(self, get_raw_audio: Callable[([], ndarray)], sample_rate: int=16000, id: Optional[str]=None, label: Optional[str]='nolabel', fourier_window_length: int=512, hop_length: int=128, mel_frequency_count: int=128, label_with_tags: str=None, positional_label: Optional[PositionalLabel]=None): super().__init__(id=id, label=label) self.get_raw_audio = get_raw_audio self.sample_rate = sample_rate self.fourier_window_length = fourier_window_length self.hop_length = hop_length self.mel_frequency_count = mel_frequency_count self.label_with_tags = label_with_tags self.positional_label = positional_label def tag_count(self, tag: str) -> int: return self.label_with_tags.count(tag) def _power_spectrogram(self) -> ndarray: return (self._amplitude_spectrogram() ** 2) def _amplitude_spectrogram(self) -> ndarray: return abs(self._complex_spectrogram()) def _complex_spectrogram(self) -> ndarray: return librosa.stft(y=self.get_raw_audio(), n_fft=self.fourier_window_length, hop_length=self.hop_length) def mel_frequencies(self) -> List[float]: return librosa.mel_frequencies((self.mel_frequency_count + 2), fmax=(self.sample_rate / 2)) def _convert_spectrogram_to_mel_scale(self, linear_frequency_spectrogram: ndarray) -> ndarray: return dot(librosa.filters.mel(sr=self.sample_rate, n_fft=self.fourier_window_length, n_mels=self.mel_frequency_count), linear_frequency_spectrogram) def highest_detectable_frequency(self) -> float: return (self.sample_rate / 2) def spectrogram(self, type: SpectrogramType=SpectrogramType.power_level, frequency_scale: SpectrogramFrequencyScale=SpectrogramFrequencyScale.linear) -> ndarray: def spectrogram_by_type(): if (type == SpectrogramType.power): return self._power_spectrogram() if (type == SpectrogramType.amplitude): return self._amplitude_spectrogram() if (type == SpectrogramType.power_level): return self._power_level_from_power_spectrogram(self._power_spectrogram()) raise ValueError(type) s = spectrogram_by_type() return (self._convert_spectrogram_to_mel_scale(s) if (frequency_scale == SpectrogramFrequencyScale.mel) else s) def z_normalized_transposed_spectrogram(self): '\n        :return: Array with shape (time, frequencies)\n        ' return z_normalize(self.spectrogram(frequency_scale=SpectrogramFrequencyScale.mel).T) def frequency_count_from_spectrogram(self, spectrogram: ndarray) -> int: return spectrogram.shape[0] def time_step_count(self) -> int: return self.spectrogram().shape[1] def time_step_rate(self) -> float: return (self.time_step_count() / self.duration_in_s) @staticmethod def _power_level_from_power_spectrogram(spectrogram: ndarray) -> ndarray: def power_to_decibel(x, min_decibel: float=(- 150)) -> float: if (x == 0): return min_decibel l = (10 * math.log10(x)) return (min_decibel if (l < min_decibel) else l) return vectorize(power_to_decibel)(spectrogram) def reconstructed_audio_from_spectrogram(self) -> ndarray: return librosa.istft(self._complex_spectrogram(), win_length=self.fourier_window_length, hop_length=self.hop_length) @lazy def duration_in_s(self) -> float: return (len(self.get_raw_audio()) / self.sample_rate) def __str__(self) -> str: return (self.id + (': {}'.format(self.label) if self.label else ''))
# LabeledExample backed by an audio file: loads (and resamples) audio lazily
# via librosa, derives the id from the file name when omitted, and can split
# itself into per-word section examples using its PositionalLabel (section
# ranges are interpreted in seconds here).  duration_in_s falls back to 0
# with a log message when the file cannot be read.
# NOTE(review): left byte-identical; behavior depends on librosa/audioread
# file handling not visible from this chunk.
class LabeledExampleFromFile(LabeledExample): def __init__(self, audio_file: Path, id: Optional[str]=None, sample_rate_to_convert_to: int=16000, label: Optional[str]='nolabel', fourier_window_length: int=512, hop_length: int=128, mel_frequency_count: int=128, label_with_tags: str=None, positional_label: Optional[PositionalLabel]=None): if (id is None): id = name_without_extension(audio_file) self.audio_file = audio_file super().__init__(id=id, get_raw_audio=(lambda : librosa.load(str(self.audio_file), sr=self.sample_rate)[0]), label=label, sample_rate=sample_rate_to_convert_to, fourier_window_length=fourier_window_length, hop_length=hop_length, mel_frequency_count=mel_frequency_count, label_with_tags=label_with_tags, positional_label=positional_label) @property def audio_directory(self): return Path(self.audio_file.parent) @lazy def original_sample_rate(self) -> int: return LabeledExampleFromFile.file_sample_rate(self.audio_file) @staticmethod def file_sample_rate(audio_file: Path) -> int: with audioread.audio_open(os.path.realpath(str(audio_file))) as input_file: return input_file.samplerate @lazy def duration_in_s(self) -> float: try: return librosa.get_duration(filename=str(self.audio_file)) except Exception as e: log('Failed to get duration of {}: {}'.format(self.audio_file, e)) return 0 def sections(self) -> Optional[List[LabeledExample]]: if (self.positional_label is None): return None audio = self.get_raw_audio() def section(label, start, end): return LabeledExample(get_raw_audio=(lambda : audio[int((start * self.sample_rate)):int((end * self.sample_rate))]), label=label, sample_rate=self.sample_rate, fourier_window_length=self.fourier_window_length, hop_length=self.hop_length, mel_frequency_count=self.mel_frequency_count) return [section(label, start, end) for (label, (start, end)) in self.positional_label.labeled_sections]
# Caches z-normalized transposed spectrograms as .npy files keyed by example
# id; recomputes on cache miss or when loading fails, and can verify a cached
# spectrogram against a fresh computation (to one decimal place), moving
# mismatching files to a sibling '-incorrect' directory together with the
# assertion error text before overwriting the cache.
# NOTE(review): left byte-identical; cache repair interacts with the
# filesystem and is documentation-only here.
class CachedLabeledSpectrogram(LabeledSpectrogram): def __init__(self, original: LabeledSpectrogram, spectrogram_cache_directory: Path): super().__init__(id=original.id, label=original.label) self.original = original self.spectrogram_cache_file = (spectrogram_cache_directory / '{}.npy'.format(original.id)) def z_normalized_transposed_spectrogram(self) -> ndarray: if (not self.is_cached()): return self._calculate_and_save_spectrogram() return self._load_from_cache() def _load_from_cache(self): try: return numpy.load(str(self.spectrogram_cache_file)) except ValueError: log('Recalculating cached file {} because loading failed.'.format(self.spectrogram_cache_file)) return self._calculate_and_save_spectrogram() def _calculate_and_save_spectrogram(self): spectrogram = self.original.z_normalized_transposed_spectrogram() self._save_to_cache(spectrogram) return spectrogram def _save_to_cache(self, spectrogram: ndarray): numpy.save(str(self.spectrogram_cache_file), spectrogram) def is_cached(self): return self.spectrogram_cache_file.exists() def repair_cached_file_if_incorrect(self): if (not self.is_cached()): self._calculate_and_save_spectrogram() return from_cache = self._load_from_cache() calculated = self.original.z_normalized_transposed_spectrogram() try: numpy.testing.assert_almost_equal(calculated, from_cache, decimal=1) except AssertionError as e: self.move_incorrect_cached_file_to_backup_location_and_save_error(str(e)) self._save_to_cache(calculated) def move_incorrect_cached_file_to_backup_location_and_save_error(self, error_text: str): parent_directory = Path(self.spectrogram_cache_file.parent) incorrect_cached_backup_directory = Path((parent_directory.parent / (parent_directory.name + '-incorrect'))) mkdir(incorrect_cached_backup_directory) incorrect_backup_file = (incorrect_cached_backup_directory / self.spectrogram_cache_file.name) incorrect_backup_message_file = (incorrect_cached_backup_directory / (name_without_extension(self.spectrogram_cache_file) + '-error.txt')) write_text(incorrect_backup_message_file, error_text) self.spectrogram_cache_file.rename(incorrect_backup_file)
# Matplotlib plotting for a LabeledExample: raw/reconstructed waveforms and
# spectrograms of every type/frequency-scale combination, with axis formatters
# showing seconds, Hz, or mel-to-Hz conversions.  save_* variants write image
# and wav files named after the example id and STFT parameters.
# NOTE(review): left byte-identical — axis extents and formatter closures are
# tightly coupled to librosa's mel/Hz conversions; documentation only.
class LabeledExamplePlotter(): def __init__(self, example: LabeledExample): self.example = example def _plot_audio(self, audio: ndarray) -> None: plt.title(str(self)) plt.xlabel('time / samples (sample rate {}Hz)'.format(self.example.sample_rate)) plt.ylabel('y') plt.plot(audio) plt.show() def show_spectrogram(self, type: SpectrogramType=SpectrogramType.power_level): self.prepare_spectrogram_plot(type) plt.show() def save_spectrogram(self, target_directory: Path, type: SpectrogramType=SpectrogramType.power_level, frequency_scale: SpectrogramFrequencyScale=SpectrogramFrequencyScale.linear) -> Path: self.prepare_spectrogram_plot(type, frequency_scale) path = Path(target_directory, '{}_{}{}_spectrogram.png'.format(self.example.id, ('mel_' if (frequency_scale == SpectrogramFrequencyScale.mel) else ''), type.value.replace(' ', '_'))) plt.savefig(str(path)) return path def plot_raw_audio(self) -> None: self._plot_audio(self.example.get_raw_audio()) def prepare_spectrogram_plot(self, type: SpectrogramType=SpectrogramType.power_level, frequency_scale: SpectrogramFrequencyScale=SpectrogramFrequencyScale.linear) -> None: spectrogram = self.example.spectrogram(type, frequency_scale=frequency_scale) (figure, axes) = plt.subplots(1, 1) use_mel = (frequency_scale == SpectrogramFrequencyScale.mel) plt.title('\n'.join(wrap('{0}{1} spectrogram for {2}'.format(('mel ' if use_mel else ''), type.value, str(self)), width=100))) plt.xlabel('time (data every {}ms)'.format(round((1000 / self.example.time_step_rate())))) plt.ylabel('frequency (data evenly distributed on {} scale, {} total)'.format(frequency_scale.value, self.example.frequency_count_from_spectrogram(spectrogram))) mel_frequencies = self.example.mel_frequencies() plt.imshow(spectrogram, cmap='gist_heat', origin='lower', aspect='auto', extent=[0, self.example.duration_in_s, (librosa.hz_to_mel(mel_frequencies[0])[0] if use_mel else 0), (librosa.hz_to_mel(mel_frequencies[(- 1)])[0] if use_mel else self.example.highest_detectable_frequency())]) plt.colorbar(label='{} ({})'.format(type.value, ('in{} dB, not aligned to a particular base level'.format((' something similar to' if use_mel else '')) if (type == SpectrogramType.power_level) else 'only proportional to physical scale'))) class ScalarFormatterWithUnit(ScalarFormatter): def __init__(self, unit: str): super().__init__() self.unit = unit def __call__(self, x, pos=None) -> str: return (super().__call__(x, pos) + self.unit) axes.xaxis.set_major_formatter(ScalarFormatterWithUnit('s')) axes.yaxis.set_major_formatter((FuncFormatter((lambda value, pos: '{}mel = {}Hz'.format(int(value), int(librosa.mel_to_hz(value)[0])))) if use_mel else ScalarFormatterWithUnit('Hz'))) figure.set_size_inches(19.2, 10.8) def plot_reconstructed_audio_from_spectrogram(self) -> None: self._plot_audio(self.example.reconstructed_audio_from_spectrogram()) def save_reconstructed_audio_from_spectrogram(self, target_directory: Path) -> None: librosa.output.write_wav(str(Path(target_directory, '{}_window{}_hop{}.wav'.format(self.example.id, self.example.fourier_window_length, self.example.hop_length))), self.example.reconstructed_audio_from_spectrogram(), sr=self.example.sample_rate) def save_spectrograms_of_all_types(self, target_directory: Path) -> None: for type in SpectrogramType: for frequency_scale in SpectrogramFrequencyScale: self.save_spectrogram(target_directory=target_directory, type=type, frequency_scale=frequency_scale)
# Microphone recorder: reads float32 chunks via PyAudio, discards the very
# first chunk, byte-swaps on big-endian systems, starts on the first
# non-silent chunk and stops after silence_until_terminate_in_s of continuous
# silence, then trims leading/trailing silence and peak-normalizes the result.
# NOTE(review): 'is_first_chunk' is initialized False and the branch skips the
# first chunk as intended, but the flag name reads inverted — candidate for a
# rename in a future behavior-preserving pass.  Left byte-identical because
# the record loop's state machine depends on exact statement order.
class Recorder(): def __init__(self, silence_threshold_for_unnormalized_audio: float=0.03, chunk_size: int=1024, sample_rate: int=16000, silence_until_terminate_in_s: int=3): self.silence_threshold_for_not_normalized_sound = silence_threshold_for_unnormalized_audio self.chunk_size = chunk_size self.sample_rate = sample_rate self.silence_until_terminate_in_s = silence_until_terminate_in_s def _is_silent(self, audio: ndarray): return (max(audio) < self.silence_threshold_for_not_normalized_sound) def _normalize(self, audio: ndarray) -> ndarray: return (audio / max(abs(audio))) def _trim_silence(self, audio: ndarray) -> ndarray: def trim_start(sound: ndarray) -> ndarray: return numpy.array(list(dropwhile((lambda x: (x < self.silence_threshold_for_not_normalized_sound)), sound))) def trim_end(sound: ndarray) -> ndarray: return flipud(trim_start(flipud(sound))) return trim_start(trim_end(audio)) def record(self): 'Records from the microphone and returns the data as an array of signed shorts.' print('Wait in silence to begin recording; wait in silence to terminate') import pyaudio p = pyaudio.PyAudio() stream = p.open(format=pyaudio.paFloat32, channels=1, rate=self.sample_rate, input=True, output=True, frames_per_buffer=self.chunk_size) silent_chunk_count = 0 has_recording_started = False is_first_chunk = False chunks = [] while True: chunk_as_array = array.array('f', stream.read(self.chunk_size)) if (not is_first_chunk): is_first_chunk = True continue if (byteorder == 'big'): chunk_as_array.byteswap() chunk = numpy.array(chunk_as_array) chunks.append(chunk) silent = self._is_silent(chunk) print(('Silent: ' + str(silent))) if has_recording_started: if silent: silent_chunk_count += 1 if ((silent_chunk_count * self.chunk_size) > (self.silence_until_terminate_in_s * self.sample_rate)): break else: silent_chunk_count = 0 elif (not silent): has_recording_started = True stream.stop_stream() stream.close() print('Stopped recording.') p.terminate() return self._normalize(self._trim_silence(concatenate(chunks))) def record_to_file(self, path: Path) -> LabeledExample: "Records from the microphone and outputs the resulting data to 'path'. Returns a labeled example for analysis." librosa.output.write_wav(str(path), self.record(), self.sample_rate) return LabeledExampleFromFile(path)
def record_plot_and_save(recorder: 'Optional[Recorder]' = None, recording_directory: Path = configuration.default_data_directories.recording_directory) -> LabeledExample:
    """Record from the microphone, save the wav and its spectrogram plot, return the example.

    :param recorder: recorder to use; a fresh default-configured Recorder when None.
        (Fix: the original used the mutable default argument ``Recorder()``,
        created once at import time and shared across calls — replaced with the
        None-sentinel idiom; behavior for callers is unchanged.)
    :param recording_directory: target directory, created if missing.
    :return: the recorded LabeledExample for further analysis.
    """
    # Imported locally (as in the original) to avoid a circular import.
    from speechless.labeled_example_plotter import LabeledExamplePlotter
    if recorder is None:
        recorder = Recorder()
    mkdir(recording_directory)
    name = 'recording-{}'.format(timestamp())
    example = recorder.record_to_file(recording_directory / '{}.wav'.format(name))
    LabeledExamplePlotter(example).save_spectrogram(recording_directory)
    return example
class LoggedRunTest(TestCase):
    """LoggedRun should write each run's log output to its own result file."""

    def test(self):
        first = LoggedRun((lambda: log('1')), 'test1', Path())
        first()
        self.assertEqual('1\n', first.result_file.read_text())
        second = LoggedRun((lambda: log('2')), 'test2', Path())
        second()
        # The first run's file must be untouched by the second run.
        self.assertEqual('1\n', first.result_file.read_text())
        self.assertEqual('2\n', second.result_file.read_text())
        first.result_file.unlink()
        second.result_file.unlink()
# Documents TensorFlow CTC decoder semantics on logits for "a a blank a a":
# with merge_repeated=True the beam-search decoder collapses repetitions even
# across the blank (yielding [0]), while the greedy decoder only merges
# *consecutive* identical frames (yielding [0, 0]); with merge_repeated=False
# the greedy decoder keeps all four non-blank frames.  This asymmetry is the
# behavior the test pins down.
# NOTE(review): requires a TensorFlow session; left byte-identical.
class CtcDecodersTest(TestCase): def test(self): def decode_greedily(beam_search: bool, merge_repeated: bool): aa_ctc_blank_aa_logits = tf.constant(np.array([[[1.0, 0.0]], [[1.0, 0.0]], [[0.0, 1.0]], [[1.0, 0.0]], [[1.0, 0.0]]], dtype=np.float32)) sequence_length = tf.constant(np.array([5], dtype=np.int32)) ((decoded_list,), log_probabilities) = (tf.nn.ctc_beam_search_decoder(inputs=aa_ctc_blank_aa_logits, sequence_length=sequence_length, merge_repeated=merge_repeated, beam_width=1) if beam_search else tf.nn.ctc_greedy_decoder(inputs=aa_ctc_blank_aa_logits, sequence_length=sequence_length, merge_repeated=merge_repeated)) return list(tf.Session().run(tf.sparse_tensor_to_dense(decoded_list)[0])) self.assertEqual([0], decode_greedily(beam_search=True, merge_repeated=True)) self.assertEqual([0, 0], decode_greedily(beam_search=True, merge_repeated=False)) self.assertEqual([0, 0], decode_greedily(beam_search=False, merge_repeated=True)) self.assertEqual([0, 0, 0, 0], decode_greedily(beam_search=False, merge_repeated=False))
class CtcGraphemeEncodingTests(TestCase):
    """Round-trip, blank handling and batch decoding for CtcGraphemeEncoding."""

    def test_encode(self):
        encoding = CtcGraphemeEncoding(english_frequent_characters)
        label = "she wasn't three abcxyz"
        decoded = encoding.decode_graphemes(encoding.encode(label), merge_repeated=False)
        self.assertEqual(label, decoded)

    def test_decode(self):
        encoding = CtcGraphemeEncoding(english_frequent_characters)
        # Repeated graphemes collapse; a blank separates a genuine repetition.
        graphemes = (encoding.encode("sssshhhheeeee wasn't thre")
                     + [encoding.ctc_blank]
                     + encoding.encode('eeeeee'))
        self.assertEqual("she wasn't three", encoding.decode_graphemes(graphemes))

    def test_encode_batch(self):
        encoding = CtcGraphemeEncoding(english_frequent_characters)
        predictions = zeros((2, 3, encoding.grapheme_set_size))
        for batch_index in range(2):
            for (time_index, character) in enumerate('abc'):
                predictions[batch_index, time_index, encoding.encode_character(character)] = 1
        # The second sequence is cut to length 2 and decodes to 'ab'.
        self.assertEqual(['abc', 'ab'],
                         encoding.decode_prediction_batch(predictions, prediction_lengths=[3, 2]))
class AsgGraphemeEncodingTests(TestCase):
    """ASG encoding: repetition symbols (twice/thrice) and decoding."""

    def test_encode_repetitions(self):
        encoding = AsgGraphemeEncoding(english_frequent_characters)
        self.assertEqual([encoding.encode_character('e'), encoding.asg_twice], encoding.encode('ee'))
        self.assertEqual([encoding.encode_character('e'), encoding.asg_thrice], encoding.encode('eee'))
        # Four repetitions have no dedicated ASG symbol.
        with self.assertRaises(ValueError):
            encoding.encode('eeee')

    def test_decode(self):
        encoding = AsgGraphemeEncoding(english_frequent_characters)

        def encode_char_by_char(label: str) -> List[int]:
            return [encoding.encode_character(character) for character in label]

        graphemes = (encode_char_by_char("sssshhhheeeee wasn't thre")
                     + [encoding.asg_twice, encoding.asg_twice, encoding.asg_twice]
                     + encode_char_by_char(' aaaaaaa')
                     + [encoding.asg_thrice])
        self.assertEqual("she wasn't three aaa", encoding.decode_graphemes(graphemes))
class LabeledExampleTest(TestCase):
    """Mel spectrogram correctness and PositionalLabel serialization round-trip."""

    def test(self):
        example = corpus.examples[0]
        # librosa's reference mel power spectrogram must match our implementation.
        reference = librosa.feature.melspectrogram(y=example.get_raw_audio(),
                                                   n_fft=example.fourier_window_length,
                                                   hop_length=example.hop_length,
                                                   sr=example.sample_rate)
        computed = example.spectrogram(type=SpectrogramType.power,
                                       frequency_scale=SpectrogramFrequencyScale.mel)
        self.assertTrue(np.array_equal(reference, computed))

    def test_serialize_positional_label(self):
        original = PositionalLabel(labeled_sections=[('einmal', (0, 0.55555)), ('von', (0.55555, 0.8))])
        restored = PositionalLabel.deserialize(original.serialize())
        (label, (start, end)) = restored.labeled_sections[1]
        self.assertEqual('von', label)
        self.assertEqual(0.55555, start)
        self.assertEqual(0.8, end)
class NetTest(TestCase):
    """Smoke test: nested expectation-vs-prediction aggregates stringify without error."""

    def test_sanity_expectation_vs_prediction(self):
        perfect = ExpectationVsPrediction(expected='A', predicted='A', loss=0.0)
        wrong = ExpectationVsPrediction(expected='B', predicted='A', loss=2.0)
        # Include an empty batch and an empty group to exercise edge cases.
        batches = [ExpectationsVsPredictions([perfect, wrong]), ExpectationsVsPredictions([])]
        in_batches = ExpectationsVsPredictionsInBatches(result_batches=batches)
        grouped = ExpectationsVsPredictionsInGroupedBatches(results_by_group_name=dict(
            [('corpus1', in_batches),
             ('corpus2', in_batches),
             ('empty', ExpectationsVsPredictionsInBatches([]))]))
        print(str(grouped))
class ToolsTest(TestCase):
    """Tests for the utility helpers."""

    def test_paginate(self):
        pages = paginate([1, 2, 3], 2)
        # The final page may be shorter than the page size.
        self.assertEqual(list(pages), [[1, 2], [3]])
def single(sequence: 'List[E]') -> 'E':
    """Return the only element of a one-element sequence (asserts length 1)."""
    only_element = sequence[0]
    assert (len(sequence) == 1)
    return only_element
def single_or_none(sequence: 'List[E]') -> 'Optional[E]':
    """Return the sole element, or None when empty (asserts at most one element)."""
    assert (len(sequence) <= 1)
    return sequence[0] if sequence else None
def read_text(path: Path, encoding=None) -> str:
    """Read and return the file's full text (Path.read_text substitute, kept for Python 3.4)."""
    with path.open(encoding=encoding) as opened_file:
        return opened_file.read()
def write_text(path: Path, text: str, encoding=None):
    """Write `text` to the file (Path.write_text substitute, kept for Python 3.4)."""
    with path.open(mode='w', encoding=encoding) as opened_file:
        opened_file.write(text)
def mkdir(directory: Path) -> None:
    """Create `directory` and missing parents; no error if it already exists
    (Path.mkdir substitute, kept for Python 3.4)."""
    makedirs(str(directory), exist_ok=True)
def home_directory() -> Path:
    """The current user's home directory (Path.home substitute, kept for Python 3.4)."""
    return Path(path.expanduser('~'))
def name_without_extension(audio_file: Path) -> str:
    """File name of *audio_file* with its extension stripped."""
    stem, _ = path.splitext(audio_file.name)
    return stem
def extension(audio_file: Path) -> str:
    """Extension of *audio_file* including the leading dot, or '' if none."""
    _, suffix = path.splitext(audio_file.name)
    return suffix
def distinct(sequence: List[E]) -> List[E]:
    """Deduplicate *sequence* while preserving first-seen order."""
    seen = OrderedDict()
    for item in sequence:
        seen[item] = None
    return list(seen)
def count_summary(sequence: List[E]) -> str:
    """Readable 'item: count' summary of *sequence*, most frequent first."""
    parts = ('{}: {}'.format(item, occurrences)
             for item, occurrences in Counter(sequence).most_common())
    return ', '.join(parts)
def group(iterable: Iterable[E], key: Callable[([E], K)],
          value: Callable[([E], V)]=(lambda x: x)) -> Dict[(K, Tuple[V])]:
    """Group *iterable* by *key*, mapping each key to a tuple of value(item).

    Items are sorted by *key* first, so each key appears exactly once; the
    result preserves ascending key order.
    """
    result = OrderedDict()
    for k, members in groupby(sorted(iterable, key=key), key):
        result[k] = tuple(value(member) for member in members)
    return result
def timestamp() -> str:
    """Current local time formatted like '20240131-235959' (for file names)."""
    return strftime('%Y%m%d-%H%M%S')
def duplicates(sequence: Iterable[E]) -> List[E]:
    """Elements that occur more than once, in first-occurrence order."""
    counts = Counter(sequence)
    return [item for item in counts if counts[item] > 1]
def average_or_nan(numbers: List[float]) -> float:
    """Arithmetic mean of *numbers*; NaN for an empty list instead of raising."""
    if not numbers:
        return float('nan')
    return sum(numbers) / len(numbers)
def paginate(sequence: List[E], page_size: int) -> Iterable[List[E]]:
    """Lazily yield consecutive slices of *sequence* with at most *page_size* items."""
    total = len(sequence)
    start = 0
    while start < total:
        yield sequence[start:start + page_size]
        start += page_size
def log(obj: Any) -> None:
    """Log the string form of *obj* at INFO level on the module logger."""
    logger.info(str(obj))
class Dataset(torch.utils.data.Dataset):
    """FiD-style QA dataset: each item pairs a prefixed question with its
    top-k retrieved passages, entity-link metadata, and a pre-built graph."""

    def __init__(self, data, n_context=None, question_prefix='question:', title_prefix='title:', passage_prefix='context:'):
        self.data = data
        self.n_context = n_context
        self.question_prefix = question_prefix
        self.title_prefix = title_prefix
        self.passage_prefix = passage_prefix
        # Passage template rendered as "<title_prefix> {title}. <passage_prefix> {text}".
        self.f = self.title_prefix + ' {}. ' + self.passage_prefix + ' {}'

    def __len__(self):
        return len(self.data)

    def get_target(self, example):
        """Gold answer: explicit 'target' if present, else a random alias, else None."""
        if 'target' in example:
            return example['target']
        if 'answers' in example:
            return random.choice(example['answers'])
        return None

    def __getitem__(self, index):
        example = self.data[index]
        contexts = example['ctxs'][:self.n_context]
        # Question text gets "<prefix> " prepended, so every character offset
        # recorded against the raw question shifts by len(prefix) + 1.
        prefix_shift = len(self.question_prefix) + 1
        return {
            'index': index,
            'question': self.question_prefix + ' ' + example['question'],
            'target': self.get_target(example),
            'passages': [self.f.format(c['title'], c['text']) for c in contexts],
            'question_entities': example['linked_question_entity'],
            'question_entities_link_offset': [offset + prefix_shift for offset in example['link_offset']],
            'question_entities_link_length': example['link_length'],
            'passage_related_question_entity': [c['related_question_entity'] for c in contexts],
            'passage_related_passage_entity': [c['related_passage_entity'] for c in contexts],
            'graph': example['graph'],
            'real_index': example['real_index'],
        }

    def get_example(self, index):
        return self.data[index]
class Collator(object):
    """Collates Dataset items into model-ready batches.

    Responsibilities: tokenize targets and question+passage texts, insert/locate
    entity-mention marker tokens, align graph node numbering with the mentions
    that survive truncation, and prune graph nodes whose mentions were cut off.
    """

    def __init__(self, text_maxlength, tokenizer, answer_maxlength=20, for_eval=False):
        self.tokenizer = tokenizer
        self.text_maxlength = text_maxlength
        self.answer_maxlength = answer_maxlength
        # Token ids of the four mention-marker special tokens; [:-1] drops the
        # trailing EOS token the tokenizer appends to the probe string.
        identifiers = tokenizer.encode('<PM_LEFT> <PM_RIGHT> <QM_LEFT> <QM_RIGHT>')[:(- 1)]
        # pes/pee: passage-mention start/end ids; qes/qee: question-mention start/end ids.
        (self.pes, self.pee, self.qes, self.qee) = (identifiers[0], identifiers[1], identifiers[2], identifiers[3])
        self.for_eval = for_eval

    def __call__(self, batch):
        """Return (index, target_ids, target_mask, passage_ids, passage_masks,
        graphs, node_indices[, real_indices]) tensors/lists for one batch."""
        assert (batch[0]['target'] != None)
        index = torch.tensor([ex['index'] for ex in batch])
        target = [ex['target'] for ex in batch]
        # Pad/truncate targets only when answer_maxlength is positive.
        target = self.tokenizer.batch_encode_plus(target, max_length=(self.answer_maxlength if (self.answer_maxlength > 0) else None), padding='max_length', return_tensors='pt', truncation=(True if (self.answer_maxlength > 0) else False))
        target_ids = target['input_ids']
        target_mask = target['attention_mask'].bool()
        # -100 marks padding positions to be ignored by the LM loss.
        target_ids = target_ids.masked_fill((~ target_mask), (- 100))

        def append_question(example):
            # Prepend the question to each passage; when a passage relates to
            # question entities, wrap those entity spans in <QM_*> markers first.
            if (example['passages'] is None):
                return [example['question']]
            output = []
            for (i, t) in enumerate(example['passages']):
                if (example['passage_related_question_entity'][i] == []):
                    output.append(((example['question'] + ' ') + t))
                else:
                    offset = [e for (j, e) in enumerate(example['question_entities_link_offset']) if (j in example['passage_related_question_entity'][i])]
                    link_length = [e for (j, e) in enumerate(example['question_entities_link_length']) if (j in example['passage_related_question_entity'][i])]
                    output.append(((insert_markers_psg(example['question'], offset, link_length)[0] + ' ') + t))
            return output

        text_passages = [append_question(example) for example in batch]
        (passage_ids, passage_masks) = encode_passages(text_passages, self.tokenizer, self.text_maxlength)
        # Per example: total count of question-entity mentions across passages.
        # Graph nodes are numbered question mentions first, passage mentions after.
        n_qes = []
        for g_id in range(len(batch)):
            this_n_qes = 0
            for p_id in range(len(text_passages[0])):
                this_n_qes += len(batch[g_id]['passage_related_question_entity'][p_id])
            n_qes.append(this_n_qes)
        # Per example/passage: graph node indices of that passage's entity
        # mentions, offset past the question-mention nodes.
        total_graph_neighbors = []
        for g_id in range(len(batch)):
            batch_graph_neighbors = []
            accumulated_offset = n_qes[g_id]
            for p_id in range(len(text_passages[0])):
                n_indices = [(e + accumulated_offset) for (e, _) in enumerate(batch[g_id]['passage_related_passage_entity'][p_id])]
                batch_graph_neighbors.append(n_indices)
                accumulated_offset += len(n_indices)
            total_graph_neighbors.append(batch_graph_neighbors)
        # Clone so node removal below does not mutate the dataset's graphs.
        graphs = [e['graph'].clone() for e in batch]
        node_indices = []
        for (graph_index, this_example_ids) in enumerate(passage_ids):
            nodes_to_remove = []
            node_indice = {'question_mention': [], 'passage_mention': []}
            # NOTE(review): on a DGLGraph, num_nodes is a method; `.num_nodes != 0`
            # compares the bound method to 0 and is always truthy — likely meant
            # `.num_nodes() != 0`. Confirm against the graph type used here.
            if (batch[graph_index]['graph'].num_nodes != 0):
                for (passage_index, this_passage_ids) in enumerate(this_example_ids):
                    # Token positions of the mention markers inside this passage.
                    idx_qs = (this_passage_ids == self.qes).nonzero(as_tuple=True)[0]
                    idx_qe = (this_passage_ids == self.qee).nonzero(as_tuple=True)[0]
                    idx_ps = (this_passage_ids == self.pes).nonzero(as_tuple=True)[0]
                    idx_pe = (this_passage_ids == self.pee).nonzero(as_tuple=True)[0]
                    graph_neighbors = total_graph_neighbors[graph_index][passage_index]
                    # Truncation can cut off end markers; keep only complete pairs.
                    if (len(idx_pe) != len(idx_ps)):
                        idx_ps = idx_ps[:len(idx_pe)]
                    # Mentions lost to truncation get their graph nodes removed.
                    if (len(idx_pe) != len(graph_neighbors)):
                        nodes_to_remove.append(torch.tensor(graph_neighbors[len(idx_pe):]))
                    for (qs, qe) in zip(idx_qs.tolist(), idx_qe.tolist()):
                        node_indice['question_mention'] += [[passage_index, qs, qe]]
                    for (ps, pe) in zip(idx_ps.tolist(), idx_pe.tolist()):
                        node_indice['passage_mention'] += [[passage_index, ps, pe]]
                if (len(nodes_to_remove) != 0):
                    graphs[graph_index].remove_nodes(torch.cat(nodes_to_remove))
            node_indices.append(node_indice)
        if self.for_eval:
            return (index, target_ids, target_mask, passage_ids, passage_masks, graphs, node_indices, [e['real_index'] for e in batch])
        return (index, target_ids, target_mask, passage_ids, passage_masks, graphs, node_indices)
def encode_passages(batch_text_passages, tokenizer, max_length):
    """Tokenize a batch of per-question passage lists into stacked tensors.

    :param batch_text_passages: list (batch) of lists (passages) of strings
    :param tokenizer: HuggingFace-style callable returning 'input_ids' and
        'attention_mask' tensors
    :param max_length: pad/truncate every passage to this token length
    :return: (passage_ids, passage_masks), each shaped
        (batch, n_passages, max_length); the mask is boolean.

    Fix: dropped the unused `sp` shape variable and the unused enumerate index.
    """
    passage_ids, passage_masks = [], []
    for text_passages in batch_text_passages:
        encoded = tokenizer(text_passages, max_length=max_length, padding='max_length', return_tensors='pt', truncation=True)
        # [None] prepends the batch dimension so the per-question
        # (n_passages, max_length) tensors can be concatenated along dim 0.
        passage_ids.append(encoded['input_ids'][None])
        passage_masks.append(encoded['attention_mask'][None])
    passage_ids = torch.cat(passage_ids, dim=0)
    passage_masks = torch.cat(passage_masks, dim=0)
    return (passage_ids, passage_masks.bool())
def split_data(jsonl_path, graph_path, local_rank, world_size, graph_block_size=5000):
    """Load this rank's shard of a jsonl dataset, attaching pre-built DGL graphs.

    Graphs live in numbered block files next to *graph_path* (graph_block_size
    graphs per file, ordered by numeric suffix). Lines are assigned round-robin
    to ranks by line index; each kept example gets its graph and original line
    index ('real_index') attached.
    """
    train_data = []
    # *graph_path* is "<dir>/<dataset-name>"; collect all block files whose
    # names start with the dataset name.
    data_name = graph_path.split('/')[(- 1)]
    graph_path = graph_path[:graph_path.index(data_name)]
    prefixed = [filename for filename in os.listdir(graph_path) if filename.startswith(data_name)]

    def graph_sprt(file_name):
        # Numeric sort key from the "..._<i>.<ext>" suffix.
        index = int(file_name.split('_')[(- 1)].split('.')[0])
        return index

    prefixed.sort(key=graph_sprt)
    current_block_index = 0
    (current_graph_block, _) = dgl.load_graphs(((graph_path + '/') + prefixed[current_block_index]))
    with open(jsonl_path, encoding='utf8') as f:
        for (index, line) in enumerate(f):
            # NOTE(review): hard-coded skip of one line — presumably a known
            # corrupt example in this particular dataset; confirm and document.
            if (index == 76367):
                continue
            if ((index % world_size) == local_rank):
                train_data.append(json.loads(line))
                # Advance to the next graph block when the line index crosses a
                # block boundary. NOTE(review): this only increments once per
                # kept line, which assumes world_size < graph_block_size.
                if (index >= ((current_block_index + 1) * graph_block_size)):
                    current_block_index += 1
                    (current_graph_block, _) = dgl.load_graphs(((graph_path + '/') + prefixed[current_block_index]))
                train_data[(- 1)]['graph'] = current_graph_block[(index % graph_block_size)]
                train_data[(- 1)]['real_index'] = index
    return train_data
def insert_markers_psg(passage_text, gold_link_offsets, gold_link_length):
    """Wrap each linked span of *passage_text* in '<QM_LEFT> ... <QM_RIGHT>' markers.

    Offsets/lengths are character positions in the ORIGINAL text. Returns the
    marked-up text and the new character offsets of the spans within it.
    """
    start_marker = '<QM_LEFT>'
    end_marker = '<QM_RIGHT>'
    shift = 0  # how far later spans have moved due to previously inserted markers
    new_gold_link_offsets = []
    for offset, length in zip(gold_link_offsets, gold_link_length):
        begin = offset + shift
        if len(end_marker) != 0:
            end = begin + length
            passage_text = (passage_text[:begin] + start_marker + ' ' +
                            passage_text[begin:end] + ' ' + end_marker +
                            passage_text[end:])
            new_gold_link_offsets.append(begin + len(start_marker) + 1)
            shift += len(start_marker) + len(end_marker) + 2
        else:
            # Dead in practice (end_marker is a non-empty constant); kept for parity.
            passage_text = passage_text[:begin] + start_marker + ' ' + passage_text[begin:]
            new_gold_link_offsets.append(begin + len(start_marker) + 1)
            shift += len(start_marker) + 1
    return (passage_text, new_gold_link_offsets)
class SimpleTokenizer(object):
    """Minimal regex word tokenizer (DPR-style) used for answer matching."""
    # One or more Unicode letters, digits, or combining marks: a word token.
    ALPHA_NUM = '[\\p{L}\\p{N}\\p{M}]+'
    # Any single char that is neither a separator/space nor a control char.
    NON_WS = '[^\\p{Z}\\p{C}]'

    def __init__(self):
        """Compile the token pattern; this tokenizer only splits, no annotation."""
        self._regexp = regex.compile(('(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)), flags=((regex.IGNORECASE + regex.UNICODE) + regex.MULTILINE))

    def tokenize(self, text, uncased=False):
        """Split *text* into word/punctuation tokens, lowercased when *uncased*."""
        matches = [m for m in self._regexp.finditer(text)]
        if uncased:
            tokens = [m.group().lower() for m in matches]
        else:
            tokens = [m.group() for m in matches]
        return tokens
def calculate_matches(data: List, workers_num: int):
    """Evaluate gold-answer presence in each question's retrieved documents.

    Forks *workers_num* processes, each running check_answer on one example,
    then aggregates cumulative top-k hit counts.

    :param data: list of examples, each carrying 'answers' and ranked 'ctxs'
    :param workers_num: number of parallel worker processes
    :return: QAMatchStats(top_k_hits, questions_doc_hits) where top_k_hits[i]
        is the number of questions answered within the top i+1 documents, and
        questions_doc_hits holds the per-question, per-document match flags.
    """
    logger.info('Matching answers in top docs...')
    tokenizer = SimpleTokenizer()
    get_score_partial = partial(check_answer, tokenizer=tokenizer)
    processes = ProcessPool(processes=workers_num)
    scores = processes.map(get_score_partial, data)
    logger.info('Per question validation results len=%d', len(scores))
    # Assumes every example has the same number of contexts as the first one.
    n_docs = len(data[0]['ctxs'])
    top_k_hits = ([0] * n_docs)
    for question_hits in scores:
        # Rank of the first hit; every k >= that rank counts this question.
        best_hit = next((i for (i, x) in enumerate(question_hits) if x), None)
        if (best_hit is not None):
            top_k_hits[best_hit:] = [(v + 1) for v in top_k_hits[best_hit:]]
    return QAMatchStats(top_k_hits, scores)
def check_answer(example, tokenizer) -> List[bool]:
    """Per-passage flags: does each retrieved context contain a gold answer?"""
    answers = example['answers']
    hits = []
    for doc in example['ctxs']:
        text = doc['text']
        if text is None:
            # Passage text missing from the database: count it as a miss.
            logger.warning('no doc in db')
            hits.append(False)
        else:
            hits.append(has_answer(answers, text, tokenizer))
    return hits
def has_answer(answers, text, tokenizer) -> bool:
    """True if any normalized, tokenized answer occurs as a contiguous
    token sub-sequence of the normalized, tokenized *text*."""
    text_tokens = tokenizer.tokenize(_normalize(text), uncased=True)
    for answer in answers:
        answer_tokens = tokenizer.tokenize(_normalize(answer), uncased=True)
        window = len(answer_tokens)
        for start in range(0, len(text_tokens) - window + 1):
            if text_tokens[start:start + window] == answer_tokens:
                return True
    return False
def _normalize(text): return unicodedata.normalize('NFD', text)
def normalize_answer(s):
    """Canonicalize an answer for EM scoring: lowercase, drop punctuation,
    drop English articles (a/an/the), and collapse whitespace."""
    def lower(text):
        return text.lower()

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join((ch for ch in text if (ch not in exclude)))

    def remove_articles(text):
        return regex.sub('\\b(a|an|the)\\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def exact_match_score(prediction, ground_truth):
    """EM: True when prediction and ground truth agree after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def ems(prediction, ground_truths):
    """True if *prediction* exactly matches any answer in *ground_truths*."""
    return max(exact_match_score(prediction, gt) for gt in ground_truths)
def eval_batch(scores, inversions, avg_topk, idx_topk):
    """Accumulate ranking statistics for every score row in the batch."""
    for row in scores:
        # Indices sorted by descending score = the model's predicted ordering.
        ranking = np.argsort(-row.cpu().numpy())
        score(ranking, inversions, avg_topk, idx_topk)
def count_inversions(arr):
    """Number of out-of-order pairs: (i, j) with i < j and arr[i] > arr[j]."""
    n = len(arr)
    return sum(1 for i in range(n) for j in range(i + 1, n) if arr[i] > arr[j])
def score(x, inversions, avg_topk, idx_topk):
    """Fold one example's predicted ordering *x* into the aggregate metrics.

    Appends the inversion count of *x* to *inversions*; for each k, appends
    the mean of (x[:k] < k) to avg_topk[k] and the end-relative position of
    the last entry with value < k to idx_topk[k].
    """
    x = np.array(x)
    inversions.append(count_inversions(x))
    for k in avg_topk:
        # Share of the first k predictions whose gold rank is within the top k.
        avg_topk[k].append((x[:k] < k).mean())
    for k in idx_topk:
        below_k = (x < k)
        # argmax over the reversed mask finds the last True from the end.
        idx_gold_topk = (len(x) - np.argmax(below_k[::(- 1)]))
        idx_topk[k].append(idx_gold_topk)
class Indexer(object):
    """Thin wrapper around a FAISS inner-product index plus an external-id map."""

    def __init__(self, vector_sz, n_subquantizers=0, n_bits=8):
        if (n_subquantizers > 0):
            # Product-quantized index: compressed, must be trained before adding.
            self.index = faiss.IndexPQ(vector_sz, n_subquantizers, n_bits, faiss.METRIC_INNER_PRODUCT)
        else:
            # Exact (flat) inner-product index: no training required.
            self.index = faiss.IndexFlatIP(vector_sz)
        # Maps FAISS row positions back to caller-supplied database ids.
        self.index_id_to_db_id = np.empty(0, dtype=np.int64)

    def index_data(self, ids, embeddings):
        """Add *embeddings* (row-aligned with *ids*) to the index, training first if needed."""
        self._update_id_mapping(ids)
        embeddings = embeddings.astype('float32')
        if (not self.index.is_trained):
            self.index.train(embeddings)
        self.index.add(embeddings)
        logger.info(f'Total data indexed {len(self.index_id_to_db_id)}')

    def search_knn(self, query_vectors: np.array, top_docs: int, index_batch_size=1024) -> List[Tuple[(List[object], List[float])]]:
        """Batched top-*top_docs* search; returns one (db_ids, scores) pair per query."""
        query_vectors = query_vectors.astype('float32')
        result = []
        # Ceiling division: number of query batches.
        nbatch = (((len(query_vectors) - 1) // index_batch_size) + 1)
        for k in tqdm(range(nbatch)):
            start_idx = (k * index_batch_size)
            end_idx = min(((k + 1) * index_batch_size), len(query_vectors))
            q = query_vectors[start_idx:end_idx]
            (scores, indexes) = self.index.search(q, top_docs)
            # Translate FAISS row numbers back to database ids (as strings).
            db_ids = [[str(self.index_id_to_db_id[i]) for i in query_top_idxs] for query_top_idxs in indexes]
            result.extend([(db_ids[i], scores[i]) for i in range(len(db_ids))])
        return result

    def serialize(self, dir_path):
        """Write the FAISS index and its id mapping under *dir_path*."""
        index_file = (dir_path / 'index.faiss')
        meta_file = (dir_path / 'index_meta.dpr')
        logger.info(f'Serializing index to {index_file}, meta data to {meta_file}')
        # NOTE(review): faiss.write_index/read_index expect a str path while
        # index_file is a pathlib.Path here — confirm on the installed faiss.
        faiss.write_index(self.index, index_file)
        with open(meta_file, mode='wb') as f:
            pickle.dump(self.index_id_to_db_id, f)

    def deserialize_from(self, dir_path):
        """Load an index and id mapping previously written by serialize()."""
        index_file = (dir_path / 'index.faiss')
        meta_file = (dir_path / 'index_meta.dpr')
        logger.info(f'Loading index from {index_file}, meta data from {meta_file}')
        self.index = faiss.read_index(index_file)
        logger.info('Loaded index of type %s and size %d', type(self.index), self.index.ntotal)
        with open(meta_file, 'rb') as reader:
            self.index_id_to_db_id = pickle.load(reader)
        assert (len(self.index_id_to_db_id) == self.index.ntotal), 'Deserialized index_id_to_db_id should match faiss index size'

    def _update_id_mapping(self, db_ids: List):
        # Append in insertion order so positions stay aligned with FAISS rows.
        new_ids = np.array(db_ids, dtype=np.int64)
        self.index_id_to_db_id = np.concatenate((self.index_id_to_db_id, new_ids), axis=0)
class MPIAdapter():
    """
    MPIAdapter automatically detects and analyzes the training environment for
    distributed training and offers methods to set up distributed training jobs.

    For example, it determines whether training happens on AML, Philly, or
    locally, and derives variables such as the world size and each GPU's rank.
    """

    def __init__(self, set_env_vars=True):
        local_address = '127.0.0.1'
        default_torch_distributed_port = '55551'
        if ('OMPI_COMM_WORLD_SIZE' not in os.environ):
            # Not launched through MPI: single-process defaults.
            self.env_info = 'no MPI'
            self.world_size = 1
            self.local_size = 1
            self.rank = 0
            self.local_rank = 0
            self.master_address = local_address
            self.master_port = default_torch_distributed_port
        else:
            # Sizes/ranks come from Open MPI's environment variables.
            self.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
            self.local_size = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
            self.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
            self.local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
            if ('PHILLY_CONTAINER_IP' in os.environ):
                # Philly: rank 0 knows the master endpoint; broadcast it via MPI.
                self.env_info = 'philly'
                if (self.rank == 0):
                    self.master_address = os.environ['PHILLY_CONTAINER_IP']
                    self.master_port = os.environ['PHILLY_CONTAINER_PORT_RANGE_START']
                else:
                    self.master_address = None
                    self.master_port = None
                self.master_address = MPI.COMM_WORLD.bcast(self.master_address, root=0)
                self.master_port = MPI.COMM_WORLD.bcast(self.master_port, root=0)
            elif (('AMLK8S_NUM_WORKER' in os.environ) or ('AZ_CMK8S_JOB_WORK_DIR' in os.environ)):
                # AMLK8S/ITP: worker-0's IP is exported in a runtime init file.
                self.env_info = 'AMLK8S (ITP)'
                regexp = '[\\s\\S]*export[\\s]*DLTS_SD_worker0_IP=([0-9.]+)[\\s|s]*'
                with open('/dlts-runtime/env/init.env', 'r') as f:
                    line = f.read()
                match = re.match(regexp, line)
                if match:
                    self.master_address = str(match.group(1))
                else:
                    # No master IP in the file: only valid for a single-node job.
                    assert (self.world_size == self.local_size), "It's not a single-node debugging job on AMLK8S (ITP), but no master ip is found in file."
                    self.env_info = 'single-node AMLK8S (ITP) debugging job'
                    self.master_address = local_address
                self.master_port = default_torch_distributed_port
            elif ('AZ_BATCH_MASTER_NODE' in os.environ):
                # Multi-node AML: master endpoint is "<ip>:<port>".
                self.env_info = 'multi-node AML'
                master_node_params = os.environ['AZ_BATCH_MASTER_NODE'].split(':')
                self.master_address = master_node_params[0]
                self.master_port = default_torch_distributed_port
            elif (self.world_size == self.local_size):
                # All ranks on one host: loopback is sufficient.
                self.env_info = 'single-node AML or other MPI environment'
                self.master_address = local_address
                self.master_port = default_torch_distributed_port
            else:
                # Generic multi-node MPI: rank 0 resolves its own IP and
                # broadcasts the endpoint to all other ranks.
                self.env_info = 'multi-node other MPI environment'
                if (self.rank == 0):
                    hostname_cmd = ['hostname -I']
                    result = subprocess.check_output(hostname_cmd, shell=True)
                    self.master_address = result.decode('utf-8').split()[0]
                    self.master_port = default_torch_distributed_port
                else:
                    self.master_address = None
                    self.master_port = None
                self.master_address = MPI.COMM_WORLD.bcast(self.master_address, root=0)
                self.master_port = MPI.COMM_WORLD.bcast(self.master_port, root=0)
        self.init_method_url = f'tcp://{self.master_address}:{self.master_port}'
        if set_env_vars:
            self._set_env_vars()

    def log_info(self):
        """Log information about the detected distributed-training environment."""
        logger.warning('----------------')
        logger.warning('MPI Adapter data')
        logger.warning('----------------')
        logger.warning(f'environment info: {self.env_info}')
        logger.warning(f'init method url: {self.init_method_url}')
        logger.warning(f'world size: {self.world_size}')
        logger.warning(f'local size: {self.local_size}')
        logger.warning(f'rank: {self.rank}')
        logger.warning(f'local rank: {self.local_rank}')
        logger.warning(f'master address: {self.master_address}')
        logger.warning(f'master port: {self.master_port}')
        logger.warning('----------------')

    def init_process_group(self, backend):
        """Initialize the default PyTorch distributed process group via TCP."""
        logger.warning('trying to initialize process group ...')
        torch.distributed.init_process_group(backend=backend, init_method=self.init_method_url, world_size=self.world_size, rank=self.rank)
        logger.warning('process group initialized')

    def _set_env_vars(self):
        """Export world size, rank, local rank, and master endpoint for torch.distributed."""
        os.environ['WORLD_SIZE'] = str(self.world_size)
        os.environ['RANK'] = str(self.rank)
        os.environ['LOCAL_RANK'] = str(self.local_rank)
        os.environ['MASTER_ADDR'] = self.master_address
        os.environ['MASTER_PORT'] = self.master_port
class Options():
    """Argparse wrapper: composable option groups for reader/retriever training."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialize_parser()

    def add_optim_options(self):
        """Optimizer/scheduler hyperparameters."""
        self.parser.add_argument('--warmup_steps', type=int, default=1000)
        self.parser.add_argument('--total_steps', type=int, default=1000)
        self.parser.add_argument('--scheduler_steps', type=int, default=None, help='total number of step for the scheduler, if None then scheduler_total_step = total_step')
        self.parser.add_argument('--accumulation_steps', type=int, default=1)
        self.parser.add_argument('--dropout', type=float, default=0.1, help='dropout rate')
        self.parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
        self.parser.add_argument('--clip', type=float, default=1.0, help='gradient clipping')
        self.parser.add_argument('--optim', type=str, default='adam')
        self.parser.add_argument('--scheduler', type=str, default='fixed')
        self.parser.add_argument('--weight_decay', type=float, default=0.1)
        self.parser.add_argument('--fixed_lr', action='store_true')

    def add_eval_options(self):
        """Evaluation-output switches."""
        self.parser.add_argument('--write_results', action='store_true', help='save results')
        self.parser.add_argument('--write_crossattention_scores', action='store_true', help='save dataset with cross-attention scores')

    def add_reader_options(self):
        """Data paths and model/GNN settings for the reader."""
        self.parser.add_argument('--train_data', type=str, default='none', help='path of train data')
        self.parser.add_argument('--eval_data', type=str, default='none', help='path of eval data')
        self.parser.add_argument('--test_data', type=str, default='none', help='path of test data')
        self.parser.add_argument('--train_graph', type=str, default='none', help='path of train data')
        self.parser.add_argument('--eval_graph', type=str, default='none', help='path of eval data')
        self.parser.add_argument('--test_graph', type=str, default='none', help='path of test data')
        self.parser.add_argument('--dataset', type=str, default='webq', help='path of test data')
        self.parser.add_argument('--model_size', type=str, default='base')
        self.parser.add_argument('--use_checkpoint', action='store_true', help='use checkpoint in the encoder')
        self.parser.add_argument('--text_maxlength', type=int, default=256, help='maximum number of tokens in text segments (question+passage)')
        self.parser.add_argument('--answer_maxlength', type=int, default=(- 1), help='maximum number of tokens used to train the model, no truncation if -1')
        self.parser.add_argument('--no_title', action='store_true', help='article titles not included in passages')
        self.parser.add_argument('--n_context', type=int, default=1)
        self.parser.add_argument('--gnn_layer', type=int, default=2)
        self.parser.add_argument('--gnn_dimension', type=int, default=768)
        self.parser.add_argument('--add_residual', action='store_true', help='add residual connection for GNN')
        self.parser.add_argument('--is_distributed', action='store_true', help='is_distributed')
        self.parser.add_argument('--gnn_dropout', type=float, default=0.3, help='drop out rate for gnn')
        self.parser.add_argument('--layer2insert', type=int, default=10)
        self.parser.add_argument('--bpe', action='store_true', help='whether or not back propagate through gnn back to encoder')
        self.parser.add_argument('--warmed_gnn', type=str, default='none', help='path to warmed gnn')
        self.parser.add_argument('--wandb', action='store_true', help='enable wandb functionality')
        self.parser.add_argument('--ground_truth_for_test', type=str, help='json 1-hop solvable')
        self.parser.add_argument('--relation_base', type=str, help='json name for involved relations')
        self.parser.add_argument('--n_heads', type=int, help='Number of gnn heads')
        self.parser.add_argument('--gnn_mode', type=str, default='EGAT', help='type of gnn')

    def add_gnn_warmup_options(self):
        """Extra options for GNN warm-up training."""
        self.parser.add_argument('--train_data_json', type=str, default='none', help='path of train data json')
        self.parser.add_argument('--eval_data_json', type=str, default='none', help='path of eval data json')
        self.parser.add_argument('--patience', type=int, default=3)

    def add_retriever_options(self):
        """Data paths and encoder settings for the retriever.

        NOTE(review): re-registers --train_data/--eval_data/--test_data/
        --no_title/--n_context, which add_reader_options also defines; argparse
        raises on duplicates, so these two groups must never be combined on one
        parser — confirm get_options is never called with both flags.
        """
        self.parser.add_argument('--train_data', type=str, default='none', help='path of train data')
        self.parser.add_argument('--eval_data', type=str, default='none', help='path of eval data')
        self.parser.add_argument('--test_data', type=str, default='none', help='path of test data')
        self.parser.add_argument('--indexing_dimension', type=int, default=768)
        self.parser.add_argument('--no_projection', action='store_true', help='No addition Linear layer and layernorm, only works if indexing size equals 768')
        self.parser.add_argument('--question_maxlength', type=int, default=40, help='maximum number of tokens in questions')
        self.parser.add_argument('--passage_maxlength', type=int, default=256, help='maximum number of tokens in passages')
        self.parser.add_argument('--no_question_mask', action='store_true')
        self.parser.add_argument('--no_passage_mask', action='store_true')
        self.parser.add_argument('--extract_cls', action='store_true')
        self.parser.add_argument('--no_title', action='store_true', help='article titles not included in passages')
        self.parser.add_argument('--n_context', type=int, default=1)

    def initialize_parser(self):
        """Options common to every run (experiment bookkeeping, distribution)."""
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment')
        self.parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint/', help='models are saved here')
        self.parser.add_argument('--model_path', type=str, default='none', help='path for retraining')
        self.parser.add_argument('--use_mpi', action='store_true', help='use openMPI for pytorch distributed training')
        self.parser.add_argument('--per_gpu_batch_size', default=1, type=int, help='Batch size per GPU/CPU for training.')
        self.parser.add_argument('--maxload', type=int, default=(- 1))
        self.parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
        self.parser.add_argument('--main_port', type=int, default=(- 1), help='Main port (for multi-node SLURM jobs)')
        self.parser.add_argument('--seed', type=int, default=0, help='random seed for initialization')
        self.parser.add_argument('--eval_freq', type=int, default=500, help='evaluate model every <eval_freq> steps during training')
        self.parser.add_argument('--save_freq', type=int, default=5000, help='save model every <save_freq> steps during training')
        self.parser.add_argument('--eval_print_freq', type=int, default=1000, help='print intermdiate results of evaluation every <eval_print_freq> steps')

    def print_options(self, opt):
        """Log all parsed options (flagging non-defaults) and write them to
        <checkpoint_dir>/<name>/opt.log; also creates the models directory."""
        message = '\n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default_value = self.parser.get_default(k)
            if (v != default_value):
                comment = f' (default: {default_value})'
            message += f'''{str(k):>30}: {str(v):<40}{comment}
'''
        expr_dir = (Path(opt.checkpoint_dir) / opt.name)
        model_dir = (expr_dir / 'models')
        model_dir.mkdir(parents=True, exist_ok=True)
        with open((expr_dir / 'opt.log'), 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
        logger.info(message)

    def parse(self):
        """Parse sys.argv with the currently registered option groups."""
        opt = self.parser.parse_args()
        return opt
def get_options(use_reader=False, use_retriever=False, use_optim=False, use_eval=False):
    """Build an Options parser with the requested option groups and parse argv."""
    options = Options()
    group_adders = ((use_reader, options.add_reader_options),
                    (use_retriever, options.add_retriever_options),
                    (use_optim, options.add_optim_options),
                    (use_eval, options.add_eval_options))
    for enabled, add_group in group_adders:
        if enabled:
            add_group()
    return options.parse()
def select_examples_TQA(data, index, passages, passages_index):
    """Build TriviaQA reader examples for the dataset rows named by *index*.

    Each output pairs the question with its answer aliases, the (title-cased
    when all-caps) gold value, and the passages found via passages_index
    keyed by the example's QuestionId.
    """
    selected_data = []
    for k in index:
        example = data[k]
        target = example['Answer']['Value']
        if target.isupper():
            # All-caps gold values are title-cased for readability/matching.
            target = target.title()
        ctxs = [{'id': pid, 'title': passages[pid][1], 'text': passages[pid][0]}
                for pid in passages_index[example['QuestionId']]]
        selected_data.append({'question': example['Question'],
                              'answers': example['Answer']['Aliases'],
                              'target': target,
                              'ctxs': ctxs})
    return selected_data
def select_examples_NQ(data, index, passages, passages_index):
    """Build NQ reader examples for the dataset rows named by *index*.

    Note: passages are looked up by the POSITION within *index* (str(i)),
    not by the dataset row id k.
    """
    selected_data = []
    for i, k in enumerate(index):
        row = data[k]
        ctxs = [{'id': pid, 'title': passages[pid][1], 'text': passages[pid][0]}
                for pid in passages_index[str(i)]]
        selected_data.append({'question': row['question'],
                              'answers': row['answer'],
                              'ctxs': ctxs})
    return selected_data
def sig_handler(signum, frame):
    """SIGUSR1 handler for SLURM pre-emption: rank 0 requeues the job, all exit."""
    logger.warning(('Signal handler called with signal ' + str(signum)))
    prod_id = int(os.environ['SLURM_PROCID'])
    logger.warning(('Host: %s - Global rank: %i' % (socket.gethostname(), prod_id)))
    if (prod_id == 0):
        # Only the global rank-0 process asks SLURM to requeue the job.
        logger.warning(('Requeuing job ' + os.environ['SLURM_JOB_ID']))
        os.system(('scontrol requeue ' + os.environ['SLURM_JOB_ID']))
    else:
        logger.warning('Not the main process, no need to requeue.')
    sys.exit((- 1))
def term_handler(signum, frame):
    """SIGTERM handler: log the signal and deliberately ignore it."""
    logger.warning('Signal handler called with signal ' + str(signum))
    logger.warning('Bypassing SIGTERM.')
def init_signal_handler():
    """Install SLURM pre-emption handlers: requeue on SIGUSR1, ignore SIGTERM."""
    for signum, handler in ((signal.SIGUSR1, sig_handler),
                            (signal.SIGTERM, term_handler)):
        signal.signal(signum, handler)
def init_distributed_mode(params):
    """
    Handle single and multi-GPU / multi-node / SLURM jobs.
    Initialize the following variables on *params*:
        - n_nodes
        - node_id
        - local_rank
        - global_rank
        - world_size
    plus n_gpu_per_node, is_distributed, is_main, multi_node, multi_gpu, and
    device; exports MASTER_* / WORLD_SIZE / RANK env vars in the SLURM case
    and initializes the NCCL process group when distributed.
    """
    params.is_slurm_job = ('SLURM_JOB_ID' in os.environ)
    has_local_rank = hasattr(params, 'local_rank')
    if (params.is_slurm_job and has_local_rank):
        # SLURM path: everything is derived from SLURM_* environment variables.
        assert (params.local_rank == (- 1))
        SLURM_VARIABLES = ['SLURM_JOB_ID', 'SLURM_JOB_NODELIST', 'SLURM_JOB_NUM_NODES', 'SLURM_NTASKS', 'SLURM_TASKS_PER_NODE', 'SLURM_MEM_PER_NODE', 'SLURM_MEM_PER_CPU', 'SLURM_NODEID', 'SLURM_PROCID', 'SLURM_LOCALID', 'SLURM_TASK_PID']
        PREFIX = ('%i - ' % int(os.environ['SLURM_PROCID']))
        # NOTE(review): this loop reads each variable but never uses `value`
        # (likely a leftover from a debug print); it has no effect.
        for name in SLURM_VARIABLES:
            value = os.environ.get(name, None)
        params.n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
        params.node_id = int(os.environ['SLURM_NODEID'])
        params.local_rank = int(os.environ['SLURM_LOCALID'])
        params.global_rank = int(os.environ['SLURM_PROCID'])
        params.world_size = int(os.environ['SLURM_NTASKS'])
        params.n_gpu_per_node = (params.world_size // params.n_nodes)
        # First hostname in the job's node list acts as the rendezvous master.
        hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
        params.main_addr = hostnames.split()[0].decode('utf-8')
        assert ((10001 <= params.main_port <= 20000) or (params.world_size == 1))
        os.environ['MASTER_ADDR'] = params.main_addr
        os.environ['MASTER_PORT'] = str(params.main_port)
        os.environ['WORLD_SIZE'] = str(params.world_size)
        os.environ['RANK'] = str(params.global_rank)
        params.is_distributed = True
    elif (has_local_rank and (params.local_rank != (- 1))):
        # torch.distributed.launch path: env vars are already set by the launcher.
        assert (params.main_port == (- 1))
        params.global_rank = int(os.environ['RANK'])
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['NGPU'])
        params.n_nodes = (params.world_size // params.n_gpu_per_node)
        params.node_id = (params.global_rank // params.n_gpu_per_node)
        params.is_distributed = True
    else:
        # Plain local job: one process, all visible GPUs counted but unused
        # for process-level parallelism.
        n_gpu = torch.cuda.device_count()
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = n_gpu
        params.n_gpu_per_node = n_gpu
        params.is_distributed = False
    params.is_main = ((params.node_id == 0) and (params.local_rank == 0))
    params.multi_node = (params.n_nodes > 1)
    params.multi_gpu = (params.world_size > 1)
    PREFIX = ('%i - ' % params.global_rank)
    if params.is_distributed:
        # Pin this process to its GPU before creating any CUDA context.
        torch.cuda.set_device(params.local_rank)
        device = torch.device('cuda', params.local_rank)
    else:
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    params.device = device
    if params.is_distributed:
        torch.distributed.init_process_group(init_method='env://', backend='nccl')
def init_logger(is_main=True, is_distributed=False, filename=None):
    """Configure root logging to stdout (and optionally a file).

    Non-main ranks only log WARN and above; noisy transformers tokenizer
    loggers are silenced to ERROR. Returns the module logger.
    """
    if is_distributed:
        # All ranks synchronize here before logging is (re)configured.
        torch.distributed.barrier()
    handlers = [logging.StreamHandler(sys.stdout)]
    if filename is not None:
        handlers.append(logging.FileHandler(filename=filename))
    logging.basicConfig(
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if is_main else logging.WARN,
        format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
        handlers=handlers)
    logging.getLogger('transformers.tokenization_utils').setLevel(logging.ERROR)
    logging.getLogger('transformers.tokenization_utils_base').setLevel(logging.ERROR)
    return logger
def get_checkpoint_path(opt):
    """Resolve and create the experiment checkpoint directory.

    Returns (path, existed_before); existence is sampled BEFORE the mkdir so
    callers can tell a fresh run from a resumed one.
    """
    checkpoint_path = Path(opt.checkpoint_dir) / opt.name
    checkpoint_exists = checkpoint_path.exists()
    if opt.is_distributed:
        # Let all ranks observe the same pre-creation state first.
        torch.distributed.barrier()
    checkpoint_path.mkdir(parents=True, exist_ok=True)
    return checkpoint_path, checkpoint_exists
def symlink_force(target, link_name):
    """Create a symlink ``link_name`` -> ``target``, replacing an existing link/file.

    Fix vs. original: re-raise with bare ``raise`` instead of ``raise e``,
    which preserves the original traceback.
    """
    try:
        os.symlink(target, link_name)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Link name already taken: remove and retry.
            os.remove(link_name)
            os.symlink(target, link_name)
        else:
            raise
def save(model, optimizer, scheduler, step, best_eval_metric, opt, dir_path, name):
    """Write a checkpoint under ``dir_path/checkpoint/name``.

    Saves the (unwrapped) model with ``save_pretrained``, its GNN config as
    JSON, and a torch checkpoint with optimizer/scheduler state, the step,
    the options namespace and the best eval metric.
    """
    # Unwrap DataParallel / DistributedDataParallel if needed.
    model_to_save = model.module if hasattr(model, 'module') else model

    path = os.path.join(dir_path, 'checkpoint')
    epoch_path = os.path.join(path, name)
    os.makedirs(epoch_path, exist_ok=True)

    model_to_save.save_pretrained(epoch_path)
    with open('{}/gnn_config.json'.format(epoch_path), 'w') as fp:
        json.dump(model_to_save.gnn_config, fp, sort_keys=True, indent=4)

    # NOTE(review): `cp` is computed but never used — presumably intended for a
    # symlink_force(epoch_path, cp) "latest" link; confirm before removing.
    cp = os.path.join(path, 'latest')

    fp = os.path.join(epoch_path, 'optimizer.pth.tar')
    checkpoint = {
        'step': step,
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'opt': opt,
        'best_eval_metric': best_eval_metric,
    }
    torch.save(checkpoint, fp)
def load(model_class, dir_path, opt, reset_params=False):
    """Load a model plus optimizer/scheduler state from a checkpoint directory.

    When ``reset_params`` is True, a fresh optimizer/scheduler is built from
    ``opt``; otherwise the saved state (built from the checkpointed options)
    is restored.
    Returns (model, optimizer, scheduler, opt_checkpoint, step, best_eval_metric).
    """
    epoch_path = os.path.realpath(dir_path)
    optimizer_path = os.path.join(epoch_path, 'optimizer.pth.tar')

    logger.info('Loading %s' % epoch_path)
    gnn_config = json.load(open(epoch_path + '/gnn_config.json'))
    model = model_class.from_pretrained(epoch_path, **gnn_config)
    model = model.to(opt.device)

    logger.info('loading checkpoint %s' % optimizer_path)
    checkpoint = torch.load(optimizer_path, map_location=opt.device)
    opt_checkpoint = checkpoint['opt']
    step = checkpoint['step']
    # Older checkpoints stored the metric under 'best_dev_em'.
    if 'best_eval_metric' in checkpoint:
        best_eval_metric = checkpoint['best_eval_metric']
    else:
        best_eval_metric = checkpoint['best_dev_em']

    if reset_params:
        optimizer, scheduler = set_optim(opt, model)
    else:
        optimizer, scheduler = set_optim(opt_checkpoint, model)
        scheduler.load_state_dict(checkpoint['scheduler'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    return model, optimizer, scheduler, opt_checkpoint, step, best_eval_metric
class WarmupLinearScheduler(torch.optim.lr_scheduler.LambdaLR):
    """LR schedule: linear warmup from ``min_ratio`` to 1 over ``warmup_steps``,
    then either constant (``fixed_lr``) or linear decay reaching ``min_ratio``
    at ``scheduler_steps`` (clamped at 0 beyond that)."""

    def __init__(self, optimizer, warmup_steps, scheduler_steps, min_ratio, fixed_lr, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.scheduler_steps = scheduler_steps
        self.min_ratio = min_ratio
        self.fixed_lr = fixed_lr
        super().__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Warmup phase: ramp linearly from min_ratio up to 1.
        if step < self.warmup_steps:
            return ((1 - self.min_ratio) * step) / float(max(1, self.warmup_steps)) + self.min_ratio
        # After warmup: hold the learning rate constant ...
        if self.fixed_lr:
            return 1.0
        # ... or decay linearly, never dropping below 0.
        decay_span = float(max(1.0, self.scheduler_steps - self.warmup_steps))
        return max(0.0, 1.0 + ((self.min_ratio - 1) * (step - self.warmup_steps)) / decay_span)
class FixedScheduler(torch.optim.lr_scheduler.LambdaLR):
    """A no-op schedule: the learning rate is held constant at its base value."""

    def __init__(self, optimizer, last_epoch=-1):
        super().__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Multiplier of 1.0 at every step keeps the base lr unchanged.
        return 1.0
def set_dropout(model, dropout_rate):
    """Set the dropout probability of every ``nn.Dropout`` submodule to ``dropout_rate``."""
    dropout_layers = (m for m in model.modules() if isinstance(m, torch.nn.Dropout))
    for layer in dropout_layers:
        layer.p = dropout_rate
def set_optim(opt, model):
    """Build ``(optimizer, scheduler)`` from the options namespace.

    ``opt.optim`` selects 'adam' or 'adamw'; ``opt.scheduler`` selects
    'fixed' or 'linear' (the latter using ``opt.scheduler_steps`` or, when
    that is None, ``opt.total_steps`` as the decay horizon).

    Fix vs. original: an unrecognized ``opt.optim`` / ``opt.scheduler`` now
    raises ValueError instead of falling through to a NameError on the
    undefined ``optimizer`` / ``scheduler`` local.
    """
    if opt.optim == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    elif opt.optim == 'adamw':
        optimizer = torch.optim.AdamW(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    else:
        raise ValueError('Unknown optimizer: {}'.format(opt.optim))

    if opt.scheduler == 'fixed':
        scheduler = FixedScheduler(optimizer)
    elif opt.scheduler == 'linear':
        # Fall back to the total number of training steps when no explicit
        # scheduler horizon is given.
        scheduler_steps = opt.total_steps if opt.scheduler_steps is None else opt.scheduler_steps
        scheduler = WarmupLinearScheduler(optimizer,
                                          warmup_steps=opt.warmup_steps,
                                          scheduler_steps=scheduler_steps,
                                          min_ratio=0.0,
                                          fixed_lr=opt.fixed_lr)
    else:
        raise ValueError('Unknown scheduler: {}'.format(opt.scheduler))

    return optimizer, scheduler
def average_main(x, opt):
    """Reduce ``x`` onto rank 0 and average it there; identity when not distributed.

    Non-main ranks keep the (partial) reduced value unaveraged.
    """
    if not opt.is_distributed:
        return x
    if opt.world_size > 1:
        dist.reduce(x, 0, op=dist.ReduceOp.SUM)
        if opt.is_main:
            # Only rank 0 holds the full sum, so only it divides.
            x = x / opt.world_size
    return x
def sum_main(x, opt):
    """Sum ``x`` across ranks onto rank 0; identity when not distributed.

    After the call, only the main rank holds the full sum.
    """
    if not opt.is_distributed:
        return x
    if opt.world_size > 1:
        dist.reduce(x, 0, op=dist.ReduceOp.SUM)
    return x
def weighted_average(x, count, opt):
    """Return the count-weighted mean of ``x`` across ranks plus the total count.

    In the non-distributed case this is simply ``(x, count)``.
    """
    if not opt.is_distributed:
        return x, count
    # Reduce sum-of-values and sum-of-counts separately, then divide.
    weighted_sum = torch.tensor([x * count], device=opt.device)
    total = torch.tensor([count], device=opt.device)
    weighted_sum = sum_main(weighted_sum, opt)
    total = sum_main(total, opt)
    return (weighted_sum / total).item(), total.item()
def write_output(glob_path, output_path):
    """Concatenate all ``*.txt`` files under ``glob_path`` (in sorted order)
    into ``output_path``, deleting each part file and finally the directory."""
    part_files = sorted(glob_path.glob('*.txt'))
    with open(output_path, 'w') as outfile:
        for part in part_files:
            with open(part, 'r') as f:
                outfile.writelines(f.readlines())
            # Part file is consumed; remove it so the directory can be dropped.
            part.unlink()
    glob_path.rmdir()
def save_distributed_dataset(data, opt):
    """Each rank dumps its shard to ``tmp_dir/<rank>.json``; the main rank then
    merges all shards into ``dataset_wscores.json`` and removes the temp dir.

    Fix vs. original: removed the unused local ``glob_path``.
    """
    dir_path = Path(opt.checkpoint_dir) / opt.name
    write_path = dir_path / 'tmp_dir'
    write_path.mkdir(exist_ok=True)
    tmp_path = write_path / f'{opt.global_rank}.json'
    with open(tmp_path, 'w') as fw:
        json.dump(data, fw)
    if opt.is_distributed:
        # Wait until every rank has finished writing its shard.
        torch.distributed.barrier()
    if opt.is_main:
        final_path = dir_path / 'dataset_wscores.json'
        logger.info(f'Writing dataset with scores at {final_path}')
        alldata = []
        for path in write_path.glob('*.json'):
            with open(path, 'r') as f:
                alldata.extend(json.load(f))
            path.unlink()
        with open(final_path, 'w') as fout:
            json.dump(alldata, fout, indent=4)
        write_path.rmdir()
def load_passages(path):
    """Load passages from a tab-separated file as ``(id, text, title)`` tuples.

    The header row (first column == 'id') is skipped. Returns None when the
    file does not exist.

    Fixes vs. original: the bare ``except:`` is narrowed to ``IndexError``
    (a short row is the only failure the handler is meant to absorb), and
    the unused ``enumerate`` index is dropped.
    """
    if not os.path.exists(path):
        logger.info(f'{path} does not exist')
        return
    logger.info(f'Loading passages from: {path}')
    passages = []
    with open(path) as fin:
        reader = csv.reader(fin, delimiter='\t')
        for row in reader:
            if row[0] == 'id':
                continue  # header row
            try:
                passages.append((row[0], row[1], row[2]))
            except IndexError:
                # Best-effort: log and skip malformed (too-short) rows.
                logger.warning(f'The following input line has not been correctly loaded: {row}')
    return passages
def truncate_graph_wrt_n_passage(graph, node_indices, n_passage):
    """Truncate the graph to its first ``n_passage`` passages.

    Removes passage nodes with index >= ``n_passage`` and every mention node
    attached to them, updating ``node_indices`` in place.  Graphs are built
    with 100 passages, so ``n_passage == 100`` is a no-op.

    Fix vs. original: the final statement returned only ``node_indices``
    while the early-exit path returned ``(graph, node_indices)``; both paths
    now return the tuple.
    """
    if n_passage == 100:
        return graph, node_indices
    mention_nodes_to_remove = []
    new_mention_indices = []
    for index, triple in enumerate(node_indices['mention_nodes']):
        # triple[0] is the passage index the mention belongs to — TODO confirm
        if triple[0] >= n_passage:
            mention_nodes_to_remove.append(index)
        else:
            new_mention_indices.append(triple)
    graph.remove_nodes(mention_nodes_to_remove, ntype='mention')
    graph.remove_nodes(list(range(n_passage, 100)), ntype='passage')
    node_indices['mention_nodes'] = new_mention_indices
    node_indices['passage_nodes'] = node_indices['passage_nodes'][:n_passage]
    return graph, node_indices
def freeze_t5(model):
    """Freeze every direct child of ``model`` except the GNN head (``gnn_model``)."""
    # Unwrap DataParallel / DistributedDataParallel if needed.
    unwrapped = model.module if hasattr(model, 'module') else model
    for child_name, child in unwrapped.named_children():
        if child_name == 'gnn_model':
            continue  # keep GNN parameters trainable
        for param in child.parameters():
            param.requires_grad = False