idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
38,200
def read_samples_from_file(self):
    """
    Load the audio samples from ``self.file_path`` into memory.

    If the file format is unknown, or safety checks are enabled and the
    format differs from mono PCM16 at the configured sample rate, the file
    is first converted to a temporary mono WAVE file via ffmpeg; otherwise
    it is read directly.  Samples are stored as float64 in [-1, 1).

    Raises (via log_exc): OSError if the file cannot be read,
    AudioFileConverterError if ffmpeg cannot be called,
    AudioFileUnsupportedFormatError if the format is not supported.
    """
    self.log(u"Loading audio data...")
    if not gf.file_can_be_read(self.file_path):
        self.log_exc(u"File '%s' cannot be read" % (self.file_path), None, True, OSError)
    # convert when the format is unknown, or when safety checks are on and
    # the format is not already mono PCM16 at the configured sample rate
    convert_audio_file = ((self.file_format is None) or ((self.rconf.safety_checks) and (self.file_format != ("pcm_s16le", 1, self.rconf.sample_rate))))
    if convert_audio_file:
        self.log(u"self.file_format is None or not good => converting self.file_path")
        tmp_handler, tmp_file_path = gf.tmp_file(suffix=u".wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
        self.log([u"Temporary PCM16 mono WAVE file: '%s'", tmp_file_path])
        try:
            self.log(u"Converting audio file to mono...")
            converter = FFMPEGWrapper(rconf=self.rconf, logger=self.logger)
            converter.convert(self.file_path, tmp_file_path)
            self.file_format = ("pcm_s16le", 1, self.rconf.sample_rate)
            self.log(u"Converting audio file to mono... done")
        except FFMPEGPathError:
            # clean up the temporary file before propagating the error
            gf.delete_file(tmp_handler, tmp_file_path)
            self.log_exc(u"Unable to call ffmpeg executable", None, True, AudioFileConverterError)
        except OSError:
            gf.delete_file(tmp_handler, tmp_file_path)
            self.log_exc(u"Audio file format not supported by ffmpeg", None, True, AudioFileUnsupportedFormatError)
    else:
        if self.rconf.safety_checks:
            self.log(u"self.file_format is good => reading self.file_path directly")
        else:
            self.log_warn(u"Safety checks disabled => reading self.file_path directly")
        # no conversion: read the original file in place
        tmp_handler = None
        tmp_file_path = self.file_path
    try:
        self.audio_format = "pcm16"
        self.audio_channels = 1
        self.audio_sample_rate, self.__samples = scipywavread(tmp_file_path)
        # normalize int16 PCM to float64 in [-1, 1)
        self.__samples = self.__samples.astype("float64") / 32768
        self.__samples_capacity = len(self.__samples)
        self.__samples_length = self.__samples_capacity
        self._update_length()
    except ValueError:
        self.log_exc(u"Audio format not supported by scipywavread", None, True, AudioFileUnsupportedFormatError)
    if convert_audio_file:
        # remove the temporary converted file
        gf.delete_file(tmp_handler, tmp_file_path)
        self.log([u"Deleted temporary audio file: '%s'", tmp_file_path])
    self._update_length()
    self.log([u"Sample length: %.3f", self.audio_length])
    self.log([u"Sample rate: %d", self.audio_sample_rate])
    self.log([u"Audio format: %s", self.audio_format])
    self.log([u"Audio channels: %d", self.audio_channels])
    self.log(u"Loading audio data... done")
Load the audio samples from file into memory .
38,201
def preallocate_memory(self, capacity):
    """
    Resize the internal sample buffer to exactly ``capacity`` samples,
    so that several consecutive append operations do not trigger
    repeated reallocations and copies.

    :param capacity: the new buffer capacity, in samples (must be >= 0)
    :raises ValueError: if ``capacity`` is negative
    """
    if capacity < 0:
        raise ValueError(u"The capacity value cannot be negative")
    if self.__samples is not None:
        # already allocated: resize, clamping the valid length to the new capacity
        self.log([u"Previous sample length was (samples): %d", self.__samples_length])
        self.log([u"Previous sample capacity was (samples): %d", self.__samples_capacity])
        self.__samples = numpy.resize(self.__samples, capacity)
        self.__samples_length = min(self.__samples_length, capacity)
    else:
        # first allocation: start from an all-zero buffer with no valid samples
        self.log(u"Not initialized")
        self.__samples = numpy.zeros(capacity)
        self.__samples_length = 0
    self.__samples_capacity = capacity
    self.log([u"Current sample capacity is (samples): %d", self.__samples_capacity])
Preallocate memory to store audio samples to avoid repeated new allocations and copies while performing several consecutive append operations .
38,202
def minimize_memory(self):
    """
    Shrink the allocated sample buffer so its capacity equals the
    number of samples currently stored.
    """
    if self.__samples is None:
        self.log(u"Not initialized, returning")
        return
    self.log(u"Initialized, minimizing memory...")
    self.preallocate_memory(self.__samples_length)
    self.log(u"Initialized, minimizing memory... done")
Reduce the allocated memory to the minimum required to store the current audio samples .
38,203
def add_samples(self, samples, reverse=False):
    """
    Concatenate ``samples`` to the current audio data, growing the
    buffer (doubling strategy) when capacity is insufficient.

    :param samples: the new samples to append
    :param reverse: if True, append the samples in reversed order
    """
    self.log(u"Adding samples...")
    old_length = self.__samples_length
    new_length = old_length + len(samples)
    needs_room = (self.__samples is None) or (self.__samples_capacity < new_length)
    if needs_room:
        # double the required size to amortize repeated appends
        self.preallocate_memory(2 * new_length)
    self.__samples[old_length:new_length] = samples[::-1] if reverse else samples[:]
    self.__samples_length = new_length
    self._update_length()
    self.log(u"Adding samples... done")
Concatenate the given new samples to the current audio data .
38,204
def reverse(self):
    """
    Reverse, in place, the valid portion of the audio samples.

    If samples are not loaded yet, they are read from file first;
    if no file path is set either, log_exc raises
    AudioFileNotInitializedError.
    """
    if self.__samples is None:
        if self.file_path is None:
            self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
        else:
            self.read_samples_from_file()
    self.log(u"Reversing...")
    n = self.__samples_length
    # only the first n samples are valid; leave spare capacity untouched
    self.__samples[0:n] = numpy.flipud(self.__samples[0:n])
    self.log(u"Reversing... done")
Reverse the audio data .
38,205
def trim(self, begin=None, length=None):
    """
    Keep only the slice of the audio data of ``length`` seconds
    starting at ``begin`` seconds, in place.

    Both arguments are clamped into the valid range of the audio;
    ``begin=None`` means 0.000 and ``length=None`` means "to the end".
    If both are None, nothing is done.

    :param begin: start time (TimeValue) or None
    :param length: duration (TimeValue) or None
    :raises TypeError: if an argument is neither None nor a TimeValue
    """
    for variable, name in [(begin, "begin"), (length, "length")]:
        if (variable is not None) and (not isinstance(variable, TimeValue)):
            raise TypeError(u"%s is not None or TimeValue" % name)
    self.log(u"Trimming...")
    if (begin is None) and (length is None):
        self.log(u"begin and length are both None: nothing to do")
    else:
        if begin is None:
            begin = TimeValue("0.000")
            self.log([u"begin was None, now set to %.3f", begin])
        # clamp begin into [0, audio_length]
        begin = min(max(TimeValue("0.000"), begin), self.audio_length)
        self.log([u"begin is %.3f", begin])
        if length is None:
            length = self.audio_length - begin
            self.log([u"length was None, now set to %.3f", length])
        # clamp length into [0, audio_length - begin]
        length = min(max(TimeValue("0.000"), length), self.audio_length - begin)
        self.log([u"length is %.3f", length])
        # convert times to sample indices and shift the kept slice to the front
        begin_index = int(begin * self.audio_sample_rate)
        end_index = int((begin + length) * self.audio_sample_rate)
        new_idx = end_index - begin_index
        self.__samples[0:new_idx] = self.__samples[begin_index:end_index]
        self.__samples_length = new_idx
        self._update_length()
    self.log(u"Trimming... done")
Get a slice of the audio data of length seconds starting from begin seconds .
38,206
def write(self, file_path):
    """
    Write the audio data to ``file_path`` as a 16-bit PCM WAVE file.

    Samples are loaded from file first if not already in memory.

    :param file_path: the path of the output file
    :raises OSError: (via log_exc) if writing fails
    """
    if self.__samples is None:
        if self.file_path is None:
            self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
        else:
            self.read_samples_from_file()
    self.log([u"Writing audio file '%s'...", file_path])
    try:
        # scale float samples in [-1, 1) back to int16 PCM
        pcm = (self.audio_samples * 32768).astype("int16")
        scipywavwrite(file_path, self.audio_sample_rate, pcm)
    except Exception as exc:
        self.log_exc(u"Error writing audio file to '%s'" % (file_path), exc, True, OSError)
    self.log([u"Writing audio file '%s'... done", file_path])
Write the audio data to file as 16-bit PCM WAVE. Raise an OSError (via log_exc) if the file cannot be written.
38,207
def clear_data(self):
    """
    Release the sample buffer and reset the length and capacity counters.
    """
    self.log(u"Clear audio_data")
    self.__samples = None
    self.__samples_length = 0
    self.__samples_capacity = 0
Clear the audio data freeing memory .
38,208
def _update_length(self):
    """
    Recompute ``audio_length`` (seconds, as a TimeValue ratio of
    sample count over sample rate), if both are known.
    """
    if (self.audio_sample_rate is None) or (self.__samples is None):
        return
    self.audio_length = TimeValue(self.__samples_length) / TimeValue(self.audio_sample_rate)
Update the audio length property according to the length of the current audio data and audio sample rate .
38,209
def masked_middle_mfcc(self):
    """
    Return the MFCC speech frames restricted (column-wise)
    to the MIDDLE portion of the wave.
    """
    middle = slice(*self._masked_middle_begin_end())
    return self.masked_mfcc[:, middle]
Return the MFCC speech frames in the MIDDLE portion of the wave .
38,210
def masked_middle_map(self):
    """
    Return the map from MFCC speech frame indices in the MIDDLE
    portion of the wave to the MFCC FULL frame indices.
    """
    middle = slice(*self._masked_middle_begin_end())
    return self.__mfcc_mask_map[middle]
Return the map from the MFCC speech frame indices in the MIDDLE portion of the wave to the MFCC FULL frame indices .
38,211
def _binary_search_intervals(cls, intervals, index):
    """
    Binary-search ``intervals`` (sorted, half-open pairs) for the one
    containing ``index``; return that pair, or None if no interval
    satisfies ``interval[0] <= index < interval[1]``.
    """
    lo, hi = 0, len(intervals) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = intervals[mid]
        if candidate[0] > index:
            # containing interval, if any, lies to the left
            hi = mid - 1
        elif candidate[1] <= index:
            # containing interval, if any, lies to the right
            lo = mid + 1
        else:
            return candidate
    return None
Binary search for the interval containing index assuming there is such an interval . This function should never return None .
38,212
def middle_begin(self, index):
    """
    Set the frame index where the MIDDLE portion starts.

    :param index: the new start index, in [0, all_length]
    :raises ValueError: if the index is out of range
    """
    if not (0 <= index <= self.all_length):
        raise ValueError(u"The given index is not valid")
    self.__middle_begin = index
Set the index where MIDDLE starts .
38,213
def _compute_mfcc_c_extension(self):
    """
    Compute the MFCC matrix using the C extension ``cmfcc``.

    On success stores the transposed matrix in ``self.__mfcc`` and
    returns ``(True, None)``; on any failure logs the exception
    (without raising) and returns ``(False, None)``.
    """
    self.log(u"Computing MFCCs using C extension...")
    try:
        # imported lazily: the extension may not be compiled/installed
        self.log(u"Importing cmfcc...")
        import aeneas.cmfcc.cmfcc
        self.log(u"Importing cmfcc... done")
        # compute_from_data returns a tuple; element [0] is the MFCC matrix,
        # transposed here so frames run along the second axis
        self.__mfcc = (aeneas.cmfcc.cmfcc.compute_from_data(
            self.audio_file.audio_samples,
            self.audio_file.audio_sample_rate,
            self.rconf[RuntimeConfiguration.MFCC_FILTERS],
            self.rconf[RuntimeConfiguration.MFCC_SIZE],
            self.rconf[RuntimeConfiguration.MFCC_FFT_ORDER],
            self.rconf[RuntimeConfiguration.MFCC_LOWER_FREQUENCY],
            self.rconf[RuntimeConfiguration.MFCC_UPPER_FREQUENCY],
            self.rconf[RuntimeConfiguration.MFCC_EMPHASIS_FACTOR],
            self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH],
            self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT]
        )[0]).transpose()
        self.log(u"Computing MFCCs using C extension... done")
        return (True, None)
    except Exception as exc:
        # raise_flag is False: the caller decides how to fall back
        self.log_exc(u"An unexpected error occurred while running cmfcc", exc, False, None)
    return (False, None)
Compute MFCCs using the Python C extension cmfcc .
38,214
def _compute_mfcc_pure_python(self):
    """
    Compute the MFCC matrix using the pure Python MFCC class.

    Returns ``(True, None)`` on success; on failure logs the exception
    (without raising) and returns ``(False, None)``.
    """
    self.log(u"Computing MFCCs using pure Python code...")
    try:
        extractor = MFCC(rconf=self.rconf, logger=self.logger)
        matrix = extractor.compute_from_data(self.audio_file.audio_samples, self.audio_file.audio_sample_rate)
        # transpose so frames run along the second axis
        self.__mfcc = matrix.transpose()
        self.log(u"Computing MFCCs using pure Python code... done")
        return (True, None)
    except Exception as exc:
        self.log_exc(u"An unexpected error occurred while running pure Python code", exc, False, None)
    return (False, None)
Compute MFCCs using the pure Python code .
38,215
def reverse(self):
    """
    Reverse the MFCC data in place, together with the VAD mask,
    the mask map, the speech/nonspeech intervals, and the
    MIDDLE begin/end markers, and toggle ``is_reversed``.
    """
    self.log(u"Reversing...")
    all_length = self.all_length
    # reverse frames (columns) of the MFCC matrix
    self.__mfcc = self.__mfcc[:, ::-1]
    # mirror the MIDDLE markers: new begin/end are reflections of the old end/begin
    tmp = self.__middle_end
    self.__middle_end = all_length - self.__middle_begin
    self.__middle_begin = all_length - tmp
    if self.__mfcc_mask is not None:
        self.__mfcc_mask = self.__mfcc_mask[::-1]
        # map each index i to (all_length - 1 - i), then reverse the order
        self.__mfcc_mask_map *= -1
        self.__mfcc_mask_map += all_length - 1
        self.__mfcc_mask_map = self.__mfcc_mask_map[::-1]
        # mirror each (begin, end) interval and reverse the list order
        self.__speech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__speech_intervals[::-1]]
        self.__nonspeech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__nonspeech_intervals[::-1]]
    self.is_reversed = not self.is_reversed
    self.log(u"Reversing...done")
Reverse the audio file .
38,216
def run_vad(self, log_energy_threshold=None, min_nonspeech_length=None, extend_before=None, extend_after=None):
    """
    Run voice activity detection over the MFCC frames and store the
    resulting boolean speech mask, its index map, and the lists of
    speech and nonspeech (begin, end) frame intervals.

    :param log_energy_threshold: optional VAD energy threshold override
    :param min_nonspeech_length: optional minimum nonspeech run length
    :param extend_before: optional number of frames to extend speech runs backward
    :param extend_after: optional number of frames to extend speech runs forward
    """
    def _compute_runs(array):
        # split a sorted index array into maximal runs of consecutive indices
        if len(array) < 1:
            return []
        return numpy.split(array, numpy.where(numpy.diff(array) != 1)[0] + 1)
    self.log(u"Creating VAD object")
    vad = VAD(rconf=self.rconf, logger=self.logger)
    self.log(u"Running VAD...")
    # the first MFCC row is used as the wave energy input
    self.__mfcc_mask = vad.run_vad(
        wave_energy=self.__mfcc[0],
        log_energy_threshold=log_energy_threshold,
        min_nonspeech_length=min_nonspeech_length,
        extend_before=extend_before,
        extend_after=extend_after
    )
    # indices of the frames flagged as speech
    self.__mfcc_mask_map = (numpy.where(self.__mfcc_mask))[0]
    self.log(u"Running VAD... done")
    self.log(u"Storing speech and nonspeech intervals...")
    # each run of consecutive indices becomes an interval (first, last)
    runs = _compute_runs(self.__mfcc_mask_map)
    self.__speech_intervals = [(r[0], r[-1]) for r in runs]
    runs = _compute_runs((numpy.where(~self.__mfcc_mask))[0])
    self.__nonspeech_intervals = [(r[0], r[-1]) for r in runs]
    self.log(u"Storing speech and nonspeech intervals... done")
Determine which frames contain speech and nonspeech and store the resulting boolean mask internally .
38,217
def set_head_middle_tail(self, head_length=None, middle_length=None, tail_length=None):
    """
    Set the HEAD/MIDDLE/TAIL boundaries explicitly, converting the
    given durations to frame indices using the MFCC window shift.

    ``middle_length`` takes precedence over ``tail_length`` when
    both are given.

    :param head_length: duration of the HEAD (TimeValue) or None
    :param middle_length: duration of the MIDDLE (TimeValue) or None
    :param tail_length: duration of the TAIL (TimeValue) or None
    :raises TypeError: if an argument is neither None nor a TimeValue
    :raises ValueError: if an argument exceeds the audio length
    """
    for variable, name in [(head_length, "head_length"), (middle_length, "middle_length"), (tail_length, "tail_length")]:
        if (variable is not None) and (not isinstance(variable, TimeValue)):
            raise TypeError(u"%s is not None or TimeValue" % name)
        if (variable is not None) and (variable > self.audio_length):
            raise ValueError(u"%s is greater than the length of the audio file" % name)
    self.log(u"Setting head middle tail...")
    # mws: MFCC window shift, i.e. seconds per frame
    mws = self.rconf.mws
    self.log([u"Before: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    if head_length is not None:
        self.middle_begin = int(head_length / mws)
    if middle_length is not None:
        self.middle_end = self.middle_begin + int(middle_length / mws)
    elif tail_length is not None:
        self.middle_end = self.all_length - int(tail_length / mws)
    self.log([u"After: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    self.log(u"Setting head middle tail... done")
Set the HEAD MIDDLE TAIL explicitly .
38,218
def chars(self):
    """
    Return the total number of characters over all lines of this
    fragment, excluding line separators; 0 if ``lines`` is None.
    """
    if self.lines is None:
        return 0
    return sum(len(line) for line in self.lines)
Return the number of characters of the text fragment not including the line separators .
38,219
def children_not_empty(self):
    """
    Return the non-empty direct children of the fragments tree root,
    each wrapped as a TextFile subtree with its node's language applied.
    """
    result = []
    for node in self.fragments_tree.children_not_empty:
        subtree = self.get_subtree(node)
        subtree.set_language(node.value.language)
        result.append(subtree)
    return result
Return the direct not empty children of the root of the fragments tree as TextFile objects .
38,220
def characters(self):
    """
    Return the total number of characters over all fragments
    of this text file.
    """
    return sum(fragment.characters for fragment in self.fragments)
The number of characters in this text file .
38,221
def add_fragment(self, fragment, as_last=True):
    """
    Add the given TextFragment as the last (default) or first child
    of the root node of the text file tree.

    :param fragment: the TextFragment to add
    :param as_last: append if True, prepend if False
    :raises TypeError: (via log_exc) if ``fragment`` is not a TextFragment
    """
    if not isinstance(fragment, TextFragment):
        self.log_exc(u"fragment is not an instance of TextFragment", None, True, TypeError)
    child = Tree(value=fragment)
    self.fragments_tree.add_child(child, as_last=as_last)
Add the given text fragment as the first or last child of the root node of the text file tree .
38,222
def set_language(self, language):
    """
    Assign the given language to every fragment of this text file.
    """
    self.log([u"Setting language: '%s'", language])
    for frag in self.fragments:
        frag.language = language
Set the given language for all the text fragments .
38,223
def _read_from_file(self):
    """
    Read text fragments from ``self.file_path``, dispatching to the
    reader matching ``self.file_format``, after clearing any fragments
    currently stored.

    Raises (via log_exc): OSError if the file cannot be read,
    ValueError if the format is not supported.
    """
    if not gf.file_can_be_read(self.file_path):
        self.log_exc(u"File '%s' cannot be read" % (self.file_path), None, True, OSError)
    if self.file_format not in TextFileFormat.ALLOWED_VALUES:
        self.log_exc(u"Text file format '%s' is not supported." % (self.file_format), None, True, ValueError)
    self.log([u"Reading contents of file '%s'", self.file_path])
    with io.open(self.file_path, "r", encoding="utf-8") as text_file:
        lines = text_file.readlines()
    self.clear()
    # one reader per supported text file format
    map_read_function = {
        TextFileFormat.MPLAIN: self._read_mplain,
        TextFileFormat.MUNPARSED: self._read_munparsed,
        TextFileFormat.PARSED: self._read_parsed,
        TextFileFormat.PLAIN: self._read_plain,
        TextFileFormat.SUBTITLES: self._read_subtitles,
        TextFileFormat.UNPARSED: self._read_unparsed
    }
    map_read_function[self.file_format](lines)
    self.log([u"Parsed %d fragments", len(self.fragments)])
Read text fragments from file .
38,224
def _mplain_word_separator(self):
    """
    Return the separator used to split words in mplain format.

    The symbolic names "space", "equal", "pipe", and "tab" map to the
    corresponding characters; None maps to a space; any other value
    is returned unchanged.
    """
    word_separator = gf.safe_get(self.parameters, gc.PPN_TASK_IS_TEXT_MPLAIN_WORD_SEPARATOR, u" ")
    if word_separator is None:
        return u" "
    named = {
        "space": u" ",
        "equal": u"=",
        "pipe": u"|",
        "tab": u"\u0009",
    }
    return named.get(word_separator, word_separator)
Get the word separator to split words in mplain format .
38,225
def _read_mplain(self, lines):
    """
    Read text fragments from a multilevel plain (mplain) text file,
    building a three-level tree: blank-line-separated groups become
    paragraphs, each line a sentence, each separator-split token a word.

    :param lines: the lines of the input file
    """
    # BUG FIX: the log message said "subtitles text format" (copy-paste
    # from _read_subtitles) although this parses the mplain format
    self.log(u"Parsing fragments from mplain text format")
    word_separator = self._mplain_word_separator()
    self.log([u"Word separator is: '%s'", word_separator])
    lines = [line.strip() for line in lines]
    i = 1
    current = 0
    tree = Tree()
    while current < len(lines):
        line_text = lines[current]
        if len(line_text) > 0:
            # collect the run of non-empty lines: one paragraph
            sentences = [line_text]
            following = current + 1
            while (following < len(lines)) and (len(lines[following]) > 0):
                sentences.append(lines[following])
                following += 1
            paragraph_identifier = u"p%06d" % i
            paragraph_lines = [u" ".join(sentences)]
            paragraph_fragment = TextFragment(identifier=paragraph_identifier, lines=paragraph_lines, filtered_lines=paragraph_lines)
            paragraph_node = Tree(value=paragraph_fragment)
            tree.add_child(paragraph_node)
            self.log([u"Paragraph %s", paragraph_identifier])
            j = 1
            for s in sentences:
                # one sentence node per line of the paragraph
                sentence_identifier = paragraph_identifier + u"s%06d" % j
                sentence_lines = [s]
                sentence_fragment = TextFragment(identifier=sentence_identifier, lines=sentence_lines, filtered_lines=sentence_lines)
                sentence_node = Tree(value=sentence_fragment)
                paragraph_node.add_child(sentence_node)
                j += 1
                self.log([u" Sentence %s", sentence_identifier])
                k = 1
                # one word node per non-empty separator-split token
                for w in [w for w in s.split(word_separator) if len(w) > 0]:
                    word_identifier = sentence_identifier + u"w%06d" % k
                    word_lines = [w]
                    word_fragment = TextFragment(identifier=word_identifier, lines=word_lines, filtered_lines=word_lines)
                    word_node = Tree(value=word_fragment)
                    sentence_node.add_child(word_node)
                    k += 1
                    self.log([u" Word %s", word_identifier])
            current = following
            i += 1
        current += 1
    self.log(u"Storing tree")
    self.fragments_tree = tree
Read text fragments from a multilevel format text file .
38,226
def _read_subtitles(self, lines):
    """
    Read text fragments from a subtitles-format text file: each
    blank-line-separated group of non-empty lines becomes one fragment.

    :param lines: the lines of the input file
    """
    self.log(u"Parsing fragments from subtitles text format")
    id_format = self._get_id_format()
    stripped = [line.strip() for line in lines]
    total = len(stripped)
    pairs = []
    fragment_index = 1
    pos = 0
    while pos < total:
        if len(stripped[pos]) > 0:
            # extend the group to the end of the run of non-empty lines
            group_end = pos + 1
            while (group_end < total) and (len(stripped[group_end]) > 0):
                group_end += 1
            pairs.append((id_format % fragment_index, stripped[pos:group_end]))
            fragment_index += 1
            pos = group_end
        pos += 1
    self._create_text_fragments(pairs)
Read text fragments from a subtitles format text file .
38,227
def _read_parsed(self, lines):
    """
    Read text fragments from a parsed-format text file: each line
    holds an identifier and a text, separated by
    ``gc.PARSED_TEXT_SEPARATOR``; lines without exactly one separator
    or with an empty identifier are skipped.

    :param lines: the lines of the input file
    """
    self.log(u"Parsing fragments from parsed text format")
    pairs = []
    for line in lines:
        pieces = line.split(gc.PARSED_TEXT_SEPARATOR)
        if len(pieces) != 2:
            continue
        identifier = pieces[0].strip()
        if len(identifier) > 0:
            pairs.append((identifier, [pieces[1].strip()]))
    self._create_text_fragments(pairs)
Read text fragments from a parsed format text file .
38,228
def _read_plain(self, lines):
    """
    Read text fragments from a plain-format text file:
    one fragment per line (empty lines included), with identifiers
    generated from the configured id format.

    :param lines: the lines of the input file
    """
    self.log(u"Parsing fragments from plain text format")
    id_format = self._get_id_format()
    pairs = [
        (id_format % number, [line.strip()])
        for number, line in enumerate(lines, start=1)
    ]
    self._create_text_fragments(pairs)
Read text fragments from a plain format text file .
38,229
def _read_unparsed(self, lines):
    """
    Read text fragments from an unparsed (HTML/XHTML) text file:
    elements whose ``class``/``id`` attributes match the configured
    regexes are extracted, then sorted by the configured id sorting
    algorithm.

    :param lines: the lines of the input file
    """
    from bs4 import BeautifulSoup
    def filter_attributes():
        # build the BeautifulSoup attrs filter from the class/id regex parameters
        attributes = {}
        for attribute_name, filter_name in [
            ("class", gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX),
            ("id", gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX)
        ]:
            if filter_name in self.parameters:
                regex_string = self.parameters[filter_name]
                if regex_string is not None:
                    self.log([u"Regex for %s: '%s'", attribute_name, regex_string])
                    regex = re.compile(r".*\b" + regex_string + r"\b.*")
                    attributes[attribute_name] = regex
        return attributes
    self.log(u"Parsing fragments from unparsed text format")
    self.log(u"Creating soup")
    soup = BeautifulSoup("\n".join(lines), "lxml")
    text_from_id = {}
    ids = []
    # NOTE: this call shadows the helper function name with its result
    filter_attributes = filter_attributes()
    self.log([u"Finding elements matching attributes '%s'", filter_attributes])
    nodes = soup.findAll(attrs=filter_attributes)
    for node in nodes:
        try:
            f_id = gf.safe_unicode(node["id"])
            f_text = gf.safe_unicode(node.text)
            text_from_id[f_id] = f_text
            ids.append(f_id)
        except KeyError:
            # node matched the class regex but has no id attribute
            self.log_warn(u"KeyError while parsing a node")
    id_sort = gf.safe_get(dictionary=self.parameters, key=gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT, default_value=IDSortingAlgorithm.UNSORTED, can_return_none=False)
    self.log([u"Sorting text fragments using '%s'", id_sort])
    sorted_ids = IDSortingAlgorithm(id_sort).sort(ids)
    self.log(u"Appending fragments")
    self._create_text_fragments([(key, [text_from_id[key]]) for key in sorted_ids])
Read text fragments from an unparsed format text file .
38,230
def _get_id_format(self):
    """
    Return the fragment id format string from the parameters
    (falling back to ``self.DEFAULT_ID_FORMAT``), validating that
    it accepts a single integer substitution.

    :raises ValueError: (via log_exc) if the format string is invalid
    """
    id_format = gf.safe_get(self.parameters, gc.PPN_TASK_OS_FILE_ID_REGEX, self.DEFAULT_ID_FORMAT, can_return_none=False)
    try:
        # probe: the format must accept one integer argument
        id_format % 1
    except (TypeError, ValueError) as exc:
        self.log_exc(u"String '%s' is not a valid id format" % (id_format), exc, True, ValueError)
    return id_format
Return the fragment id format string from the parameters, validating that it accepts an integer substitution.
38,231
def _create_text_fragments(self, pairs):
    """
    Create one TextFragment per (identifier, lines) pair, running the
    lines through the configured TextFilter, and append each fragment
    to this text file.

    :param pairs: list of (identifier, list-of-lines) tuples
    """
    self.log(u"Creating TextFragment objects")
    text_filter = self._build_text_filter()
    for identifier, fragment_lines in pairs:
        self.add_fragment(
            TextFragment(
                identifier=identifier,
                lines=fragment_lines,
                filtered_lines=text_filter.apply_filter(fragment_lines)
            )
        )
Create text fragment objects and append them to this list .
38,232
def _build_text_filter(self):
    """
    Build and return a TextFilter composed of the inner filters
    enabled by the parameters: an ignore-regex filter and/or a
    transliteration-map filter.  A filter whose construction fails
    is logged and skipped (no exception is raised).
    """
    text_filter = TextFilter(logger=self.logger)
    self.log(u"Created TextFilter object")
    # (parameter key, filter class, constructor keyword) per optional filter
    for key, cls, param_name in [
        (gc.PPN_TASK_IS_TEXT_FILE_IGNORE_REGEX, TextFilterIgnoreRegex, "regex"),
        (gc.PPN_TASK_IS_TEXT_FILE_TRANSLITERATE_MAP, TextFilterTransliterate, "map_file_path")
    ]:
        cls_name = cls.__name__
        param_value = gf.safe_get(self.parameters, key, None)
        if param_value is not None:
            self.log([u"Creating %s object...", cls_name])
            params = {param_name: param_value, "logger": self.logger}
            try:
                inner_filter = cls(**params)
                text_filter.add_filter(inner_filter)
                self.log([u"Creating %s object... done", cls_name])
            except ValueError as exc:
                # raise_flag is False: a bad filter is skipped, not fatal
                self.log_exc(u"Creating %s object failed" % (cls_name), exc, False, None)
    return text_filter
Build a suitable TextFilter object .
38,233
def add_filter(self, new_filter, as_last=True):
    """
    Compose this filter with ``new_filter``, placing it after
    (default) or before the existing filters.

    :param new_filter: the filter to compose
    :param as_last: append if True, prepend if False
    """
    if not as_last:
        self.filters = [new_filter] + self.filters
    else:
        self.filters.append(new_filter)
Compose this filter with the given new_filter filter .
38,234
def apply_filter(self, strings):
    """
    Run the given list of strings through every composed filter,
    in order, and return the final result.

    :param strings: the list of strings to filter
    """
    result = strings
    for inner in self.filters:
        result = inner.apply_filter(result)
    self.log([u"Applying regex: '%s' => '%s'", strings, result])
    return result
Apply the text filter filter to the given list of strings .
38,235
def _build_map(self):
    """
    Read the transliteration map file at ``self.file_path`` and build
    ``self.trans_map``: tabs are normalized to spaces, comment lines
    (starting with ``#``) and blank lines are skipped, and every other
    line is processed as a map rule.
    """
    if gf.is_py2_narrow_build():
        self.log_warn(u"Running on a Python 2 narrow build: be aware that Unicode chars above 0x10000 cannot be replaced correctly.")
    self.trans_map = {}
    with io.open(self.file_path, "r", encoding="utf-8") as file_obj:
        contents = file_obj.read().replace(u"\t", u" ")
    for line in contents.splitlines():
        if not line.startswith(u"#"):
            line = line.strip()
            if len(line) > 0:
                self._process_map_rule(line)
Read the map file at path .
38,236
def _process_map_rule(self, line):
    """
    Parse one transliteration map rule line.

    A replace rule maps each character of its first group to the
    replacement string; a delete rule maps each character of its
    first group to the empty string; any other line is ignored.
    """
    match = self.REPLACE_REGEX.match(line)
    if match is not None:
        chars_to_replace = self._process_first_group(match.group(1))
        replacement = self._process_second_group(match.group(2))
        for char in chars_to_replace:
            self.trans_map[char] = replacement
            self.log([u"Adding rule: replace '%s' with '%s'", char, replacement])
        return
    match = self.DELETE_REGEX.match(line)
    if match is None:
        return
    for char in self._process_first_group(match.group(1)):
        self.trans_map[char] = ""
        self.log([u"Adding rule: delete '%s'", char])
Process the line string containing a map rule .
38,237
def _process_first_group(self, group):
    """
    Expand the first group of a map rule into the list of characters
    it denotes: either a codepoint range "START-END" (inclusive) or a
    single codepoint.  Returns an empty list when the parsed range is
    invalid (start < 0 or end < start).

    BUG FIX: in the original, ``start``/``end`` were only assigned on
    the range branch, so a group without "-" (and, depending on the
    nesting, a malformed multi-dash group) raised UnboundLocalError
    at the ``start > -1`` check below.  Every path now defines both.
    """
    if ("-" in group) and (len(group.split("-")) == 2):
        # codepoint range, e.g. "0041-005A"
        arr = group.split("-")
        start = self._parse_codepoint(arr[0])
        end = self._parse_codepoint(arr[1])
    else:
        # single codepoint
        start = self._parse_codepoint(group)
        end = start
    result = []
    if (start > -1) and (end >= start):
        for index in range(start, end + 1):
            result.append(gf.safe_unichr(index))
    return result
Process the first group of a rule .
38,238
def load_job(self, job):
    """
    Store the given Job object on this executor.

    :param job: the Job to execute
    :raises ExecuteJobInputError: (via log_exc) if ``job`` is not a Job
    """
    if not isinstance(job, Job):
        self.log_exc(u"job is not an instance of Job", None, True, ExecuteJobInputError)
    self.job = job
Load the job from the given Job object .
38,239
def execute(self):
    """
    Execute the loaded job, i.e. execute all of its tasks in order.

    Pre-checks: the job must be set, must contain at least one task,
    and must not exceed the configured maximum number of tasks.

    :raises ExecuteJobExecutionError: (via log_exc) on any check or
        task failure
    """
    self.log(u"Executing job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True, ExecuteJobExecutionError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True, ExecuteJobExecutionError)
    job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
    if (job_max_tasks > 0) and (len(self.job) > job_max_tasks):
        self.log_exc(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(self.job), job_max_tasks), None, True, ExecuteJobExecutionError)
    self.log([u"Number of tasks: '%d'", len(self.job)])
    for task in self.job.tasks:
        # BUG FIX: custom_id was assigned inside the try block but used
        # in the except handler, so a failure on that first statement
        # raised NameError in the handler; hoist it out of the try
        custom_id = task.configuration["custom_id"]
        try:
            self.log([u"Executing task '%s'...", custom_id])
            executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger)
            executor.execute()
            self.log([u"Executing task '%s'... done", custom_id])
        except Exception as exc:
            self.log_exc(u"Error while executing task '%s'" % (custom_id), exc, True, ExecuteJobExecutionError)
    self.log(u"Executing task: succeeded")
    self.log(u"Executing job: succeeded")
Execute the job that is execute all of its tasks .
38,240
def write_output_container(self, output_directory_path):
    """
    Write the output container for this job: output each task's sync
    map into a temporary directory, then compress it into a container
    file inside ``output_directory_path``.

    :param output_directory_path: directory where the container is written
    :return: the path of the created container file (None is returned
        only if log_exc does not raise on a compression error)
    :raises ExecuteJobOutputError: (via log_exc) on any failure
    """
    self.log(u"Writing output container for this job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True, ExecuteJobOutputError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True, ExecuteJobOutputError)
    self.log([u"Number of tasks: '%d'", len(self.job)])
    self.tmp_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH])
    self.log([u"Created temporary directory '%s'", self.tmp_directory])
    for task in self.job.tasks:
        custom_id = task.configuration["custom_id"]
        # every task must have been executed and have a sync map ready
        if task.sync_map_file_path is None:
            self.log_exc(u"Task '%s' has sync_map_file_path not set" % (custom_id), None, True, ExecuteJobOutputError)
        if task.sync_map is None:
            self.log_exc(u"Task '%s' has sync_map not set" % (custom_id), None, True, ExecuteJobOutputError)
        try:
            self.log([u"Outputting sync map for task '%s'...", custom_id])
            task.output_sync_map_file(self.tmp_directory)
            self.log([u"Outputting sync map for task '%s'... done", custom_id])
        except Exception as exc:
            # BUG FIX: the original passed None instead of exc here,
            # discarding the root cause, unlike every sibling handler
            self.log_exc(u"Error while outputting sync map for task '%s'" % (custom_id), exc, True, ExecuteJobOutputError)
    output_container_format = self.job.configuration["o_container_format"]
    self.log([u"Output container format: '%s'", output_container_format])
    output_file_name = self.job.configuration["o_name"]
    # append the container extension unless the format is "unpacked"
    # or the name already ends with it
    if ((output_container_format != ContainerFormat.UNPACKED) and (not output_file_name.endswith(output_container_format))):
        self.log(u"Adding extension to output_file_name")
        output_file_name += "." + output_container_format
    self.log([u"Output file name: '%s'", output_file_name])
    output_file_path = gf.norm_join(output_directory_path, output_file_name)
    self.log([u"Output file path: '%s'", output_file_path])
    try:
        self.log(u"Compressing...")
        container = Container(output_file_path, output_container_format, logger=self.logger)
        container.compress(self.tmp_directory)
        self.log(u"Compressing... done")
        self.log([u"Created output file: '%s'", output_file_path])
        self.log(u"Writing output container for this job: succeeded")
        self.clean(False)
        return output_file_path
    except Exception as exc:
        self.clean(False)
        self.log_exc(u"Error while compressing", exc, True, ExecuteJobOutputError)
        return None
Write the output container for this job .
38,241
def clean(self, remove_working_directory=True):
    """
    Remove the temporary directory; if ``remove_working_directory``
    is truthy, remove the working directory as well.

    BUG FIX: the original tested ``remove_working_directory is not
    None``, so ``clean(False)`` (as called by write_output_container)
    still deleted the working directory, contradicting the documented
    contract; the flag is now tested for truthiness.

    :param remove_working_directory: also remove the working directory
    """
    if remove_working_directory:
        self.log(u"Removing working directory... ")
        gf.delete_directory(self.working_directory)
        self.working_directory = None
        self.log(u"Removing working directory... done")
    self.log(u"Removing temporary directory... ")
    gf.delete_directory(self.tmp_directory)
    self.tmp_directory = None
    self.log(u"Removing temporary directory... done")
Remove the temporary directory . If remove_working_directory is True remove the working directory as well otherwise just remove the temporary directory .
38,242
def _read_syncmap_file(self, path, extension, text=False):
    """
    Read a sync map file and return a list of (begin, end, label)
    triples, where label is the space-joined fragment text if
    ``text`` is True, else the fragment identifier.

    :param path: the path of the sync map file
    :param extension: the sync map format extension
    :param text: select text labels instead of identifiers
    """
    syncmap = SyncMap(logger=self.logger)
    syncmap.read(extension, path, parameters=None)
    if text:
        label = lambda f: u" ".join(f.text_fragment.lines)
    else:
        label = lambda f: f.text_fragment.identifier
    return [(f.begin, f.end, label(f)) for f in syncmap.fragments]
Read (begin, end, label) triples from a SyncMap file.
38,243
def has_adjacent_leaves_only(self):
    """
    Return True if every leaf of the sync map tree has an interval
    adjacent (is_adjacent_before) to the next leaf's interval.
    """
    leaves = self.leaves()
    return all(
        leaves[i].interval.is_adjacent_before(leaves[i + 1].interval)
        for i in range(len(leaves) - 1)
    )
Return True if the sync map fragments which are the leaves of the sync map tree are all adjacent .
38,244
def json_string(self):
    """
    Return a JSON representation of the sync map: a sorted, indented
    object whose "fragments" key holds the recursively serialized
    non-empty children of the fragments tree.
    """
    def visit_children(node):
        # serialize every non-empty child of node, depth-first
        output_fragments = []
        for child in node.children_not_empty:
            fragment = child.value
            text = fragment.text_fragment
            output_fragments.append({
                "id": text.identifier,
                "language": text.language,
                "lines": text.lines,
                "begin": gf.time_to_ssmmm(fragment.begin),
                "end": gf.time_to_ssmmm(fragment.end),
                "children": visit_children(child)
            })
        return output_fragments
    return gf.safe_unicode(
        json.dumps({"fragments": visit_children(self.fragments_tree)}, indent=1, sort_keys=True)
    )
Return a JSON representation of the sync map .
38,245
def add_fragment(self, fragment, as_last=True):
    """
    Add the given sync map fragment as the first or last child
    of the root node of the sync map tree.

    :raises TypeError: if ``fragment`` is not a ``SyncMapFragment``
    """
    if not isinstance(fragment, SyncMapFragment):
        self.log_exc(u"fragment is not an instance of SyncMapFragment", None, True, TypeError)
    child = Tree(value=fragment)
    self.fragments_tree.add_child(child, as_last=as_last)
Add the given sync map fragment as the first or last child of the root node of the sync map tree .
38,246
def output_html_for_tuning(self, audio_file_path, output_file_path, parameters=None):
    """
    Output an HTML file for manually fine tuning the sync map.

    :param audio_file_path: path of the audio file, embedded as a file:// URL
    :param output_file_path: path of the HTML file to write
    :param dict parameters: optional task parameters (output format, SMIL refs)
    :raises OSError: if the output file cannot be written
    """
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot output HTML file '%s'. Wrong permissions?" % (output_file_path), None, True, OSError)
    if parameters is None:
        parameters = {}
    audio_file_path_absolute = gf.fix_slash(os.path.abspath(audio_file_path))
    template_path_absolute = gf.absolute_path(self.FINETUNEAS_PATH, __file__)
    # load the finetuneas HTML template
    with io.open(template_path_absolute, "r", encoding="utf-8") as file_obj:
        template = file_obj.read()
    # apply the static replacements first
    for repl in self.FINETUNEAS_REPLACEMENTS:
        template = template.replace(repl[0], repl[1])
    template = template.replace(
        self.FINETUNEAS_REPLACE_AUDIOFILEPATH,
        u"audioFilePath = \"file://%s\";" % audio_file_path_absolute
    )
    # NOTE(review): json_string is accessed without parentheses here,
    # so it is presumably a property on this class — confirm
    template = template.replace(
        self.FINETUNEAS_REPLACE_FRAGMENTS,
        u"fragments = (%s).fragments;" % self.json_string
    )
    if gc.PPN_TASK_OS_FILE_FORMAT in parameters:
        output_format = parameters[gc.PPN_TASK_OS_FILE_FORMAT]
        if output_format in self.FINETUNEAS_ALLOWED_FORMATS:
            template = template.replace(
                self.FINETUNEAS_REPLACE_OUTPUT_FORMAT,
                u"outputFormat = \"%s\";" % output_format
            )
            if output_format == "smil":
                # SMIL output also needs the audio and page refs, when provided
                for key, placeholder, replacement in [
                    (gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF, self.FINETUNEAS_REPLACE_SMIL_AUDIOREF, "audioref = \"%s\";"),
                    (gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF, self.FINETUNEAS_REPLACE_SMIL_PAGEREF, "pageref = \"%s\";"),
                ]:
                    if key in parameters:
                        template = template.replace(placeholder, replacement % parameters[key])
    with io.open(output_file_path, "w", encoding="utf-8") as file_obj:
        file_obj.write(template)
Output an HTML file for fine tuning the sync map manually .
38,247
def _create_dct_matrix ( self ) : self . s2dct = numpy . zeros ( ( self . mfcc_size , self . filter_bank_size ) ) for i in range ( 0 , self . mfcc_size ) : freq = numpy . pi * float ( i ) / self . filter_bank_size self . s2dct [ i ] = numpy . cos ( freq * numpy . arange ( 0.5 , 0.5 + self . filter_bank_size , 1.0 , 'float64' ) ) self . s2dct [ : , 0 ] *= 0.5 self . s2dct = self . s2dct . transpose ( )
Create the not - quite - DCT matrix as used by Sphinx and store it in self . s2dct .
38,248
def _create_mel_filter_bank(self):
    """
    Create the Mel filter bank and store it in ``self.filters``.

    The bank is a (1 + fft_order // 2, filter_bank_size) matrix of
    triangular filters whose edges are equally spaced on the Mel scale
    between ``self.lower_frequency`` and ``self.upper_frequency``.

    :raises ValueError: if the upper frequency exceeds the Nyquist frequency
    """
    self.filters = numpy.zeros((1 + (self.fft_order // 2), self.filter_bank_size), 'd')
    # width of one FFT bin, in Hz
    dfreq = float(self.sample_rate) / self.fft_order
    nyquist_frequency = self.sample_rate / 2
    if self.upper_frequency > nyquist_frequency:
        self.log_exc(u"Upper frequency %f exceeds Nyquist frequency %f" % (self.upper_frequency, nyquist_frequency), None, True, ValueError)
    melmax = MFCC._hz2mel(self.upper_frequency)
    melmin = MFCC._hz2mel(self.lower_frequency)
    # filter edges are equally spaced on the Mel scale
    dmelbw = (melmax - melmin) / (self.filter_bank_size + 1)
    filt_edge = MFCC._mel2hz(melmin + dmelbw * numpy.arange(self.filter_bank_size + 2, dtype='d'))
    for whichfilt in range(0, self.filter_bank_size):
        # left, center, and right FFT bin indices of this triangular filter
        leftfr = int(round(filt_edge[whichfilt] / dfreq))
        centerfr = int(round(filt_edge[whichfilt + 1] / dfreq))
        rightfr = int(round(filt_edge[whichfilt + 2] / dfreq))
        # height chosen so that each filter has (approximately) unit area
        fwidth = (rightfr - leftfr) * dfreq
        height = 2.0 / fwidth
        if centerfr != leftfr:
            leftslope = height / (centerfr - leftfr)
        else:
            leftslope = 0
        freq = leftfr + 1
        # rising edge of the triangle
        while freq < centerfr:
            self.filters[freq, whichfilt] = (freq - leftfr) * leftslope
            freq = freq + 1
        # peak of the triangle
        if freq == centerfr:
            self.filters[freq, whichfilt] = height
            freq = freq + 1
        # falling edge of the triangle; when centerfr == rightfr the
        # loop below is a no-op, so rightslope is never read unbound
        if centerfr != rightfr:
            rightslope = height / (centerfr - rightfr)
        while freq < rightfr:
            self.filters[freq, whichfilt] = (freq - rightfr) * rightslope
            freq = freq + 1
Create the Mel filter bank and store it in self . filters .
38,249
def _pre_emphasis ( self ) : self . data = numpy . append ( self . data [ 0 ] , self . data [ 1 : ] - self . emphasis_factor * self . data [ : - 1 ] )
Pre - emphasize the entire signal at once by self . emphasis_factor overwriting self . data .
38,250
def compute_from_data(self, data, sample_rate):
    """
    Compute MFCCs for the given audio data and return them
    as a (number_of_frames, mfcc_size) matrix.

    :param data: 1D numpy array of audio samples (mono)
    :param sample_rate: the sample rate of ``data``, in Hz
    :raises ValueError: if ``data`` is not a non-empty 1D array
    """
    def _process_frame(self, frame):
        # window, FFT, power spectrum, Mel filtering, log compression
        frame *= self.hamming_window
        fft = numpy.fft.rfft(frame, self.fft_order)
        power = numpy.square(numpy.absolute(fft))
        return numpy.log(numpy.dot(power, self.filters).clip(self.CUTOFF, numpy.inf))
    if len(data.shape) != 1:
        self.log_exc(u"The audio data must be a 1D numpy array (mono).", None, True, ValueError)
    if len(data) < 1:
        self.log_exc(u"The audio data must not be empty.", None, True, ValueError)
    self.data = data
    self.sample_rate = sample_rate
    data_length = len(self.data)
    # frame geometry, in samples; frames are padded up to the FFT order
    frame_length = int(self.window_length * self.sample_rate)
    frame_length_padded = max(frame_length, self.fft_order)
    frame_shift = int(self.window_shift * self.sample_rate)
    number_of_frames = int((1.0 * data_length) / frame_shift)
    self.hamming_window = numpy.hamming(frame_length_padded)
    self._create_mel_filter_bank()
    self._pre_emphasis()
    mfcc = numpy.zeros((number_of_frames, self.filter_bank_size), 'float64')
    for frame_index in range(number_of_frames):
        # extract the frame, zero-padding past the end of the data
        frame_start = frame_index * frame_shift
        frame_end = min(frame_start + frame_length_padded, data_length)
        frame = numpy.zeros(frame_length_padded)
        frame[0:(frame_end - frame_start)] = self.data[frame_start:frame_end]
        mfcc[frame_index] = _process_frame(self, frame)
    # apply the (transposed) not-quite-DCT matrix to get cepstral coefficients
    return numpy.dot(mfcc, self.s2dct) / self.filter_bank_size
Compute MFCCs for the given audio data .
38,251
def write(filename, rate, data):
    """
    Write a numpy array as a WAV file.

    :param filename: output file path, or an open file-like object
        (anything with a ``write`` attribute); a file-like object is
        rewound to the start afterwards, not closed
    :param rate: the sample rate, in Hz
    :param data: numpy array of samples; 1D for mono, otherwise 2D
        with one column per channel
    :raises ValueError: if the dtype is not signed int, float, or uint8
    """
    if hasattr(filename, 'write'):
        fid = filename
    else:
        fid = open(filename, 'wb')
    try:
        dkind = data.dtype.kind
        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and data.dtype.itemsize == 1)):
            raise ValueError("Unsupported data type '%s'" % data.dtype)
        fid.write(b'RIFF')
        # RIFF chunk size placeholder, patched once the total size is known
        fid.write(b'\x00\x00\x00\x00')
        fid.write(b'WAVE')
        # fmt chunk
        fid.write(b'fmt ')
        if dkind == 'f':
            comp = 3  # WAVE_FORMAT_IEEE_FLOAT
        else:
            comp = 1  # WAVE_FORMAT_PCM
        if data.ndim == 1:
            noc = 1
        else:
            noc = data.shape[1]
        bits = data.dtype.itemsize * 8
        sbytes = rate * (bits // 8) * noc  # average bytes per second
        ba = noc * (bits // 8)  # block align
        fid.write(struct.pack('<ihHIIHH', 16, comp, noc, rate, sbytes, ba, bits))
        # data chunk
        fid.write(b'data')
        fid.write(struct.pack('<i', data.nbytes))
        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):
            # WAV is little-endian: swap big-endian data before writing
            data = data.byteswap()
        _array_tofile(fid, data)
        # patch the RIFF chunk size now that the file size is known
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<i', size - 8))
    finally:
        if not hasattr(filename, 'write'):
            fid.close()
        else:
            fid.seek(0)
Write a numpy array as a WAV file
38,252
def safe_print(msg):
    """
    Safely print a given Unicode string to stdout, replacing characters
    that cannot be encoded in the current stdout encoding.
    """
    try:
        print(msg)
    except UnicodeEncodeError:
        try:
            # round-trip through the stdout encoding, replacing
            # unencodable characters, then print the result
            encoding = sys.stdout.encoding
            replaced = msg.encode(encoding, "replace").decode(encoding, "replace")
            print(replaced)
        except (UnicodeDecodeError, UnicodeEncodeError):
            print(u"[ERRO] An unexpected error happened while printing to stdout.")
            print(u"[ERRO] Please check that your file/string encoding matches the shell encoding.")
            print(u"[ERRO] If possible, set your shell encoding to UTF-8 and convert any files with legacy encodings.")
Safely print a given Unicode string to stdout possibly replacing characters non - printable in the current stdout encoding .
38,253
def print_error(msg, color=True):
    """Print an error message, colorized on POSIX terminals when ``color`` is True."""
    if color and is_posix():
        safe_print(u"%s[ERRO] %s%s" % (ANSI_ERROR, msg, ANSI_END))
        return
    safe_print(u"[ERRO] %s" % (msg))
Print an error message .
38,254
def print_success(msg, color=True):
    """Print a success message, colorized on POSIX terminals when ``color`` is True."""
    if color and is_posix():
        safe_print(u"%s[INFO] %s%s" % (ANSI_OK, msg, ANSI_END))
        return
    safe_print(u"[INFO] %s" % (msg))
Print a success message .
38,255
def print_warning(msg, color=True):
    """Print a warning message, colorized on POSIX terminals when ``color`` is True."""
    if color and is_posix():
        safe_print(u"%s[WARN] %s%s" % (ANSI_WARNING, msg, ANSI_END))
        return
    safe_print(u"[WARN] %s" % (msg))
Print a warning message .
38,256
def file_extension(path):
    """
    Return the file extension of ``path`` without the leading dot,
    or None if ``path`` is None.
    """
    if path is None:
        return None
    extension = os.path.splitext(os.path.basename(path))[1]
    return extension[1:] if extension.startswith(".") else extension
Return the file extension .
38,257
def mimetype_from_path(path):
    """
    Return the mimetype associated with the file extension of ``path``,
    or None if unknown.
    """
    extension = file_extension(path)
    if extension is None:
        return None
    return gc.MIMETYPE_MAP.get(extension.lower(), None)
Return a mimetype from the file extension .
38,258
def file_name_without_extension(path):
    """Return the base file name of ``path`` without its extension, or None."""
    if path is None:
        return None
    base_name = os.path.basename(path)
    return os.path.splitext(base_name)[0]
Return the file name without extension .
38,259
def safe_float(string, default=None):
    """Safely parse ``string`` into a float, returning ``default`` on failure."""
    try:
        return float(string)
    except (TypeError, ValueError):
        return default
Safely parse a string into a float .
38,260
def safe_int(string, default=None):
    """Safely parse ``string`` into an int (via float), returning ``default`` on failure."""
    parsed = safe_float(string, default)
    return parsed if parsed is None else int(parsed)
Safely parse a string into an int .
38,261
def safe_get(dictionary, key, default_value, can_return_none=True):
    """
    Safely perform a dictionary lookup, returning ``default_value`` when
    the key is missing, the container is not subscriptable, or the stored
    value is None and ``can_return_none`` is False.
    """
    try:
        value = dictionary[key]
    except (KeyError, TypeError):
        return default_value
    if value is None and not can_return_none:
        return default_value
    return value
Safely perform a dictionary get returning the default value if the key is not found .
38,262
def norm_join(prefix, suffix):
    """
    Join ``prefix`` and ``suffix`` and return the normalized resulting
    path; return "." when both are None.
    """
    parts = [part for part in (prefix, suffix) if part is not None]
    if not parts:
        return "."
    return os.path.normpath(os.path.join(*parts))
Join prefix and suffix paths and return the resulting path normalized .
38,263
def copytree(source_directory, destination_directory, ignore=None):
    """
    Recursively copy the contents of a source directory into a
    destination directory, creating destination subdirectories as
    needed; plain files are copied with ``shutil.copyfile``.

    :param ignore: optional callable ``(directory, entries) -> ignored``
        with the same semantics as ``shutil.ignore_patterns``
    """
    if not os.path.isdir(source_directory):
        # base case: a plain file
        shutil.copyfile(source_directory, destination_directory)
        return
    if not os.path.isdir(destination_directory):
        os.makedirs(destination_directory)
    entries = os.listdir(source_directory)
    ignored = ignore(source_directory, entries) if ignore is not None else set()
    for entry in entries:
        if entry in ignored:
            continue
        copytree(
            os.path.join(source_directory, entry),
            os.path.join(destination_directory, entry),
            ignore,
        )
Recursively copy the contents of a source directory into a destination directory . Both directories must exist .
38,264
def ensure_parent_directory(path, ensure_parent=True):
    """
    Ensure the parent directory of ``path`` exists, creating it if needed.

    :param path: the path whose parent directory (or, when
        ``ensure_parent`` is False, the path itself) must exist
    :param bool ensure_parent: if True, operate on the parent of ``path``
    :raises OSError: if the directory cannot be created
    """
    target = os.path.abspath(path)
    if ensure_parent:
        target = os.path.dirname(target)
    if os.path.exists(target):
        return
    try:
        os.makedirs(target)
    except (IOError, OSError):
        raise OSError(u"Directory '%s' cannot be created" % target)
Ensures the parent directory exists .
38,265
def can_run_c_extension(name=None):
    """
    Determine whether the given Python C extension loads correctly.
    If ``name`` is None, check cdtw, cmfcc, and cew together.
    """
    def make_probe(module_path):
        # return a callable that attempts the import and reports success
        def probe():
            try:
                __import__(module_path)
                return True
            except ImportError:
                return False
        return probe
    probes = {
        "cdtw": make_probe("aeneas.cdtw.cdtw"),
        "cmfcc": make_probe("aeneas.cmfcc.cmfcc"),
        "cew": make_probe("aeneas.cew.cew"),
        "cfw": make_probe("aeneas.cfw.cfw"),
    }
    if name in probes:
        return probes[name]()
    # default: cfw is deliberately excluded from the combined check
    return probes["cdtw"]() and probes["cmfcc"]() and probes["cew"]()
Determine whether the given Python C extension loads correctly .
38,266
def run_c_extension_with_fallback(log_function, extension, c_function, py_function, args, rconf):
    """
    Run a function implemented as a C extension, falling back to a pure
    Python implementation when the extension is disabled, unavailable,
    or fails.

    :raises RuntimeError: if both implementations fail
    """
    def attempt_c():
        # return (computed, result); (False, None) when the C path is unusable
        if not rconf[u"c_extensions"]:
            log_function(u"C extensions disabled")
        elif extension not in rconf:
            log_function([u"C extension '%s' not recognized", extension])
        elif not rconf[extension]:
            log_function([u"C extension '%s' disabled", extension])
        else:
            log_function([u"C extension '%s' enabled", extension])
            if c_function is None:
                log_function(u"C function is None")
            elif can_run_c_extension(extension):
                log_function([u"C extension '%s' enabled and it can be loaded", extension])
                return c_function(*args)
            else:
                log_function([u"C extension '%s' enabled but it cannot be loaded", extension])
        return (False, None)
    def attempt_py():
        if py_function is None:
            log_function(u"Python function is None")
            return (False, None)
        log_function(u"Running the pure Python code")
        return py_function(*args)
    computed, result = attempt_c()
    if not computed:
        computed, result = attempt_py()
    if not computed:
        raise RuntimeError(u"Both the C extension and the pure Python code failed. (Wrong arguments? Input too big?)")
    return result
Run a function calling a C extension falling back to a pure Python function if the former does not succeed .
38,267
def file_can_be_read(path):
    """Return True if the file at ``path`` exists and can be opened for reading."""
    if path is None:
        return False
    try:
        with io.open(path, "rb"):
            pass
    except (IOError, OSError):
        return False
    return True
Return True if the file at the given path can be read .
38,268
def file_can_be_written(path):
    """
    Return True if a file can be written at ``path``.

    IMPORTANT: as a side effect of the check, this function creates and
    then deletes the file at ``path``.
    """
    if path is None:
        return False
    try:
        with io.open(path, "wb"):
            pass
        delete_file(None, path)
        return True
    except (IOError, OSError):
        return False
Return True if a file can be written at the given path .
38,269
def read_file_bytes(input_file_path):
    """
    Read the file at the given file path and return its contents
    as a byte string, or None if an error occurred.
    """
    try:
        with io.open(input_file_path, "rb") as input_file:
            return input_file.read()
    except (IOError, OSError, TypeError, ValueError):
        # Fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt and SystemExit; catch only the errors
        # that open/read can raise (TypeError/ValueError cover bad paths).
        return None
Read the file at the given file path and return its contents as a byte string or None if an error occurred .
38,270
def human_readable_number(number, suffix=""):
    """
    Format the given number into a human-readable string,
    using binary (1024-based) unit prefixes.
    """
    value = number
    for prefix in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, "Y", suffix)
Format the given number into a human - readable string .
38,271
def safe_unichr(codepoint):
    """
    Safely return a Unicode string of length one containing
    the Unicode character with the given codepoint.

    :param codepoint: the Unicode codepoint (int)
    """
    if is_py2_narrow_build():
        # Python 2 narrow builds cannot represent codepoints above 0xFFFF
        # with unichr(); build the character via a unicode-escape literal
        return ("\\U%08x" % codepoint).decode("unicode-escape")
    elif PY2:
        return unichr(codepoint)
    return chr(codepoint)
Safely return a Unicode string of length one containing the Unicode character with given codepoint .
38,272
def safe_unicode_stdin(string):
    """
    Safely convert the given string to a Unicode string,
    decoding it using ``sys.stdin.encoding`` if needed.

    :param string: the byte or Unicode string to convert (or None)
    """
    if string is None:
        return None
    if is_bytes(string):
        if FROZEN:
            return string.decode("utf-8")
        try:
            return string.decode(sys.stdin.encoding)
        except UnicodeDecodeError:
            return string.decode(sys.stdin.encoding, "replace")
        except (AttributeError, LookupError, TypeError):
            # Fix: was a bare ``except:``. sys.stdin.encoding can be None
            # (TypeError) or an unknown codec (LookupError), e.g. when
            # stdin is redirected; fall back to UTF-8 in those cases only.
            return string.decode("utf-8")
    return string
Safely convert the given string to a Unicode string decoding using sys . stdin . encoding if needed .
38,273
def get(self, fragment_info):
    """
    Return the cached value associated with the given key.

    :raises KeyError: if ``fragment_info`` is not cached
    """
    if self.is_cached(fragment_info):
        return self.cache[fragment_info]
    raise KeyError(u"Attempt to get text not cached")
Get the value associated with the given key .
38,274
def clear(self):
    """Clear the cache, removing all its files from disk and re-initializing it."""
    self.log(u"Clearing cache...")
    for handler, info in self.cache.values():
        self.log([u" Removing file '%s'", info])
        gf.delete_file(handler, info)
    self._initialize_cache()
    self.log(u"Clearing cache... done")
Clear the cache and remove all the files from disk .
38,275
def _language_to_voice_code(self, language):
    """
    Translate a language value to a voice code, honoring a TTS voice
    override set in the runtime configuration; when the language is not
    in ``LANGUAGE_TO_VOICE_CODE``, the language code itself is used.
    """
    voice_code = self.rconf[RuntimeConfiguration.TTS_VOICE_CODE]
    if voice_code is not None:
        self.log(u"TTS voice override in rconf")
    else:
        try:
            voice_code = self.LANGUAGE_TO_VOICE_CODE[language]
        except KeyError as exc:
            self.log_exc(u"Language code '%s' not found in LANGUAGE_TO_VOICE_CODE" % (language), exc, False, None)
            self.log_warn(u"Using the language code as the voice code")
            voice_code = language
    self.log([u"Language to voice code: '%s' => '%s'", language, voice_code])
    return voice_code
Translate a language value to a voice code .
38,276
def clear_cache(self):
    """Clear the TTS cache, removing all cache files from disk, if caching is enabled."""
    if not self.use_cache:
        return
    self.log(u"Requested to clear TTS cache")
    self.cache.clear()
Clear the TTS cache removing all cache files from disk .
38,277
def set_subprocess_arguments(self, subprocess_arguments):
    """Store the list of arguments that the wrapper will pass to subprocess."""
    self.subprocess_arguments = subprocess_arguments
    self.log([u"Subprocess arguments: %s", subprocess_arguments])
Set the list of arguments that the wrapper will pass to subprocess .
38,278
def synthesize_multiple(self, text_file, output_file_path, quit_after=None, backwards=False):
    """
    Synthesize the text contained in the given fragment list into a
    WAVE file, returning the result of the underlying synthesis call.

    :param text_file: the TextFile with the fragments to synthesize
    :param output_file_path: path of the WAVE file to write
    :param quit_after: if not None, stop synthesizing after reaching
        this value (seconds, presumably — confirm against callers)
    :param bool backwards: if True, synthesize the fragments backwards
    :raises TypeError: if text_file is None or contains non-Unicode lines
    :raises ValueError: if the text file is empty or a language is not supported
    :raises OSError: if the output file cannot be written
    """
    if text_file is None:
        self.log_exc(u"text_file is None", None, True, TypeError)
    if len(text_file) < 1:
        self.log_exc(u"The text file has no fragments", None, True, ValueError)
    if text_file.chars == 0:
        self.log_exc(u"All fragments in the text file are empty", None, True, ValueError)
    if not self.rconf[RuntimeConfiguration.ALLOW_UNLISTED_LANGUAGES]:
        for fragment in text_file.fragments:
            if fragment.language not in self.LANGUAGE_TO_VOICE_CODE:
                self.log_exc(u"Language '%s' is not supported by the selected TTS engine" % (fragment.language), None, True, ValueError)
    for fragment in text_file.fragments:
        for line in fragment.lines:
            if not gf.is_unicode(line):
                self.log_exc(u"The text file contain a line which is not a Unicode string", None, True, TypeError)
    if quit_after is not None:
        self.log([u"Quit after reaching %.3f", quit_after])
    if backwards:
        self.log(u"Synthesizing backwards")
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot write to output file '%s'" % (output_file_path), None, True, OSError)
    # try the direct Python call first, when the engine supports it
    if self.HAS_PYTHON_CALL:
        self.log(u"Calling TTS engine via Python")
        try:
            computed, result = self._synthesize_multiple_python(text_file, output_file_path, quit_after, backwards)
            if computed:
                self.log(u"The _synthesize_multiple_python call was successful, returning anchors")
                return result
            else:
                self.log(u"The _synthesize_multiple_python call failed")
        except Exception as exc:
            self.log_exc(u"An unexpected error occurred while calling _synthesize_multiple_python", exc, False, None)
    # fall back to the C extension, then to subprocess
    self.log(u"Calling TTS engine via C extension or subprocess")
    c_extension_function = self._synthesize_multiple_c_extension if self.HAS_C_EXTENSION_CALL else None
    subprocess_function = self._synthesize_multiple_subprocess if self.HAS_SUBPROCESS_CALL else None
    return gf.run_c_extension_with_fallback(
        self.log,
        self.C_EXTENSION_NAME,
        c_extension_function,
        subprocess_function,
        (text_file, output_file_path, quit_after, backwards),
        rconf=self.rconf
    )
Synthesize the text contained in the given fragment list into a WAVE file .
38,279
def _synthesize_multiple_python(self, text_file, output_file_path, quit_after=None, backwards=False):
    """Synthesize multiple fragments via a direct Python call."""
    self.log(u"Synthesizing multiple via a Python call...")
    result = self._synthesize_multiple_generic(
        helper_function=self._synthesize_single_python_helper,
        text_file=text_file,
        output_file_path=output_file_path,
        quit_after=quit_after,
        backwards=backwards,
    )
    self.log(u"Synthesizing multiple via a Python call... done")
    return result
Synthesize multiple fragments via a Python call .
38,280
def _synthesize_multiple_subprocess(self, text_file, output_file_path, quit_after=None, backwards=False):
    """Synthesize multiple fragments via subprocess."""
    self.log(u"Synthesizing multiple via subprocess...")
    result = self._synthesize_multiple_generic(
        helper_function=self._synthesize_single_subprocess_helper,
        text_file=text_file,
        output_file_path=output_file_path,
        quit_after=quit_after,
        backwards=backwards,
    )
    self.log(u"Synthesizing multiple via subprocess... done")
    return result
Synthesize multiple fragments via subprocess .
38,281
def _read_audio_data(self, file_path):
    """
    Read audio data from the file at ``file_path``.

    Return ``(True, (length, sample_rate, format, samples))`` on
    success, or ``(False, None)`` on failure.
    """
    try:
        self.log(u"Reading audio data...")
        audio_file = AudioFile(
            file_path=file_path,
            file_format=self.OUTPUT_AUDIO_FORMAT,
            rconf=self.rconf,
            logger=self.logger,
        )
        audio_file.read_samples_from_file()
        self.log([u"Duration of '%s': %f", file_path, audio_file.audio_length])
        self.log(u"Reading audio data... done")
        payload = (
            audio_file.audio_length,
            audio_file.audio_sample_rate,
            audio_file.audio_format,
            audio_file.audio_samples,
        )
        return (True, payload)
    except (AudioFileUnsupportedFormatError, OSError) as exc:
        self.log_exc(u"An unexpected error occurred while reading audio data", exc, True, None)
        return (False, None)
Read audio data from file .
38,282
def _loop_no_cache(self, helper_function, num, fragment):
    """
    Synthesize a single fragment without using the cache.

    Return ``(True, data)`` on success, ``(False, None)`` on failure.
    """
    self.log([u"Examining fragment %d (no cache)...", num])
    voice_code = self._language_to_voice_code(fragment.language)
    self.log(u"Calling helper function")
    succeeded, data = helper_function(
        text=fragment.filtered_text,
        voice_code=voice_code,
        output_file_path=None,
        return_audio_data=True,
    )
    if not succeeded:
        self.log_crit(u"An unexpected error occurred in helper_function")
        return (False, None)
    self.log([u"Examining fragment %d (no cache)... done", num])
    return (True, data)
Synthesize all fragments without using the cache
38,283
def _loop_use_cache(self, helper_function, num, fragment):
    """
    Synthesize a single fragment using the cache: reuse cached audio
    when available, otherwise synthesize and (if non-empty) cache it.

    Return ``(True, data)`` on success, ``(False, None)`` on failure.
    """
    self.log([u"Examining fragment %d (cache)...", num])
    # the cache key is (language, filtered text)
    fragment_info = (fragment.language, fragment.filtered_text)
    if self.cache.is_cached(fragment_info):
        self.log(u"Fragment cached: retrieving audio data from cache")
        file_handler, file_path = self.cache.get(fragment_info)
        self.log([u"Reading cached fragment at '%s'...", file_path])
        succeeded, data = self._read_audio_data(file_path)
        if not succeeded:
            self.log_crit(u"An unexpected error occurred while reading cached audio file")
            return (False, None)
        self.log([u"Reading cached fragment at '%s'... done", file_path])
    else:
        self.log(u"Fragment not cached: synthesizing and caching")
        # synthesize into a temporary file that will become the cache entry
        file_info = gf.tmp_file(suffix=u".cache.wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
        file_handler, file_path = file_info
        self.log([u"Synthesizing fragment to '%s'...", file_path])
        voice_code = self._language_to_voice_code(fragment.language)
        self.log(u"Calling helper function")
        succeeded, data = helper_function(
            text=fragment.filtered_text,
            voice_code=voice_code,
            output_file_path=file_path,
            return_audio_data=True
        )
        if not succeeded:
            self.log_crit(u"An unexpected error occurred in helper_function")
            return (False, None)
        self.log([u"Synthesizing fragment to '%s'... done", file_path])
        duration, sr_nu, enc_nu, samples = data
        # only cache fragments that produced actual audio
        if duration > 0:
            self.log(u"Fragment has > 0 duration, adding it to cache")
            self.cache.add(fragment_info, file_info)
            self.log(u"Added fragment to cache")
        else:
            self.log(u"Fragment has zero duration, not adding it to cache")
        self.log([u"Closing file handler for cached output file path '%s'", file_path])
        gf.close_file_handler(file_handler)
    self.log([u"Examining fragment %d (cache)... done", num])
    return (True, data)
Synthesize all fragments using the cache
38,284
def adjust(self, aba_parameters, boundary_indices, real_wave_mfcc, text_file, allow_arbitrary_shift=False):
    """
    Adjust the boundaries of the text map using the algorithm and
    parameters in ``aba_parameters``, storing the resulting sync map
    fragment list internally, and return it.

    :param dict aba_parameters: adjustment parameters, with "nozero",
        "nonspeech", and "algorithm" entries
    :param boundary_indices: MFCC frame indices of the fragment boundaries
    :param real_wave_mfcc: the AudioFileMFCC of the real audio
    :param text_file: the TextFile being aligned
    :param bool allow_arbitrary_shift: if True, zero-length fixes may use
        a duration smaller than the MFCC window shift
    :raises TypeError: if an argument has the wrong type
    """
    self.log(u"Called adjust")
    if boundary_indices is None:
        self.log_exc(u"boundary_indices is None", None, True, TypeError)
    if not isinstance(real_wave_mfcc, AudioFileMFCC):
        self.log_exc(u"real_wave_mfcc is not an AudioFileMFCC object", None, True, TypeError)
    if not isinstance(text_file, TextFile):
        self.log_exc(u"text_file is not a TextFile object", None, True, TypeError)
    # unpack the adjustment parameters
    nozero = aba_parameters["nozero"]
    ns_min, ns_string = aba_parameters["nonspeech"]
    algorithm, algo_parameters = aba_parameters["algorithm"]
    self.log(u" Converting boundary indices to fragment list...")
    # convert frame indices to times, bounded by the MFCC middle region
    begin = real_wave_mfcc.middle_begin * real_wave_mfcc.rconf.mws
    end = real_wave_mfcc.middle_end * real_wave_mfcc.rconf.mws
    time_values = [begin] + list(boundary_indices * self.mws) + [end]
    self.intervals_to_fragment_list(text_file=text_file, time_values=time_values)
    self.log(u" Converting boundary indices to fragment list... done")
    self.log(u" Processing fragments with zero length...")
    self._process_zero_length(nozero, allow_arbitrary_shift)
    self.log(u" Processing fragments with zero length... done")
    self.log(u" Processing nonspeech fragments...")
    self._process_long_nonspeech(ns_min, ns_string, real_wave_mfcc)
    self.log(u" Processing nonspeech fragments... done")
    self.log(u" Adjusting...")
    # dispatch table: algorithm id => adjustment handler
    ALGORITHM_MAP = {
        self.AFTERCURRENT: self._adjust_aftercurrent,
        self.AUTO: self._adjust_auto,
        self.BEFORENEXT: self._adjust_beforenext,
        self.OFFSET: self._adjust_offset,
        self.PERCENT: self._adjust_percent,
        self.RATE: self._adjust_rate,
        self.RATEAGGRESSIVE: self._adjust_rate_aggressive,
    }
    ALGORITHM_MAP[algorithm](real_wave_mfcc, algo_parameters)
    self.log(u" Adjusting... done")
    self.log(u" Smoothing...")
    self._smooth_fragment_list(real_wave_mfcc.audio_length, ns_string)
    self.log(u" Smoothing... done")
    return self.smflist
Adjust the boundaries of the text map using the algorithm and parameters specified in the constructor, storing the resulting sync map fragment list internally.
38,285
def append_fragment_list_to_sync_root(self, sync_root):
    """
    Append the sync map fragment list under the given sync map tree node.

    :raises TypeError: if ``sync_root`` is not a Tree object
    """
    if not isinstance(sync_root, Tree):
        self.log_exc(u"sync_root is not a Tree object", None, True, TypeError)
    self.log(u"Appending fragment list to sync root...")
    for fragment in self.smflist:
        child = Tree(value=fragment)
        sync_root.add_child(child)
    self.log(u"Appending fragment list to sync root... done")
Append the sync map fragment list to the given node from a sync map tree .
38,286
def _process_zero_length(self, nozero, allow_arbitrary_shift):
    """
    If ``nozero`` is True, stretch zero-length fragments in the sync map
    fragment list so that none of them (except the head/tail) has zero length.
    """
    self.log(u"Called _process_zero_length")
    if not nozero:
        self.log(u"Processing zero length intervals not requested: returning")
        return
    self.log(u"Processing zero length intervals requested")
    self.log(u" Checking and fixing...")
    duration = self.rconf[RuntimeConfiguration.ABA_NO_ZERO_DURATION]
    self.log([u" Requested no zero duration: %.3f", duration])
    if not allow_arbitrary_shift:
        # clamp to a multiple of the MFCC window shift
        self.log(u" No arbitrary shift => taking max with mws")
        duration = self.rconf.mws.geq_multiple(duration)
    self.log([u" Actual no zero duration: %.3f", duration])
    last_index = len(self.smflist) - 1
    self.smflist.fix_zero_length_fragments(duration=duration, min_index=1, max_index=last_index)
    self.log(u" Checking and fixing... done")
    if self.smflist.has_zero_length_fragments(1, last_index):
        self.log_warn(u" The fragment list still has fragments with zero length")
    else:
        self.log(u" The fragment list does not have fragments with zero length")
If nozero is True modify the sync map fragment list so that no fragment will have zero length .
38,287
def main():
    """Entry point of the aeneas-cli hydra script (compiled by pyinstaller)."""
    invoke = "aeneas-cli" if FROZEN else "pyinstaller-aeneas-cli.py"
    HydraCLI(invoke=invoke).run(arguments=sys.argv, show_help=False)
This is the aeneas - cli hydra script to be compiled by pyinstaller .
38,288
def run_vad(self, wave_energy, log_energy_threshold=None, min_nonspeech_length=None, extend_before=None, extend_after=None):
    """
    Compute the time intervals containing speech and nonspeech, and
    return a boolean mask (one value per MFCC frame) with speech frames
    set to True and nonspeech frames set to False.

    Parameters left at None default to the corresponding runtime
    configuration values, converted from seconds to frames where needed.
    """
    self.log(u"Computing VAD for wave")
    mfcc_window_shift = self.rconf.mws
    self.log([u"MFCC window shift (s): %.3f", mfcc_window_shift])
    if log_energy_threshold is None:
        log_energy_threshold = self.rconf[RuntimeConfiguration.VAD_LOG_ENERGY_THRESHOLD]
    self.log([u"Log energy threshold: %.3f", log_energy_threshold])
    if min_nonspeech_length is None:
        # seconds => frames
        min_nonspeech_length = int(self.rconf[RuntimeConfiguration.VAD_MIN_NONSPEECH_LENGTH] / mfcc_window_shift)
    self.log([u"Min nonspeech length (s): %.3f", self.rconf[RuntimeConfiguration.VAD_MIN_NONSPEECH_LENGTH]])
    if extend_before is None:
        extend_before = int(self.rconf[RuntimeConfiguration.VAD_EXTEND_SPEECH_INTERVAL_BEFORE] / mfcc_window_shift)
    self.log([u"Extend speech before (s): %.3f", self.rconf[RuntimeConfiguration.VAD_EXTEND_SPEECH_INTERVAL_BEFORE]])
    if extend_after is None:
        extend_after = int(self.rconf[RuntimeConfiguration.VAD_EXTEND_SPEECH_INTERVAL_AFTER] / mfcc_window_shift)
    self.log([u"Extend speech after (s): %.3f", self.rconf[RuntimeConfiguration.VAD_EXTEND_SPEECH_INTERVAL_AFTER]])
    energy_length = len(wave_energy)
    # the threshold is relative to the minimum energy of the wave
    energy_threshold = numpy.min(wave_energy) + log_energy_threshold
    self.log([u"Min nonspeech length (frames): %d", min_nonspeech_length])
    self.log([u"Extend speech before (frames): %d", extend_before])
    self.log([u"Extend speech after (frames): %d", extend_after])
    self.log([u"Energy vector length (frames): %d", energy_length])
    self.log([u"Energy threshold (log): %.3f", energy_threshold])
    self.log(u"Determining initial labels...")
    mask = wave_energy >= energy_threshold
    # a window of min_nonspeech_length consecutive frames, all below
    # threshold (window sum == 0), marks the start of a nonspeech run
    windows = self._rolling_window(mask, min_nonspeech_length)
    nonspeech_runs = self._compute_runs((numpy.where(numpy.sum(windows, axis=1) == 0))[0])
    self.log(u"Determining initial labels... done")
    self.log(u"Determining final labels...")
    mask = numpy.ones(energy_length, dtype="bool")
    for ns in nonspeech_runs:
        # shrink each nonspeech run so that the neighboring
        # speech intervals are extended into it
        start = ns[0]
        if (extend_after > 0) and (start > 0):
            start += extend_after
        stop = ns[-1] + min_nonspeech_length
        if (extend_before > 0) and (stop < energy_length - 1):
            stop -= extend_before
        mask[start:stop] = 0
    self.log(u"Determining final labels... done")
    return mask
Compute the time intervals containing speech and nonspeech and return a boolean mask with speech frames set to True and nonspeech frames set to False .
38,289
def _compute_runs ( self , array ) : if len ( array ) < 1 : return [ ] return numpy . split ( array , numpy . where ( numpy . diff ( array ) != 1 ) [ 0 ] + 1 )
Compute runs as a list of arrays each containing the indices of a contiguous run .
38,290
def _rolling_window ( self , array , size ) : shape = array . shape [ : - 1 ] + ( array . shape [ - 1 ] - size + 1 , size ) strides = array . strides + ( array . strides [ - 1 ] , ) return numpy . lib . stride_tricks . as_strided ( array , shape = shape , strides = strides )
Compute rolling windows of width size of the given array .
38,291
def usage(self):
    """Return the samtools usage information for this command (stderr preferred)."""
    retval, stderr, stdout = _pysam_dispatch(
        self.collection,
        self.dispatch,
        is_usage=True,
        catch_stdout=True,
    )
    return stderr if stderr else stdout
Return the samtools usage information for this command.
38,292
def iterate ( infile ) : conv_subst = ( str , lambda x : int ( x ) - 1 , str , str , int , int , int , int , str , str ) conv_indel = ( str , lambda x : int ( x ) - 1 , str , str , int , int , int , int , str , str , int , int , int ) for line in infile : d = line [ : - 1 ] . split ( ) if d [ 2 ] == "*" : try : yield PileupIndel ( * [ x ( y ) for x , y in zip ( conv_indel , d ) ] ) except TypeError : raise pysam . SamtoolsError ( "parsing error in line: `%s`" % line ) else : try : yield PileupSubstitution ( * [ x ( y ) for x , y in zip ( conv_subst , d ) ] ) except TypeError : raise pysam . SamtoolsError ( "parsing error in line: `%s`" % line )
Iterate over a samtools pileup -c formatted file.
38,293
def vcf2pileup ( vcf , sample ) : chromosome = vcf . contig pos = vcf . pos reference = vcf . ref allelles = [ reference ] + vcf . alt data = vcf [ sample ] genotypes = data [ "GT" ] if len ( genotypes ) > 1 : raise ValueError ( "only single genotype per position, %s" % ( str ( vcf ) ) ) genotypes = genotypes [ 0 ] if genotypes [ 0 ] == "." : return None genotypes = [ allelles [ int ( x ) ] for x in genotypes if x != "/" ] snp_quality = consensus_quality = data . get ( "GQ" , [ 0 ] ) [ 0 ] mapping_quality = vcf . info . get ( "MQ" , [ 0 ] ) [ 0 ] coverage = data . get ( "DP" , 0 ) if len ( reference ) > 1 or max ( [ len ( x ) for x in vcf . alt ] ) > 1 : genotype , offset = translateIndelGenotypeFromVCF ( genotypes , reference ) return PileupIndel ( chromosome , pos + offset , "*" , genotype , consensus_quality , snp_quality , mapping_quality , coverage , genotype , "<" * len ( genotype ) , 0 , 0 , 0 ) else : genotype = encodeGenotype ( "" . join ( genotypes ) ) read_bases = "" base_qualities = "" return PileupSubstitution ( chromosome , pos , reference , genotype , consensus_quality , snp_quality , mapping_quality , coverage , read_bases , base_qualities )
convert vcf record to pileup record .
38,294
def iterate_from_vcf ( infile , sample ) : vcf = pysam . VCF ( ) vcf . connect ( infile ) if sample not in vcf . getsamples ( ) : raise KeyError ( "sample %s not vcf file" ) for row in vcf . fetch ( ) : result = vcf2pileup ( row , sample ) if result : yield result
iterate over a vcf - formatted file .
38,295
def _update_pysam_files(cf, destdir):
    """
    Rewrite the C source files in *cf* as ``*.pysam.c`` companions that
    redirect output through pysam, and instantiate the per-directory
    ``<basename>.pysam.h`` / ``<basename>.pysam.c`` support files from the
    templates under ``import/``.

    :param cf: iterable of C source file paths (falsy entries are skipped)
    :param destdir: destination directory; its basename (e.g. ``samtools``)
                    prefixes all renamed symbols
    """
    basename = os.path.basename(destdir)
    for filename in cf:
        if not filename:
            continue
        dest = filename + ".pysam.c"
        with open(filename, encoding="utf-8") as infile:
            lines = "".join(infile.readlines())
        with open(dest, "w", encoding="utf-8") as outfile:
            outfile.write('#include "{}.pysam.h"\n\n'.format(basename))
            subname, _ = os.path.splitext(os.path.basename(filename))
            # rename main() so the tool can be linked into one extension:
            # the collection's main files become <basename>_main, all other
            # files get a file-specific <basename>_<subname>_main
            if subname in MAIN.get(basename, []):
                lines = re.sub(r"int main\(", "int {}_main(".format(basename), lines)
            else:
                lines = re.sub(r"int main\(", "int {}_{}_main(".format(basename, subname), lines)
            # route all standard streams and console output through the
            # pysam-provided replacements (order matters: stdout must be
            # renamed before the printf rewrite references <basename>_stdout)
            lines = re.sub("stderr", "{}_stderr".format(basename), lines)
            lines = re.sub("stdout", "{}_stdout".format(basename), lines)
            lines = re.sub(r" printf\(", " fprintf({}_stdout, ".format(basename), lines)
            # [^kf] avoids rewriting fputs()/kputs() while catching bare puts()
            lines = re.sub(r"([^kf])puts\(", r"\1{}_puts(".format(basename), lines)
            lines = re.sub(r"putchar\(([^)]+)\)", r"fputc(\1, {}_stdout)".format(basename), lines)
            fn = os.path.basename(filename)
            # a few files need hand-crafted repairs where the generic regexes
            # above mangle the code (note the left-hand sides already contain
            # the damage produced by the earlier substitutions)
            SPECIFIC_SUBSTITUTIONS = {
                "bam_md.c": (
                    'sam_open_format("-", mode_w',
                    'sam_open_format({}_stdout_fn, mode_w'.format(basename)),
                "phase.c": (
                    'putc("ACGT"[f->seq[j] == 1? (c&3, {}_stdout) : (c>>16&3)]);'.format(basename),
                    'putc("ACGT"[f->seq[j] == 1? (c&3) : (c>>16&3)], {}_stdout);'.format(basename)),
                "cut_target.c": (
                    'putc(33 + (cns[j]>>8>>2, {}_stdout));'.format(basename),
                    'putc(33 + (cns[j]>>8>>2), {}_stdout);'.format(basename))
            }
            if fn in SPECIFIC_SUBSTITUTIONS:
                lines = lines.replace(SPECIFIC_SUBSTITUTIONS[fn][0], SPECIFIC_SUBSTITUTIONS[fn][1])
            outfile.write(lines)
    # instantiate the shared redirection header/source from the templates,
    # substituting the collection name for the @pysam@ placeholder
    with open(os.path.join("import", "pysam.h")) as inf, open(os.path.join(destdir, "{}.pysam.h".format(basename)), "w") as outf:
        outf.write(re.sub("@pysam@", basename, inf.read()))
    with open(os.path.join("import", "pysam.c")) as inf, open(os.path.join(destdir, "{}.pysam.c".format(basename)), "w") as outf:
        outf.write(re.sub("@pysam@", basename, inf.read()))
Update pysam files, applying redirection of output.
38,296
def get_include ( ) : dirname = os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) ) ) htslib_possibilities = [ os . path . join ( dirname , '..' , 'htslib' ) , os . path . join ( dirname , 'include' , 'htslib' ) ] samtool_possibilities = [ os . path . join ( dirname , '..' , 'samtools' ) , os . path . join ( dirname , 'include' , 'samtools' ) ] includes = [ dirname ] for header_locations in [ htslib_possibilities , samtool_possibilities ] : for header_location in header_locations : if os . path . exists ( header_location ) : includes . append ( os . path . abspath ( header_location ) ) break return includes
return a list of include directories .
38,297
def get_libraries ( ) : dirname = os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) ) ) pysam_libs = [ 'libctabixproxies' , 'libcfaidx' , 'libcsamfile' , 'libcvcf' , 'libcbcf' , 'libctabix' ] if pysam . config . HTSLIB == "builtin" : pysam_libs . append ( 'libchtslib' ) so = sysconfig . get_config_var ( 'SO' ) return [ os . path . join ( dirname , x + so ) for x in pysam_libs ]
return a list of libraries to link against .
38,298
def has_edge ( self , edge ) : u , v = edge return ( u , v ) in self . edge_properties
Return whether an edge exists .
38,299
def dump_package_data ( data , buf , format_ = FileFormat . py , skip_attributes = None ) : if format_ == FileFormat . txt : raise ValueError ( "'txt' format not supported for packages." ) data_ = dict ( ( k , v ) for k , v in data . iteritems ( ) if v is not None ) data_ = package_serialise_schema . validate ( data_ ) skip = set ( skip_attributes or [ ] ) items = [ ] for key in package_key_order : if key not in skip : value = data_ . pop ( key , None ) if value is not None : items . append ( ( key , value ) ) for key , value in data_ . iteritems ( ) : if key not in skip : items . append ( ( key , value ) ) dump_func = dump_functions [ format_ ] dump_func ( items , buf )
Write package data to buf .