idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
226,100
def check_config_xml(self, contents):
    """
    Check that the given XML config file contents is well formed
    and carries all the required job and task parameters.

    Returns a ValidatorResult (also stored in self.result).
    """
    self.log(u"Checking contents XML config file")
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_config_xml"):
        return self.result
    contents = gf.safe_bytes(contents)
    self.log(u"Checking that contents is well formed")
    self.check_raw_string(contents, is_bstring=True)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for job")
    job_params = gf.config_xml_to_dict(contents, self.result, parse_job=True)
    self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_params)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for task")
    task_params_list = gf.config_xml_to_dict(contents, self.result, parse_job=False)
    for task_params in task_params_list:
        self.log([u"Checking required parameters for task: '%s'", task_params])
        self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, task_params)
        if not self.result.passed:
            return self.result
    return self.result
Check whether the given XML config file contents is well - formed and it has all the required parameters .
318
20
226,101
def check_container(self, container_path, container_format=None, config_string=None):
    """
    Check that the container at container_path is well formed:
    it exists, has a TXT or XML config (or one is supplied via
    config_string), and yields a valid analyzed Job.

    Returns a ValidatorResult (also stored in self.result).
    """
    self.log([u"Checking container '%s'", container_path])
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_container"):
        return self.result
    if not (gf.file_exists(container_path) or gf.directory_exists(container_path)):
        self._failed(u"Container '%s' not found." % container_path)
        return self.result
    container = Container(container_path, container_format)
    try:
        self.log(u"Checking container has config file")
        if config_string is not None:
            # config supplied directly (e.g. from the wizard)
            self.log(u"Container with config string from wizard")
            self.check_config_txt(config_string, is_config_string=True)
        elif container.has_config_xml:
            self.log(u"Container has XML config file")
            contents = container.read_entry(container.entry_config_xml)
            if contents is None:
                self._failed(u"Unable to read the contents of XML config file.")
                return self.result
            self.check_config_xml(contents)
        elif container.has_config_txt:
            self.log(u"Container has TXT config file")
            contents = container.read_entry(container.entry_config_txt)
            if contents is None:
                self._failed(u"Unable to read the contents of TXT config file.")
                return self.result
            self.check_config_txt(contents, is_config_string=False)
        else:
            self._failed(u"Container does not have a TXT or XML configuration file.")
        self.log(u"Checking we have a valid job in the container")
        if not self.result.passed:
            return self.result
        self.log(u"Analyze the contents of the container")
        analyzer = AnalyzeContainer(container)
        if config_string is not None:
            job = analyzer.analyze(config_string=config_string)
        else:
            job = analyzer.analyze()
        self._check_analyzed_job(job, container)
    except OSError:
        self._failed(u"Unable to read the contents of the container.")
    return self.result
Check whether the given container is well - formed .
516
10
226,102
def _are_safety_checks_disabled(self, caller=u"unknown_function"):
    """
    Return True when safety checks are turned off in the runtime
    configuration, logging a warning naming the skipped caller.
    """
    if not self.rconf.safety_checks:
        self.log_warn([u"Safety checks disabled => %s passed", caller])
        return True
    return False
Return True if safety checks are disabled .
57
8
226,103
def _failed(self, msg):
    """
    Record a validation failure: log the message, mark the current
    result as not passed, and store the error on it.
    """
    self.log(msg)
    self.result.passed = False
    self.result.add_error(msg)
    self.log(u"Failed")
Log a validation failure .
42
5
226,104
def _check_utf8_encoding(self, bstring):
    """
    Fail the current result unless bstring is a sequence of bytes
    properly encoded in UTF-8.
    """
    if not gf.is_bytes(bstring):
        self._failed(u"The given string is not a sequence of bytes")
    elif not gf.is_utf8_encoded(bstring):
        self._failed(u"The given string is not encoded in UTF-8.")
Check whether the given sequence of bytes is properly encoded in UTF - 8 .
84
15
226,105
def _check_reserved_characters(self, ustring):
    """
    Fail the current result if ustring contains any of the
    configuration-reserved characters.
    """
    found = [c for c in gc.CONFIG_RESERVED_CHARACTERS if c in ustring]
    if found:
        self._failed(u"The given string contains the reserved characters '%s'." % u" ".join(found))
Check whether the given Unicode string contains reserved characters .
78
10
226,106
def _check_allowed_values(self, parameters):
    """
    Check each known parameter against its list of allowed values,
    failing the current result on the first violation found.
    """
    for key, allowed_values in self.ALLOWED_VALUES:
        self.log([u"Checking allowed values for parameter '%s'", key])
        if key not in parameters:
            continue
        value = parameters[key]
        if value not in allowed_values:
            self._failed(u"Parameter '%s' has value '%s' which is not allowed." % (key, value))
            return
    self.log(u"Passed")
Check whether the given parameter value is allowed . Log messages into self . result .
110
16
226,107
def _check_implied_parameters(self, parameters):
    """
    For each (key, values, implied_keys) rule, check that when
    parameters[key] is one of values, at least one of implied_keys is
    also present; fail the current result on the first broken rule.
    """
    for key, values, implied_keys in self.IMPLIED_PARAMETERS:
        self.log([u"Checking implied parameters by '%s'='%s'", key, values])
        if (key not in parameters) or (parameters[key] not in values):
            continue
        if any(k in parameters for k in implied_keys):
            continue
        if len(implied_keys) == 1:
            msg = u"Parameter '%s' is required when '%s'='%s'." % (implied_keys[0], key, parameters[key])
        else:
            msg = u"At least one of [%s] is required when '%s'='%s'." % (",".join(implied_keys), key, parameters[key])
        self._failed(msg)
        return
    self.log(u"Passed")
Check whether at least one of the keys in implied_keys is in parameters when a given key = value is present in parameters for some value in values . Log messages into self . result .
210
38
226,108
def _check_required_parameters(self, required_parameters, parameters):
    """
    Check that parameters is non-empty, contains every required key,
    and that its values pass the allowed/implied checks.

    Returns self.result (failures are recorded on it).
    """
    self.log([u"Checking required parameters '%s'", required_parameters])
    self.log(u"Checking input parameters are not empty")
    if not parameters:
        self._failed(u"No parameters supplied.")
        return
    self.log(u"Checking no required parameter is missing")
    missing = [p for p in required_parameters if p not in parameters]
    if missing:
        # report the first missing parameter, as the original loop did
        self._failed(u"Required parameter '%s' not set." % missing[0])
        return
    self.log(u"Checking all parameter values are allowed")
    self._check_allowed_values(parameters)
    self.log(u"Checking all implied parameters are present")
    self._check_implied_parameters(parameters)
    return self.result
Check whether the given parameter dictionary contains all the required parameters . Log messages into self . result .
198
21
226,109
def _check_analyzed_job(self, job, container):
    """
    Check that the Job generated from the container is well formed:
    not None, has at least one Task (and not more than the configured
    maximum), and each Task text file is non-empty UTF-8.
    Failures are recorded on self.result.
    """
    self.log(u"Checking the Job object generated from container")
    self.log(u"Checking that the Job is not None")
    if job is None:
        self._failed(u"Unable to create a Job from the container.")
        return
    self.log(u"Checking that the Job has at least one Task")
    if len(job) == 0:
        self._failed(u"Unable to create at least one Task from the container.")
        return
    max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
    if max_tasks > 0:
        self.log(u"Checking that the Job does not have too many Tasks")
        if len(job) > max_tasks:
            self._failed(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(job), max_tasks))
            return
    self.log(u"Checking that each Task text file is well formed")
    for task in job.tasks:
        self.log([u"Checking Task text file '%s'", task.text_file_path])
        text_file_bstring = container.read_entry(task.text_file_path)
        if (text_file_bstring is None) or (len(text_file_bstring) == 0):
            self._failed(u"Text file '%s' is empty" % task.text_file_path)
            return
        self._check_utf8_encoding(text_file_bstring)
        if not self.result.passed:
            self._failed(u"Text file '%s' is not encoded in UTF-8" % task.text_file_path)
            return
        self._check_not_empty(text_file_bstring)
        if not self.result.passed:
            self._failed(u"Text file '%s' is empty" % task.text_file_path)
            return
        self.log([u"Checking Task text file '%s': passed", task.text_file_path])
    self.log(u"Checking each Task text file is well formed: passed")
Check that the job object generated from the given container is well formed that it has at least one task and that the text file of each task has the correct encoding . Log messages into self . result .
512
40
226,110
def pretty_print(self, warnings=False):
    """
    Return a human-readable string listing errors and, when
    warnings is True, warnings as well.
    """
    lines = []
    if warnings and self.warnings:
        lines.append(u"Warnings:")
        lines.extend(u"  %s" % w for w in self.warnings)
    if self.errors:
        lines.append(u"Errors:")
        lines.extend(u"  %s" % e for e in self.errors)
    return u"\n".join(lines)
Pretty print warnings and errors .
112
6
226,111
def convert(self, input_file_path, output_file_path, head_length=None, process_length=None):
    """
    Convert the audio file at input_file_path into output_file_path
    by invoking the configured ffmpeg executable.

    head_length/process_length, when given, are passed as -ss/-t.
    Raises OSError (via log_exc) on unreadable input, unwritable or
    unwritten output; FFMPEGPathError if ffmpeg cannot be invoked.
    Returns output_file_path.
    """
    # input must be readable
    if not gf.file_can_be_read(input_file_path):
        self.log_exc(u"Input file '%s' cannot be read" % (input_file_path), None, True, OSError)
    # output must be writable
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Output file '%s' cannot be written" % (output_file_path), None, True, OSError)
    # build the ffmpeg command line
    cmd = [self.rconf[RuntimeConfiguration.FFMPEG_PATH]]
    cmd += ["-i", input_file_path]
    if head_length is not None:
        cmd += ["-ss", head_length]
    if process_length is not None:
        cmd += ["-t", process_length]
    # sample-rate-specific parameters, falling back to the defaults
    if self.rconf.sample_rate in self.FFMPEG_PARAMETERS_MAP:
        cmd += self.FFMPEG_PARAMETERS_MAP[self.rconf.sample_rate]
    else:
        cmd += self.FFMPEG_PARAMETERS_DEFAULT
    cmd.append(output_file_path)
    self.log([u"Calling with arguments '%s'", cmd])
    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        proc.communicate()
        proc.stdout.close()
        proc.stdin.close()
        proc.stderr.close()
    except OSError as exc:
        self.log_exc(u"Unable to call the '%s' ffmpeg executable" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]), exc, True, FFMPEGPathError)
    self.log(u"Call completed")
    # ffmpeg may exit 0 without producing output; verify the file exists
    if not gf.file_exists(output_file_path):
        self.log_exc(u"Output file '%s' was not written" % (output_file_path), None, True, OSError)
    self.log([u"Returning output file path '%s'", output_file_path])
    return output_file_path
Convert the audio file at input_file_path into output_file_path using the parameters set in the constructor or through the parameters property .
576
30
226,112
def rate_lack(self, max_rate):
    """
    Return the extra time this fragment would need in order to
    respect the given max rate; zero for non-REGULAR fragments.
    """
    if self.fragment_type != self.REGULAR:
        return TimeValue("0.000")
    return self.chars / max_rate - self.length
The time interval that this fragment lacks to respect the given max rate .
47
14
226,113
def rate_slack(self, max_rate):
    """
    Return the maximum time that can be stolen from this fragment
    while keeping it within the given max rate: the negated rate lack
    for REGULAR fragments, the whole length for NONSPEECH ones,
    zero otherwise.
    """
    if self.fragment_type == self.REGULAR:
        return -self.rate_lack(max_rate)
    if self.fragment_type == self.NONSPEECH:
        return self.length
    return TimeValue("0.000")
The maximum time interval that can be stolen to this fragment while keeping it respecting the given max rate .
69
20
226,114
def write_to_file(self, output_file_path, intervals, template):
    """
    Format each interval with template and either print the lines to
    the console (output_file_path is None) or write them to file.
    """
    formatted = [template % (interval) for interval in intervals]
    if output_file_path is None:
        # no output path given: dump to console
        self.print_info(u"Intervals detected:")
        for line in formatted:
            self.print_generic(line)
    else:
        with io.open(output_file_path, "w", encoding="utf-8") as output_file:
            output_file.write(u"\n".join(formatted))
        self.print_success(u"Created file '%s'" % output_file_path)
Write intervals to file .
136
5
226,115
def print_parameters(self):
    """
    Print the list of available parameters and return the help exit code.
    """
    self.print_info(u"Available parameters:")
    self.print_generic(u"\n" + u"\n".join(self.PARAMETERS) + u"\n")
    return self.HELP_EXIT_CODE
Print the list of parameters and exit .
65
8
226,116
def main():
    """
    Run aeneas.cew: read (voice_code, text) pairs from a text file,
    synthesize them, and write sample rate, synthesized-fragment
    count, and interval data to the output file.

    Expects five CLI arguments:
    QUIT_AFTER BACKWARDS TEXT_FILE_PATH AUDIO_FILE_PATH DATA_FILE_PATH
    """
    # make sure we have enough parameters
    if len(sys.argv) < 6:
        print("You must pass five arguments: QUIT_AFTER BACKWARDS TEXT_FILE_PATH AUDIO_FILE_PATH DATA_FILE_PATH")
        return 1
    # read parameters
    c_quit_after = float(sys.argv[1])  # NOTE: cew needs float, not TimeValue
    c_backwards = int(sys.argv[2])
    text_file_path = sys.argv[3]
    audio_file_path = sys.argv[4]
    data_file_path = sys.argv[5]
    # read (voice_code, text) pairs from file
    s_text = []
    with io.open(text_file_path, "r", encoding="utf-8") as text:
        for line in text.readlines():
            # NOTE: not using strip() to avoid removing trailing blank characters
            line = line.replace(u"\n", u"").replace(u"\r", u"")
            space_idx = line.find(" ")
            if space_idx > 0:
                s_text.append((line[:space_idx], line[(space_idx + 1):]))
    # convert to bytes/unicode as required by subprocess
    if gf.PY2:
        c_text = [(gf.safe_bytes(v), gf.safe_bytes(t)) for v, t in s_text]
    else:
        c_text = [(gf.safe_unicode(v), gf.safe_unicode(t)) for v, t in s_text]
    try:
        import aeneas.cew.cew
        sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
            audio_file_path,
            c_quit_after,
            c_backwards,
            c_text
        )
        with io.open(data_file_path, "w", encoding="utf-8") as data:
            data.write(u"%d\n" % (sr))
            data.write(u"%d\n" % (sf))
            data.write(u"\n".join([u"%.3f %.3f" % (i[0], i[1]) for i in intervals]))
    except Exception as exc:
        print(u"Unexpected error: %s" % str(exc))
Run aeneas . cew reading input text from file and writing audio and interval data to file .
603
21
226,117
def parse(self, input_text, syncmap):
    """
    Read fragments from a SMIL file and add them to syncmap.

    Each <par> is expected to contain a <text> child (fragment
    identifier, from the src URL) and an <audio> child (clipBegin /
    clipEnd times, in hh:mm:ss.mmm or ss.mmm form).
    """
    from lxml import etree

    def clip_time(raw_value):
        # clip values may be hh:mm:ss.mmm (contains ":") or plain ss.mmm
        if ":" in raw_value:
            return gf.time_from_hhmmssmmm(raw_value)
        return gf.time_from_ssmmm(raw_value)

    smil_ns = "{http://www.w3.org/ns/SMIL}"
    root = etree.fromstring(gf.safe_bytes(input_text))
    for par in root.iter(smil_ns + "par"):
        for child in par:
            if child.tag == (smil_ns + "text"):
                identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
            elif child.tag == (smil_ns + "audio"):
                begin = clip_time(child.get("clipBegin"))
                end = clip_time(child.get("clipEnd"))
        # TODO read text from additional text_file?
        self._add_fragment(
            syncmap=syncmap,
            identifier=identifier,
            lines=[u""],
            begin=begin,
            end=end
        )
Read from SMIL file .
303
6
226,118
def _is_valid_index(self, index):
    """
    Return True if and only if the given index is valid:
    an int within [0, len(self)), or a list whose elements
    are ALL valid indices.

    Bug fix: the original accumulated list validity with
    ``valid = valid or self._is_valid_index(i)`` starting from True,
    so every list (even one of out-of-range indices) was reported
    valid; a list is valid only when all of its elements are.
    """
    if isinstance(index, int):
        return (index >= 0) and (index < len(self))
    if isinstance(index, list):
        # all elements must be valid (vacuously True for an empty list,
        # matching the original's behavior on [])
        return all(self._is_valid_index(i) for i in index)
    return False
Return True if and only if the given index is valid .
73
12
226,119
def _check_boundaries(self, fragment):
    """
    Check that the given fragment's interval lies within the list
    boundaries; raise TypeError on wrong types, ValueError when the
    interval falls outside [self.begin, self.end].
    """
    if not isinstance(fragment, SyncMapFragment):
        raise TypeError(u"fragment is not an instance of SyncMapFragment")
    interval = fragment.interval
    if not isinstance(interval, TimeInterval):
        raise TypeError(u"interval is not an instance of TimeInterval")
    if (self.begin is not None) and (interval.begin < self.begin):
        raise ValueError(u"interval.begin is before self.begin")
    if (self.end is not None) and (interval.end > self.end):
        raise ValueError(u"interval.end is after self.end")
Check that the interval of the given fragment is within the boundaries of the list . Raises an error if not OK .
154
24
226,120
def remove(self, indices):
    """
    Remove the fragments at the given list of indices,
    preserving the order of the remaining fragments.
    """
    if not self._is_valid_index(indices):
        self.log_exc(u"The given list of indices is not valid", None, True, ValueError)
    # keep every fragment whose position is not listed
    # (duplicates in indices are harmless, as in the original scan)
    drop = set(indices)
    self.__fragments = [self[i] for i in range(len(self)) if i not in drop]
Remove the fragments corresponding to the given list of indices .
160
11
226,121
def sort(self):
    """
    Sort the fragments in the list, then verify that no two
    consecutive fragments overlap in a forbidden way; raise
    ValueError (via log_exc) if they do.
    """
    if self.is_guaranteed_sorted:
        self.log(u"Already sorted, returning")
        return
    self.log(u"Sorting...")
    self.__fragments = sorted(self.__fragments)
    self.log(u"Sorting... done")
    self.log(u"Checking relative positions...")
    for i in range(len(self) - 1):
        cur_interval = self[i].interval
        nxt_interval = self[i + 1].interval
        if cur_interval.relative_position_of(nxt_interval) not in self.ALLOWED_POSITIONS:
            self.log(u"Found overlapping fragments:")
            self.log([u"  Index %d => %s", i, cur_interval])
            self.log([u"  Index %d => %s", i + 1, nxt_interval])
            self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
    self.log(u"Checking relative positions... done")
    self.__sorted = True
Sort the fragments in the list .
254
7
226,122
def remove_nonspeech_fragments(self, zero_length_only=False):
    """
    Remove NONSPEECH fragments from the list; when zero_length_only
    is True, remove only the zero-length ones and promote the
    surviving NONSPEECH fragments to REGULAR.
    """
    self.log(u"Removing nonspeech fragments...")
    candidates = list(self.nonspeech_fragments)
    if zero_length_only:
        candidates = [(i, f) for i, f in candidates if f.has_zero_length]
    self.remove([i for i, f in candidates])
    if zero_length_only:
        # the nonspeech fragments we kept become regular fragments
        for i, f in list(self.nonspeech_fragments):
            f.fragment_type = SyncMapFragment.REGULAR
    self.log(u"Removing nonspeech fragments... done")
Remove NONSPEECH fragments from the list .
170
10
226,123
def has_zero_length_fragments(self, min_index=None, max_index=None):
    """
    Return True if at least one fragment in [min_index, max_index)
    has zero length; with no indices given, check all fragments.
    """
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    zero_length = [i for i in range(min_index, max_index) if self[i].has_zero_length]
    self.log([u"Fragments with zero length: %s", zero_length])
    return len(zero_length) > 0
Return True if the list has at least one interval with zero length within min_index and max_index . If the latter are not specified check all intervals .
112
33
226,124
def has_adjacent_fragments_only(self, min_index=None, max_index=None):
    """
    Return True if all consecutive fragments in [min_index, max_index)
    are adjacent (no gaps); with no indices given, check all fragments.
    """
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    for i in range(min_index, max_index - 1):
        cur_interval = self[i].interval
        nxt_interval = self[i + 1].interval
        if not cur_interval.is_adjacent_before(nxt_interval):
            self.log(u"Found non adjacent fragments")
            self.log([u"  Index %d => %s", i, cur_interval])
            self.log([u"  Index %d => %s", i + 1, nxt_interval])
            return False
    return True
Return True if the list contains only adjacent fragments that is if it does not have gaps .
179
18
226,125
def offset(self, offset):
    """
    Shift every fragment interval by the given offset,
    clamped to the list boundaries and never going negative.
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u"  Offset %.3f", offset])
    for fragment in self.fragments:
        fragment.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done")
Move all the intervals in the list by the given offset .
99
12
226,126
def move_transition_point(self, fragment_index, value):
    """
    Move the transition point between fragment fragment_index and the
    next fragment to the time value, when the move is legal; silently
    return (after logging) otherwise.
    """
    self.log(u"Called move_transition_point with")
    self.log([u"  fragment_index %d", fragment_index])
    self.log([u"  value %.3f", value])
    # the last two fragments are not eligible, hence len(self) - 3
    if (fragment_index < 0) or (fragment_index > (len(self) - 3)):
        self.log(u"Bad fragment_index, returning")
        return
    current_interval = self[fragment_index].interval
    next_interval = self[fragment_index + 1].interval
    if value > next_interval.end:
        self.log(u"Bad value, returning")
        return
    if not current_interval.is_non_zero_before_non_zero(next_interval):
        self.log(u"Bad interval configuration, returning")
        return
    # shift the shared boundary
    current_interval.end = value
    next_interval.begin = value
    self.log(u"Moved transition point")
Change the transition point between fragment fragment_index and the next fragment to the time value value .
227
19
226,127
def inject_long_nonspeech_fragments(self, pairs, replacement_string):
    """
    Insert NONSPEECH fragments for the given (interval, index) pairs:
    shrink the neighboring fragments to make room, append the new
    fragments (labelled with replacement_string or empty), then re-sort.
    """
    self.log(u"Called inject_long_nonspeech_fragments")
    # set the appropriate fragment text
    if replacement_string in [None, gc.PPV_TASK_ADJUST_BOUNDARY_NONSPEECH_REMOVE]:
        self.log(u"  Remove long nonspeech")
        lines = []
    else:
        self.log([u"  Replace long nonspeech with '%s'", replacement_string])
        lines = [replacement_string]
    # first, make room for the nonspeech intervals
    self.log(u"  First pass: making room...")
    for nsi, index in pairs:
        self[index].interval.end = nsi.begin
        self[index + 1].interval.begin = nsi.end
    self.log(u"  First pass: making room... done")
    self.log(u"  Second pass: append nonspeech intervals...")
    for i, (nsi, index) in enumerate(pairs, 1):
        self.add(
            SyncMapFragment(
                text_fragment=TextFragment(
                    identifier=u"n%06d" % i,
                    language=None,
                    lines=lines,
                    filtered_lines=lines
                ),
                interval=nsi,
                fragment_type=SyncMapFragment.NONSPEECH
            ),
            sort=False
        )
    self.log(u"  Second pass: append nonspeech intervals... done")
    self.log(u"  Third pass: sorting...")
    self.sort()
    self.log(u"  Third pass: sorting... done")
Inject nonspeech fragments corresponding to the given intervals in this fragment list .
367
16
226,128
def compute_accumulated_cost_matrix(self):
    """
    Set up the inner DTW object and return its accumulated cost
    matrix, or None when the DTW object cannot be created.
    """
    self._setup_dtw()
    if self.dtw is None:
        self.log(u"Inner self.dtw is None => returning None")
        return None
    self.log(u"Returning accumulated cost matrix")
    return self.dtw.compute_accumulated_cost_matrix()
Compute the accumulated cost matrix and return it .
84
10
226,129
def compute_path(self):
    """
    Compute the min-cost DTW path between the two waves and return it
    as a pair (real_indices, synt_indices) of numpy arrays expressed
    in full-wave MFCC indices; return None if DTW cannot be set up.
    """
    self._setup_dtw()
    if self.dtw is None:
        self.log(u"Inner self.dtw is None => returning None")
        return None
    self.log(u"Computing path...")
    wave_path = self.dtw.compute_path()
    self.log(u"Computing path... done")
    self.log(u"Translating path to full wave indices...")
    real_indices = numpy.array([t[0] for t in wave_path])
    synt_indices = numpy.array([t[1] for t in wave_path])
    if self.rconf.mmn:
        self.log(u"Translating real indices with masked_middle_map...")
        real_indices = self.real_wave_mfcc.masked_middle_map[real_indices]
        # force the first index to the start of the middle portion
        real_indices[0] = self.real_wave_mfcc.head_length
        self.log(u"Translating real indices with masked_middle_map... done")
        self.log(u"Translating synt indices with masked_middle_map...")
        synt_indices = self.synt_wave_mfcc.masked_middle_map[synt_indices]
        self.log(u"Translating synt indices with masked_middle_map... done")
    else:
        self.log(u"Translating real indices by adding head_length...")
        real_indices += self.real_wave_mfcc.head_length
        self.log(u"Translating real indices by adding head_length... done")
        self.log(u"Nothing to do with synt indices")
    self.log(u"Translating path to full wave indices... done")
    return (real_indices, synt_indices)
Compute the min cost path between the two waves and return it .
410
14
226,130
def compute_boundaries(self, synt_anchors):
    """
    Compute the min-cost path between the two waves and return a numpy
    array of boundary indices (full-wave MFCC frame indices), i.e. the
    argmin values with respect to the given synt_anchors timings.

    If the DTW object cannot be set up, return evenly-spaced
    artificial boundaries between middle_begin and tail_begin.
    """
    self._setup_dtw()
    if self.dtw is None:
        self.log(u"Inner self.dtw is None => returning artificial boundary indices")
        begin = self.real_wave_mfcc.middle_begin
        end = self.real_wave_mfcc.tail_begin
        n = len(synt_anchors)
        step = float(end - begin) / n
        boundary_indices = [begin + int(i * step) for i in range(n)] + [end]
        return numpy.array(boundary_indices)
    self.log(u"Computing path...")
    real_indices, synt_indices = self.compute_path()
    self.log(u"Computing path... done")
    self.log(u"Computing boundary indices...")
    # both real_indices and synt_indices are w.r.t. the full wave
    self.log([u"Fragments: %d", len(synt_anchors)])
    self.log([u"Path length: %d", len(real_indices)])
    # synt_anchors as in seconds, convert them in MFCC indices
    # see also issue #102
    mws = self.rconf.mws
    sample_rate = self.rconf.sample_rate
    samples_per_mws = mws * sample_rate
    # BUG FIX: the original tested "samples_per_mws.is_integer" without
    # parentheses; when the value is a plain float this is a bound method
    # and always truthy, so the drift work-around branch below was
    # unreachable. Comparing against int() is equivalent when is_integer
    # is a property (e.g. on a Decimal-based TimeValue) and correct for float.
    if samples_per_mws == int(samples_per_mws):
        anchor_indices = numpy.array([int(a[0] / mws) for a in synt_anchors])
    else:
        #
        # NOTE this is not elegant, but it saves the day for the user
        #
        self.log_warn(u"The number of samples in each window shift is not an integer, time drift might occur.")
        anchor_indices = numpy.array([(int(a[0] * sample_rate / mws) / sample_rate) for a in synt_anchors])
    #
    # right side sets the split point at the very beginning of "next" fragment
    #
    # NOTE clip() is needed since searchsorted() with side="right" might return
    #      an index == len(synt_indices) == len(real_indices)
    #      when the insertion point is past the last element of synt_indices,
    #      causing the fancy indexing real_indices[...] below to fail
    begin_indices = numpy.clip(
        numpy.searchsorted(synt_indices, anchor_indices, side="right"),
        0,
        len(synt_indices) - 1
    )
    # first split must occur at zero
    begin_indices[0] = 0
    #
    # map onto real indices, obtaining "default" boundary indices
    #
    # NOTE since len(synt_indices) == len(real_indices)
    #      and because of the numpy.clip() above, the fancy indexing is always valid
    #
    boundary_indices = numpy.append(real_indices[begin_indices], self.real_wave_mfcc.tail_begin)
    self.log([u"Boundary indices: %d", len(boundary_indices)])
    self.log(u"Computing boundary indices... done")
    return boundary_indices
Compute the min cost path between the two waves and return a list of boundary points representing the argmin values with respect to the provided synt_anchors timings .
720
34
226,131
def _setup_dtw(self):
    """
    Create the inner DTW object (self.dtw), if not already created.

    Chooses between the EXACT and STRIPE algorithms (forcing EXACT
    when the stripe delta covers the whole synt wave and the C
    extension is unavailable), and between masked/unmasked MFCCs.
    Sets self.dtw to None when either wave has no MFCC frames.
    Raises DTWAlignerNotInitialized (via log_exc) when the MFCC
    objects are missing.
    """
    # already set up: nothing to do
    if self.dtw is not None:
        return
    # we must have both AudioFileMFCC objects
    if (self.real_wave_mfcc is None) or (self.real_wave_mfcc.middle_mfcc is None):
        self.log_exc(u"The real wave MFCCs are not initialized", None, True, DTWAlignerNotInitialized)
    if (self.synt_wave_mfcc is None) or (self.synt_wave_mfcc.middle_mfcc is None):
        self.log_exc(u"The synt wave MFCCs are not initialized", None, True, DTWAlignerNotInitialized)
    # read configuration
    algorithm = self.rconf[RuntimeConfiguration.DTW_ALGORITHM]
    delta = int(2 * self.rconf.dtw_margin / self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT])
    mfcc2_length = self.synt_wave_mfcc.middle_length
    self.log([u"Requested algorithm: '%s'", algorithm])
    self.log([u"delta = %d", delta])
    self.log([u"m = %d", mfcc2_length])
    # when the stripe delta covers the whole synt wave, EXACT is equivalent
    if mfcc2_length <= delta:
        self.log(u"We have mfcc2_length <= delta")
        if (self.rconf[RuntimeConfiguration.C_EXTENSIONS]) and (gf.can_run_c_extension()):
            # the C code can be run: since it is still faster, do not run EXACT
            self.log(u"C extensions enabled and loaded: not selecting EXACT algorithm")
        else:
            self.log(u"Selecting EXACT algorithm")
            algorithm = DTWAlgorithm.EXACT
    # select masked or unmasked MFCCs
    if self.rconf.mmn:
        self.log(u"Using masked MFCC")
        real_mfcc = self.real_wave_mfcc.masked_middle_mfcc
        synt_mfcc = self.synt_wave_mfcc.masked_middle_mfcc
    else:
        self.log(u"Using unmasked MFCC")
        real_mfcc = self.real_wave_mfcc.middle_mfcc
        synt_mfcc = self.synt_wave_mfcc.middle_mfcc
    n = real_mfcc.shape[1]
    m = synt_mfcc.shape[1]
    self.log([u"  Number of MFCC frames in real wave: %d", n])
    self.log([u"  Number of MFCC frames in synt wave: %d", m])
    if (n == 0) or (m == 0):
        # nothing to align
        self.log(u"Setting self.dtw to None")
        self.dtw = None
    elif algorithm == DTWAlgorithm.EXACT:
        self.log(u"Computing with EXACT algo")
        self.dtw = DTWExact(m1=real_mfcc, m2=synt_mfcc, rconf=self.rconf, logger=self.logger)
    else:
        self.log(u"Computing with STRIPE algo")
        self.dtw = DTWStripe(m1=real_mfcc, m2=synt_mfcc, delta=delta, rconf=self.rconf, logger=self.logger)
Set the DTW object up .
813
7
226,132
def check_import():
    """
    Try to import the aeneas package.
    Return True if that fails, False on success.
    """
    try:
        import aeneas
        print_success(u"aeneas OK")
        return False
    except ImportError:
        print_error(u"aeneas ERROR")
        print_info(u"  Unable to load the aeneas Python package")
        print_info(u"  This error is probably caused by:")
        print_info(u"  A. you did not download/git-clone the aeneas package properly; or")
        print_info(u"  B. you did not install the required Python packages:")
        print_info(u"  1. BeautifulSoup4")
        print_info(u"  2. lxml")
        print_info(u"  3. numpy")
    except Exception as e:
        print_error(e)
    return True
Try to import the aeneas package and return True if that fails .
169
15
226,133
def main():
    """
    Entry point: verify the aeneas package imports, run the built-in
    diagnostics, and exit with 0 (all OK), 1 (import/dependency
    errors), or 2 (C extensions unavailable).
    """
    # first, check we can import aeneas package, exiting on failure
    if check_import():
        sys.exit(1)
    # import and run the built-in diagnostics
    from aeneas.diagnostics import Diagnostics
    errors, warnings, c_ext_warnings = Diagnostics.check_all()
    if errors:
        sys.exit(1)
    if c_ext_warnings:
        print_warning(u"All required dependencies are met but at least one Python C extension is not available")
        print_warning(u"You can still run aeneas but it will be slower")
        print_warning(u"Enjoy running aeneas!")
        sys.exit(2)
    print_success(u"All required dependencies are met and all available Python C extensions are working")
    print_success(u"Enjoy running aeneas!")
    sys.exit(0)
The entry point for this module
193
6
226,134
def is_pleasant(self):
    """
    Return True if all the leaves of the subtree rooted at this node
    sit at the same level.
    """
    leaf_levels = sorted(n.level for n in self.leaves)
    return leaf_levels[0] == leaf_levels[-1]
Return True if all the leaves in the subtree rooted at this node are at the same level .
35
20
226,135
def add_child(self, node, as_last=True):
    """
    Attach node as a child of this node (appended last by default,
    otherwise prepended), reparenting it and shifting the levels of
    its whole subtree. Raises TypeError (via log_exc) if node is not
    a Tree.
    """
    if not isinstance(node, Tree):
        self.log_exc(u"node is not an instance of Tree", None, True, TypeError)
    if as_last:
        self.__children.append(node)
    else:
        self.__children.insert(0, node)
    node.__parent = self
    # the attached subtree had levels relative to node being a root
    # (level 0): push every node down by (self.level + 1)
    level_shift = 1 + self.level
    for descendant in node.subtree:
        descendant.__level += level_shift
Add the given child to the current list of children .
110
11
226,136
def remove_child(self, index):
    """
    Remove the child at the given index from the list of children;
    negative indices count from the end.
    """
    if index < 0:
        index += len(self)
    # splice the child out, keeping the order of the others
    self.__children = self.__children[:index] + self.__children[(index + 1):]
Remove the child at the given index from the current list of children .
50
14
226,137
def remove(self):
    """
    Detach this node from its current parent's children;
    do nothing when it has no parent.
    """
    if self.parent is None:
        return
    for i, sibling in enumerate(self.parent.children):
        # compare by identity, not equality
        if sibling is self:
            self.parent.remove_child(i)
            self.parent = None
            break
Remove this node from the list of children of its current parent if the current parent is not None otherwise do nothing .
56
23
226,138
def remove_children(self, reset_parent=True):
    """
    Drop all the children of this node; when reset_parent is True,
    also clear each child's parent pointer.
    """
    if reset_parent:
        for child in self.children:
            child.parent = None
    self.__children = []
Remove all the children of this node .
38
8
226,139
def leaves_not_empty(self):
    """
    Return the non-empty leaves of the tree rooted at this node,
    in DFS order.
    """
    return [n for n in self.dfs if n.is_leaf and not n.is_empty]
Return the list of leaves not empty in the tree rooted at this node in DFS order .
40
19
226,140
def height(self):
    """Return the height of this subtree: 1 for a single node, 2 for two levels, etc."""
    deepest = max(n.level for n in self.subtree)
    return deepest - self.level + 1
Return the height of the tree rooted at this node that is the difference between the level of a deepest leaf and the level of this node . Return 1 for a single - node tree 2 for a two - levels tree etc .
28
45
226,141
def levels(self):
    """Return the nodes grouped by depth: one inner list per level, DFS order."""
    buckets = [[] for _ in range(self.height)]
    base = self.level
    for node in self.subtree:
        buckets[node.level - base].append(node)
    return buckets
Return a list of lists of nodes . The outer list is indexed by the level . Each inner list contains the nodes at that level in DFS order .
46
31
226,142
def level_at_index(self, index):
    """Return the list of nodes at the given level index, in DFS order."""
    if not isinstance(index, int):
        self.log_exc(u"Index is not an integer", None, True, TypeError)
    known = self.levels
    if (index < 0) or (index >= len(known)):
        self.log_exc(u"The given level index '%d' is not valid" % (index), None, True, ValueError)
    return self.levels[index]
Return the list of nodes at level index in DFS order .
104
13
226,143
def ancestor(self, index):
    """Return the index-th ancestor (self for 0); None once past the root."""
    if not isinstance(index, int):
        self.log_exc(u"index is not an integer", None, True, TypeError)
    if index < 0:
        self.log_exc(u"index cannot be negative", None, True, ValueError)
    current = self
    for _ in range(index):
        if current is None:
            break
        current = current.parent
    return current
Return the index - th ancestor .
100
7
226,144
def keep_levels(self, level_indices):
    """
    Rearrange the tree rooted at this node so that only the given
    levels are kept (level 0, the root, is always kept).

    :param level_indices: list of int level indices to keep
    :raises TypeError: if ``level_indices`` is not a list of int
    """
    if not isinstance(level_indices, list):
        self.log_exc(u"level_indices is not an instance of list", None, True, TypeError)
    for l in level_indices:
        if not isinstance(l, int):
            self.log_exc(u"level_indices contains an element not int", None, True, TypeError)
    prev_levels = self.levels
    level_indices = set(level_indices)
    if 0 not in level_indices:
        level_indices.add(0)
    # discard out-of-range indices, then process deepest level first
    level_indices = level_indices & set(range(self.height))
    level_indices = sorted(level_indices)[::-1]
    # first, remove children
    for l in level_indices:
        for node in prev_levels[l]:
            node.remove_children(reset_parent=False)
    # then, connect to the right new parent
    for i in range(len(level_indices) - 1):
        l = level_indices[i]
        for node in prev_levels[l]:
            parent_node = node.ancestor(l - level_indices[i + 1])
            parent_node.add_child(node)
Rearrange the tree rooted at this node to keep only the given levels .
280
16
226,145
def s2dctmat(nfilt, ncep, freqstep):
    """Return the legacy (not-quite-DCT) cosine matrix used by Sphinx.

    NOTE(review): ``freqstep`` is accepted for API compatibility but is not
    used; the angular step is derived from ``nfilt`` — confirm against callers.
    """
    centers = numpy.arange(0.5, float(nfilt) + 0.5, 1.0, 'double')
    melcos = numpy.empty((ncep, nfilt), 'double')
    for row in range(0, ncep):
        angular = numpy.pi * float(row) / nfilt
        melcos[row] = numpy.cos(angular * centers)
    # halve the DC column, per the Sphinx convention
    melcos[:, 0] *= 0.5
    return melcos
Return the legacy not - quite - DCT matrix used by Sphinx
136
14
226,146
def logspec2s2mfc(logspec, ncep=13):
    """Convert log-power-spectrum frames to MFCC via the legacy Sphinx transform."""
    nframes, nfilt = logspec.shape
    cosines = s2dctmat(nfilt, ncep, 1. / nfilt)
    projected = numpy.dot(logspec, cosines.T)
    return projected / nfilt
Convert log - power - spectrum bins to MFCC using the legacy Sphinx transform
72
17
226,147
def dct(input, K=13):
    """Convert log-power-spectrum frames to MFCC using the orthogonal DCT-II."""
    nframes, N = input.shape
    freqstep = numpy.pi / N
    basis = dctmat(N, K, freqstep)
    scale = numpy.sqrt(2.0 / N)
    return numpy.dot(input, basis) * scale
Convert log - power - spectrum to MFCC using the orthogonal DCT - II
68
19
226,148
def dct2(input, K=13):
    """Convert log-power-spectrum frames to MFCC using the normalized DCT-II."""
    nframes, N = input.shape
    freqstep = numpy.pi / N
    basis = dctmat(N, K, freqstep, False)
    scale = 2.0 / N
    return numpy.dot(input, basis) * scale
Convert log - power - spectrum to MFCC using the normalized DCT - II
66
17
226,149
def read_properties ( self ) : self . log ( u"Reading properties..." ) # check the file can be read if not gf . file_can_be_read ( self . file_path ) : self . log_exc ( u"File '%s' cannot be read" % ( self . file_path ) , None , True , OSError ) # get the file size self . log ( [ u"Getting file size for '%s'" , self . file_path ] ) self . file_size = gf . file_size ( self . file_path ) self . log ( [ u"File size for '%s' is '%d'" , self . file_path , self . file_size ] ) # get the audio properties using FFPROBEWrapper try : self . log ( u"Reading properties with FFPROBEWrapper..." ) properties = FFPROBEWrapper ( rconf = self . rconf , logger = self . logger ) . read_properties ( self . file_path ) self . log ( u"Reading properties with FFPROBEWrapper... done" ) except FFPROBEPathError : self . log_exc ( u"Unable to call ffprobe executable" , None , True , AudioFileProbeError ) except ( FFPROBEUnsupportedFormatError , FFPROBEParsingError ) : self . log_exc ( u"Audio file format not supported by ffprobe" , None , True , AudioFileUnsupportedFormatError ) # save relevant properties in results inside the audiofile object self . audio_length = TimeValue ( properties [ FFPROBEWrapper . STDOUT_DURATION ] ) self . audio_format = properties [ FFPROBEWrapper . STDOUT_CODEC_NAME ] self . audio_sample_rate = gf . safe_int ( properties [ FFPROBEWrapper . STDOUT_SAMPLE_RATE ] ) self . audio_channels = gf . safe_int ( properties [ FFPROBEWrapper . STDOUT_CHANNELS ] ) self . log ( [ u"Stored audio_length: '%s'" , self . audio_length ] ) self . log ( [ u"Stored audio_format: '%s'" , self . audio_format ] ) self . log ( [ u"Stored audio_sample_rate: '%s'" , self . audio_sample_rate ] ) self . log ( [ u"Stored audio_channels: '%s'" , self . audio_channels ] ) self . log ( u"Reading properties... done" )
Populate this object by reading the audio properties of the file at the given path .
574
17
226,150
def preallocate_memory(self, capacity):
    """
    Preallocate memory to store audio samples, so repeated ``add_samples``
    calls do not each trigger a new allocation and copy.

    :param capacity: the new capacity, in number of samples
    :raises ValueError: if ``capacity`` is negative
    """
    if capacity < 0:
        raise ValueError(u"The capacity value cannot be negative")
    if self.__samples is None:
        # no buffer yet: allocate a fresh zeroed one
        self.log(u"Not initialized")
        self.__samples = numpy.zeros(capacity)
        self.__samples_length = 0
    else:
        # existing buffer: resize, truncating the length if shrinking
        self.log([u"Previous sample length was (samples): %d", self.__samples_length])
        self.log([u"Previous sample capacity was (samples): %d", self.__samples_capacity])
        self.__samples = numpy.resize(self.__samples, capacity)
        self.__samples_length = min(self.__samples_length, capacity)
    self.__samples_capacity = capacity
    self.log([u"Current sample capacity is (samples): %d", self.__samples_capacity])
Preallocate memory to store audio samples to avoid repeated new allocations and copies while performing several consecutive append operations .
207
22
226,151
def minimize_memory(self):
    """Shrink the sample buffer to exactly the number of stored samples."""
    if self.__samples is None:
        self.log(u"Not initialized, returning")
        return
    self.log(u"Initialized, minimizing memory...")
    self.preallocate_memory(self.__samples_length)
    self.log(u"Initialized, minimizing memory... done")
Reduce the allocated memory to the minimum required to store the current audio samples .
73
16
226,152
def add_samples(self, samples, reverse=False):
    """Append the given samples (optionally reversed) to the audio data."""
    self.log(u"Adding samples...")
    incoming = len(samples)
    old_end = self.__samples_length
    new_end = old_end + incoming
    grow_needed = (self.__samples is None) or (self.__samples_capacity < new_end)
    if grow_needed:
        # double the needed size to amortize repeated appends
        self.preallocate_memory(2 * new_end)
    chunk = samples[::-1] if reverse else samples[:]
    self.__samples[old_end:new_end] = chunk
    self.__samples_length = new_end
    self._update_length()
    self.log(u"Adding samples... done")
Concatenate the given new samples to the current audio data .
171
14
226,153
def reverse(self):
    """Reverse, in place, the currently loaded audio samples."""
    if self.__samples is None:
        # lazily load samples from file, or fail when there is nothing to load
        if self.file_path is None:
            self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
        else:
            self.read_samples_from_file()
    self.log(u"Reversing...")
    used = self.__samples_length
    self.__samples[0:used] = numpy.flipud(self.__samples[0:used])
    self.log(u"Reversing... done")
Reverse the audio data .
127
7
226,154
def trim(self, begin=None, length=None):
    """
    Keep only the slice of audio data of ``length`` seconds,
    starting at ``begin`` seconds, discarding the rest.

    :param begin: start time, clamped to [0, audio_length]; 0 if None
    :param length: slice duration, clamped to the remaining audio; to the end if None
    :raises TypeError: if ``begin`` or ``length`` is neither None nor a TimeValue
    """
    for variable, name in [(begin, "begin"), (length, "length")]:
        if (variable is not None) and (not isinstance(variable, TimeValue)):
            raise TypeError(u"%s is not None or TimeValue" % name)
    self.log(u"Trimming...")
    if (begin is None) and (length is None):
        self.log(u"begin and length are both None: nothing to do")
    else:
        if begin is None:
            begin = TimeValue("0.000")
            self.log([u"begin was None, now set to %.3f", begin])
        begin = min(max(TimeValue("0.000"), begin), self.audio_length)
        self.log([u"begin is %.3f", begin])
        if length is None:
            length = self.audio_length - begin
            self.log([u"length was None, now set to %.3f", length])
        length = min(max(TimeValue("0.000"), length), self.audio_length - begin)
        self.log([u"length is %.3f", length])
        # convert seconds to sample indices and shift the kept slice to the front
        begin_index = int(begin * self.audio_sample_rate)
        end_index = int((begin + length) * self.audio_sample_rate)
        new_idx = end_index - begin_index
        self.__samples[0:new_idx] = self.__samples[begin_index:end_index]
        self.__samples_length = new_idx
        self._update_length()
    self.log(u"Trimming... done")
Get a slice of the audio data of length seconds starting from begin seconds .
390
15
226,155
def write(self, file_path):
    """
    Write the audio data as a 16-bit WAV file at the given path.

    :param file_path: the path of the output file
    :raises AudioFileNotInitializedError: if no samples are loaded and no source file path is set
    :raises OSError: if the file cannot be written
    """
    if self.__samples is None:
        if self.file_path is None:
            self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
        else:
            self.read_samples_from_file()
    self.log([u"Writing audio file '%s'...", file_path])
    try:
        # our value is a float64 in [-1, 1]
        # scipy writes the sample as an int16_t, that is, a number in [-32768, 32767]
        # NOTE(review): a sample of exactly +1.0 maps to 32768, which wraps in int16 — confirm inputs stay below 1.0
        data = (self.audio_samples * 32768).astype("int16")
        scipywavwrite(file_path, self.audio_sample_rate, data)
    except Exception as exc:
        self.log_exc(u"Error writing audio file to '%s'" % (file_path), exc, True, OSError)
    self.log([u"Writing audio file '%s'... done", file_path])
Write the audio data to file . Return True on success or False otherwise .
233
15
226,156
def clear_data(self):
    """Drop the sample buffer and reset the capacity/length bookkeeping."""
    self.log(u"Clear audio_data")
    self.__samples = None
    self.__samples_length = 0
    self.__samples_capacity = 0
Clear the audio data freeing memory .
45
7
226,157
def _update_length(self):
    """Recompute audio_length (seconds) from the sample count and sample rate."""
    rate = self.audio_sample_rate
    if (rate is None) or (self.__samples is None):
        return
    # NOTE computing TimeValue(... / ...) yields wrong results,
    # see issue #168
    self.audio_length = TimeValue(self.__samples_length) / TimeValue(rate)
Update the audio length property according to the length of the current audio data and audio sample rate .
108
19
226,158
def masked_middle_mfcc(self):
    """Return the speech MFCC frames restricted to the MIDDLE portion."""
    first, last = self._masked_middle_begin_end()
    return (self.masked_mfcc)[:, first:last]
Return the MFCC speech frames in the MIDDLE portion of the wave .
46
16
226,159
def masked_middle_map(self):
    """Map MIDDLE speech-frame indices back to FULL MFCC frame indices."""
    first, last = self._masked_middle_begin_end()
    return self.__mfcc_mask_map[first:last]
Return the map from the MFCC speech frame indices in the MIDDLE portion of the wave to the MFCC FULL frame indices .
43
27
226,160
def _binary_search_intervals ( cls , intervals , index ) : start = 0 end = len ( intervals ) - 1 while start <= end : middle_index = start + ( ( end - start ) // 2 ) middle = intervals [ middle_index ] if ( middle [ 0 ] <= index ) and ( index < middle [ 1 ] ) : return middle elif middle [ 0 ] > index : end = middle_index - 1 else : start = middle_index + 1 return None
Binary search for the interval containing index assuming there is such an interval . This function should never return None .
104
22
226,161
def middle_begin(self, index):
    """Set the frame index where the MIDDLE portion starts."""
    if not (0 <= index <= self.all_length):
        raise ValueError(u"The given index is not valid")
    self.__middle_begin = index
Set the index where MIDDLE starts .
49
9
226,162
def _compute_mfcc_c_extension(self):
    """
    Compute MFCCs using the Python C extension ``cmfcc``.

    :rtype: (bool, None) -- (True, None) on success, (False, None) on failure
    """
    self.log(u"Computing MFCCs using C extension...")
    try:
        self.log(u"Importing cmfcc...")
        import aeneas.cmfcc.cmfcc
        self.log(u"Importing cmfcc... done")
        # cmfcc returns a tuple; element [0] is the MFCC matrix, stored transposed
        self.__mfcc = (aeneas.cmfcc.cmfcc.compute_from_data(
            self.audio_file.audio_samples,
            self.audio_file.audio_sample_rate,
            self.rconf[RuntimeConfiguration.MFCC_FILTERS],
            self.rconf[RuntimeConfiguration.MFCC_SIZE],
            self.rconf[RuntimeConfiguration.MFCC_FFT_ORDER],
            self.rconf[RuntimeConfiguration.MFCC_LOWER_FREQUENCY],
            self.rconf[RuntimeConfiguration.MFCC_UPPER_FREQUENCY],
            self.rconf[RuntimeConfiguration.MFCC_EMPHASIS_FACTOR],
            self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH],
            self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT]
        )[0]).transpose()
        self.log(u"Computing MFCCs using C extension... done")
        return (True, None)
    except Exception as exc:
        # best-effort: the caller can fall back to the pure Python implementation
        self.log_exc(u"An unexpected error occurred while running cmfcc", exc, False, None)
        return (False, None)
Compute MFCCs using the Python C extension cmfcc .
338
14
226,163
def _compute_mfcc_pure_python(self):
    """Compute MFCCs with the pure-Python MFCC implementation."""
    self.log(u"Computing MFCCs using pure Python code...")
    try:
        extractor = MFCC(rconf=self.rconf, logger=self.logger)
        matrix = extractor.compute_from_data(
            self.audio_file.audio_samples,
            self.audio_file.audio_sample_rate
        )
        self.__mfcc = matrix.transpose()
        self.log(u"Computing MFCCs using pure Python code... done")
        return (True, None)
    except Exception as exc:
        self.log_exc(u"An unexpected error occurred while running pure Python code", exc, False, None)
        return (False, None)
Compute MFCCs using the pure Python code .
155
11
226,164
def reverse(self):
    """
    Reverse the MFCC data (and, if computed, the VAD mask, its index
    map, and the speech/nonspeech intervals) in place, toggling
    ``is_reversed``.
    """
    self.log(u"Reversing...")
    all_length = self.all_length
    # reverse the frames (columns) of the MFCC matrix
    self.__mfcc = self.__mfcc[:, ::-1]
    # mirror the MIDDLE window around the full length
    tmp = self.__middle_end
    self.__middle_end = all_length - self.__middle_begin
    self.__middle_begin = all_length - tmp
    if self.__mfcc_mask is not None:
        self.__mfcc_mask = self.__mfcc_mask[::-1]
        # equivalent to
        # self.__mfcc_mask_map = ((all_length - 1) - self.__mfcc_mask_map)[::-1]
        # but done in place using NumPy view
        self.__mfcc_mask_map *= -1
        self.__mfcc_mask_map += all_length - 1
        self.__mfcc_mask_map = self.__mfcc_mask_map[::-1]
        # intervals are mirrored and re-sorted by reversing the list order
        self.__speech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__speech_intervals[::-1]]
        self.__nonspeech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__nonspeech_intervals[::-1]]
    self.is_reversed = not self.is_reversed
    self.log(u"Reversing...done")
Reverse the audio file .
349
7
226,165
def run_vad(self, log_energy_threshold=None, min_nonspeech_length=None, extend_before=None, extend_after=None):
    """
    Determine which MFCC frames contain speech and which do not,
    storing internally the boolean mask, its index map, and the
    speech / nonspeech interval lists.
    """
    def _compute_runs(array):
        """
        Compute runs as a list of arrays, each containing the indices of a contiguous run.

        :param array: the data array
        :type array: :class:`numpy.ndarray` (1D)
        :rtype: list of :class:`numpy.ndarray` (1D)
        """
        if len(array) < 1:
            return []
        return numpy.split(array, numpy.where(numpy.diff(array) != 1)[0] + 1)
    self.log(u"Creating VAD object")
    vad = VAD(rconf=self.rconf, logger=self.logger)
    self.log(u"Running VAD...")
    # the first MFCC coefficient row is the frame energy
    self.__mfcc_mask = vad.run_vad(
        wave_energy=self.__mfcc[0],
        log_energy_threshold=log_energy_threshold,
        min_nonspeech_length=min_nonspeech_length,
        extend_before=extend_before,
        extend_after=extend_after
    )
    self.__mfcc_mask_map = (numpy.where(self.__mfcc_mask))[0]
    self.log(u"Running VAD... done")
    self.log(u"Storing speech and nonspeech intervals...")
    # where( == True) already computed, reusing
    # COMMENTED runs = _compute_runs((numpy.where(self.__mfcc_mask))[0])
    runs = _compute_runs(self.__mfcc_mask_map)
    self.__speech_intervals = [(r[0], r[-1]) for r in runs]
    # where( == False) not already computed, computing now
    runs = _compute_runs((numpy.where(~self.__mfcc_mask))[0])
    self.__nonspeech_intervals = [(r[0], r[-1]) for r in runs]
    self.log(u"Storing speech and nonspeech intervals... done")
Determine which frames contain speech and nonspeech and store the resulting boolean mask internally .
510
19
226,166
def set_head_middle_tail(self, head_length=None, middle_length=None, tail_length=None):
    """
    Set the HEAD, MIDDLE, TAIL portions explicitly; lengths are given
    in seconds and converted to MFCC frame indices via the MFCC
    window shift.

    :raises TypeError: if an argument is neither None nor a TimeValue
    :raises ValueError: if an argument exceeds the audio length
    """
    for variable, name in [(head_length, "head_length"), (middle_length, "middle_length"), (tail_length, "tail_length")]:
        if (variable is not None) and (not isinstance(variable, TimeValue)):
            raise TypeError(u"%s is not None or TimeValue" % name)
        if (variable is not None) and (variable > self.audio_length):
            raise ValueError(u"%s is greater than the length of the audio file" % name)
    self.log(u"Setting head middle tail...")
    mws = self.rconf.mws
    self.log([u"Before: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    if head_length is not None:
        self.middle_begin = int(head_length / mws)
    # middle_length takes precedence over tail_length
    if middle_length is not None:
        self.middle_end = self.middle_begin + int(middle_length / mws)
    elif tail_length is not None:
        self.middle_end = self.all_length - int(tail_length / mws)
    self.log([u"After: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    self.log(u"Setting head middle tail... done")
Set the HEAD MIDDLE TAIL explicitly .
342
10
226,167
def chars(self):
    """Return the character count over all lines, line separators excluded."""
    if self.lines is None:
        return 0
    return sum(len(single_line) for single_line in self.lines)
Return the number of characters of the text fragment not including the line separators .
31
16
226,168
def children_not_empty(self):
    """Return the non-empty direct children of the fragments tree as TextFile objects."""
    result = []
    for child_node in self.fragments_tree.children_not_empty:
        subtree_file = self.get_subtree(child_node)
        subtree_file.set_language(child_node.value.language)
        result.append(subtree_file)
    return result
Return the direct not empty children of the root of the fragments tree as TextFile objects .
78
18
226,169
def characters(self):
    """Return the total number of characters across all fragments."""
    return sum(fragment.characters for fragment in self.fragments)
The number of characters in this text file .
23
9
226,170
def add_fragment(self, fragment, as_last=True):
    """Attach fragment as the first or last child of the fragments tree root."""
    if not isinstance(fragment, TextFragment):
        self.log_exc(u"fragment is not an instance of TextFragment", None, True, TypeError)
    wrapped = Tree(value=fragment)
    self.fragments_tree.add_child(wrapped, as_last=as_last)
Add the given text fragment as the first or last child of the root node of the text file tree .
83
21
226,171
def set_language(self, language):
    """Assign the given language to every text fragment."""
    self.log([u"Setting language: '%s'", language])
    for frag in self.fragments:
        frag.language = language
Set the given language for all the text fragments .
40
10
226,172
def _read_from_file(self):
    """
    Read text fragments from ``self.file_path``, dispatching on
    ``self.file_format`` to the matching ``_read_*`` parser.

    :raises OSError: if the file cannot be read
    :raises ValueError: if the text file format is not supported
    """
    # test if we can read the given file
    if not gf.file_can_be_read(self.file_path):
        self.log_exc(u"File '%s' cannot be read" % (self.file_path), None, True, OSError)
    if self.file_format not in TextFileFormat.ALLOWED_VALUES:
        self.log_exc(u"Text file format '%s' is not supported." % (self.file_format), None, True, ValueError)
    # read the contents of the file
    self.log([u"Reading contents of file '%s'", self.file_path])
    with io.open(self.file_path, "r", encoding="utf-8") as text_file:
        lines = text_file.readlines()
    # clear text fragments
    self.clear()
    # parse the contents
    map_read_function = {
        TextFileFormat.MPLAIN: self._read_mplain,
        TextFileFormat.MUNPARSED: self._read_munparsed,
        TextFileFormat.PARSED: self._read_parsed,
        TextFileFormat.PLAIN: self._read_plain,
        TextFileFormat.SUBTITLES: self._read_subtitles,
        TextFileFormat.UNPARSED: self._read_unparsed
    }
    map_read_function[self.file_format](lines)
    # log the number of fragments
    self.log([u"Parsed %d fragments", len(self.fragments)])
Read text fragments from file .
364
6
226,173
def _mplain_word_separator(self):
    """Resolve the configured word separator for the mplain format."""
    configured = gf.safe_get(self.parameters, gc.PPN_TASK_IS_TEXT_MPLAIN_WORD_SEPARATOR, u" ")
    if (configured is None) or (configured == "space"):
        return u" "
    named = {
        "equal": u"=",
        "pipe": u"|",
        "tab": u"\u0009",
    }
    return named.get(configured, configured)
Get the word separator to split words in mplain format .
138
13
226,174
def _read_mplain(self, lines):
    """
    Read text fragments from an mplain (multilevel plain) text file,
    building a paragraph > sentence > word fragments tree, where
    paragraphs are blank-line-separated blocks of sentence lines.

    :param lines: the lines read from the input file
    """
    self.log(u"Parsing fragments from subtitles text format")
    word_separator = self._mplain_word_separator()
    self.log([u"Word separator is: '%s'", word_separator])
    lines = [line.strip() for line in lines]
    pairs = []  # NOTE(review): unused here — left over from the flat-format parsers
    i = 1
    current = 0
    tree = Tree()
    while current < len(lines):
        line_text = lines[current]
        if len(line_text) > 0:
            # accumulate the contiguous non-blank lines: one paragraph
            sentences = [line_text]
            following = current + 1
            while (following < len(lines)) and (len(lines[following]) > 0):
                sentences.append(lines[following])
                following += 1
            # here sentences holds the sentences for this paragraph
            # create paragraph node
            paragraph_identifier = u"p%06d" % i
            paragraph_lines = [u" ".join(sentences)]
            paragraph_fragment = TextFragment(
                identifier=paragraph_identifier,
                lines=paragraph_lines,
                filtered_lines=paragraph_lines
            )
            paragraph_node = Tree(value=paragraph_fragment)
            tree.add_child(paragraph_node)
            self.log([u"Paragraph %s", paragraph_identifier])
            # create sentences nodes
            j = 1
            for s in sentences:
                sentence_identifier = paragraph_identifier + u"s%06d" % j
                sentence_lines = [s]
                sentence_fragment = TextFragment(
                    identifier=sentence_identifier,
                    lines=sentence_lines,
                    filtered_lines=sentence_lines
                )
                sentence_node = Tree(value=sentence_fragment)
                paragraph_node.add_child(sentence_node)
                j += 1
                self.log([u" Sentence %s", sentence_identifier])
                # create words nodes
                k = 1
                for w in [w for w in s.split(word_separator) if len(w) > 0]:
                    word_identifier = sentence_identifier + u"w%06d" % k
                    word_lines = [w]
                    word_fragment = TextFragment(
                        identifier=word_identifier,
                        lines=word_lines,
                        filtered_lines=word_lines
                    )
                    word_node = Tree(value=word_fragment)
                    sentence_node.add_child(word_node)
                    k += 1
                    self.log([u" Word %s", word_identifier])
            # keep iterating
            current = following
            i += 1
        current += 1
    self.log(u"Storing tree")
    self.fragments_tree = tree
Read text fragments from a multilevel format text file .
568
12
226,175
def _read_subtitles ( self , lines ) : self . log ( u"Parsing fragments from subtitles text format" ) id_format = self . _get_id_format ( ) lines = [ line . strip ( ) for line in lines ] pairs = [ ] i = 1 current = 0 while current < len ( lines ) : line_text = lines [ current ] if len ( line_text ) > 0 : fragment_lines = [ line_text ] following = current + 1 while ( following < len ( lines ) ) and ( len ( lines [ following ] ) > 0 ) : fragment_lines . append ( lines [ following ] ) following += 1 identifier = id_format % i pairs . append ( ( identifier , fragment_lines ) ) current = following i += 1 current += 1 self . _create_text_fragments ( pairs )
Read text fragments from a subtitles format text file .
183
10
226,176
def _read_parsed(self, lines):
    """Read id/text fragments from a parsed-format text file (one pair per line)."""
    self.log(u"Parsing fragments from parsed text format")
    pairs = []
    for raw_line in lines:
        pieces = raw_line.split(gc.PARSED_TEXT_SEPARATOR)
        if len(pieces) != 2:
            # malformed line: silently skipped, as in the original parser
            continue
        identifier = pieces[0].strip()
        text = pieces[1].strip()
        if len(identifier) > 0:
            pairs.append((identifier, [text]))
    self._create_text_fragments(pairs)
Read text fragments from a parsed format text file .
118
10
226,177
def _read_plain ( self , lines ) : self . log ( u"Parsing fragments from plain text format" ) id_format = self . _get_id_format ( ) lines = [ line . strip ( ) for line in lines ] pairs = [ ] i = 1 for line in lines : identifier = id_format % i text = line . strip ( ) pairs . append ( ( identifier , [ text ] ) ) i += 1 self . _create_text_fragments ( pairs )
Read text fragments from a plain format text file .
108
10
226,178
def _read_unparsed(self, lines):
    """
    Read text fragments from an unparsed (raw HTML/XHTML) text file,
    selecting elements by the configured id/class regexes and sorting
    them with the configured id sorting algorithm.

    :param lines: the lines read from the input file
    """
    from bs4 import BeautifulSoup

    def filter_attributes():
        """ Return a dict with the bs4 filter parameters """
        attributes = {}
        for attribute_name, filter_name in [
            ("class", gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX),
            ("id", gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX)
        ]:
            if filter_name in self.parameters:
                regex_string = self.parameters[filter_name]
                if regex_string is not None:
                    self.log([u"Regex for %s: '%s'", attribute_name, regex_string])
                    regex = re.compile(r".*\b" + regex_string + r"\b.*")
                    attributes[attribute_name] = regex
        return attributes
    #
    # TODO better and/or parametric parsing,
    # for example, removing tags but keeping text, etc.
    #
    self.log(u"Parsing fragments from unparsed text format")
    # transform text in a soup object
    self.log(u"Creating soup")
    soup = BeautifulSoup("\n".join(lines), "lxml")
    # extract according to class_regex and id_regex
    text_from_id = {}
    ids = []
    # NOTE(review): this rebinding shadows the helper function defined above
    filter_attributes = filter_attributes()
    self.log([u"Finding elements matching attributes '%s'", filter_attributes])
    nodes = soup.findAll(attrs=filter_attributes)
    for node in nodes:
        try:
            f_id = gf.safe_unicode(node["id"])
            f_text = gf.safe_unicode(node.text)
            text_from_id[f_id] = f_text
            ids.append(f_id)
        except KeyError:
            # element without an id attribute: skipped
            self.log_warn(u"KeyError while parsing a node")
    # sort by ID as requested
    id_sort = gf.safe_get(
        dictionary=self.parameters,
        key=gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT,
        default_value=IDSortingAlgorithm.UNSORTED,
        can_return_none=False
    )
    self.log([u"Sorting text fragments using '%s'", id_sort])
    sorted_ids = IDSortingAlgorithm(id_sort).sort(ids)
    # append to fragments
    self.log(u"Appending fragments")
    self._create_text_fragments([(key, [text_from_id[key]]) for key in sorted_ids])
Read text fragments from an unparsed format text file .
609
12
226,179
def _get_id_format(self):
    """Return the fragment id format string, validated against %-formatting."""
    id_format = gf.safe_get(self.parameters, gc.PPN_TASK_OS_FILE_ID_REGEX, self.DEFAULT_ID_FORMAT, can_return_none=False)
    try:
        # probe the format string with a dummy value
        id_format % 1
    except (TypeError, ValueError) as exc:
        self.log_exc(u"String '%s' is not a valid id format" % (id_format), exc, True, ValueError)
    return id_format
Return the id regex from the parameters
121
7
226,180
def _create_text_fragments(self, pairs):
    """Build a TextFragment per (identifier, lines) pair and append it."""
    self.log(u"Creating TextFragment objects")
    text_filter = self._build_text_filter()
    for identifier, fragment_lines in pairs:
        fragment = TextFragment(
            identifier=identifier,
            lines=fragment_lines,
            filtered_lines=text_filter.apply_filter(fragment_lines)
        )
        self.add_fragment(fragment)
Create text fragment objects and append them to this list .
92
11
226,181
def _build_text_filter(self):
    """
    Build a composite TextFilter from the configured ignore-regex and
    transliteration-map parameters; a filter that fails to construct
    is skipped (best-effort).
    """
    text_filter = TextFilter(logger=self.logger)
    self.log(u"Created TextFilter object")
    for key, cls, param_name in [
        (gc.PPN_TASK_IS_TEXT_FILE_IGNORE_REGEX, TextFilterIgnoreRegex, "regex"),
        (gc.PPN_TASK_IS_TEXT_FILE_TRANSLITERATE_MAP, TextFilterTransliterate, "map_file_path")
    ]:
        cls_name = cls.__name__
        param_value = gf.safe_get(self.parameters, key, None)
        if param_value is not None:
            self.log([u"Creating %s object...", cls_name])
            params = {
                param_name: param_value,
                "logger": self.logger
            }
            try:
                inner_filter = cls(**params)
                text_filter.add_filter(inner_filter)
                self.log([u"Creating %s object... done", cls_name])
            except ValueError as exc:
                # non-fatal: log and continue without this filter
                self.log_exc(u"Creating %s object failed" % (cls_name), exc, False, None)
    return text_filter
Build a suitable TextFilter object .
286
7
226,182
def add_filter(self, new_filter, as_last=True):
    """Compose this filter with new_filter, appending or prepending it."""
    if not as_last:
        self.filters = [new_filter] + self.filters
    else:
        self.filters.append(new_filter)
Compose this filter with the given new_filter filter .
48
12
226,183
def apply_filter(self, strings):
    """Run every composed filter, in order, over the given list of strings."""
    result = strings
    for inner in self.filters:
        result = inner.apply_filter(result)
    self.log([u"Applying regex: '%s' => '%s'", strings, result])
    return result
Apply the text filter filter to the given list of strings .
60
12
226,184
def _build_map(self):
    """
    Read the transliteration map file at ``self.file_path`` and
    populate ``self.trans_map``, one rule per non-blank, non-comment line.
    """
    if gf.is_py2_narrow_build():
        self.log_warn(u"Running on a Python 2 narrow build: be aware that Unicode chars above 0x10000 cannot be replaced correctly.")
    self.trans_map = {}
    with io.open(self.file_path, "r", encoding="utf-8") as file_obj:
        # normalize tabs to spaces before splitting into rules
        contents = file_obj.read().replace(u"\t", u" ")
        for line in contents.splitlines():
            # ignore lines starting with "#" or blank (after stripping)
            if not line.startswith(u"#"):
                line = line.strip()
                if len(line) > 0:
                    self._process_map_rule(line)
Read the map file at path .
177
7
226,185
def _process_map_rule ( self , line ) : result = self . REPLACE_REGEX . match ( line ) if result is not None : what = self . _process_first_group ( result . group ( 1 ) ) replacement = self . _process_second_group ( result . group ( 2 ) ) for char in what : self . trans_map [ char ] = replacement self . log ( [ u"Adding rule: replace '%s' with '%s'" , char , replacement ] ) else : result = self . DELETE_REGEX . match ( line ) if result is not None : what = self . _process_first_group ( result . group ( 1 ) ) for char in what : self . trans_map [ char ] = "" self . log ( [ u"Adding rule: delete '%s'" , char ] )
Process the line string containing a map rule .
185
9
226,186
def _process_first_group(self, group):
    """
    Expand the first group of a rule (a single char, a U+xxxx codepoint,
    or a start-end codepoint range) into the list of characters it denotes;
    an unparsable group yields an empty list.
    """
    if "-" in group:
        # range
        if len(group.split("-")) == 2:
            arr = group.split("-")
            start = self._parse_codepoint(arr[0])
            end = self._parse_codepoint(arr[1])
        # NOTE(review): a group containing two or more "-" leaves start/end
        # unbound here, raising NameError below — confirm expected inputs
    else:
        # single char/U+xxxx
        start = self._parse_codepoint(group)
        end = start
    result = []
    if (start > -1) and (end >= start):
        for index in range(start, end + 1):
            result.append(gf.safe_unichr(index))
    return result
Process the first group of a rule .
149
8
226,187
def load_job(self, job):
    """Store the given Job object, rejecting anything that is not a Job."""
    if not isinstance(job, Job):
        self.log_exc(u"job is not an instance of Job", None, True, ExecuteJobInputError)
    self.job = job
Load the job from the given Job object .
52
9
226,188
def execute(self):
    """
    Execute the loaded job, that is, execute all of its tasks in order.

    :raises ExecuteJobExecutionError: if the job is None, has no tasks,
        exceeds the configured maximum number of tasks, or if any task fails
    """
    self.log(u"Executing job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True, ExecuteJobExecutionError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True, ExecuteJobExecutionError)
    job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
    if (job_max_tasks > 0) and (len(self.job) > job_max_tasks):
        self.log_exc(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(self.job), job_max_tasks), None, True, ExecuteJobExecutionError)
    self.log([u"Number of tasks: '%d'", len(self.job)])
    for task in self.job.tasks:
        # BUGFIX: initialize before the try block, so the except handler
        # cannot raise NameError if reading the configuration itself fails
        custom_id = None
        try:
            custom_id = task.configuration["custom_id"]
            self.log([u"Executing task '%s'...", custom_id])
            executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger)
            executor.execute()
            self.log([u"Executing task '%s'... done", custom_id])
        except Exception as exc:
            self.log_exc(u"Error while executing task '%s'" % (custom_id), exc, True, ExecuteJobExecutionError)
        self.log(u"Executing task: succeeded")
    self.log(u"Executing job: succeeded")
Execute the job that is execute all of its tasks .
377
12
226,189
def write_output_container(self, output_directory_path):
    """
    Write the output container for this job,
    containing the sync map file of each task,
    and return the path to the output file.

    :param string output_directory_path: the path to a directory
                                         where the output container is created
    :rtype: string
    :raises: ExecuteJobOutputError: if the job is ``None``, has no tasks,
             a task is missing its sync map (file path), or
             outputting/compressing fails
    """
    self.log(u"Writing output container for this job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True, ExecuteJobOutputError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True, ExecuteJobOutputError)
    self.log([u"Number of tasks: '%d'", len(self.job)])

    # create temporary directory where the sync map files
    # will be created
    # this temporary directory will be compressed into
    # the output container
    self.tmp_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH])
    self.log([u"Created temporary directory '%s'", self.tmp_directory])

    for task in self.job.tasks:
        custom_id = task.configuration["custom_id"]
        # check if the task has sync map and sync map file path
        if task.sync_map_file_path is None:
            self.log_exc(u"Task '%s' has sync_map_file_path not set" % (custom_id), None, True, ExecuteJobOutputError)
        if task.sync_map is None:
            self.log_exc(u"Task '%s' has sync_map not set" % (custom_id), None, True, ExecuteJobOutputError)
        try:
            # output sync map
            self.log([u"Outputting sync map for task '%s'...", custom_id])
            task.output_sync_map_file(self.tmp_directory)
            self.log([u"Outputting sync map for task '%s'... done", custom_id])
        except Exception as exc:
            # FIX: forward exc (previously None was passed),
            # so the original cause is reported instead of discarded
            self.log_exc(u"Error while outputting sync map for task '%s'" % (custom_id), exc, True, ExecuteJobOutputError)

    # get output container info
    output_container_format = self.job.configuration["o_container_format"]
    self.log([u"Output container format: '%s'", output_container_format])
    output_file_name = self.job.configuration["o_name"]
    if ((output_container_format != ContainerFormat.UNPACKED) and
            (not output_file_name.endswith(output_container_format))):
        self.log(u"Adding extension to output_file_name")
        output_file_name += "." + output_container_format
    self.log([u"Output file name: '%s'", output_file_name])
    output_file_path = gf.norm_join(output_directory_path, output_file_name)
    self.log([u"Output file path: '%s'", output_file_path])

    try:
        self.log(u"Compressing...")
        container = Container(output_file_path, output_container_format, logger=self.logger)
        container.compress(self.tmp_directory)
        self.log(u"Compressing... done")
        self.log([u"Created output file: '%s'", output_file_path])
        self.log(u"Writing output container for this job: succeeded")
        self.clean(False)
        return output_file_path
    except Exception as exc:
        self.clean(False)
        self.log_exc(u"Error while compressing", exc, True, ExecuteJobOutputError)
        return None
Write the output container for this job .
797
8
226,190
def clean(self, remove_working_directory=True):
    """
    Remove the temporary directory.
    If ``remove_working_directory`` is ``True``,
    remove the working directory as well,
    otherwise just remove the temporary directory.

    :param bool remove_working_directory: if ``True``,
                                          also remove the working directory
    """
    # FIX: previously this tested "is not None", so clean(False)
    # (as called by write_output_container) still deleted the
    # working directory; test truthiness to honor the
    # documented contract
    if remove_working_directory:
        self.log(u"Removing working directory... ")
        gf.delete_directory(self.working_directory)
        self.working_directory = None
        self.log(u"Removing working directory... done")
    self.log(u"Removing temporary directory... ")
    gf.delete_directory(self.tmp_directory)
    self.tmp_directory = None
    self.log(u"Removing temporary directory... done")
Remove the temporary directory . If remove_working_directory is True remove the working directory as well otherwise just remove the temporary directory .
118
26
226,191
def _read_syncmap_file(self, path, extension, text=False):
    """
    Read labels from a SyncMap file.

    :param string path: the path of the sync map file
    :param string extension: the format of the sync map file
    :param bool text: if ``True``, return the fragment text
                      (lines joined by spaces) instead of
                      the fragment identifier
    :rtype: list of ``(begin, end, string)`` tuples
    """
    syncmap = SyncMap(logger=self.logger)
    syncmap.read(extension, path, parameters=None)

    def label(fragment):
        # join the fragment lines when the caller wants the text,
        # otherwise use the fragment identifier
        text_fragment = fragment.text_fragment
        if text:
            return u" ".join(text_fragment.lines)
        return text_fragment.identifier

    return [(f.begin, f.end, label(f)) for f in syncmap.fragments]
Read labels from a SyncMap file
113
7
226,192
def has_adjacent_leaves_only(self):
    """
    Return ``True`` if the sync map fragments
    which are the leaves of the sync map tree
    are all adjacent (each interval ends exactly
    where the next one begins).

    :rtype: bool
    """
    leaves = self.leaves()
    # check each consecutive pair of leaf intervals;
    # all() short-circuits on the first non-adjacent pair,
    # and is vacuously True for zero or one leaf
    return all(
        prev.interval.is_adjacent_before(curr.interval)
        for prev, curr in zip(leaves, leaves[1:])
    )
Return True if the sync map fragments which are the leaves of the sync map tree are all adjacent .
82
20
226,193
def json_string(self):
    """
    Return a JSON representation of the sync map.

    The representation is a dict with a single key
    ``fragments``, whose value mirrors the fragments tree
    (each fragment carries its ``children`` recursively).

    :rtype: string
    """
    def serialize(node):
        """ Recursively convert the non-empty children of node into dicts """
        serialized = []
        for child in node.children_not_empty:
            fragment = child.value
            text = fragment.text_fragment
            serialized.append({
                "id": text.identifier,
                "language": text.language,
                "lines": text.lines,
                "begin": gf.time_to_ssmmm(fragment.begin),
                "end": gf.time_to_ssmmm(fragment.end),
                "children": serialize(child)
            })
        return serialized
    payload = {"fragments": serialize(self.fragments_tree)}
    return gf.safe_unicode(json.dumps(payload, indent=1, sort_keys=True))
Return a JSON representation of the sync map .
202
9
226,194
def add_fragment(self, fragment, as_last=True):
    """
    Add the given sync map fragment as the first or last child
    of the root node of the sync map tree.

    :param fragment: the sync map fragment to be added
    :type  fragment: :class:`~aeneas.syncmap.SyncMapFragment`
    :param bool as_last: if ``True``, append as the last child;
                         otherwise, insert as the first child
    :raises: TypeError: if ``fragment`` is not a ``SyncMapFragment``
    """
    if not isinstance(fragment, SyncMapFragment):
        self.log_exc(u"fragment is not an instance of SyncMapFragment", None, True, TypeError)
    # wrap the fragment in a tree node and attach it to the root
    node = Tree(value=fragment)
    self.fragments_tree.add_child(node, as_last=as_last)
Add the given sync map fragment as the first or last child of the root node of the sync map tree .
85
22
226,195
def output_html_for_tuning(self, audio_file_path, output_file_path, parameters=None):
    """
    Output an HTML file for fine tuning the sync map manually.

    The finetuneas HTML template is loaded from disk and its
    placeholders are replaced with the audio file path, the
    sync map fragments, and (when given) the output format
    and SMIL references.

    :param string audio_file_path: the path to the associated audio file
    :param string output_file_path: the path of the HTML file to write
    :param dict parameters: additional parameters (may be ``None``)
    :raises: OSError: if ``output_file_path`` cannot be written
    """
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot output HTML file '%s'. Wrong permissions?" % (output_file_path), None, True, OSError)
    if parameters is None:
        parameters = {}
    audio_file_path_absolute = gf.fix_slash(os.path.abspath(audio_file_path))
    template_path_absolute = gf.absolute_path(self.FINETUNEAS_PATH, __file__)

    # load the finetuneas HTML template
    with io.open(template_path_absolute, "r", encoding="utf-8") as file_obj:
        html = file_obj.read()

    # apply the static replacements first
    for repl in self.FINETUNEAS_REPLACEMENTS:
        html = html.replace(repl[0], repl[1])

    # inject the audio file path and the sync map fragments
    html = html.replace(
        self.FINETUNEAS_REPLACE_AUDIOFILEPATH,
        u"audioFilePath = \"file://%s\";" % audio_file_path_absolute
    )
    html = html.replace(
        self.FINETUNEAS_REPLACE_FRAGMENTS,
        u"fragments = (%s).fragments;" % self.json_string
    )

    # inject the output format and, for SMIL, the audio/page refs
    if gc.PPN_TASK_OS_FILE_FORMAT in parameters:
        output_format = parameters[gc.PPN_TASK_OS_FILE_FORMAT]
        if output_format in self.FINETUNEAS_ALLOWED_FORMATS:
            html = html.replace(
                self.FINETUNEAS_REPLACE_OUTPUT_FORMAT,
                u"outputFormat = \"%s\";" % output_format
            )
            if output_format == "smil":
                smil_substitutions = [
                    (gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF, self.FINETUNEAS_REPLACE_SMIL_AUDIOREF, "audioref = \"%s\";"),
                    (gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF, self.FINETUNEAS_REPLACE_SMIL_PAGEREF, "pageref = \"%s\";"),
                ]
                for key, placeholder, replacement in smil_substitutions:
                    if key in parameters:
                        html = html.replace(placeholder, replacement % parameters[key])

    with io.open(output_file_path, "w", encoding="utf-8") as file_obj:
        file_obj.write(html)
Output an HTML file for fine tuning the sync map manually .
590
12
226,196
def _create_dct_matrix ( self ) : self . s2dct = numpy . zeros ( ( self . mfcc_size , self . filter_bank_size ) ) for i in range ( 0 , self . mfcc_size ) : freq = numpy . pi * float ( i ) / self . filter_bank_size self . s2dct [ i ] = numpy . cos ( freq * numpy . arange ( 0.5 , 0.5 + self . filter_bank_size , 1.0 , 'float64' ) ) self . s2dct [ : , 0 ] *= 0.5 self . s2dct = self . s2dct . transpose ( )
Create the not - quite - DCT matrix as used by Sphinx and store it in self . s2dct .
164
25
226,197
def _create_mel_filter_bank(self):
    """
    Create the Mel filter bank and store it in ``self.filters``.

    Each filter is a triangle on the linear frequency axis,
    whose edges are equally spaced on the Mel scale between
    ``self.lower_frequency`` and ``self.upper_frequency``.

    :raises: ValueError: if ``self.upper_frequency`` exceeds the Nyquist frequency
    """
    num_bins = 1 + (self.fft_order // 2)
    self.filters = numpy.zeros((num_bins, self.filter_bank_size), 'd')
    bin_width = float(self.sample_rate) / self.fft_order
    nyquist_frequency = self.sample_rate / 2
    if self.upper_frequency > nyquist_frequency:
        self.log_exc(u"Upper frequency %f exceeds Nyquist frequency %f" % (self.upper_frequency, nyquist_frequency), None, True, ValueError)
    melmax = MFCC._hz2mel(self.upper_frequency)
    melmin = MFCC._hz2mel(self.lower_frequency)
    mel_step = (melmax - melmin) / (self.filter_bank_size + 1)
    # frequencies (Hz) of the filter edges, equally spaced on the Mel scale
    edges = MFCC._mel2hz(melmin + mel_step * numpy.arange(self.filter_bank_size + 2, dtype='d'))
    # TODO can this code be written more numpy-style?
    # (the performance loss is negligible, it is just ugly to see)
    for filter_index in range(0, self.filter_bank_size):
        # int() casts to native int instead of working with numpy.float64
        left_bin = int(round(edges[filter_index] / bin_width))
        center_bin = int(round(edges[filter_index + 1] / bin_width))
        right_bin = int(round(edges[filter_index + 2] / bin_width))
        # normalize so that each triangle has unit area
        width_hz = (right_bin - left_bin) * bin_width
        height = 2.0 / width_hz
        if center_bin != left_bin:
            left_slope = height / (center_bin - left_bin)
        else:
            left_slope = 0
        # rising edge of the triangle
        freq = left_bin + 1
        while freq < center_bin:
            self.filters[freq, filter_index] = (freq - left_bin) * left_slope
            freq = freq + 1
        # the next if should always be true!
        if freq == center_bin:
            self.filters[freq, filter_index] = height
            freq = freq + 1
        # falling edge of the triangle
        if center_bin != right_bin:
            right_slope = height / (center_bin - right_bin)
            while freq < right_bin:
                self.filters[freq, filter_index] = (freq - right_bin) * right_slope
                freq = freq + 1
Create the Mel filter bank and store it in self . filters .
559
13
226,198
def _pre_emphasis ( self ) : self . data = numpy . append ( self . data [ 0 ] , self . data [ 1 : ] - self . emphasis_factor * self . data [ : - 1 ] )
Pre - emphasize the entire signal at once by self . emphasis_factor overwriting self . data .
48
21
226,199
def compute_from_data(self, data, sample_rate):
    """
    Compute MFCCs for the given audio data.

    :param data: the audio data, a 1D (mono) numpy array
    :param int sample_rate: the sample rate of the audio data
    :rtype: 2D :class:`numpy.ndarray`, one MFCC vector per frame
    :raises: ValueError: if the data is not a 1D array or is empty
    """
    def _process_frame(self, frame):
        """ Process one frame, returning the log(power()) of it. """
        # apply Hamming window
        frame *= self.hamming_window
        # compute RFFT
        fft = numpy.fft.rfft(frame, self.fft_order)
        # equivalent to power = fft.real * fft.real + fft.imag * fft.imag
        power = numpy.square(numpy.absolute(fft))
        # log of the Mel-filtered power spectrum, clipped at CUTOFF;
        # the DCT projection is applied once on the whole matrix, below
        return numpy.log(numpy.dot(power, self.filters).clip(self.CUTOFF, numpy.inf))

    if len(data.shape) != 1:
        self.log_exc(u"The audio data must be a 1D numpy array (mono).", None, True, ValueError)
    if len(data) < 1:
        self.log_exc(u"The audio data must not be empty.", None, True, ValueError)

    self.data = data
    self.sample_rate = sample_rate

    # number of samples in the audio
    data_length = len(self.data)
    # frame length in number of samples
    frame_length = int(self.window_length * self.sample_rate)
    # frame length must be at least equal to the FFT order
    frame_length_padded = max(frame_length, self.fft_order)
    # frame shift in number of samples
    frame_shift = int(self.window_shift * self.sample_rate)
    # number of MFCC vectors (one for each frame)
    # this number includes the last shift,
    # where the data will be padded with zeros
    # if the remaining samples are less than frame_length_padded
    number_of_frames = int((1.0 * data_length) / frame_shift)

    # create Hamming window
    self.hamming_window = numpy.hamming(frame_length_padded)
    # build Mel filter bank
    self._create_mel_filter_bank()
    # pre-emphasize the entire audio data
    self._pre_emphasis()

    # one log-spectrum row per frame; the DCT is applied at the end
    mfcc = numpy.zeros((number_of_frames, self.filter_bank_size), 'float64')
    for frame_index in range(number_of_frames):
        # get the start and end indices for this frame,
        # do not overrun the data length
        frame_start = frame_index * frame_shift
        frame_end = min(frame_start + frame_length_padded, data_length)
        # frame is zero-padded if the remaining samples
        # are less than its length
        frame = numpy.zeros(frame_length_padded)
        frame[0:(frame_end - frame_start)] = self.data[frame_start:frame_end]
        # process the frame
        mfcc[frame_index] = _process_frame(self, frame)
    # return the dot product with the DCT matrix
    return numpy.dot(mfcc, self.s2dct) / self.filter_bank_size
Compute MFCCs for the given audio data .
858
11