idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
38,100
def _find_files(self, entries, root, relative_path, file_name_regex):
    """
    Return the sorted list of entries located under ``root``
    (optionally joined with ``relative_path``) whose path suffix
    matches ``file_name_regex``.
    """
    self.log([u"Finding files within root: '%s'", root])
    target = root
    if relative_path is not None:
        self.log([u"Joining relative path: '%s'", relative_path])
        target = gf.norm_join(root, relative_path)
    self.log([u"Finding files within target: '%s'", target])
    matched = []
    prefix_length = len(target)
    for entry in entries:
        if not entry.startswith(target):
            continue
        self.log([u"Examining entry: '%s'", entry])
        # Drop the target prefix plus the following path separator.
        entry_suffix = entry[prefix_length + 1:]
        self.log([u"Examining entry suffix: '%s'", entry_suffix])
        if re.search(file_name_regex, entry_suffix) is not None:
            self.log([u"Match: '%s'", entry])
            matched.append(entry)
        else:
            self.log([u"No match: '%s'", entry])
    return sorted(matched)
Return the elements in entries that match the given file name regex within root.
38,101
def _match_files_flat_hierarchy(self, text_files, audio_files):
    """
    Match audio and text files in flat hierarchies by pairing files
    that share the same base name (extension stripped).

    Return a list of ``[key, text_file, audio_file]`` triples.
    """
    self.log(u"Matching files in flat hierarchy")
    self.log([u"Text files: '%s'", text_files])
    self.log([u"Audio files: '%s'", audio_files])
    d_text = {}
    d_audio = {}
    for text_file in text_files:
        key = gf.file_name_without_extension(text_file)
        d_text[key] = text_file
        self.log([u"Added text file '%s' to key '%s'", text_file, key])
    for audio_file in audio_files:
        key = gf.file_name_without_extension(audio_file)
        d_audio[key] = audio_file
        self.log([u"Added audio file '%s' to key '%s'", audio_file, key])
    tasks = []
    # Only keys present in BOTH maps produce a task pair.
    for key in d_text.keys():
        self.log([u"Examining text key '%s'", key])
        if key in d_audio:
            self.log([u"Key '%s' is also in audio", key])
            tasks.append([key, d_text[key], d_audio[key]])
            self.log([u"Added pair ('%s', '%s')", d_text[key], d_audio[key]])
    return tasks
Match audio and text files in flat hierarchies .
38,102
def _match_directories(self, entries, root, regex_string):
    """
    Match directory names in paged hierarchies: return the sorted set
    of first-level directory names under ``root`` matching ``regex_string``.
    """
    self.log(u"Matching directory names in paged hierarchy")
    self.log([u"Matching within '%s'", root])
    self.log([u"Matching regex '%s'", regex_string])
    regex = re.compile(r"" + regex_string)
    directories = set()
    root_len = len(root)
    for entry in entries:
        if not entry.startswith(root):
            continue
        self.log([u"Examining '%s'", entry])
        # Strip root prefix and separator, then split into components.
        relative = entry[root_len + 1:]
        components = relative.split(os.sep)
        # Keep the first component only if the entry has at least one
        # level below it and the component matches the regex.
        if (len(components) >= 2) and (re.match(regex, components[0]) is not None):
            directories.add(components[0])
            self.log([u"Match: '%s'", components[0]])
        else:
            self.log([u"No match: '%s'", relative])
    return sorted(directories)
Match directory names in paged hierarchies .
38,103
def load_task(self, task):
    """
    Load the task from the given ``Task`` object.

    :raises ExecuteTaskInputError: if ``task`` is not a ``Task`` instance
    """
    if not isinstance(task, Task):
        self.log_exc(u"task is not an instance of Task", None, True, ExecuteTaskInputError)
    self.task = task
Load the task from the given Task object .
38,104
def _step_begin(self, label, log=True):
    """Log the begin of a step, recording its label and start time."""
    if log:
        self.step_label = label
        # self.log(...) appears to return the timestamp of the logged
        # message; _step_end subtracts it to compute the step duration.
        self.step_begin_time = self.log(u"STEP %d BEGIN (%s)" % (self.step_index, label))
Log begin of a step
38,105
def _step_end(self, log=True):
    """Log the end of the current step, accumulating its duration."""
    if log:
        step_end_time = self.log(u"STEP %d END (%s)" % (self.step_index, self.step_label))
        # Duration as a float number of seconds (timedelta arithmetic).
        elapsed = step_end_time - self.step_begin_time
        elapsed = float(elapsed.seconds + elapsed.microseconds / 1000000.0)
        self.step_total += elapsed
        self.log(u"STEP %d DURATION %.3f (%s)" % (self.step_index, elapsed, self.step_label))
        self.step_index += 1
Log end of a step
38,106
def _step_failure(self, exc):
    """Log the failure of the current step and raise ``ExecuteTaskExecutionError``."""
    self.log_crit(u"STEP %d (%s) FAILURE" % (self.step_index, self.step_label))
    self.step_index += 1
    self.log_exc(u"Unexpected error while executing task", exc, True, ExecuteTaskExecutionError)
Log failure of a step
38,107
def execute(self):
    """
    Execute the task; the sync map produced will be stored inside
    the task object.

    :raises ExecuteTaskInputError: if the task audio or text input
        is missing or exceeds the configured limits
    """
    self.log(u"Executing task...")
    # --- validate audio input ---
    if self.task.audio_file is None:
        self.log_exc(u"The task does not seem to have its audio file set", None, True, ExecuteTaskInputError)
    if (self.task.audio_file.audio_length is None) or (self.task.audio_file.audio_length <= 0):
        self.log_exc(u"The task seems to have an invalid audio file", None, True, ExecuteTaskInputError)
    task_max_audio_length = self.rconf[RuntimeConfiguration.TASK_MAX_AUDIO_LENGTH]
    if (task_max_audio_length > 0) and (self.task.audio_file.audio_length > task_max_audio_length):
        self.log_exc(u"The audio file of the task has length %.3f, more than the maximum allowed (%.3f)." % (self.task.audio_file.audio_length, task_max_audio_length), None, True, ExecuteTaskInputError)
    # --- validate text input ---
    if self.task.text_file is None:
        self.log_exc(u"The task does not seem to have its text file set", None, True, ExecuteTaskInputError)
    if len(self.task.text_file) == 0:
        self.log_exc(u"The task text file seems to have no text fragments", None, True, ExecuteTaskInputError)
    task_max_text_length = self.rconf[RuntimeConfiguration.TASK_MAX_TEXT_LENGTH]
    if (task_max_text_length > 0) and (len(self.task.text_file) > task_max_text_length):
        self.log_exc(u"The text file of the task has %d fragments, more than the maximum allowed (%d)." % (len(self.task.text_file), task_max_text_length), None, True, ExecuteTaskInputError)
    if self.task.text_file.chars == 0:
        self.log_exc(u"The task text file seems to have empty text", None, True, ExecuteTaskInputError)
    self.log(u"Both audio and text input file are present")
    # --- reset step bookkeeping and dispatch on text format ---
    self.step_index = 1
    self.step_total = 0.000
    if self.task.text_file.file_format in TextFileFormat.MULTILEVEL_VALUES:
        self._execute_multi_level_task()
    else:
        self._execute_single_level_task()
    self.log(u"Executing task... done")
Execute the task . The sync map produced will be stored inside the task object .
38,108
def _execute_single_level_task(self):
    """Execute a single-level task, storing the sync map into the task object."""
    self.log(u"Executing single level task...")
    try:
        # Extract MFCCs from the real (recorded) audio.
        self._step_begin(u"extract MFCC real wave")
        real_wave_mfcc = self._extract_mfcc(
            file_path=self.task.audio_file_path_absolute,
            file_format=None,
        )
        self._step_end()
        # Determine head/process/tail lengths and apply them.
        self._step_begin(u"compute head tail")
        (head_length, process_length, tail_length) = self._compute_head_process_tail(real_wave_mfcc)
        real_wave_mfcc.set_head_middle_tail(head_length, process_length, tail_length)
        self._step_end()
        # Run the core alignment against a fresh sync tree.
        self._set_synthesizer()
        sync_root = Tree()
        self._execute_inner(
            real_wave_mfcc,
            self.task.text_file,
            sync_root=sync_root,
            force_aba_auto=False,
            log=True,
            leaf_level=True,
        )
        self._clear_cache_synthesizer()
        # Materialize the sync map from the computed tree.
        self._step_begin(u"create sync map")
        self._create_sync_map(sync_root=sync_root)
        self._step_end()
        self._step_total()
        self.log(u"Executing single level task... done")
    except Exception as exc:
        self._step_failure(exc)
Execute a single-level task.
38,109
def _execute_level(self, level, audio_file_mfcc, text_files, sync_roots, force_aba_auto=False):
    """
    Compute the alignment for all the nodes in the given level.

    Return ``(next_level_text_files, next_level_sync_roots)``,
    the inputs for the level below.
    """
    self._set_synthesizer()
    next_level_text_files = []
    next_level_sync_roots = []
    for text_file_index, text_file in enumerate(text_files):
        self.log([u"Text level %d, fragment %d", level, text_file_index])
        self.log([u" Len: %d", len(text_file)])
        sync_root = sync_roots[text_file_index]
        if (level > 1) and (len(text_file) == 1):
            self.log(u"Level > 1 and only one text fragment => return trivial tree")
            self._append_trivial_tree(text_file, sync_root)
        elif (level > 1) and (sync_root.value.begin == sync_root.value.end):
            self.log(u"Level > 1 and parent has begin == end => return trivial tree")
            self._append_trivial_tree(text_file, sync_root)
        else:
            self.log(u"Level == 1 or more than one text fragment with non-zero parent => compute tree")
            if not sync_root.is_empty:
                # Restrict the audio window to the parent's interval.
                begin = sync_root.value.begin
                end = sync_root.value.end
                self.log([u" Setting begin: %.3f", begin])
                self.log([u" Setting end: %.3f", end])
                audio_file_mfcc.set_head_middle_tail(head_length=begin, middle_length=(end - begin))
            else:
                self.log(u" No begin or end to set")
            self._execute_inner(
                audio_file_mfcc,
                text_file,
                sync_root=sync_root,
                force_aba_auto=force_aba_auto,
                log=False,
                leaf_level=(level == 3),
            )
        # Collect inputs for the next level; children[1:-1] drops the
        # first and last child (presumably HEAD/TAIL — confirm).
        next_level_text_files.extend(text_file.children_not_empty)
        next_level_sync_roots.extend(sync_root.children[1:-1])
    self._clear_cache_synthesizer()
    return (next_level_text_files, next_level_sync_roots)
Compute the alignment for all the nodes in the given level .
38,110
def _execute_inner(self, audio_file_mfcc, text_file, sync_root=None, force_aba_auto=False, log=True, leaf_level=False):
    """
    Align a subinterval of the given ``AudioFileMFCC`` with the given
    ``TextFile``: synthesize the text, extract MFCCs of the synthetic
    wave, align the two waves, and adjust the boundaries.
    """
    self._step_begin(u"synthesize text", log=log)
    synt_handler, synt_path, synt_anchors, synt_format = self._synthesize(text_file)
    self._step_end(log=log)
    self._step_begin(u"extract MFCC synt wave", log=log)
    synt_wave_mfcc = self._extract_mfcc(
        file_path=synt_path,
        file_format=synt_format,
    )
    # The synthesized temp file is no longer needed once MFCCs are out.
    gf.delete_file(synt_handler, synt_path)
    self._step_end(log=log)
    self._step_begin(u"align waves", log=log)
    indices = self._align_waves(audio_file_mfcc, synt_wave_mfcc, synt_anchors)
    self._step_end(log=log)
    self._step_begin(u"adjust boundaries", log=log)
    self._adjust_boundaries(indices, text_file, audio_file_mfcc, sync_root, force_aba_auto, leaf_level)
    self._step_end(log=log)
Align a subinterval of the given AudioFileMFCC with the given TextFile .
38,111
def _load_audio_file(self):
    """Load the task audio into memory and return the ``AudioFile`` object."""
    self._step_begin(u"load audio file")
    audio_file = AudioFile(
        file_path=self.task.audio_file_path_absolute,
        file_format=None,
        rconf=self.rconf,
        logger=self.logger,
    )
    audio_file.read_samples_from_file()
    self._step_end()
    return audio_file
Load audio in memory .
38,112
def _clear_audio_file(self, audio_file):
    """Clear the given audio file's samples from memory."""
    self._step_begin(u"clear audio file")
    audio_file.clear_data()
    # NOTE(review): this only rebinds the local name; the caller's
    # reference to the audio file is unaffected.
    audio_file = None
    self._step_end()
Clear audio from memory .
38,113
def _extract_mfcc(self, file_path=None, file_format=None, audio_file=None):
    """
    Extract the MFCCs from the given audio file (by path or object),
    optionally running VAD when masked MFCC mode is enabled in rconf.
    """
    audio_file_mfcc = AudioFileMFCC(
        file_path=file_path,
        file_format=file_format,
        audio_file=audio_file,
        rconf=self.rconf,
        logger=self.logger,
    )
    if self.rconf.mmn:
        self.log(u"Running VAD inside _extract_mfcc...")
        audio_file_mfcc.run_vad(
            log_energy_threshold=self.rconf[RuntimeConfiguration.MFCC_MASK_LOG_ENERGY_THRESHOLD],
            min_nonspeech_length=self.rconf[RuntimeConfiguration.MFCC_MASK_MIN_NONSPEECH_LENGTH],
            extend_before=self.rconf[RuntimeConfiguration.MFCC_MASK_EXTEND_SPEECH_INTERVAL_BEFORE],
            extend_after=self.rconf[RuntimeConfiguration.MFCC_MASK_EXTEND_SPEECH_INTERVAL_AFTER],
        )
        self.log(u"Running VAD inside _extract_mfcc... done")
    return audio_file_mfcc
Extract the MFCCs from the given audio file .
38,114
def _compute_head_process_tail(self, audio_file_mfcc):
    """
    Set the audio file head or tail, by either reading the explicit
    values from the Task configuration or using SD to determine them.

    Return ``(head_length, process_length, tail_length)``.
    """
    head_length = self.task.configuration["i_a_head"]
    process_length = self.task.configuration["i_a_process"]
    tail_length = self.task.configuration["i_a_tail"]
    head_max = self.task.configuration["i_a_head_max"]
    head_min = self.task.configuration["i_a_head_min"]
    tail_max = self.task.configuration["i_a_tail_max"]
    tail_min = self.task.configuration["i_a_tail_min"]
    if (head_length is not None) or (process_length is not None) or (tail_length is not None):
        # At least one explicit value: use the configuration as-is.
        self.log(u"Setting explicit head process tail")
    else:
        # No explicit value: detect head/tail with SD, within the
        # given min/max bounds when provided.
        self.log(u"Detecting head tail...")
        sd = SD(audio_file_mfcc, self.task.text_file, rconf=self.rconf, logger=self.logger)
        head_length = TimeValue("0.000")
        process_length = None
        tail_length = TimeValue("0.000")
        if (head_min is not None) or (head_max is not None):
            self.log(u"Detecting HEAD...")
            head_length = sd.detect_head(head_min, head_max)
            self.log([u"Detected HEAD: %.3f", head_length])
            self.log(u"Detecting HEAD... done")
        if (tail_min is not None) or (tail_max is not None):
            self.log(u"Detecting TAIL...")
            tail_length = sd.detect_tail(tail_min, tail_max)
            self.log([u"Detected TAIL: %.3f", tail_length])
            self.log(u"Detecting TAIL... done")
        self.log(u"Detecting head tail... done")
    self.log([u"Head: %s", gf.safe_float(head_length, None)])
    self.log([u"Process: %s", gf.safe_float(process_length, None)])
    self.log([u"Tail: %s", gf.safe_float(tail_length, None)])
    return (head_length, process_length, tail_length)
Set the audio file head or tail by either reading the explicit values from the Task configuration or using SD to determine them .
38,115
def _clear_cache_synthesizer(self):
    """Clear the cache of the synthesizer."""
    self.log(u"Clearing synthesizer...")
    self.synthesizer.clear_cache()
    self.log(u"Clearing synthesizer... done")
Clear the cache of the synthesizer
38,116
def _synthesize(self, text_file):
    """
    Synthesize ``text_file`` into a temporary WAVE file.

    Return ``(handler, path, anchors, audio_format)``.
    """
    handler, path = gf.tmp_file(suffix=u".wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
    result = self.synthesizer.synthesize(text_file, path)
    # result[0] holds the synthesis anchors.
    return (handler, path, result[0], self.synthesizer.output_audio_format)
Synthesize text into a WAVE file .
38,117
def _align_waves(self, real_wave_mfcc, synt_wave_mfcc, synt_anchors):
    """
    Align two ``AudioFileMFCC`` objects representing WAVE files via DTW,
    returning the boundary indices corresponding to ``synt_anchors``.
    """
    self.log(u"Creating DTWAligner...")
    aligner = DTWAligner(real_wave_mfcc, synt_wave_mfcc, rconf=self.rconf, logger=self.logger)
    self.log(u"Creating DTWAligner... done")
    self.log(u"Computing boundary indices...")
    boundary_indices = aligner.compute_boundaries(synt_anchors)
    self.log(u"Computing boundary indices... done")
    return boundary_indices
Align two AudioFileMFCC objects representing WAVE files .
38,118
def _adjust_boundaries(self, boundary_indices, text_file, real_wave_mfcc, sync_root, force_aba_auto=False, leaf_level=False):
    """
    Adjust boundaries as requested by the user, appending the
    resulting fragment list to ``sync_root``.
    """
    aba_parameters = self.task.configuration.aba_parameters()
    if force_aba_auto:
        # Override whatever algorithm the user configured.
        self.log(u"Forced running algorithm: 'auto'")
        aba_parameters["algorithm"] = (AdjustBoundaryAlgorithm.AUTO, [])
    self.log([u"ABA parameters: %s", aba_parameters])
    aba = AdjustBoundaryAlgorithm(rconf=self.rconf, logger=self.logger)
    aba.adjust(
        aba_parameters=aba_parameters,
        real_wave_mfcc=real_wave_mfcc,
        boundary_indices=boundary_indices,
        text_file=text_file,
        allow_arbitrary_shift=leaf_level,
    )
    aba.append_fragment_list_to_sync_root(sync_root=sync_root)
Adjust boundaries as requested by the user .
38,119
def _append_trivial_tree(self, text_file, sync_root):
    """
    Append a trivial tree, made by one HEAD, one sync map fragment
    for each element of ``text_file``, and one TAIL.
    """
    interval = sync_root.value
    if len(text_file) == 1:
        # Single fragment: it spans the whole parent interval.
        time_values = [interval.begin, interval.begin, interval.end, interval.end]
    else:
        # Multiple fragments with a degenerate parent: all collapse
        # onto the parent's begin time.
        time_values = [interval.begin] * (3 + len(text_file))
    aba = AdjustBoundaryAlgorithm(rconf=self.rconf, logger=self.logger)
    aba.intervals_to_fragment_list(text_file=text_file, time_values=time_values)
    aba.append_fragment_list_to_sync_root(sync_root=sync_root)
Append trivial tree made by one HEAD one sync map fragment for each element of text_file and one TAIL .
38,120
def _create_sync_map(self, sync_root):
    """
    If requested, check that the computed sync map is consistent;
    then store it into the Task object.
    """
    sync_map = SyncMap(tree=sync_root, rconf=self.rconf, logger=self.logger)
    if self.rconf.safety_checks:
        self.log(u"Running sanity check on computed sync map...")
        if not sync_map.leaves_are_consistent:
            self._step_failure(ValueError(u"The computed sync map contains inconsistent fragments"))
        self.log(u"Running sanity check on computed sync map... passed")
    else:
        self.log(u"Not running sanity check on computed sync map")
    self.task.sync_map = sync_map
If requested check that the computed sync map is consistent . Then add it to the Task .
38,121
def detect_interval(self, min_head_length=None, max_head_length=None, min_tail_length=None, max_tail_length=None):
    """
    Detect the interval of the audio file containing the fragments
    in the text file, returning ``(begin, end)`` in seconds.

    Falls back to ``(0.000, 0.000)`` when detection yields an
    invalid (negative or empty) interval.
    """
    head = self.detect_head(min_head_length, max_head_length)
    tail = self.detect_tail(min_tail_length, max_tail_length)
    begin = head
    end = self.real_wave_mfcc.audio_length - tail
    self.log([u"Audio length: %.3f", self.real_wave_mfcc.audio_length])
    self.log([u"Head length: %.3f", head])
    self.log([u"Tail length: %.3f", tail])
    self.log([u"Begin: %.3f", begin])
    self.log([u"End: %.3f", end])
    if (begin >= TimeValue("0.000")) and (end > begin):
        self.log([u"Returning %.3f %.3f", begin, end])
        return (begin, end)
    self.log(u"Returning (0.000, 0.000)")
    return (TimeValue("0.000"), TimeValue("0.000"))
Detect the interval of the audio file containing the fragments in the text file .
38,122
def detect_head(self, min_head_length=None, max_head_length=None):
    """Detect the audio head, returning its duration in seconds."""
    return self._detect(min_head_length, max_head_length, tail=False)
Detect the audio head returning its duration in seconds .
38,123
def detect_tail(self, min_tail_length=None, max_tail_length=None):
    """Detect the audio tail, returning its duration in seconds."""
    return self._detect(min_tail_length, max_tail_length, tail=True)
Detect the audio tail returning its duration in seconds .
38,124
def _select_tts_engine(self):
    """
    Select the TTS engine to be used by looking at the rconf object,
    storing the wrapper instance into ``self.tts_engine``.
    """
    self.log(u"Selecting TTS engine...")
    requested_tts_engine = self.rconf[RuntimeConfiguration.TTS]
    if requested_tts_engine == self.CUSTOM:
        self.log(u"TTS engine: custom")
        tts_path = self.rconf[RuntimeConfiguration.TTS_PATH]
        if tts_path is None:
            self.log_exc(u"You must specify a value for tts_path", None, True, ValueError)
        if not gf.file_can_be_read(tts_path):
            self.log_exc(u"Cannot read tts_path", None, True, OSError)
        try:
            # NOTE(review): the imp module is deprecated (removed in
            # Python 3.12); importlib would be the modern replacement.
            import imp
            self.log([u"Loading CustomTTSWrapper module from '%s'...", tts_path])
            imp.load_source("CustomTTSWrapperModule", tts_path)
            self.log([u"Loading CustomTTSWrapper module from '%s'... done", tts_path])
            self.log(u"Importing CustomTTSWrapper...")
            from CustomTTSWrapperModule import CustomTTSWrapper
            self.log(u"Importing CustomTTSWrapper... done")
            self.log(u"Creating CustomTTSWrapper instance...")
            self.tts_engine = CustomTTSWrapper(rconf=self.rconf, logger=self.logger)
            self.log(u"Creating CustomTTSWrapper instance... done")
        except Exception as exc:
            self.log_exc(u"Unable to load custom TTS wrapper", exc, True, OSError)
    elif requested_tts_engine == self.AWS:
        try:
            import boto3
        except ImportError as exc:
            self.log_exc(u"Unable to import boto3 for AWS Polly TTS API wrapper", exc, True, ImportError)
        self.log(u"TTS engine: AWS Polly TTS API")
        self.tts_engine = AWSTTSWrapper(rconf=self.rconf, logger=self.logger)
    elif requested_tts_engine == self.NUANCE:
        try:
            import requests
        except ImportError as exc:
            self.log_exc(u"Unable to import requests for Nuance TTS API wrapper", exc, True, ImportError)
        self.log(u"TTS engine: Nuance TTS API")
        self.tts_engine = NuanceTTSWrapper(rconf=self.rconf, logger=self.logger)
    elif requested_tts_engine == self.ESPEAKNG:
        self.log(u"TTS engine: eSpeak-ng")
        self.tts_engine = ESPEAKNGTTSWrapper(rconf=self.rconf, logger=self.logger)
    elif requested_tts_engine == self.FESTIVAL:
        self.log(u"TTS engine: Festival")
        self.tts_engine = FESTIVALTTSWrapper(rconf=self.rconf, logger=self.logger)
    elif requested_tts_engine == self.MACOS:
        self.log(u"TTS engine: macOS")
        self.tts_engine = MacOSTTSWrapper(rconf=self.rconf, logger=self.logger)
    else:
        # Default engine when no specific one was requested.
        self.log(u"TTS engine: eSpeak")
        self.tts_engine = ESPEAKTTSWrapper(rconf=self.rconf, logger=self.logger)
    self.log(u"Selecting TTS engine... done")
Select the TTS engine to be used by looking at the rconf object .
38,125
def check_shell_encoding(cls):
    """
    Check whether ``sys.stdin`` and ``sys.stdout`` are UTF-8 encoded.

    Return ``False`` if both are (no warning), ``True`` otherwise.
    """
    def _is_utf8(stream):
        # Fix: encoding names are case-insensitive, and on Python 3 the
        # attribute is usually reported lowercase ("utf-8"), so a
        # case-sensitive comparison falsely warned. The attribute can
        # also be None when the stream is piped or redirected.
        encoding = getattr(stream, "encoding", None) or ""
        return encoding.upper() in ["UTF-8", "UTF8"]
    is_in_utf8 = _is_utf8(sys.stdin)
    is_out_utf8 = _is_utf8(sys.stdout)
    if (is_in_utf8) and (is_out_utf8):
        gf.print_success(u"shell encoding OK")
        return False
    gf.print_warning(u"shell encoding WARNING")
    if not is_in_utf8:
        gf.print_warning(u" The default input encoding of your shell is not UTF-8")
    if not is_out_utf8:
        gf.print_warning(u" The default output encoding of your shell is not UTF-8")
    gf.print_info(u" If you plan to use aeneas on the command line,")
    if gf.is_posix():
        gf.print_info(u" you might want to 'export PYTHONIOENCODING=UTF-8' in your shell")
    else:
        gf.print_info(u" you might want to 'set PYTHONIOENCODING=UTF-8' in your shell")
    return True
Check whether sys.stdin and sys.stdout are UTF-8 encoded.
38,126
def check_ffprobe(cls):
    """
    Check whether ffprobe can be called.

    Return ``False`` on success, ``True`` on failure.
    """
    try:
        from aeneas.ffprobewrapper import FFPROBEWrapper
        file_path = gf.absolute_path(u"tools/res/audio.mp3", __file__)
        prober = FFPROBEWrapper()
        prober.read_properties(file_path)
        gf.print_success(u"ffprobe OK")
        return False
    except Exception:
        # Fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; best-effort behavior preserved.
        pass
    gf.print_error(u"ffprobe ERROR")
    gf.print_info(u" Please make sure you have ffprobe installed correctly")
    gf.print_info(u" (usually it is provided by the ffmpeg installer)")
    gf.print_info(u" and that its path is in your PATH environment variable")
    return True
Check whether ffprobe can be called .
38,127
def check_ffmpeg(cls):
    """
    Check whether ffmpeg can be called.

    Return ``False`` on success, ``True`` on failure.
    """
    try:
        from aeneas.ffmpegwrapper import FFMPEGWrapper
        input_file_path = gf.absolute_path(u"tools/res/audio.mp3", __file__)
        handler, output_file_path = gf.tmp_file(suffix=u".wav")
        converter = FFMPEGWrapper()
        result = converter.convert(input_file_path, output_file_path)
        gf.delete_file(handler, output_file_path)
        if result:
            gf.print_success(u"ffmpeg OK")
            return False
    except Exception:
        # Fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; best-effort behavior preserved.
        pass
    gf.print_error(u"ffmpeg ERROR")
    gf.print_info(u" Please make sure you have ffmpeg installed correctly")
    gf.print_info(u" and that its path is in your PATH environment variable")
    return True
Check whether ffmpeg can be called .
38,128
def check_espeak(cls):
    """
    Check whether espeak can be called.

    Return ``False`` on success, ``True`` on failure.
    """
    try:
        from aeneas.textfile import TextFile
        from aeneas.textfile import TextFragment
        from aeneas.ttswrappers.espeakttswrapper import ESPEAKTTSWrapper
        text = u"From fairest creatures we desire increase,"
        text_file = TextFile()
        text_file.add_fragment(TextFragment(language=u"eng", lines=[text], filtered_lines=[text]))
        handler, output_file_path = gf.tmp_file(suffix=u".wav")
        ESPEAKTTSWrapper().synthesize_multiple(text_file, output_file_path)
        gf.delete_file(handler, output_file_path)
        gf.print_success(u"espeak OK")
        return False
    except Exception:
        # Fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; best-effort behavior preserved.
        pass
    gf.print_error(u"espeak ERROR")
    gf.print_info(u" Please make sure you have espeak installed correctly")
    gf.print_info(u" and that its path is in your PATH environment variable")
    gf.print_info(u" You might also want to check that the espeak-data directory")
    gf.print_info(u" is set up correctly, for example, it has the correct permissions")
    return True
Check whether espeak can be called .
38,129
def check_cdtw(cls):
    """
    Check whether the Python C extension cdtw can be imported.

    Return ``False`` if available, ``True`` (warning) otherwise.
    """
    if gf.can_run_c_extension("cdtw"):
        gf.print_success(u"aeneas.cdtw AVAILABLE")
        return False
    gf.print_warning(u"aeneas.cdtw NOT AVAILABLE")
    gf.print_info(u" You can still run aeneas but it will be significantly slower")
    gf.print_info(u" Please refer to the installation documentation for details")
    return True
Check whether Python C extension cdtw can be imported .
38,130
def check_cmfcc(cls):
    """
    Check whether the Python C extension cmfcc can be imported.

    Return ``False`` if available, ``True`` (warning) otherwise.
    """
    if gf.can_run_c_extension("cmfcc"):
        gf.print_success(u"aeneas.cmfcc AVAILABLE")
        return False
    gf.print_warning(u"aeneas.cmfcc NOT AVAILABLE")
    gf.print_info(u" You can still run aeneas but it will be significantly slower")
    gf.print_info(u" Please refer to the installation documentation for details")
    return True
Check whether Python C extension cmfcc can be imported .
38,131
def check_cew(cls):
    """
    Check whether the Python C extension cew can be imported.

    Return ``False`` if available, ``True`` (warning) otherwise.
    """
    if gf.can_run_c_extension("cew"):
        gf.print_success(u"aeneas.cew AVAILABLE")
        return False
    gf.print_warning(u"aeneas.cew NOT AVAILABLE")
    gf.print_info(u" You can still run aeneas but it will be a bit slower")
    gf.print_info(u" Please refer to the installation documentation for details")
    return True
Check whether Python C extension cew can be imported .
38,132
def check_all(cls, tools=True, encoding=True, c_ext=True):
    """
    Perform all checks.

    Return ``(errors, warnings, c_ext_warnings)``; a failing external
    tool check aborts immediately with ``(True, False, False)``.
    """
    # Hard failures: each external tool check returns True on error.
    if cls.check_ffprobe():
        return (True, False, False)
    if cls.check_ffmpeg():
        return (True, False, False)
    if cls.check_espeak():
        return (True, False, False)
    if (tools) and (cls.check_tools()):
        return (True, False, False)
    # Soft failures: collect warnings without aborting.
    warnings = False
    c_ext_warnings = False
    if encoding:
        warnings = cls.check_shell_encoding()
    if c_ext:
        c_ext_warnings = cls.check_cdtw() or c_ext_warnings
        c_ext_warnings = cls.check_cmfcc() or c_ext_warnings
        c_ext_warnings = cls.check_cew() or c_ext_warnings
    return (False, warnings, c_ext_warnings)
Perform all checks .
38,133
def config_string(self):
    """Build the storable string corresponding to this configuration object."""
    pairs = [
        u"%s%s%s" % (fn, gc.CONFIG_STRING_ASSIGNMENT_SYMBOL, self.data[fn])
        for fn in sorted(self.data.keys())
        if self.data[fn] is not None
    ]
    return (gc.CONFIG_STRING_SEPARATOR_SYMBOL).join(pairs)
Build the storable string corresponding to this configuration object .
38,134
def geq_multiple(self, other):
    """
    Return the next multiple of this time value greater than or
    equal to ``other``; if ``other`` is zero, return this time value.
    """
    if other == TimeValue("0.000"):
        return self
    return int(math.ceil(other / self)) * self
Return the next multiple of this time value greater than or equal to other . If other is zero return this time value .
38,135
def starts_at(self, time_point):
    """
    Return ``True`` if this interval starts at the given time point.

    :raises TypeError: if ``time_point`` is not a ``TimeValue``
    """
    if not isinstance(time_point, TimeValue):
        raise TypeError(u"time_point is not an instance of TimeValue")
    return self.begin == time_point
Returns True if this interval starts at the given time point .
38,136
def ends_at(self, time_point):
    """
    Return ``True`` if this interval ends at the given time point.

    :raises TypeError: if ``time_point`` is not a ``TimeValue``
    """
    if not isinstance(time_point, TimeValue):
        raise TypeError(u"time_point is not an instance of TimeValue")
    return self.end == time_point
Returns True if this interval ends at the given time point .
38,137
def percent_value(self, percent):
    """
    Return the time value located at ``percent`` of this interval.

    :param percent: percentage in [0, 100], clamped; a ``Decimal``
    :raises TypeError: if ``percent`` is not a ``Decimal``
    """
    if not isinstance(percent, Decimal):
        raise TypeError(u"percent is not an instance of Decimal")
    # Clamp to [0, 100], then convert to a fraction in [0, 1].
    fraction = Decimal(max(min(percent, 100), 0) / 100)
    return self.begin + self.length * fraction
Returns the time value at percent of this interval .
38,138
def offset(self, offset, allow_negative=False, min_begin_value=None, max_end_value=None):
    """
    Move this interval by the given shift ``offset``, in place.

    :raises TypeError: if ``offset`` is not a ``TimeValue``
    :return: this interval (fluent)
    """
    if not isinstance(offset, TimeValue):
        raise TypeError(u"offset is not an instance of TimeValue")
    self.begin += offset
    self.end += offset
    if not allow_negative:
        # Clip both endpoints at zero.
        self.begin = max(self.begin, TimeValue("0.000"))
        self.end = max(self.end, TimeValue("0.000"))
    if (min_begin_value is not None) and (max_end_value is not None):
        # Clamp into the [min_begin_value, max_end_value] window.
        self.begin = min(max(self.begin, min_begin_value), max_end_value)
        self.end = min(self.end, max_end_value)
    return self
Move this interval by the given shift offset .
38,139
def intersection(self, other):
    """
    Return the intersection between this time interval and the given
    one, or ``None`` if the two intervals do not overlap.
    """
    rel = self.relative_position_of(other)
    # Cases collapsing onto this interval's begin point.
    at_begin = [
        self.RELATIVE_POSITION_PP_C,
        self.RELATIVE_POSITION_PI_LC,
        self.RELATIVE_POSITION_PI_LG,
        self.RELATIVE_POSITION_PI_CG,
        self.RELATIVE_POSITION_IP_B,
        self.RELATIVE_POSITION_II_LB,
    ]
    if rel in at_begin:
        return TimeInterval(begin=self.begin, end=self.begin)
    # Cases collapsing onto this interval's end point.
    at_end = [
        self.RELATIVE_POSITION_IP_E,
        self.RELATIVE_POSITION_II_EG,
    ]
    if rel in at_end:
        return TimeInterval(begin=self.end, end=self.end)
    # Cases where other is fully contained in self.
    whole_other = [
        self.RELATIVE_POSITION_II_BI,
        self.RELATIVE_POSITION_II_BE,
        self.RELATIVE_POSITION_II_II,
        self.RELATIVE_POSITION_II_IE,
    ]
    if rel in whole_other:
        return TimeInterval(begin=other.begin, end=other.end)
    # Partial overlap: take the inner endpoints.
    partial = [
        self.RELATIVE_POSITION_IP_I,
        self.RELATIVE_POSITION_II_LI,
        self.RELATIVE_POSITION_II_LE,
        self.RELATIVE_POSITION_II_LG,
        self.RELATIVE_POSITION_II_BG,
        self.RELATIVE_POSITION_II_IG,
    ]
    if rel in partial:
        return TimeInterval(begin=max(self.begin, other.begin), end=min(self.end, other.end))
    # Disjoint intervals.
    return None
Return the intersection between this time interval and the given time interval or None if the two intervals do not overlap .
38,140
def is_non_zero_before_non_zero(self, other):
    """
    Return ``True`` if this time interval ends when the given other
    time interval begins, and both have non-zero length.
    """
    if not self.is_adjacent_before(other):
        return False
    return (not self.has_zero_length) and (not other.has_zero_length)
Return True if this time interval ends when the given other time interval begins and both have non zero length .
38,141
def is_adjacent_before(self, other):
    """
    Return ``True`` if this time interval ends when the given other
    time interval begins.

    :raises TypeError: if ``other`` is not a ``TimeInterval``
    """
    if not isinstance(other, TimeInterval):
        raise TypeError(u"other is not an instance of TimeInterval")
    return (self.end == other.begin)
Return True if this time interval ends when the given other time interval begins .
38,142
def check_format(self, sm_format):
    """
    Return ``True`` if the given sync map format is allowed,
    ``False`` otherwise (printing the allowed formats).
    """
    if sm_format in SyncMapFormat.ALLOWED_VALUES:
        return True
    self.print_error(u"Sync map format '%s' is not allowed" % (sm_format))
    self.print_info(u"Allowed formats:")
    self.print_generic(u" ".join(SyncMapFormat.ALLOWED_VALUES))
    return False
Return True if the given sync map format is allowed and False otherwise .
38,143
def _add_fragment(cls, syncmap, identifier, lines, begin, end, language=None):
    """Add a new fragment with the given text and time bounds to ``syncmap``."""
    text_fragment = TextFragment(identifier=identifier, lines=lines, language=language)
    fragment = SyncMapFragment(text_fragment=text_fragment, begin=begin, end=end)
    syncmap.add_fragment(fragment)
Add a new fragment to syncmap .
38,144
def parse(self, input_text, syncmap):
    """
    Parse the given ``input_text`` and append the extracted fragments
    to ``syncmap``.

    :raises NotImplementedError: always; subclasses must override
    """
    self.log_exc(u"%s is abstract and cannot be called directly" % (self.TAG), None, True, NotImplementedError)
Parse the given input_text and append the extracted fragments to syncmap .
38,145
def format(self, syncmap):
    """
    Format the given ``syncmap`` as a Unicode string.

    :raises NotImplementedError: always; subclasses must override
    """
    self.log_exc(u"%s is abstract and cannot be called directly" % (self.TAG), None, True, NotImplementedError)
Format the given syncmap as a Unicode string .
38,146
def print_examples(self, full=False):
    """
    Print the examples and exit, returning the help exit code.

    :param bool full: if True, also include examples not marked "show"
    """
    msg = []
    example_number = 1
    for key in sorted(self.DEMOS.keys()):
        example = self.DEMOS[key]
        if full or example["show"]:
            msg.append(u"Example %d (%s)" % (example_number, example[u"description"]))
            msg.append(u" $ %s %s" % (self.invoke, key))
            msg.append(u"")
            example_number += 1
    self.print_generic(u"\n" + u"\n".join(msg) + u"\n")
    return self.HELP_EXIT_CODE
Print the examples and exit .
38,147
def print_values(self, parameter):
    """
    Print the list of values for the given parameter and exit,
    returning the help exit code.
    """
    if parameter in self.VALUES:
        self.print_info(u"Available values for parameter '%s':" % parameter)
        self.print_generic(u"\n".join(self.VALUES[parameter]))
        return self.HELP_EXIT_CODE
    # "?" and "" are treated as "list the listable parameters",
    # anything else is an error first.
    if parameter not in [u"?", u""]:
        self.print_error(u"Invalid parameter name '%s'" % parameter)
    self.print_info(u"Parameters for which values can be listed:")
    self.print_generic(u"\n".join(sorted(self.VALUES.keys())))
    return self.HELP_EXIT_CODE
Print the list of values for the given parameter and exit .
38,148
def prepare_cew_for_windows():
    """
    Copy the files needed to compile the cew Python C extension on Windows:
    the eSpeak DLL (into aeneas/cew/) and the eSpeak LIB (next to this file).

    :rtype: bool -- True on success (DLL best-effort, LIB required)
    """
    try:
        espeak_dll_win_path = "C:\\Windows\\System32\\espeak.dll"
        espeak_dll_dst_path = "aeneas\\cew\\espeak.dll"
        # common eSpeak install locations to probe, in order
        espeak_dll_src_paths = [
            "C:\\aeneas\\eSpeak\\espeak_sapi.dll",
            "C:\\sync\\eSpeak\\espeak_sapi.dll",
            "C:\\Program Files\\eSpeak\\espeak_sapi.dll",
            "C:\\Program Files (x86)\\eSpeak\\espeak_sapi.dll",
        ]
        if os.path.exists(espeak_dll_dst_path):
            print("[INFO] Found eSpeak DLL in %s" % espeak_dll_dst_path)
        else:
            found = False
            copied = False
            for src_path in espeak_dll_src_paths:
                if os.path.exists(src_path):
                    found = True
                    print("[INFO] Copying eSpeak DLL from %s into %s" % (src_path, espeak_dll_dst_path))
                    try:
                        shutil.copyfile(src_path, espeak_dll_dst_path)
                        copied = True
                        print("[INFO] Copied eSpeak DLL")
                    except:
                        # best-effort copy: failure reported below via `copied`
                        pass
                    break
            if not found:
                print("[WARN] Unable to find the eSpeak DLL, probably because you installed eSpeak in a non-standard location.")
                print("[WARN] If you want to run aeneas with the C extension cew,")
                print("[WARN] please copy espeak_sapi.dll from your eSpeak directory to %s" % espeak_dll_win_path)
            elif not copied:
                print("[WARN] Unable to copy the eSpeak DLL, probably because you are not running with admin privileges.")
                print("[WARN] If you want to run aeneas with the C extension cew,")
                print("[WARN] please copy espeak_sapi.dll from your eSpeak directory to %s" % espeak_dll_win_path)
        # the LIB file is required: failure to copy it aborts with False
        espeak_lib_src_path = os.path.join(os.path.dirname(__file__), "thirdparty", "espeak.lib")
        espeak_lib_dst_path = os.path.join(os.path.dirname(__file__), "espeak.lib")
        if os.path.exists(espeak_lib_dst_path):
            print("[INFO] Found eSpeak LIB in %s" % espeak_lib_dst_path)
        else:
            try:
                print("[INFO] Copying eSpeak LIB into %s" % espeak_lib_dst_path)
                shutil.copyfile(espeak_lib_src_path, espeak_lib_dst_path)
                print("[INFO] Copied eSpeak LIB")
            except:
                print("[WARN] Unable to copy the eSpeak LIB, probably because you are not running with admin privileges.")
                print("[WARN] If you want to compile the C extension cew,")
                print("[WARN] please copy espeak.lib from the thirdparty directory into %s" % espeak_lib_dst_path)
                print("[WARN] and run the aeneas setup again.")
                return False
        return True
    except Exception as e:
        print("[WARN] Unexpected exception while preparing cew: %s" % e)
        return False
Copy files needed to compile the cew Python C extension on Windows .
38,149
def check_file_encoding(self, input_file_path):
    """
    Check whether the given file is UTF-8 encoded.

    :param string input_file_path: path of the file to check
    :rtype: ValidatorResult
    """
    self.log([u"Checking encoding of file '%s'", input_file_path])
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_file_encoding"):
        # safety checks disabled => vacuously passed
        return self.result
    if not gf.file_can_be_read(input_file_path):
        self._failed(u"File '%s' cannot be read." % (input_file_path))
        return self.result
    with io.open(input_file_path, "rb") as file_object:
        bstring = file_object.read()
    self._check_utf8_encoding(bstring)
    return self.result
Check whether the given file is UTF - 8 encoded .
38,150
def check_config_xml(self, contents):
    """
    Check whether the given XML config file contents is well formed
    and has all the required job and task parameters.

    :param contents: the XML config contents (bytes or string)
    :rtype: ValidatorResult
    """
    self.log(u"Checking contents XML config file")
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_config_xml"):
        return self.result
    contents = gf.safe_bytes(contents)
    self.log(u"Checking that contents is well formed")
    self.check_raw_string(contents, is_bstring=True)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for job")
    job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)
    self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for task")
    tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)
    for parameters in tasks_parameters:
        self.log([u"Checking required parameters for task: '%s'", parameters])
        self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)
        if not self.result.passed:
            # stop at the first failing task
            return self.result
    return self.result
Check whether the given XML config file contents is well - formed and it has all the required parameters .
38,151
def check_container(self, container_path, container_format=None, config_string=None):
    """
    Check whether the given container is well formed:
    it must exist, have a TXT or XML config (or a wizard config string),
    and yield a valid analyzed Job.

    :param string container_path: path of the container file or directory
    :param container_format: optional explicit container format
    :param string config_string: optional config string from the wizard
    :rtype: ValidatorResult
    """
    self.log([u"Checking container '%s'", container_path])
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_container"):
        return self.result
    if not (gf.file_exists(container_path) or gf.directory_exists(container_path)):
        self._failed(u"Container '%s' not found." % container_path)
        return self.result
    container = Container(container_path, container_format)
    try:
        self.log(u"Checking container has config file")
        if config_string is not None:
            # wizard config string takes precedence over files in the container
            self.log(u"Container with config string from wizard")
            self.check_config_txt(config_string, is_config_string=True)
        elif container.has_config_xml:
            self.log(u"Container has XML config file")
            contents = container.read_entry(container.entry_config_xml)
            if contents is None:
                self._failed(u"Unable to read the contents of XML config file.")
                return self.result
            self.check_config_xml(contents)
        elif container.has_config_txt:
            self.log(u"Container has TXT config file")
            contents = container.read_entry(container.entry_config_txt)
            if contents is None:
                self._failed(u"Unable to read the contents of TXT config file.")
                return self.result
            self.check_config_txt(contents, is_config_string=False)
        else:
            self._failed(u"Container does not have a TXT or XML configuration file.")
        self.log(u"Checking we have a valid job in the container")
        if not self.result.passed:
            return self.result
        self.log(u"Analyze the contents of the container")
        analyzer = AnalyzeContainer(container)
        if config_string is not None:
            job = analyzer.analyze(config_string=config_string)
        else:
            job = analyzer.analyze()
        self._check_analyzed_job(job, container)
    except OSError:
        self._failed(u"Unable to read the contents of the container.")
    return self.result
Check whether the given container is well - formed .
38,152
def _are_safety_checks_disabled ( self , caller = u"unknown_function" ) : if self . rconf . safety_checks : return False self . log_warn ( [ u"Safety checks disabled => %s passed" , caller ] ) return True
Return True if safety checks are disabled .
38,153
def _failed ( self , msg ) : self . log ( msg ) self . result . passed = False self . result . add_error ( msg ) self . log ( u"Failed" )
Log a validation failure .
38,154
def _check_utf8_encoding(self, bstring):
    """
    Check that the given value is a byte string properly encoded in UTF-8,
    registering a failure (via self._failed) otherwise.
    """
    if not gf.is_bytes(bstring):
        self._failed(u"The given string is not a sequence of bytes")
        return
    if not gf.is_utf8_encoded(bstring):
        self._failed(u"The given string is not encoded in UTF-8.")
Check whether the given sequence of bytes is properly encoded in UTF - 8 .
38,155
def _check_reserved_characters(self, ustring):
    """
    Check whether the given Unicode string contains configuration
    reserved characters, registering a failure if it does.
    """
    forbidden = [c for c in gc.CONFIG_RESERVED_CHARACTERS if c in ustring]
    if len(forbidden) > 0:
        self._failed(u"The given string contains the reserved characters '%s'." % u" ".join(forbidden))
Check whether the given Unicode string contains reserved characters .
38,156
def _check_allowed_values(self, parameters):
    """
    Check that each parameter with a constrained value set holds an
    allowed value. Logs messages into self.result; returns at the
    first failure (skipping the final "Passed" log).
    """
    for key, allowed_values in self.ALLOWED_VALUES:
        self.log([u"Checking allowed values for parameter '%s'", key])
        if key in parameters:
            value = parameters[key]
            if value not in allowed_values:
                self._failed(u"Parameter '%s' has value '%s' which is not allowed." % (key, value))
                return
    self.log(u"Passed")
Check whether the given parameter value is allowed . Log messages into self . result .
38,157
def _check_implied_parameters(self, parameters):
    """
    For each rule (key, values, implied_keys): when parameters[key] is
    one of values, at least one of implied_keys must also be present.
    Logs messages into self.result; returns at the first failure.
    """
    for key, values, implied_keys in self.IMPLIED_PARAMETERS:
        self.log([u"Checking implied parameters by '%s'='%s'", key, values])
        if (key in parameters) and (parameters[key] in values):
            found = False
            for implied_key in implied_keys:
                if implied_key in parameters:
                    found = True
            if not found:
                if len(implied_keys) == 1:
                    msg = u"Parameter '%s' is required when '%s'='%s'." % (implied_keys[0], key, parameters[key])
                else:
                    msg = u"At least one of [%s] is required when '%s'='%s'." % (",".join(implied_keys), key, parameters[key])
                self._failed(msg)
                return
    self.log(u"Passed")
Check whether at least one of the keys in implied_keys is in parameters when a given key = value is present in parameters for some value in values . Log messages into self . result .
38,158
def _check_required_parameters(self, required_parameters, parameters):
    """
    Check that ``parameters`` is non-empty, contains all
    ``required_parameters``, and that values and implied parameters
    are valid. Logs messages into self.result.

    # NOTE(review): early exits return None while the final statement
    # returns self.result — callers appear to read self.result instead;
    # confirm before relying on the return value.
    """
    self.log([u"Checking required parameters '%s'", required_parameters])
    self.log(u"Checking input parameters are not empty")
    if (parameters is None) or (len(parameters) == 0):
        self._failed(u"No parameters supplied.")
        return
    self.log(u"Checking no required parameter is missing")
    for req_param in required_parameters:
        if req_param not in parameters:
            self._failed(u"Required parameter '%s' not set." % req_param)
            return
    self.log(u"Checking all parameter values are allowed")
    self._check_allowed_values(parameters)
    self.log(u"Checking all implied parameters are present")
    self._check_implied_parameters(parameters)
    return self.result
Check whether the given parameter dictionary contains all the required parameters . Log messages into self . result .
38,159
def _check_analyzed_job(self, job, container):
    """
    Check that the Job generated from the container is well formed:
    not None, with at least one Task (and not more than the configured
    maximum), and that each Task text file is non-empty UTF-8.
    Logs messages into self.result.
    """
    self.log(u"Checking the Job object generated from container")
    self.log(u"Checking that the Job is not None")
    if job is None:
        self._failed(u"Unable to create a Job from the container.")
        return
    self.log(u"Checking that the Job has at least one Task")
    if len(job) == 0:
        self._failed(u"Unable to create at least one Task from the container.")
        return
    if self.rconf[RuntimeConfiguration.JOB_MAX_TASKS] > 0:
        self.log(u"Checking that the Job does not have too many Tasks")
        if len(job) > self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]:
            self._failed(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(job), self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]))
            return
    self.log(u"Checking that each Task text file is well formed")
    for task in job.tasks:
        self.log([u"Checking Task text file '%s'", task.text_file_path])
        text_file_bstring = container.read_entry(task.text_file_path)
        if (text_file_bstring is None) or (len(text_file_bstring) == 0):
            self._failed(u"Text file '%s' is empty" % task.text_file_path)
            return
        self._check_utf8_encoding(text_file_bstring)
        if not self.result.passed:
            self._failed(u"Text file '%s' is not encoded in UTF-8" % task.text_file_path)
            return
        self._check_not_empty(text_file_bstring)
        if not self.result.passed:
            self._failed(u"Text file '%s' is empty" % task.text_file_path)
            return
        self.log([u"Checking Task text file '%s': passed", task.text_file_path])
    self.log(u"Checking each Task text file is well formed: passed")
Check that the job object generated from the given container is well formed that it has at least one task and that the text file of each task has the correct encoding . Log messages into self . result .
38,160
def pretty_print(self, warnings=False):
    """
    Return a Unicode string listing errors (and, optionally, warnings),
    one per line, under "Warnings:" / "Errors:" headers.

    :param bool warnings: if True, include warnings as well
    :rtype: string
    """
    lines = []
    if warnings and len(self.warnings) > 0:
        lines.append(u"Warnings:")
        for warning in self.warnings:
            lines.append(u" %s" % warning)
    if len(self.errors) > 0:
        lines.append(u"Errors:")
        for error in self.errors:
            lines.append(u" %s" % error)
    return u"\n".join(lines)
Pretty print warnings and errors .
38,161
def convert(self, input_file_path, output_file_path, head_length=None, process_length=None):
    """
    Convert the audio file at input_file_path into output_file_path
    by invoking the configured ffmpeg executable.

    :param head_length: optional "-ss" seek offset passed to ffmpeg
    :param process_length: optional "-t" duration passed to ffmpeg
    :rtype: string -- the output file path
    :raises OSError: if input cannot be read, output cannot be written,
        or the output file was not produced
    :raises FFMPEGPathError: if the ffmpeg executable cannot be called
    """
    if not gf.file_can_be_read(input_file_path):
        self.log_exc(u"Input file '%s' cannot be read" % (input_file_path), None, True, OSError)
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Output file '%s' cannot be written" % (output_file_path), None, True, OSError)
    arguments = [self.rconf[RuntimeConfiguration.FFMPEG_PATH]]
    arguments.extend(["-i", input_file_path])
    if head_length is not None:
        arguments.extend(["-ss", head_length])
    if process_length is not None:
        arguments.extend(["-t", process_length])
    # select sample-rate-specific parameters, falling back to defaults
    if self.rconf.sample_rate in self.FFMPEG_PARAMETERS_MAP:
        arguments.extend(self.FFMPEG_PARAMETERS_MAP[self.rconf.sample_rate])
    else:
        arguments.extend(self.FFMPEG_PARAMETERS_DEFAULT)
    arguments.append(output_file_path)
    self.log([u"Calling with arguments '%s'", arguments])
    try:
        proc = subprocess.Popen(
            arguments,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        proc.communicate()
        proc.stdout.close()
        proc.stdin.close()
        proc.stderr.close()
    except OSError as exc:
        self.log_exc(u"Unable to call the '%s' ffmpeg executable" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]), exc, True, FFMPEGPathError)
    self.log(u"Call completed")
    if not gf.file_exists(output_file_path):
        self.log_exc(u"Output file '%s' was not written" % (output_file_path), None, True, OSError)
    self.log([u"Returning output file path '%s'", output_file_path])
    return output_file_path
Convert the audio file at input_file_path into output_file_path using the parameters set in the constructor or through the parameters property .
38,162
def rate_lack(self, max_rate):
    """
    Return the time interval this fragment lacks to respect the given
    max rate; zero for non-REGULAR fragments.
    """
    if self.fragment_type != self.REGULAR:
        return TimeValue("0.000")
    return self.chars / max_rate - self.length
The time interval that this fragment lacks to respect the given max rate .
38,163
def rate_slack(self, max_rate):
    """
    Return the maximum time interval that can be stolen from this
    fragment while still respecting the given max rate:
    the negated rate lack for REGULAR fragments, the whole length
    for NONSPEECH fragments, zero otherwise.
    """
    ftype = self.fragment_type
    if ftype == self.REGULAR:
        return -self.rate_lack(max_rate)
    if ftype == self.NONSPEECH:
        return self.length
    return TimeValue("0.000")
The maximum time interval that can be stolen to this fragment while keeping it respecting the given max rate .
38,164
def write_to_file(self, output_file_path, intervals, template):
    """
    Format each interval with ``template`` and either print the lines
    (when output_file_path is None) or write them, newline-joined,
    to the given UTF-8 file.
    """
    formatted = [template % (interval) for interval in intervals]
    if output_file_path is None:
        self.print_info(u"Intervals detected:")
        for line in formatted:
            self.print_generic(line)
        return
    with io.open(output_file_path, "w", encoding="utf-8") as output_file:
        output_file.write(u"\n".join(formatted))
    self.print_success(u"Created file '%s'" % output_file_path)
Write intervals to file .
38,165
def print_parameters(self):
    """
    Print the list of available parameters and return the help exit code.

    :rtype: int
    """
    self.print_info(u"Available parameters:")
    body = u"\n".join(self.PARAMETERS)
    self.print_generic(u"\n" + body + u"\n")
    return self.HELP_EXIT_CODE
Print the list of parameters and exit .
38,166
def main():
    """
    Run aeneas.cew reading input text from file and writing audio
    and interval data to file.

    argv: QUIT_AFTER BACKWARDS TEXT_FILE_PATH AUDIO_FILE_PATH DATA_FILE_PATH
    Each text line is "VOICE_CODE TEXT" (split at the first space).
    """
    if len(sys.argv) < 6:
        print("You must pass five arguments: QUIT_AFTER BACKWARDS TEXT_FILE_PATH AUDIO_FILE_PATH DATA_FILE_PATH")
        return 1
    c_quit_after = float(sys.argv[1])
    c_backwards = int(sys.argv[2])
    text_file_path = sys.argv[3]
    audio_file_path = sys.argv[4]
    data_file_path = sys.argv[5]
    s_text = []
    with io.open(text_file_path, "r", encoding="utf-8") as text:
        for line in text.readlines():
            line = line.replace(u"\n", u"").replace(u"\r", u"")
            idx = line.find(" ")
            if idx > 0:
                f_voice_code = line[:idx]
                f_text = line[(idx + 1):]
                s_text.append((f_voice_code, f_text))
    # the C extension expects bytes on Python 2 and unicode on Python 3
    c_text = []
    if gf.PY2:
        for f_voice_code, f_text in s_text:
            c_text.append((gf.safe_bytes(f_voice_code), gf.safe_bytes(f_text)))
    else:
        for f_voice_code, f_text in s_text:
            c_text.append((gf.safe_unicode(f_voice_code), gf.safe_unicode(f_text)))
    try:
        import aeneas.cew.cew
        sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
            audio_file_path,
            c_quit_after,
            c_backwards,
            c_text
        )
        with io.open(data_file_path, "w", encoding="utf-8") as data:
            data.write(u"%d\n" % (sr))
            data.write(u"%d\n" % (sf))
            data.write(u"\n".join([u"%.3f %.3f" % (i[0], i[1]) for i in intervals]))
    except Exception as exc:
        print(u"Unexpected error: %s" % str(exc))
Run aeneas . cew reading input text from file and writing audio and interval data to file .
38,167
def parse(self, input_text, syncmap):
    """
    Read fragments from a SMIL document, appending them to syncmap.

    Times accept both HH:MM:SS.mmm (clock) and SS.mmm forms.

    # NOTE(review): assumes each <par> lists its <text> child before
    # its <audio> child, so `identifier` is bound before use — confirm.
    """
    from lxml import etree
    smil_ns = "{http://www.w3.org/ns/SMIL}"
    root = etree.fromstring(gf.safe_bytes(input_text))
    for par in root.iter(smil_ns + "par"):
        for child in par:
            if child.tag == (smil_ns + "text"):
                # fragment id is the URL fragment of the src attribute
                identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
            elif child.tag == (smil_ns + "audio"):
                begin_text = child.get("clipBegin")
                if ":" in begin_text:
                    begin = gf.time_from_hhmmssmmm(begin_text)
                else:
                    begin = gf.time_from_ssmmm(begin_text)
                end_text = child.get("clipEnd")
                if ":" in end_text:
                    end = gf.time_from_hhmmssmmm(end_text)
                else:
                    end = gf.time_from_ssmmm(end_text)
                self._add_fragment(syncmap=syncmap, identifier=identifier, lines=[u""], begin=begin, end=end)
Read from SMIL file .
38,168
def _is_valid_index ( self , index ) : if isinstance ( index , int ) : return ( index >= 0 ) and ( index < len ( self ) ) if isinstance ( index , list ) : valid = True for i in index : valid = valid or self . _is_valid_index ( i ) return valid return False
Return True if and only if the given index is valid .
38,169
def _check_boundaries(self, fragment):
    """
    Check that the given fragment is a SyncMapFragment whose interval
    lies within [self.begin, self.end] (each bound checked only when
    not None).

    :raises TypeError: on wrong fragment or interval type
    :raises ValueError: when the interval exceeds the list boundaries
    """
    if not isinstance(fragment, SyncMapFragment):
        raise TypeError(u"fragment is not an instance of SyncMapFragment")
    interval = fragment.interval
    if not isinstance(interval, TimeInterval):
        raise TypeError(u"interval is not an instance of TimeInterval")
    if (self.begin is not None) and (interval.begin < self.begin):
        raise ValueError(u"interval.begin is before self.begin")
    if (self.end is not None) and (interval.end > self.end):
        raise ValueError(u"interval.end is after self.end")
Check that the interval of the given fragment is within the boundaries of the list . Raises an error if not OK .
38,170
def remove(self, indices):
    """
    Remove the fragments at the given list of indices,
    keeping all other fragments in their original order.

    :raises ValueError: (via log_exc) if the index list is not valid
    """
    if not self._is_valid_index(indices):
        self.log_exc(u"The given list of indices is not valid", None, True, ValueError)
    to_skip = sorted(indices)
    kept = []
    j = 0
    for i in range(len(self)):
        if (j < len(to_skip)) and (i == to_skip[j]):
            # drop this fragment and advance the skip pointer
            j += 1
        else:
            kept.append(self[i])
    self.__fragments = kept
Remove the fragments corresponding to the given list of indices .
38,171
def sort(self):
    """
    Sort the fragments in the list, then verify that no two consecutive
    intervals overlap in a forbidden way.

    :raises ValueError: (via log_exc) on forbidden overlaps
    """
    if self.is_guaranteed_sorted:
        self.log(u"Already sorted, returning")
        return
    self.log(u"Sorting...")
    self.__fragments = sorted(self.__fragments)
    self.log(u"Sorting... done")
    self.log(u"Checking relative positions...")
    for i in range(len(self) - 1):
        current_interval = self[i].interval
        next_interval = self[i + 1].interval
        if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS:
            self.log(u"Found overlapping fragments:")
            self.log([u" Index %d => %s", i, current_interval])
            self.log([u" Index %d => %s", i + 1, next_interval])
            self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
    self.log(u"Checking relative positions... done")
    # mark the list as sorted so subsequent calls can return early
    self.__sorted = True
Sort the fragments in the list .
38,172
def remove_nonspeech_fragments(self, zero_length_only=False):
    """
    Remove NONSPEECH fragments from the list.

    :param bool zero_length_only: if True, remove only zero-length
        nonspeech fragments and relabel the surviving nonspeech
        fragments as REGULAR
    """
    self.log(u"Removing nonspeech fragments...")
    nonspeech = list(self.nonspeech_fragments)
    if zero_length_only:
        nonspeech = [(i, f) for i, f in nonspeech if f.has_zero_length]
    nonspeech_indices = [i for i, f in nonspeech]
    self.remove(nonspeech_indices)
    if zero_length_only:
        for i, f in list(self.nonspeech_fragments):
            f.fragment_type = SyncMapFragment.REGULAR
    self.log(u"Removing nonspeech fragments... done")
Remove NONSPEECH fragments from the list .
38,173
def has_zero_length_fragments(self, min_index=None, max_index=None):
    """
    Return True if at least one fragment within [min_index, max_index)
    has zero length; when the bounds are not given, check all fragments.
    """
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    zero = []
    for i in range(min_index, max_index):
        if self[i].has_zero_length:
            zero.append(i)
    self.log([u"Fragments with zero length: %s", zero])
    return (len(zero) > 0)
Return True if the list has at least one interval with zero length within min_index and max_index . If the latter are not specified check all intervals .
38,174
def has_adjacent_fragments_only(self, min_index=None, max_index=None):
    """
    Return True if consecutive fragments within [min_index, max_index)
    are all adjacent (i.e. the list has no gaps in that range).
    """
    lo, hi = self._check_min_max_indices(min_index, max_index)
    for i in range(lo, hi - 1):
        this_interval = self[i].interval
        that_interval = self[i + 1].interval
        if not this_interval.is_adjacent_before(that_interval):
            self.log(u"Found non adjacent fragments")
            self.log([u" Index %d => %s", i, this_interval])
            self.log([u" Index %d => %s", i + 1, that_interval])
            return False
    return True
Return True if the list contains only adjacent fragments that is if it does not have gaps .
38,175
def offset(self, offset):
    """
    Move all the intervals in the list by the given offset,
    clamped to the list boundaries and never going negative.
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u" Offset %.3f", offset])
    for fragment in self.fragments:
        fragment.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done")
Move all the intervals in the list by the given offset .
38,176
def move_transition_point(self, fragment_index, value):
    """
    Move the transition point between fragment ``fragment_index`` and
    the next fragment to time ``value``.

    Silently returns (with a log) when the index is out of the movable
    range, the value exceeds the next interval's end, or the two
    intervals are not in the non-zero-before-non-zero configuration.
    """
    self.log(u"Called move_transition_point with")
    self.log([u" fragment_index %d", fragment_index])
    self.log([u" value %.3f", value])
    # the last two fragments' transition points are not movable
    if (fragment_index < 0) or (fragment_index > (len(self) - 3)):
        self.log(u"Bad fragment_index, returning")
        return
    current_interval = self[fragment_index].interval
    next_interval = self[fragment_index + 1].interval
    if value > next_interval.end:
        self.log(u"Bad value, returning")
        return
    if not current_interval.is_non_zero_before_non_zero(next_interval):
        self.log(u"Bad interval configuration, returning")
        return
    current_interval.end = value
    next_interval.begin = value
    self.log(u"Moved transition point")
Change the transition point between fragment fragment_index and the next fragment to the time value value .
38,177
def inject_long_nonspeech_fragments(self, pairs, replacement_string):
    """
    Inject NONSPEECH fragments for the given (interval, index) pairs:
    first shrink the neighboring fragments to make room, then append
    the new fragments, then re-sort the list.

    :param pairs: list of (nonspeech interval, index of the fragment
        preceding it)
    :param replacement_string: text for the injected fragments; None or
        the REMOVE constant yields empty text
    """
    self.log(u"Called inject_long_nonspeech_fragments")
    if replacement_string in [None, gc.PPV_TASK_ADJUST_BOUNDARY_NONSPEECH_REMOVE]:
        self.log(u" Remove long nonspeech")
        lines = []
    else:
        self.log([u" Replace long nonspeech with '%s'", replacement_string])
        lines = [replacement_string]
    self.log(u" First pass: making room...")
    for nsi, index in pairs:
        self[index].interval.end = nsi.begin
        self[index + 1].interval.begin = nsi.end
    self.log(u" First pass: making room... done")
    self.log(u" Second pass: append nonspeech intervals...")
    for i, (nsi, index) in enumerate(pairs, 1):
        # synthetic identifiers n000001, n000002, ...
        identifier = u"n%06d" % i
        self.add(
            SyncMapFragment(
                text_fragment=TextFragment(
                    identifier=identifier,
                    language=None,
                    lines=lines,
                    filtered_lines=lines
                ),
                interval=nsi,
                fragment_type=SyncMapFragment.NONSPEECH
            ),
            sort=False
        )
    self.log(u" Second pass: append nonspeech intervals... done")
    self.log(u" Third pass: sorting...")
    self.sort()
    self.log(u" Third pass: sorting... done")
Inject nonspeech fragments corresponding to the given intervals in this fragment list .
38,178
def compute_accumulated_cost_matrix(self):
    """
    Compute and return the accumulated cost matrix,
    or None when the inner DTW object could not be set up.
    """
    self._setup_dtw()
    dtw = self.dtw
    if dtw is None:
        self.log(u"Inner self.dtw is None => returning None")
        return None
    self.log(u"Returning accumulated cost matrix")
    return dtw.compute_accumulated_cost_matrix()
Compute the accumulated cost matrix and return it .
38,179
def compute_path(self):
    """
    Compute the min cost DTW path between the two waves and return it
    as a tuple (real_indices, synt_indices) of numpy arrays translated
    to full-wave frame indices, or None if DTW could not be set up.
    """
    self._setup_dtw()
    if self.dtw is None:
        self.log(u"Inner self.dtw is None => returning None")
        return None
    self.log(u"Computing path...")
    wave_path = self.dtw.compute_path()
    self.log(u"Computing path... done")
    self.log(u"Translating path to full wave indices...")
    real_indices = numpy.array([t[0] for t in wave_path])
    synt_indices = numpy.array([t[1] for t in wave_path])
    if self.rconf.mmn:
        # masked-middle mode: map masked indices back to full-wave indices
        self.log(u"Translating real indices with masked_middle_map...")
        real_indices = self.real_wave_mfcc.masked_middle_map[real_indices]
        real_indices[0] = self.real_wave_mfcc.head_length
        self.log(u"Translating real indices with masked_middle_map... done")
        self.log(u"Translating synt indices with masked_middle_map...")
        synt_indices = self.synt_wave_mfcc.masked_middle_map[synt_indices]
        self.log(u"Translating synt indices with masked_middle_map... done")
    else:
        self.log(u"Translating real indices by adding head_length...")
        real_indices += self.real_wave_mfcc.head_length
        self.log(u"Translating real indices by adding head_length... done")
        self.log(u"Nothing to do with synt indices")
    self.log(u"Translating path to full wave indices... done")
    return (real_indices, synt_indices)
Compute the min cost path between the two waves and return it .
38,180
def compute_boundaries(self, synt_anchors):
    """
    Compute the min cost path between the two waves and return a numpy
    array of boundary frame indices corresponding to the provided
    synthesized anchors; when DTW is unavailable, return artificial
    evenly-spaced boundaries instead.
    """
    self._setup_dtw()
    if self.dtw is None:
        # fallback: spread boundaries uniformly over the middle of the real wave
        self.log(u"Inner self.dtw is None => returning artificial boundary indices")
        begin = self.real_wave_mfcc.middle_begin
        end = self.real_wave_mfcc.tail_begin
        n = len(synt_anchors)
        step = float(end - begin) / n
        boundary_indices = [begin + int(i * step) for i in range(n)] + [end]
        return numpy.array(boundary_indices)
    self.log(u"Computing path...")
    real_indices, synt_indices = self.compute_path()
    self.log(u"Computing path... done")
    self.log(u"Computing boundary indices...")
    self.log([u"Fragments: %d", len(synt_anchors)])
    self.log([u"Path length: %d", len(real_indices)])
    mws = self.rconf.mws
    sample_rate = self.rconf.sample_rate
    samples_per_mws = mws * sample_rate
    # NOTE(review): `samples_per_mws.is_integer` is NOT called — if this
    # is a plain float, the bound method is always truthy and the warning
    # branch below can never run; confirm the type of mws * sample_rate.
    if samples_per_mws.is_integer:
        anchor_indices = numpy.array([int(a[0] / mws) for a in synt_anchors])
    else:
        self.log_warn(u"The number of samples in each window shift is not an integer, time drift might occur.")
        anchor_indices = numpy.array([(int(a[0] * sample_rate / mws) / sample_rate) for a in synt_anchors])
    # locate each anchor on the synt side of the path, then read off the real side
    begin_indices = numpy.clip(numpy.searchsorted(synt_indices, anchor_indices, side="right"), 0, len(synt_indices) - 1)
    begin_indices[0] = 0
    boundary_indices = numpy.append(real_indices[begin_indices], self.real_wave_mfcc.tail_begin)
    self.log([u"Boundary indices: %d", len(boundary_indices)])
    self.log(u"Computing boundary indices... done")
    return boundary_indices
Compute the min cost path between the two waves and return a list of boundary points representing the argmin values with respect to the provided synt_anchors timings .
38,181
def _setup_dtw(self):
    """
    Set the inner DTW object up (idempotent: returns immediately if
    already set). Selects the EXACT or STRIPE algorithm, masked or
    unmasked MFCCs, and sets self.dtw to None when either wave has
    no MFCC frames.

    :raises DTWAlignerNotInitialized: (via log_exc) if either wave's
        MFCCs are not initialized
    """
    if self.dtw is not None:
        return
    if (self.real_wave_mfcc is None) or (self.real_wave_mfcc.middle_mfcc is None):
        self.log_exc(u"The real wave MFCCs are not initialized", None, True, DTWAlignerNotInitialized)
    if (self.synt_wave_mfcc is None) or (self.synt_wave_mfcc.middle_mfcc is None):
        self.log_exc(u"The synt wave MFCCs are not initialized", None, True, DTWAlignerNotInitialized)
    algorithm = self.rconf[RuntimeConfiguration.DTW_ALGORITHM]
    # stripe width, in frames, spanning twice the DTW margin
    delta = int(2 * self.rconf.dtw_margin / self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT])
    mfcc2_length = self.synt_wave_mfcc.middle_length
    self.log([u"Requested algorithm: '%s'", algorithm])
    self.log([u"delta = %d", delta])
    self.log([u"m = %d", mfcc2_length])
    if mfcc2_length <= delta:
        # the stripe would cover the whole matrix; fall back to EXACT
        # unless the C extension (which handles this case) is available
        self.log(u"We have mfcc2_length <= delta")
        if (self.rconf[RuntimeConfiguration.C_EXTENSIONS]) and (gf.can_run_c_extension()):
            self.log(u"C extensions enabled and loaded: not selecting EXACT algorithm")
        else:
            self.log(u"Selecting EXACT algorithm")
            algorithm = DTWAlgorithm.EXACT
    if self.rconf.mmn:
        self.log(u"Using masked MFCC")
        real_mfcc = self.real_wave_mfcc.masked_middle_mfcc
        synt_mfcc = self.synt_wave_mfcc.masked_middle_mfcc
    else:
        self.log(u"Using unmasked MFCC")
        real_mfcc = self.real_wave_mfcc.middle_mfcc
        synt_mfcc = self.synt_wave_mfcc.middle_mfcc
    n = real_mfcc.shape[1]
    m = synt_mfcc.shape[1]
    self.log([u" Number of MFCC frames in real wave: %d", n])
    self.log([u" Number of MFCC frames in synt wave: %d", m])
    if (n == 0) or (m == 0):
        self.log(u"Setting self.dtw to None")
        self.dtw = None
    else:
        if algorithm == DTWAlgorithm.EXACT:
            self.log(u"Computing with EXACT algo")
            self.dtw = DTWExact(m1=real_mfcc, m2=synt_mfcc, rconf=self.rconf, logger=self.logger)
        else:
            self.log(u"Computing with STRIPE algo")
            self.dtw = DTWStripe(m1=real_mfcc, m2=synt_mfcc, delta=delta, rconf=self.rconf, logger=self.logger)
Set the DTW object up .
38,182
def check_import():
    """
    Try to import the aeneas package, printing the outcome.

    :rtype: bool -- True if the import FAILED, False on success
    """
    try:
        import aeneas
        print_success(u"aeneas OK")
        return False
    except ImportError:
        print_error(u"aeneas ERROR")
        print_info(u" Unable to load the aeneas Python package")
        print_info(u" This error is probably caused by:")
        print_info(u" A. you did not download/git-clone the aeneas package properly; or")
        print_info(u" B. you did not install the required Python packages:")
        print_info(u" 1. BeautifulSoup4")
        print_info(u" 2. lxml")
        print_info(u" 3. numpy")
    except Exception as e:
        print_error(e)
    # reached only from the except branches above
    return True
Try to import the aeneas package and return True if that fails .
38,183
def main():
    """
    Entry point for the diagnostics module.

    Exit codes: 0 all OK; 1 import or dependency errors;
    2 dependencies met but at least one C extension unavailable.
    """
    if check_import():
        sys.exit(1)
    # deferred import: only possible once aeneas itself imports cleanly
    from aeneas.diagnostics import Diagnostics
    errors, warnings, c_ext_warnings = Diagnostics.check_all()
    if errors:
        sys.exit(1)
    if c_ext_warnings:
        print_warning(u"All required dependencies are met but at least one Python C extension is not available")
        print_warning(u"You can still run aeneas but it will be slower")
        print_warning(u"Enjoy running aeneas!")
        sys.exit(2)
    else:
        print_success(u"All required dependencies are met and all available Python C extensions are working")
        print_success(u"Enjoy running aeneas!")
        sys.exit(0)
The entry point for this module
38,184
def is_pleasant(self):
    """
    Return True if all the leaves in the subtree rooted at this node
    are at the same level.
    """
    leaf_levels = sorted([leaf.level for leaf in self.leaves])
    return leaf_levels[0] == leaf_levels[-1]
Return True if all the leaves in the subtree rooted at this node are at the same level .
38,185
def add_child(self, node, as_last=True):
    """
    Add the given node to this node's children (appended, or prepended
    when as_last is False), set its parent to self, and shift the level
    of every node in its subtree accordingly.

    :raises TypeError: (via log_exc) if node is not a Tree
    """
    if not isinstance(node, Tree):
        self.log_exc(u"node is not an instance of Tree", None, True, TypeError)
    if as_last:
        self.__children.append(node)
    else:
        self.__children = [node] + self.__children
    # writing node.__parent / n.__level relies on Python name mangling,
    # which resolves correctly here because node is also a Tree
    node.__parent = self
    new_height = 1 + self.level
    for n in node.subtree:
        n.__level += new_height
Add the given child to the current list of children .
38,186
def remove_child(self, index):
    """
    Remove the child at the given index from the current list of
    children; negative indices count from the end.
    """
    if index < 0:
        index += len(self)
    children = self.__children
    self.__children = children[0:index] + children[(index + 1):]
Remove the child at the given index from the current list of children .
38,187
def remove(self):
    """
    Remove this node from the list of children of its current
    parent, if the current parent is not ``None``;
    otherwise, do nothing.
    """
    current_parent = self.parent
    if current_parent is None:
        return
    for position, sibling in enumerate(current_parent.children):
        # identity comparison: detach exactly this node object
        if sibling is self:
            current_parent.remove_child(position)
            self.parent = None
            break
Remove this node from the list of children of its current parent, if the current parent is not ``None``; otherwise, do nothing.
38,188
def remove_children(self, reset_parent=True):
    """
    Remove all the children of this node.

    :param bool reset_parent: if ``True``, also clear each
                              child's parent pointer
    """
    # only touch the children's parent pointers when requested
    for child in (self.children if reset_parent else []):
        child.parent = None
    self.__children = []
Remove all the children of this node .
38,189
def leaves_not_empty(self):
    """
    Return the list of non-empty leaves in the tree rooted
    at this node, in DFS order.

    :rtype: list
    """
    result = []
    for node in self.dfs:
        if node.is_leaf and not node.is_empty:
            result.append(node)
    return result
Return the list of leaves not empty in the tree rooted at this node in DFS order .
38,190
def height(self):
    """
    Return the height of the tree rooted at this node:
    the difference between the level of a deepest node
    and the level of this node, plus one.

    :rtype: int
    """
    deepest = max(node.level for node in self.subtree)
    return deepest - self.level + 1
Return the height of the tree rooted at this node, that is, the difference between the level of a deepest leaf and the level of this node, plus one. Return 1 for a single-node tree, 2 for a two-level tree, etc.
38,191
def levels(self):
    """
    Return a list of lists of nodes, indexed by level
    (relative to this node); each inner list holds the
    nodes at that level, in DFS order.

    :rtype: list of lists
    """
    buckets = [[] for _ in range(self.height)]
    base = self.level
    for node in self.subtree:
        buckets[node.level - base].append(node)
    return buckets
Return a list of lists of nodes . The outer list is indexed by the level . Each inner list contains the nodes at that level in DFS order .
38,192
def level_at_index(self, index):
    """
    Return the list of nodes at level ``index``, in DFS order.

    :param int index: the level index (0 = this node's level)
    :rtype: list
    :raises TypeError: (via log_exc) if index is not an int
    :raises ValueError: (via log_exc) if index is out of range
    """
    if not isinstance(index, int):
        self.log_exc(u"Index is not an integer", None, True, TypeError)
    levels = self.levels
    if (index < 0) or (index >= len(levels)):
        self.log_exc(u"The given level index '%d' is not valid" % (index), None, True, ValueError)
    # fix: reuse the local snapshot instead of recomputing the
    # (property-backed, O(n)) self.levels a second time
    return levels[index]
Return the list of nodes at level index in DFS order .
38,193
def ancestor(self, index):
    """
    Return the ``index``-th ancestor of this node, or ``None``
    if the chain of parents is shorter than ``index``.

    :param int index: number of parent links to follow (0 = self)
    :raises TypeError: (via log_exc) if index is not an int
    :raises ValueError: (via log_exc) if index is negative
    """
    if not isinstance(index, int):
        self.log_exc(u"index is not an integer", None, True, TypeError)
    if index < 0:
        self.log_exc(u"index cannot be negative", None, True, ValueError)
    current = self
    remaining = index
    # walk up at most `index` parent links, stopping early at the root
    while remaining > 0 and current is not None:
        current = current.parent
        remaining -= 1
    return current
Return the index - th ancestor .
38,194
def keep_levels(self, level_indices):
    """
    Rearrange the tree rooted at this node so that only the
    given levels are kept; level 0 (this node) is always kept.

    :param list level_indices: list of int level indices to keep
    :raises TypeError: (via log_exc) if level_indices is not a
                       list of int
    """
    if not isinstance(level_indices, list):
        self.log_exc(u"level_indices is not an instance of list", None, True, TypeError)
    for l in level_indices:
        if not isinstance(l, int):
            self.log_exc(u"level_indices contains an element not int", None, True, TypeError)
    # snapshot of the levels BEFORE any mutation: all the index
    # arithmetic below refers to these original levels
    prev_levels = self.levels
    # normalize: dedupe, force level 0 in, drop out-of-range
    # indices, and sort DESCENDING (deepest kept level first)
    level_indices = set(level_indices)
    if 0 not in level_indices:
        level_indices.add(0)
    level_indices = level_indices & set(range(self.height))
    level_indices = sorted(level_indices)[::-1]
    # pass 1: prune the children of every node on a kept level;
    # reset_parent=False keeps the parent pointers intact so that
    # ancestor() below can still walk the ORIGINAL chain
    for l in level_indices:
        for node in prev_levels[l]:
            node.remove_children(reset_parent=False)
    # pass 2: deepest-first, reattach each node on a kept level to
    # its ancestor on the NEXT shallower kept level; the distance
    # (l - level_indices[i + 1]) is measured on the original levels.
    # The shallowest entry (level 0, the root) is never reattached.
    for i in range(len(level_indices) - 1):
        l = level_indices[i]
        for node in prev_levels[l]:
            parent_node = node.ancestor(l - level_indices[i + 1])
            # NOTE(review): add_child also re-shifts the node's
            # subtree levels — deepest-first order appears to rely
            # on that; confirm against the Tree.add_child contract
            parent_node.add_child(node)
Rearrange the tree rooted at this node to keep only the given levels .
38,195
def s2dctmat(nfilt, ncep, freqstep):
    """
    Return the legacy not-quite-DCT matrix used by Sphinx.

    :param int nfilt: number of filterbank channels
    :param int ncep: number of cepstral coefficients
    :param freqstep: unused; kept for interface compatibility
                     (presumably historical — TODO confirm)
    :rtype: numpy array of shape (ncep, nfilt), dtype double
    """
    # bin centers at half-integer positions: 0.5, 1.5, ..., nfilt - 0.5
    half_bins = numpy.arange(0.5, float(nfilt) + 0.5, 1.0, 'double')
    # per-row angular frequencies: pi * i / nfilt for i in [0, ncep)
    freqs = numpy.pi * numpy.arange(ncep, dtype='double') / nfilt
    # outer product via broadcasting replaces the original row loop;
    # the per-element float operations are identical
    melcos = numpy.cos(freqs[:, numpy.newaxis] * half_bins)
    # halve the first column (legacy Sphinx convention)
    melcos[:, 0] = melcos[:, 0] * 0.5
    return melcos
Return the legacy not - quite - DCT matrix used by Sphinx
38,196
def logspec2s2mfc(logspec, ncep=13):
    """
    Convert log-power-spectrum bins to MFCC using the
    legacy Sphinx transform.

    :param logspec: numpy array of shape (nframes, nfilt)
    :param int ncep: number of cepstral coefficients
    :rtype: numpy array of shape (nframes, ncep)
    """
    nframes, nfilt = logspec.shape
    legacy_dct = s2dctmat(nfilt, ncep, 1. / nfilt)
    return numpy.dot(logspec, legacy_dct.T) / nfilt
Convert log - power - spectrum bins to MFCC using the legacy Sphinx transform
38,197
def dct(input, K=13):
    """
    Convert log-power-spectrum to MFCC using the
    orthogonal DCT-II.

    :param input: numpy array of shape (nframes, N)
    :param int K: number of cepstral coefficients
    :rtype: numpy array of shape (nframes, K)
    """
    nframes, N = input.shape
    # frequency step pi / N inlined into the dctmat call
    cosmat = dctmat(N, K, numpy.pi / N)
    return numpy.dot(input, cosmat) * numpy.sqrt(2.0 / N)
Convert log - power - spectrum to MFCC using the orthogonal DCT - II
38,198
def dct2(input, K=13):
    """
    Convert log-power-spectrum to MFCC using the
    normalized DCT-II.

    :param input: numpy array of shape (nframes, N)
    :param int K: number of cepstral coefficients
    :rtype: numpy array of shape (nframes, K)
    """
    nframes, N = input.shape
    # non-orthogonal variant: pass False to dctmat and scale by 2/N
    cosmat = dctmat(N, K, numpy.pi / N, False)
    return numpy.dot(input, cosmat) * (2.0 / N)
Convert log - power - spectrum to MFCC using the normalized DCT - II
38,199
def read_properties(self):
    """
    Populate this object by reading the audio properties
    (size, length, format, sample rate, channels) of the
    file at ``self.file_path``, via ffprobe.

    :raises OSError: (via log_exc) if the file cannot be read
    :raises AudioFileProbeError: (via log_exc) if ffprobe
                                 cannot be called
    :raises AudioFileUnsupportedFormatError: (via log_exc) if
                                 ffprobe cannot parse the file
    """
    self.log(u"Reading properties...")
    # bail out early if the file is not readable
    if not gf.file_can_be_read(self.file_path):
        self.log_exc(u"File '%s' cannot be read" % (self.file_path), None, True, OSError)
    self.log([u"Getting file size for '%s'", self.file_path])
    self.file_size = gf.file_size(self.file_path)
    self.log([u"File size for '%s' is '%d'", self.file_path, self.file_size])
    try:
        # delegate the actual probing to ffprobe
        self.log(u"Reading properties with FFPROBEWrapper...")
        properties = FFPROBEWrapper(rconf=self.rconf, logger=self.logger).read_properties(self.file_path)
        self.log(u"Reading properties with FFPROBEWrapper... done")
    except FFPROBEPathError:
        # map wrapper-level failures onto this module's exception types;
        # NOTE(review): `properties` is used unconditionally below, so
        # log_exc presumably raises and never returns — confirm
        self.log_exc(u"Unable to call ffprobe executable", None, True, AudioFileProbeError)
    except (FFPROBEUnsupportedFormatError, FFPROBEParsingError):
        self.log_exc(u"Audio file format not supported by ffprobe", None, True, AudioFileUnsupportedFormatError)
    # store the probed values, coercing numeric fields
    self.audio_length = TimeValue(properties[FFPROBEWrapper.STDOUT_DURATION])
    self.audio_format = properties[FFPROBEWrapper.STDOUT_CODEC_NAME]
    self.audio_sample_rate = gf.safe_int(properties[FFPROBEWrapper.STDOUT_SAMPLE_RATE])
    self.audio_channels = gf.safe_int(properties[FFPROBEWrapper.STDOUT_CHANNELS])
    self.log([u"Stored audio_length: '%s'", self.audio_length])
    self.log([u"Stored audio_format: '%s'", self.audio_format])
    self.log([u"Stored audio_sample_rate: '%s'", self.audio_sample_rate])
    self.log([u"Stored audio_channels: '%s'", self.audio_channels])
    self.log(u"Reading properties... done")
Populate this object by reading the audio properties of the file at the given path .