idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
32,600
def parse ( v , country_code = None ) : if isinstance ( v , int ) : res = by_code_num ( v ) return [ ] if not res else [ res ] if not isinstance ( v , ( str , unicode ) ) : raise ValueError ( '`v` of incorrect type {}. Only accepts str, bytes, unicode and int.' ) if re . match ( '^[A-Z]{3}$' , v ) : res = by_alpha3 ( v...
Try parse v to currencies ; filter by country_code
32,601
def open_json(file_name):
    """Read a JSON file and return the parsed data.

    Args:
        file_name (str): Path to the JSON file.

    Returns:
        The deserialized JSON content (e.g. dict or list).
    """
    # Specify utf-8 explicitly for consistency with the other JSON helpers
    # in this file (write_json_to_file / read_json_file), instead of relying
    # on the platform default encoding.
    with open(file_name, "r", encoding="utf-8") as json_data:
        return json.load(json_data)
Returns the parsed JSON contents of the file (e.g. a dict or list).
32,602
def merge(left, right):
    """Deep-merge the dictionary on the left with the one on the right.

    Existing values in ``left`` win, except where they are ``None`` or
    where both sides are dicts (merged recursively). ``left`` is
    modified in place and returned. Non-dict inputs are returned
    unchanged (the left one).
    """
    if not (isinstance(left, dict) and isinstance(right, dict)):
        return left

    for key, value in right.items():
        if key not in left or left[key] is None:
            left[key] = value
        else:
            left[key] = merge(left[key], value)

    return left
deep merge dictionary on the left with the one on the right .
32,603
def shutdown(self):
    """Shutdown factory.

    Shuts down every tracked connection, terminates the context and
    removes the reactor shutdown trigger if one was registered.
    """
    # Iterate over a copy; connection.shutdown() may mutate the collection.
    for connection in self.connections.copy():
        connection.shutdown()
    self.connections = None

    self.context.term()
    self.context = None

    if self.trigger:
        try:
            self.reactor.removeSystemEventTrigger(self.trigger)
        except Exception:
            # Trigger may already have fired or been removed; best effort.
            pass
Shutdown factory .
32,604
def publish(self, message, tag=b''):
    """Publish ``message`` with the specified ``tag``.

    The frame layout is ``tag + NUL + message``.
    """
    framed = b'\0'.join((tag, message))
    self.send(framed)
Publish message with specified tag .
32,605
def _getNextId ( self ) : if not self . _uuids : for _ in range ( self . UUID_POOL_GEN_SIZE ) : self . _uuids . append ( uuid . uuid4 ( ) . bytes ) return self . _uuids . pop ( )
Returns an unique id .
32,606
def _releaseId ( self , msgId ) : self . _uuids . append ( msgId ) if len ( self . _uuids ) > 2 * self . UUID_POOL_GEN_SIZE : self . _uuids [ - self . UUID_POOL_GEN_SIZE : ] = [ ]
Release message ID to the pool .
32,607
def _cancel ( self , msgId ) : _ , canceller = self . _requests . pop ( msgId , ( None , None ) ) if canceller is not None and canceller . active ( ) : canceller . cancel ( )
Cancel an outstanding request ; drop the reply silently .
32,608
def _timeoutRequest ( self , msgId ) : d , _ = self . _requests . pop ( msgId , ( None , None ) ) if not d . called : d . errback ( ZmqRequestTimeoutError ( msgId ) )
Cancel a timed - out request .
32,609
def sendMsg ( self , * messageParts , ** kwargs ) : messageId = self . _getNextId ( ) d = defer . Deferred ( canceller = lambda _ : self . _cancel ( messageId ) ) timeout = kwargs . pop ( 'timeout' , None ) if timeout is None : timeout = self . defaultRequestTimeout assert len ( kwargs ) == 0 , "Unsupported keyword arg...
Send request and deliver response back when available .
32,610
def reply(self, messageId, *messageParts):
    """Send a reply to the request with the specified ``messageId``.

    The routing info stored for the request is consumed (popped) and
    prepended, followed by the id, an empty delimiter frame and the
    message parts.
    """
    routingInfo = self._routingInfo.pop(messageId)
    frames = list(routingInfo)
    frames.append(messageId)
    frames.append(b'')
    frames.extend(messageParts)
    self.send(frames)
Send reply to request with specified messageId .
32,611
def load(self, path):
    """Load and return the corpus from the given path.

    Raises:
        IOError: If any of the required files are missing at ``path``.
    """
    missing_files = self._check_for_missing_files(path)

    if len(missing_files) > 0:
        raise IOError('Invalid data set of type {}: files {} not found at {}'.format(
            self.type(), ' '.join(missing_files), path))

    return self._load(path)
Load and return the corpus from the given path .
32,612
def get_ids_from_folder(path, part_name):
    """Return all ids from the given folder which have a corresponding
    beamformedSignal file.

    Ids listed in ``BAD_FILES[part_name]`` are excluded.
    """
    basenames = (os.path.splitext(os.path.basename(xml_file))[0]
                 for xml_file in glob.glob(os.path.join(path, '*.xml')))
    return {idx for idx in basenames if idx not in BAD_FILES[part_name]}
Return all ids from the given folder which have a corresponding beamformedSignal file .
32,613
def load_file ( folder_path , idx , corpus ) : xml_path = os . path . join ( folder_path , '{}.xml' . format ( idx ) ) wav_paths = glob . glob ( os . path . join ( folder_path , '{}_*.wav' . format ( idx ) ) ) if len ( wav_paths ) == 0 : return [ ] xml_file = open ( xml_path , 'r' , encoding = 'utf-8' ) soup = Beautifu...
Load speaker file utterance labels for the file with the given id .
32,614
def read_file ( path ) : gen = textfile . read_separated_lines_generator ( path , max_columns = 6 , ignore_lines_starting_with = [ ';;' ] ) utterances = collections . defaultdict ( list ) for record in gen : values = record [ 1 : len ( record ) ] for i in range ( len ( values ) ) : if i == 1 or i == 2 or i == 4 : value...
Reads a ctm file .
32,615
def split_identifiers ( identifiers = [ ] , proportions = { } ) : abs_proportions = absolute_proportions ( proportions , len ( identifiers ) ) parts = { } start_index = 0 for idx , proportion in abs_proportions . items ( ) : parts [ idx ] = identifiers [ start_index : start_index + proportion ] start_index += proportio...
Split the given identifiers by the given proportions .
32,616
def select_balanced_subset ( items , select_count , categories , select_count_values = None , seed = None ) : rand = random . Random ( ) rand . seed ( seed ) if select_count_values is None : select_count_values = { item_id : 1 for item_id in items . keys ( ) } if sum ( select_count_values . values ( ) ) < select_count ...
Select items so the summed category weights are balanced . Each item has a dictionary containing the category weights . Items are selected until select_count is reached . The value that is added to select_count for an item can be defined in the dictionary select_count_values . If this is not defined it is assumed to be...
32,617
def length_of_overlap ( first_start , first_end , second_start , second_end ) : if first_end <= second_start or first_start >= second_end : return 0.0 if first_start < second_start : if first_end < second_end : return abs ( first_end - second_start ) else : return abs ( second_end - second_start ) if first_start > seco...
Find the length of the overlapping part of two segments .
32,618
def find_missing_projections ( label_list , projections ) : unmapped_combinations = set ( ) if WILDCARD_COMBINATION in projections : return [ ] for labeled_segment in label_list . ranges ( ) : combination = tuple ( sorted ( [ label . value for label in labeled_segment [ 2 ] ] ) ) if combination not in projections : unm...
Finds all combinations of labels in label_list that are not covered by an entry in the dictionary of projections . Returns a list containing tuples of uncovered label combinations or an empty list if there are none . All uncovered label combinations are naturally sorted .
32,619
def load_projections ( projections_file ) : projections = { } for parts in textfile . read_separated_lines_generator ( projections_file , '|' ) : combination = tuple ( sorted ( [ label . strip ( ) for label in parts [ 0 ] . split ( ' ' ) ] ) ) new_label = parts [ 1 ] . strip ( ) projections [ combination ] = new_label ...
Loads projections defined in the given projections_file .
32,620
def random_subset_by_duration ( self , relative_duration , balance_labels = False , label_list_ids = None ) : total_duration = self . corpus . total_duration subset_duration = relative_duration * total_duration utterance_durations = { utt_idx : utt . duration for utt_idx , utt in self . corpus . utterances . items ( ) ...
Create a subview of random utterances with an approximate duration relative to the full corpus . Random utterances are selected so that the sum of all utterance durations equals the relative duration of the full corpus .
32,621
def random_subsets ( self , relative_sizes , by_duration = False , balance_labels = False , label_list_ids = None ) : resulting_sets = { } next_bigger_subset = self . corpus for relative_size in reversed ( relative_sizes ) : generator = SubsetGenerator ( next_bigger_subset , random_seed = self . random_seed ) if by_dur...
Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus . Basically the same as calling random_subset or random_subset_by_duration multiple times with different values . But this method makes sure that every subset contains only utterances that are also contained in the next b...
32,622
def remove_punctuation(text, exceptions=None):
    """Return a string with punctuation removed.

    Every character that is neither a word character nor whitespace is
    stripped, except characters listed in ``exceptions``.

    Args:
        text (str): The string to clean.
        exceptions (list): Extra regex character fragments to keep.

    Returns:
        str: The cleaned string.
    """
    # Fix: the original used a mutable default argument (exceptions=[]).
    # None now stands in for "no exceptions"; behavior is unchanged.
    all_but = [r'\w', r'\s']
    if exceptions:
        all_but.extend(exceptions)
    pattern = '[^{}]'.format(''.join(all_but))
    return re.sub(pattern, '', text)
Return a string with punctuation removed .
32,623
def starts_with_prefix_in_list(text, prefixes):
    """Return True if ``text`` starts with one of the given prefixes,
    otherwise False."""
    return any(text.startswith(prefix) for prefix in prefixes)
Return True if the given string starts with one of the prefixes in the given list otherwise return False .
32,624
def _load_audio_list ( self , path ) : result = { } for entry in textfile . read_separated_lines_generator ( path , separator = '\t' , max_columns = 4 ) : for i in range ( len ( entry ) ) : if entry [ i ] == '\\N' : entry [ i ] = None if len ( entry ) < 4 : entry . extend ( [ None ] * ( 4 - len ( entry ) ) ) if not sel...
Load and filter the audio list .
32,625
def _load_sentence_list(self, path):
    """Load and filter the sentence list.

    Keeps only entries whose language is in ``self.include_languages``
    (all entries when that filter is None). Maps each sentence id to
    the remaining columns.
    """
    result = {}
    entries = textfile.read_separated_lines_generator(
        path, separator='\t', max_columns=3)

    for entry in entries:
        language = entry[1]
        if self.include_languages is None or language in self.include_languages:
            result[entry[0]] = entry[1:]

    return result
Load and filter the sentence list .
32,626
def _download_audio_files ( self , records , target_path ) : for record in records : audio_folder = os . path . join ( target_path , 'audio' , record [ 2 ] ) audio_file = os . path . join ( audio_folder , '{}.mp3' . format ( record [ 0 ] ) ) os . makedirs ( audio_folder , exist_ok = True ) download_url = 'https://audio...
Download all audio files based on the given records .
32,627
def split_by_proportionally_distribute_labels ( self , proportions = { } , use_lengths = True ) : identifiers = { } for utterance in self . corpus . utterances . values ( ) : if use_lengths : identifiers [ utterance . idx ] = { l : int ( d * 100 ) for l , d in utterance . label_total_duration ( ) . items ( ) } else : i...
Split the corpus into subsets so the occurrence of the labels is distributed amongst the subsets according to the given proportions .
32,628
def _subviews_from_utterance_splits ( self , splits ) : subviews = { } for idx , subview_utterances in splits . items ( ) : filter = subview . MatchingUtteranceIdxFilter ( utterance_idxs = subview_utterances ) split = subview . Subview ( self . corpus , filter_criteria = filter ) subviews [ idx ] = split return subview...
Create subviews from a dict containing utterance - ids for each subview .
32,629
def sampling_rate(self):
    """Return the sampling rate of the track stored in the container."""
    with self.container.open_if_needed(mode='r') as cnt:
        entry = cnt.get(self.key)
    # The container stores (samples, sampling_rate) pairs.
    return entry[1]
Return the sampling rate .
32,630
def duration(self):
    """Return the duration of the track in seconds."""
    with self.container.open_if_needed(mode='r') as cnt:
        samples, sr = cnt.get(self.key)

    # Number of samples divided by the sampling rate gives seconds.
    return samples.shape[0] / sr
Return the duration in seconds .
32,631
def read_samples ( self , sr = None , offset = 0 , duration = None ) : with self . container . open_if_needed ( mode = 'r' ) as cnt : samples , native_sr = cnt . get ( self . key ) start_sample_index = int ( offset * native_sr ) if duration is None : end_sample_index = samples . shape [ 0 ] else : end_sample_index = in...
Return the samples from the track in the container . Uses librosa for resampling if needed .
32,632
def write_json_to_file(path, data):
    """Serialize ``data`` as JSON and write it to the file at ``path``
    using UTF-8."""
    with open(path, mode='w', encoding='utf-8') as outfile:
        json.dump(data, outfile)
Writes data as json to file .
32,633
def read_json_file(path):
    """Read and return the parsed data from the JSON file at ``path``."""
    with open(path, mode='r', encoding='utf-8') as infile:
        return json.load(infile)
Reads and return the data from the json file at the given path .
32,634
def add(self, label):
    """Add a label to the list and index it in the interval tree."""
    # Back-reference so the label knows its owning list.
    label.label_list = self
    self.label_tree.addi(label.start, label.end, label)
Add a label to the end of the list .
32,635
def merge_overlaps ( self , threshold = 0.0 ) : updated_labels = [ ] all_intervals = self . label_tree . copy ( ) def recursive_overlaps ( interval ) : range_start = interval . begin - threshold range_end = interval . end + threshold direct_overlaps = all_intervals . overlap ( range_start , range_end ) all_overlaps = [...
Merge overlapping labels with the same value . Two labels are considered overlapping if l2 . start - l1 . end < threshold .
32,636
def label_total_duration(self):
    """Return a dict mapping each distinct label value to the summed
    duration of all its occurrences."""
    totals = collections.defaultdict(float)

    for lbl in self:
        totals[lbl.value] = totals[lbl.value] + lbl.duration

    return totals
Return for each distinct label value the total duration of all occurrences .
32,637
def label_values(self):
    """Return a sorted list of all distinct label values in the list."""
    distinct = {label.value for label in self}
    return sorted(distinct)
Return a list of all occurring label values .
32,638
def label_count(self):
    """Return a dict mapping each label value to its number of
    occurrences within the list."""
    occurrences = collections.defaultdict(int)

    for lbl in self:
        occurrences[lbl.value] = occurrences[lbl.value] + 1

    return occurrences
Return for each label the number of occurrences within the list .
32,639
def all_tokens(self, delimiter=' '):
    """Return a set of all tokens occurring in the label-list."""
    tokens = set()
    for label in self:
        tokens.update(label.tokenized(delimiter=delimiter))
    return tokens
Return a list of all tokens occurring in the label - list .
32,640
def join ( self , delimiter = ' ' , overlap_threshold = 0.1 ) : sorted_by_start = sorted ( self . labels ) concat_values = [ ] last_label_end = None for label in sorted_by_start : if last_label_end is None or ( last_label_end - label . start < overlap_threshold and last_label_end > 0 ) : concat_values . append ( label ...
Return a string with all labels concatenated together . The order of the labels is defined by the start of the label . If the overlapping between two labels is greater than overlap_threshold an Exception is thrown .
32,641
def separated(self):
    """Create a separate LabelList for every distinct label value.

    Returns:
        dict: Mapping from label value to a LabelList holding only the
        labels with that value; each list inherits this list's idx.
    """
    separated_lls = collections.defaultdict(LabelList)

    for label in self.labels:
        separated_lls[label.value].add(label)

    for label_list in separated_lls.values():
        label_list.idx = self.idx

    return separated_lls
Create a separate Label - List for every distinct label - value .
32,642
def labels_in_range(self, start, end, fully_included=False):
    """Return a list of labels within the given range.

    With ``fully_included`` only labels completely inside the range are
    returned; otherwise overlapping labels are included as well.
    """
    query = self.label_tree.envelop if fully_included else self.label_tree.overlap
    return [interval.data for interval in query(start, end)]
Return a list of labels that are within the given range . Also labels that only overlap are included .
32,643
def ranges ( self , yield_ranges_without_labels = False , include_labels = None ) : tree_copy = self . label_tree . copy ( ) if include_labels is not None : for iv in list ( tree_copy ) : if iv . data . value not in include_labels : tree_copy . remove ( iv ) def reduce ( x , y ) : x . append ( y ) return x tree_copy . ...
Generate all ranges of the label - list . A range is defined as a part of the label - list for which the same labels are defined .
32,644
def create_single(cls, value, idx='default'):
    """Create a label-list with a single label holding ``value``.

    Fix: use ``cls`` instead of hard-coding ``LabelList`` so subclasses
    get an instance of their own type from this alternate constructor.
    """
    return cls(idx=idx, labels=[Label(value=value)])
Create a label - list with a single label containing the given value .
32,645
def get_folders(path):
    """Return a list of all subfolder paths within the given path."""
    entries = (os.path.join(path, item) for item in os.listdir(path))
    return [entry for entry in entries if os.path.isdir(entry)]
Return a list of all subfolder - paths in the given path .
32,646
def load_tag ( corpus , path ) : tag_idx = os . path . basename ( path ) data_path = os . path . join ( path , 'by_book' ) tag_utt_ids = [ ] for gender_path in MailabsReader . get_folders ( data_path ) : if os . path . basename ( gender_path ) == 'mix' : utt_ids = MailabsReader . load_books_of_speaker ( corpus , gender...
Iterate over all speakers and load them . Collect all utterance - idx and create a subset of them .
32,647
def load_speaker ( corpus , path ) : base_path , speaker_name = os . path . split ( path ) base_path , gender_desc = os . path . split ( base_path ) base_path , __ = os . path . split ( base_path ) base_path , tag = os . path . split ( base_path ) gender = issuers . Gender . UNKNOWN if gender_desc == 'male' : gender = ...
Create a speaker instance for the given path .
32,648
def load_books_of_speaker ( corpus , path , speaker ) : utt_ids = [ ] for book_path in MailabsReader . get_folders ( path ) : meta_path = os . path . join ( book_path , 'metadata.csv' ) wavs_path = os . path . join ( book_path , 'wavs' ) meta = textfile . read_separated_lines ( meta_path , separator = '|' , max_columns...
Load all utterances for the speaker at the given path .
32,649
def open(self, mode=None):
    """Open the container file if it is not already open.

    Args:
        mode (str): One of 'r', 'w', 'a'; defaults to ``self.mode``.

    Raises:
        ValueError: If an invalid mode is passed explicitly.
    """
    if mode is None:
        mode = self.mode
    elif mode not in ('r', 'w', 'a'):
        raise ValueError('Invalid mode! Modes: [\'a\', \'r\', \'w\']')

    if self._file is None:
        self._file = h5py.File(self.path, mode=mode)
Open the container file .
32,650
def open_if_needed(self, mode=None):
    """Context manager that opens the container if necessary.

    Only closes the container again if it was opened by this context.
    """
    was_open = self.is_open()

    if not was_open:
        self.open(mode=mode)

    try:
        yield self
    finally:
        # Leave the container in the state we found it in.
        if not was_open:
            self.close()
Convenience context - manager for the use with with . Opens the container if not already done . Only closes the container if it was opened within this context .
32,651
def get(self, key, mem_map=True):
    """Read and return the data stored for the given key.

    Returns the dataset as-is (memory-mapped) unless ``mem_map`` is
    False, in which case the data is materialized into memory.
    Returns None for unknown keys.
    """
    self.raise_error_if_not_open()

    if key not in self._file:
        return None

    data = self._file[key]

    if not mem_map:
        # Load the dataset fully into memory.
        data = data[()]

    return data
Read and return the data stored for the given key .
32,652
def remove(self, key):
    """Remove the data stored for the given key, if present."""
    self.raise_error_if_not_open()

    try:
        del self._file[key]
    except KeyError:
        # Unknown keys are silently ignored, as before.
        pass
Remove the data stored for the given key .
32,653
def end_abs(self):
    """Return the absolute end of the utterance relative to the signal.

    An end of ``inf`` means "until the end of the track", so the track
    duration is returned in that case.
    """
    return self.track.duration if self.end == float('inf') else self.end
Return the absolute end of the utterance relative to the signal .
32,654
def num_samples(self, sr=None):
    """Return the number of samples of the utterance.

    If ``sr`` is given the count is scaled to that target sampling
    rate (rounded up), otherwise the native rate is used.
    """
    native_sr = self.sampling_rate
    count = units.seconds_to_sample(self.duration, native_sr)

    if sr is not None:
        ratio = float(sr) / native_sr
        count = int(np.ceil(count * ratio))

    return count
Return the number of samples .
32,655
def read_samples ( self , sr = None , offset = 0 , duration = None ) : read_duration = self . duration if offset > 0 and read_duration is not None : read_duration -= offset if duration is not None : if read_duration is None : read_duration = duration else : read_duration = min ( duration , read_duration ) return self ....
Read the samples of the utterance .
32,656
def set_label_list(self, label_lists):
    """Set the given label-list(s) for this utterance.

    A single LabelList may be passed instead of a list. Lists without
    an idx get 'default'; an existing list with the same idx is
    overridden.
    """
    if isinstance(label_lists, annotations.LabelList):
        label_lists = [label_lists]

    for label_list in label_lists:
        if label_list.idx is None:
            label_list.idx = 'default'

        label_list.utterance = self
        self.label_lists[label_list.idx] = label_list
Set the given label - list for this utterance . If the label - list - idx is not set default is used . If there is already a label - list with the given idx it will be overridden .
32,657
def all_label_values(self, label_list_ids=None):
    """Return a set of all label values occurring in this utterance.

    Args:
        label_list_ids (list): If given, only label-lists with these
            ids are considered.
    """
    values = set()

    for label_list in self.label_lists.values():
        if label_list_ids is None or label_list.idx in label_list_ids:
            values.update(label_list.label_values())

    return values
Return a set of all label - values occurring in this utterance .
32,658
def label_count ( self , label_list_ids = None ) : count = collections . defaultdict ( int ) for label_list in self . label_lists . values ( ) : if label_list_ids is None or label_list . idx in label_list_ids : for label_value , label_count in label_list . label_count ( ) . items ( ) : count [ label_value ] += label_co...
Return a dictionary containing the number of times every label - value in this utterance is occurring .
32,659
def all_tokens(self, delimiter=' ', label_list_ids=None):
    """Return a set of all tokens occurring in the label-lists of this
    utterance, optionally restricted to the given label-list ids."""
    tokens = set()

    for label_list in self.label_lists.values():
        if label_list_ids is not None and label_list.idx not in label_list_ids:
            continue
        tokens.update(label_list.all_tokens(delimiter=delimiter))

    return tokens
Return a list of all tokens occurring in one of the labels in the label - lists .
32,660
def label_total_duration ( self , label_list_ids = None ) : duration = collections . defaultdict ( float ) for label_list in self . label_lists . values ( ) : if label_list_ids is None or label_list . idx in label_list_ids : for label_value , label_duration in label_list . label_total_duration ( ) . items ( ) : duratio...
Return a dictionary containing the number of seconds every label - value is occurring in this utterance .
32,661
def stats(self):
    """Return statistics calculated over all features in the container."""
    self.raise_error_if_not_open()
    per_key = self.stats_per_key()
    return stats.DataStats.concatenate(per_key.values())
Return statistics calculated overall features in the container .
32,662
def stats_per_key(self):
    """Return statistics calculated separately for every key in the
    container."""
    self.raise_error_if_not_open()
    all_stats = {}

    for key, dset in self._file.items():
        values = dset[()]  # materialize the dataset into memory
        all_stats[key] = stats.DataStats(float(np.mean(values)),
                                         float(np.var(values)),
                                         np.min(values),
                                         np.max(values),
                                         values.size)

    return all_stats
Return statistics calculated for each key in the container .
32,663
def all_label_values(self, label_list_ids=None):
    """Return a set of all label values occurring in this corpus."""
    values = set()

    for utterance in self.utterances.values():
        values.update(utterance.all_label_values(label_list_ids=label_list_ids))

    return values
Return a set of all label - values occurring in this corpus .
32,664
def label_count(self, label_list_ids=None):
    """Return a dict with the number of occurrences of every label
    value in this corpus."""
    count = collections.defaultdict(int)

    for utterance in self.utterances.values():
        per_utt = utterance.label_count(label_list_ids=label_list_ids)
        for value, n in per_utt.items():
            count[value] += n

    return count
Return a dictionary containing the number of times every label - value in this corpus is occurring .
32,665
def label_durations(self, label_list_ids=None):
    """Return a dict with the total duration (seconds) of every label
    value in this corpus.

    Args:
        label_list_ids (list): If given, only label-lists with these
            ids are considered.

    Consistency fix: durations are float seconds, so the accumulator
    defaults to float (like the per-utterance ``label_total_duration``)
    instead of int.
    """
    duration = collections.defaultdict(float)

    for utterance in self.utterances.values():
        per_utt = utterance.label_total_duration(label_list_ids=label_list_ids)
        for value, seconds in per_utt.items():
            duration[value] += seconds

    return duration
Return a dictionary containing the total duration every label - value in this corpus is occurring .
32,666
def all_tokens(self, delimiter=' ', label_list_ids=None):
    """Return a set of all tokens occurring in the labels of this
    corpus."""
    tokens = set()

    for utterance in self.utterances.values():
        utt_tokens = utterance.all_tokens(delimiter=delimiter,
                                          label_list_ids=label_list_ids)
        tokens.update(utt_tokens)

    return tokens
Return a list of all tokens occurring in one of the labels in the corpus .
32,667
def total_duration(self):
    """Return the total amount of audio in seconds, summed over all
    utterances in the corpus."""
    return sum(utterance.duration for utterance in self.utterances.values())
Return the total amount of audio summed over all utterances in the corpus in seconds .
32,668
def stats(self):
    """Return statistics calculated over the samples of all utterances
    in the corpus."""
    per_utt = self.stats_per_utterance()
    return stats.DataStats.concatenate(per_utt.values())
Return statistics calculated overall samples of all utterances in the corpus .
32,669
def stats_per_utterance(self):
    """Return statistics calculated separately for the samples of every
    utterance in the corpus."""
    all_stats = {}

    for utterance in self.utterances.values():
        samples = utterance.read_samples()
        all_stats[utterance.idx] = stats.DataStats(float(np.mean(samples)),
                                                   float(np.var(samples)),
                                                   np.min(samples),
                                                   np.max(samples),
                                                   samples.size)

    return all_stats
Return statistics calculated for all samples of each utterance in the corpus .
32,670
def split_utterances_to_max_time ( self , max_time = 60.0 , overlap = 0.0 ) : from audiomate . corpus import Corpus result = Corpus ( ) tracks = copy . deepcopy ( list ( self . tracks . values ( ) ) ) result . import_tracks ( tracks ) issuers = copy . deepcopy ( list ( self . issuers . values ( ) ) ) result . import_is...
Create a new corpus where all the utterances are of given maximal duration . Utterance longer than max_time are split up into multiple utterances .
32,671
def get_utt_regions ( self ) : regions = [ ] current_offset = 0 for utt_idx in sorted ( self . utt_ids ) : offset = current_offset num_frames = [ ] refs = [ ] for cnt in self . containers : num_frames . append ( cnt . get ( utt_idx ) . shape [ 0 ] ) refs . append ( cnt . get ( utt_idx , mem_map = True ) ) if len ( set ...
Return the regions of all utterances assuming all utterances are concatenated . It is assumed that the utterances are sorted in ascending order for concatenation .
32,672
def get ( self , key , mem_map = True ) : self . raise_error_if_not_open ( ) if key in self . _file : data = self . _file [ key ] sampling_rate = data . attrs [ SAMPLING_RATE_ATTR ] if not mem_map : data = data [ ( ) ] data = np . float32 ( data ) / MAX_INT16_VALUE return data , sampling_rate
Return the samples for the given key and the sampling - rate .
32,673
def set ( self , key , samples , sampling_rate ) : if not np . issubdtype ( samples . dtype , np . floating ) : raise ValueError ( 'Samples are required as np.float32!' ) if len ( samples . shape ) > 1 : raise ValueError ( 'Only single channel supported!' ) self . raise_error_if_not_open ( ) if key in self . _file : de...
Set the samples and sampling - rate for the given key . Existing data will be overwritten . The samples have to have np . float32 datatype and values in the range of - 1 . 0 and 1 . 0 .
32,674
def append ( self , key , samples , sampling_rate ) : if not np . issubdtype ( samples . dtype , np . floating ) : raise ValueError ( 'Samples are required as np.float32!' ) if len ( samples . shape ) > 1 : raise ValueError ( 'Only single channel supported!' ) existing = self . get ( key , mem_map = True ) samples = ( ...
Append the given samples to the data that already exists in the container for the given key .
32,675
def available_files ( url ) : req = requests . get ( url ) if req . status_code != 200 : raise base . FailedDownloadException ( 'Failed to download data (status {}) from {}!' . format ( req . status_code , url ) ) page_content = req . text link_pattern = re . compile ( r'<a href="(.*?)">(.*?)</a>' ) available_files = [...
Extract and return urls for all available . tgz files .
32,676
def download_files ( file_urls , target_path ) : os . makedirs ( target_path , exist_ok = True ) downloaded_files = [ ] for file_url in file_urls : req = requests . get ( file_url ) if req . status_code != 200 : raise base . FailedDownloadException ( 'Failed to download file {} (status {})!' . format ( req . status_cod...
Download all files and store to the given path .
32,677
def extract_files ( file_paths , target_path ) : os . makedirs ( target_path , exist_ok = True ) extracted = [ ] for file_path in file_paths : with tarfile . open ( file_path , 'r' ) as archive : archive . extractall ( target_path ) file_name = os . path . splitext ( os . path . basename ( file_path ) ) [ 0 ] extracted...
Unpack all files to the given path .
32,678
def index_name_if_in_list(name, name_list, suffix='', prefix=''):
    """Find a unique name by appending an increasing index so the result
    does not occur in ``name_list``.

    The candidate pattern is ``{name}_{prefix}{index}{suffix}``.
    """
    candidate = '{}'.format(name)
    index = 1

    while candidate in name_list:
        candidate = '{}_{}{}{}'.format(name, prefix, index, suffix)
        index += 1

    return candidate
Find a unique name by adding an index to the name so it is unique within the given list .
32,679
def generate_name(length=15, not_in=None):
    """Generate a random string of lowercase letters with the given
    length.

    If ``not_in`` is given, regenerate until the name is not contained
    in it.
    """
    def draw():
        return ''.join(random.choice(string.ascii_lowercase)
                       for _ in range(length))

    value = draw()
    while not_in is not None and value in not_in:
        value = draw()
    return value
Generates a random string of lowercase letters with the given length .
32,680
def create_downloader_of_type(type_name):
    """Create an instance of the downloader with the given name.

    Raises:
        UnknownDownloaderException: If no downloader is registered
        under ``type_name``.
    """
    downloaders = available_downloaders()

    # Idiom: membership test directly on the dict, no .keys() needed.
    if type_name not in downloaders:
        raise UnknownDownloaderException('Unknown downloader: %s' % (type_name,))

    return downloaders[type_name]()
Create an instance of the downloader with the given name .
32,681
def create_reader_of_type(type_name):
    """Create an instance of the reader with the given name.

    Raises:
        UnknownReaderException: If no reader is registered under
        ``type_name``.
    """
    readers = available_readers()

    # Idiom: membership test directly on the dict, no .keys() needed.
    if type_name not in readers:
        raise UnknownReaderException('Unknown reader: %s' % (type_name,))

    return readers[type_name]()
Create an instance of the reader with the given name .
32,682
def create_writer_of_type(type_name):
    """Create an instance of the writer with the given name.

    Raises:
        UnknownWriterException: If no writer is registered under
        ``type_name``.
    """
    writers = available_writers()

    # Idiom: membership test directly on the dict, no .keys() needed.
    if type_name not in writers:
        raise UnknownWriterException('Unknown writer: %s' % (type_name,))

    return writers[type_name]()
Create an instance of the writer with the given name .
32,683
def serialize(self):
    """Return a string representation of the subview: for every filter
    criterion its name followed by its serialized value, one per line."""
    lines = []

    for criterion in self.filter_criteria:
        lines.extend((criterion.name(), criterion.serialize()))

    return '\n'.join(lines)
Return a string representing the subview with all of its filter criteria .
32,684
def stft_from_frames ( frames , window = 'hann' , dtype = np . complex64 ) : win_length = frames . shape [ 0 ] n_fft = win_length fft_window = filters . get_window ( window , win_length , fftbins = True ) fft_window = fft_window . reshape ( ( - 1 , 1 ) ) stft_matrix = np . empty ( ( int ( 1 + n_fft // 2 ) , frames . sh...
Variation of the librosa . core . stft function that computes the short - time - fourier - transform from frames instead of from the signal .
32,685
def validate(self, corpus):
    """Run all validators against the corpus and combine their results.

    The combined result only passes if every sub-validator passed.
    """
    results = {}
    passed = True

    for validator in self.validators:
        sub_result = validator.validate(corpus)
        results[validator.name()] = sub_result
        passed = passed and sub_result.passed

    return CombinedValidationResult(passed, results)
Perform validation on the given corpus .
32,686
def feature_scp_generator(path):
    """Yield (utterance-id, feature-matrix) pairs for every entry
    defined in a Kaldi scp file."""
    scp_entries = textfile.read_key_value_lines(path, separator=' ')

    for utterance_id, rx_specifier in scp_entries.items():
        yield utterance_id, KaldiWriter.read_float_matrix(rx_specifier)
Return a generator over all feature matrices defined in a scp .
32,687
def read_float_matrix ( rx_specifier ) : path , offset = rx_specifier . strip ( ) . split ( ':' , maxsplit = 1 ) offset = int ( offset ) sample_format = 4 with open ( path , 'rb' ) as f : f . seek ( offset ) binary = f . read ( 2 ) assert ( binary == b'\x00B' ) format = f . read ( 3 ) assert ( format == b'FM ' ) f . re...
Return float matrix as np array for the given rx specifier .
32,688
def reload ( self ) : utt_ids = sorted ( self . utt_ids ) if self . shuffle : self . rand . shuffle ( utt_ids ) partitions = [ ] current_partition = PartitionInfo ( ) for utt_id in utt_ids : utt_size = self . utt_sizes [ utt_id ] utt_lengths = self . utt_lengths [ utt_id ] if current_partition . size + utt_size > self ...
Create a new partition scheme . A scheme defines which utterances are in which partition . The scheme only changes after every call if self . shuffle == True .
32,689
def load_partition_data(self, index):
    """Load and return the partition with the given index.

    Reads the full data of every utterance in the partition from all
    containers into memory.
    """
    info = self.partitions[index]
    data = PartitionData(info)

    for utt_id in info.utt_ids:
        # One fully materialized ([:]) entry per container.
        per_container = [cnt._file[utt_id][:] for cnt in self.containers]
        data.utt_data.append(per_container)

    return data
Load and return the partition with the given index .
32,690
def _raise_error_if_container_is_missing_an_utterance ( self ) : expected_keys = frozenset ( self . utt_ids ) for cnt in self . containers : keys = set ( cnt . keys ( ) ) if not keys . issuperset ( expected_keys ) : raise ValueError ( 'Container is missing data for some utterances!' )
Check if there is a dataset for every utterance in every container otherwise raise an error .
32,691
def _scan ( self ) : utt_sizes = { } for dset_name in self . utt_ids : per_container = [ ] for cnt in self . containers : dset = cnt . _file [ dset_name ] dtype_size = dset . dtype . itemsize record_size = dtype_size * dset . size per_container . append ( record_size ) utt_size = sum ( per_container ) if utt_size > sel...
For every utterance calculate the size it will need in memory .
32,692
def _get_all_lengths ( self ) : utt_lengths = { } for utt_idx in self . utt_ids : per_container = [ c . _file [ utt_idx ] . shape [ 0 ] for c in self . containers ] utt_lengths [ utt_idx ] = tuple ( per_container ) return utt_lengths
For every utterance get the length of the data in every container . Return a list of tuples .
32,693
def update ( self , data , offset , is_last , buffer_index = 0 ) : if buffer_index >= self . num_buffers : raise ValueError ( 'Expected buffer index < {} but got index {}.' . format ( self . num_buffers , buffer_index ) ) if self . buffers [ buffer_index ] is not None and self . buffers [ buffer_index ] . shape [ 0 ] >...
Update the buffer at the given index .
32,694
def get ( self ) : chunk_size = self . _smallest_buffer ( ) all_full = self . _all_full ( ) if all_full : right_context = 0 num_frames = chunk_size - self . current_left_context else : right_context = self . right_context num_frames = self . min_frames chunk_size_needed = num_frames + self . current_left_context + righ...
Get a new chunk if available .
32,695
def _smallest_buffer ( self ) : smallest = np . inf for buffer in self . buffers : if buffer is None : return 0 elif buffer . shape [ 0 ] < smallest : smallest = buffer . shape [ 0 ] return smallest
Get the size of the smallest buffer .
32,696
def process_frames ( self , data , sampling_rate , offset = 0 , last = False , utterance = None , corpus = None ) : if offset == 0 : self . steps_sorted = list ( nx . algorithms . dag . topological_sort ( self . graph ) ) self . _create_buffers ( ) self . _define_output_buffers ( ) self . _update_buffers ( None , data ...
Execute the processing of this step and all dependent parent steps .
32,697
def _update_buffers(self, from_step, data, offset, is_last):
    """Forward ``data`` produced by ``from_step`` to the buffers of all
    dependent steps.

    ``from_step is None`` means the data is the pipeline input. For
    reduction steps the buffer index encodes which parent produced the
    data.
    """
    for to_step, buffer in self.target_buffers[from_step]:
        if isinstance(to_step, Reduction):
            parent_index = to_step.parents.index(from_step)
        else:
            parent_index = 0

        buffer.update(data, offset, is_last, buffer_index=parent_index)
Update the buffers of all steps that need data from from_step . If from_step is None it means the data is the input data .
32,698
def _define_output_buffers ( self ) : self . target_buffers = { None : [ ( step , self . buffers [ step ] ) for step in self . _get_input_steps ( ) ] } for step in self . steps_sorted : if step != self : child_steps = [ edge [ 1 ] for edge in self . graph . out_edges ( step ) ] self . target_buffers [ step ] = [ ( chil...
Prepare a dictionary so we know which buffers have to be updated with the output of every step .
32,699
def _get_input_steps ( self ) : input_steps = [ ] for step in self . steps_sorted : parent_steps = self . _parent_steps ( step ) if len ( parent_steps ) == 0 : input_steps . append ( step ) return input_steps
Search and return all steps that have no parents . These are the steps that get the input data .