idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
20,000 | def _fast_hit_windows ( ref , est , window ) : ref = np . asarray ( ref ) est = np . asarray ( est ) ref_idx = np . argsort ( ref ) ref_sorted = ref [ ref_idx ] left_idx = np . searchsorted ( ref_sorted , est - window , side = 'left' ) right_idx = np . searchsorted ( ref_sorted , est + window , side = 'right' ) hit_ref , hit_est = [ ] , [ ] for j , ( start , end ) in enumerate ( zip ( left_idx , right_idx ) ) : hit_ref . extend ( ref_idx [ start : end ] ) hit_est . extend ( [ j ] * ( end - start ) ) return hit_ref , hit_est | Fast calculation of windowed hits for time events . |
def validate_events(events, max_time=30000.):
    """Checks that a 1-d event location ndarray is well-formed, and raises
    errors if not.

    Parameters
    ----------
    events : np.ndarray
        Array of event times, expected 1-d and non-decreasing.
    max_time : float
        Largest plausible event time; anything bigger suggests the caller
        passed samples/ms instead of seconds.

    Raises
    ------
    ValueError
        If any check fails.
    """
    # Guard against unit mistakes (e.g. milliseconds instead of seconds).
    if (events > max_time).any():
        raise ValueError('An event at time {} was found which is greater than '
                         'the maximum allowable time of max_time = {} (did you'
                         ' supply event times in '
                         'seconds?)'.format(events.max(), max_time))
    if events.ndim != 1:
        raise ValueError('Event times should be 1-d numpy ndarray, '
                         'but shape={}'.format(events.shape))
    # Any negative first difference means the events are out of order.
    if (np.diff(events) < 0).any():
        raise ValueError('Events should be in increasing order.')
def validate_frequencies(frequencies, max_freq, min_freq,
                         allow_negatives=False):
    """Checks that a 1-d frequency ndarray is well-formed, and raises
    errors if not.

    Parameters
    ----------
    frequencies : np.ndarray
        Array of frequency values in Hz.
    max_freq : float
        Maximum allowed (absolute) frequency.
    min_freq : float
        Minimum allowed (absolute) frequency.
    allow_negatives : bool
        If True, negative values (e.g. unvoiced-frame markers) are
        validated by magnitude.

    Raises
    ------
    ValueError
        If any check fails.
    """
    if allow_negatives:
        # Compare magnitudes only; sign carries voicing information.
        frequencies = np.abs(frequencies)
    if (np.abs(frequencies) > max_freq).any():
        raise ValueError('A frequency of {} was found which is greater than '
                         'the maximum allowable value of max_freq = {} (did '
                         'you supply frequency values in '
                         'Hz?)'.format(frequencies.max(), max_freq))
    if (np.abs(frequencies) < min_freq).any():
        raise ValueError('A frequency of {} was found which is less than the '
                         'minimum allowable value of min_freq = {} (did you '
                         'supply frequency values in '
                         'Hz?)'.format(frequencies.min(), min_freq))
    if frequencies.ndim != 1:
        raise ValueError('Frequencies should be 1-d numpy ndarray, '
                         'but shape={}'.format(frequencies.shape))
def intervals_to_durations(intervals):
    """Converts an array of n intervals to their n durations.

    Parameters
    ----------
    intervals : np.ndarray, shape=(n, 2)
        Array of interval (start, end) times.

    Returns
    -------
    np.ndarray, shape=(n,)
        Absolute duration of each interval.
    """
    validate_intervals(intervals)
    # |end - start| per row; abs() guards against reversed intervals.
    return np.abs(np.diff(intervals, axis=-1)).flatten()
def validate(reference_sources, estimated_sources):
    """Checks that the input data to a metric are valid, and throws helpful
    errors if not.

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
        Matrix containing true sources.
    estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
        Matrix containing estimated sources.

    Raises
    ------
    ValueError
        On shape mismatch, too many dimensions/sources, or silent sources.
    """
    if reference_sources.shape != estimated_sources.shape:
        raise ValueError('The shape of estimated sources and the true '
                         'sources should match. reference_sources.shape '
                         '= {}, estimated_sources.shape '
                         '= {}'.format(reference_sources.shape,
                                       estimated_sources.shape))
    if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
        raise ValueError('The number of dimensions is too high (must be less '
                         'than 3). reference_sources.ndim = {}, '
                         'estimated_sources.ndim '
                         '= {}'.format(reference_sources.ndim,
                                       estimated_sources.ndim))
    # Empty inputs only warn -- the metrics return empty arrays for them.
    if reference_sources.size == 0:
        warnings.warn("reference_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(reference_sources):
        raise ValueError('All the reference sources should be non-silent (not '
                         'all-zeros), but at least one of the reference '
                         'sources is all 0s, which introduces ambiguity to the'
                         ' evaluation. (Otherwise we can add infinitely many '
                         'all-zero sources.)')
    if estimated_sources.size == 0:
        warnings.warn("estimated_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(estimated_sources):
        raise ValueError('All the estimated sources should be non-silent (not '
                         'all-zeros), but at least one of the estimated '
                         'sources is all 0s. Since we require each reference '
                         'source to be non-silent, having a silent estimated '
                         'source will result in an underdetermined system.')
    # The permutation search in bss_eval is factorial in nsrc; cap it.
    if (estimated_sources.shape[0] > MAX_SOURCES or
            reference_sources.shape[0] > MAX_SOURCES):
        raise ValueError('The supplied matrices should be of shape (nsrc,'
                         ' nsampl) but reference_sources.shape[0] = {} and '
                         'estimated_sources.shape[0] = {} which is greater '
                         'than mir_eval.separation.MAX_SOURCES = {}. To '
                         'override this check, set '
                         'mir_eval.separation.MAX_SOURCES to a '
                         'larger value.'.format(reference_sources.shape[0],
                                                estimated_sources.shape[0],
                                                MAX_SOURCES))
20,005 | def _any_source_silent ( sources ) : return np . any ( np . all ( np . sum ( sources , axis = tuple ( range ( 2 , sources . ndim ) ) ) == 0 , axis = 1 ) ) | Returns true if the parameter sources has any silent first dimensions |
def bss_eval_sources(reference_sources, estimated_sources,
                     compute_permutation=True):
    """Ordering and measurement of the separation quality for estimated
    source signals in terms of filtered true source, interference and
    artifacts.

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        True sources.
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        Estimated sources.
    compute_permutation : bool
        If True, evaluate every (estimate, reference) pairing and pick the
        permutation maximizing mean SIR; otherwise pair index-for-index.

    Returns
    -------
    sdr, sir, sar : np.ndarray, shape=(nsrc,)
        Per-source distortion/interference/artifact ratios (dB).
    perm : np.ndarray, shape=(nsrc,)
        Chosen source ordering (identity when compute_permutation is False).
    """
    # Promote 1-d inputs (a single source) to (1, nsampl).
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # Empty input -> empty metrics (validate() already warned).
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = estimated_sources.shape[0]
    if compute_permutation:
        # Score every estimate against every reference (512-tap filters).
        sdr = np.empty((nsrc, nsrc))
        sir = np.empty((nsrc, nsrc))
        sar = np.empty((nsrc, nsrc))
        for jest in range(nsrc):
            for jtrue in range(nsrc):
                s_true, e_spat, e_interf, e_artif = _bss_decomp_mtifilt(
                    reference_sources, estimated_sources[jest], jtrue, 512)
                sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
                    _bss_source_crit(s_true, e_spat, e_interf, e_artif)
        # Exhaustive search over permutations for the best mean SIR.
        perms = list(itertools.permutations(list(range(nsrc))))
        mean_sir = np.empty(len(perms))
        dum = np.arange(nsrc)
        for (i, perm) in enumerate(perms):
            mean_sir[i] = np.mean(sir[perm, dum])
        popt = perms[np.argmax(mean_sir)]
        idx = (popt, dum)
        return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
    else:
        # Fixed pairing: estimate j is scored against reference j only.
        sdr = np.empty(nsrc)
        sir = np.empty(nsrc)
        sar = np.empty(nsrc)
        for j in range(nsrc):
            s_true, e_spat, e_interf, e_artif = _bss_decomp_mtifilt(
                reference_sources, estimated_sources[j], j, 512)
            sdr[j], sir[j], sar[j] = _bss_source_crit(
                s_true, e_spat, e_interf, e_artif)
        popt = np.arange(nsrc)
        return (sdr, sir, sar, popt)
def bss_eval_sources_framewise(reference_sources, estimated_sources,
                               window=30 * 44100, hop=15 * 44100,
                               compute_permutation=False):
    """Framewise computation of bss_eval_sources.

    Slides a window of ``window`` samples with hop ``hop`` over the signals
    and evaluates bss_eval_sources on each frame.

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
    window : int
        Frame length in samples (default 30 s at 44.1 kHz).
    hop : int
        Hop size in samples (default 15 s at 44.1 kHz).
    compute_permutation : bool
        Whether to search permutations within each frame.

    Returns
    -------
    sdr, sir, sar, perm : np.ndarray, shape=(nsrc, nwin)
        Per-frame metrics; NaN for frames containing a silent source.
    """
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = reference_sources.shape[0]
    nwin = int(np.floor((reference_sources.shape[1] - window + hop) / hop))
    # Too short for framing: evaluate once and present it as one frame.
    if nwin < 2:
        result = bss_eval_sources(reference_sources, estimated_sources,
                                  compute_permutation)
        return [np.expand_dims(score, -1) for score in result]
    sdr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice]
        est_slice = estimated_sources[:, win_slice]
        # Silent frames would make the metrics undefined -> NaN instead.
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
                ref_slice, est_slice, compute_permutation)
        else:
            sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
    return sdr, sir, sar, perm
def bss_eval_images_framewise(reference_sources, estimated_sources,
                              window=30 * 44100, hop=15 * 44100,
                              compute_permutation=False):
    """Framewise computation of bss_eval_images.

    Slides a window of ``window`` samples with hop ``hop`` over the signals
    and evaluates bss_eval_images on each frame.

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
    estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
    window : int
        Frame length in samples (default 30 s at 44.1 kHz).
    hop : int
        Hop size in samples (default 15 s at 44.1 kHz).
    compute_permutation : bool
        Whether to search permutations within each frame.

    Returns
    -------
    sdr, isr, sir, sar, perm : np.ndarray, shape=(nsrc, nwin)
        Per-frame metrics; NaN for frames containing a silent source.
    """
    estimated_sources = np.atleast_3d(estimated_sources)
    reference_sources = np.atleast_3d(reference_sources)
    validate(reference_sources, estimated_sources)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = reference_sources.shape[0]
    nwin = int(np.floor((reference_sources.shape[1] - window + hop) / hop))
    # Too short for framing: evaluate once and present it as one frame.
    if nwin < 2:
        result = bss_eval_images(reference_sources, estimated_sources,
                                 compute_permutation)
        return [np.expand_dims(score, -1) for score in result]
    sdr = np.empty((nsrc, nwin))
    isr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice, :]
        est_slice = estimated_sources[:, win_slice, :]
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
                bss_eval_images(ref_slice, est_slice, compute_permutation)
        else:
            # BUG FIX: isr was previously omitted here, leaving uninitialized
            # np.empty garbage in the returned ISR for silent frames.
            sdr[:, k] = isr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
    return sdr, isr, sir, sar, perm
20,009 | def _project ( reference_sources , estimated_source , flen ) : nsrc = reference_sources . shape [ 0 ] nsampl = reference_sources . shape [ 1 ] reference_sources = np . hstack ( ( reference_sources , np . zeros ( ( nsrc , flen - 1 ) ) ) ) estimated_source = np . hstack ( ( estimated_source , np . zeros ( flen - 1 ) ) ) n_fft = int ( 2 ** np . ceil ( np . log2 ( nsampl + flen - 1. ) ) ) sf = scipy . fftpack . fft ( reference_sources , n = n_fft , axis = 1 ) sef = scipy . fftpack . fft ( estimated_source , n = n_fft ) G = np . zeros ( ( nsrc * flen , nsrc * flen ) ) for i in range ( nsrc ) : for j in range ( nsrc ) : ssf = sf [ i ] * np . conj ( sf [ j ] ) ssf = np . real ( scipy . fftpack . ifft ( ssf ) ) ss = toeplitz ( np . hstack ( ( ssf [ 0 ] , ssf [ - 1 : - flen : - 1 ] ) ) , r = ssf [ : flen ] ) G [ i * flen : ( i + 1 ) * flen , j * flen : ( j + 1 ) * flen ] = ss G [ j * flen : ( j + 1 ) * flen , i * flen : ( i + 1 ) * flen ] = ss . T D = np . zeros ( nsrc * flen ) for i in range ( nsrc ) : ssef = sf [ i ] * np . conj ( sef ) ssef = np . real ( scipy . fftpack . ifft ( ssef ) ) D [ i * flen : ( i + 1 ) * flen ] = np . hstack ( ( ssef [ 0 ] , ssef [ - 1 : - flen : - 1 ] ) ) try : C = np . linalg . solve ( G , D ) . reshape ( flen , nsrc , order = 'F' ) except np . linalg . linalg . LinAlgError : C = np . linalg . lstsq ( G , D ) [ 0 ] . reshape ( flen , nsrc , order = 'F' ) sproj = np . zeros ( nsampl + flen - 1 ) for i in range ( nsrc ) : sproj += fftconvolve ( C [ : , i ] , reference_sources [ i ] ) [ : nsampl + flen - 1 ] return sproj | Least - squares projection of estimated source on the subspace spanned by delayed versions of reference sources with delays between 0 and flen - 1 |
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
    """Measurement of the separation quality for a given image, in terms of
    filtered true source, spatial error, interference and artifacts.

    Returns
    -------
    (sdr, isr, sir, sar) : tuple of float
        Energy ratios in dB for distortion, spatial, interference and
        artifact errors respectively.
    """
    # Target energy is shared by the SDR and ISR numerators.
    target_energy = np.sum(s_true ** 2)
    sdr = _safe_db(target_energy,
                   np.sum((e_spat + e_interf + e_artif) ** 2))
    isr = _safe_db(target_energy, np.sum(e_spat ** 2))
    # SIR/SAR numerators progressively fold in the "allowed" error terms.
    sir = _safe_db(np.sum((s_true + e_spat) ** 2), np.sum(e_interf ** 2))
    sar = _safe_db(np.sum((s_true + e_spat + e_interf) ** 2),
                   np.sum(e_artif ** 2))
    return (sdr, isr, sir, sar)
20,011 | def _safe_db ( num , den ) : if den == 0 : return np . Inf return 10 * np . log10 ( num / den ) | Properly handle the potential + Inf db SIR instead of raising a RuntimeWarning . Only denominator is checked because the numerator can never be 0 . |
def evaluate(reference_sources, estimated_sources, **kwargs):
    """Compute all metrics for the given reference and estimated signals.

    Runs the image-based metrics (whole-signal and framewise) always, and
    the source-based metrics additionally when the inputs are 2-d.

    Parameters
    ----------
    reference_sources : np.ndarray
        True sources.
    estimated_sources : np.ndarray
        Estimated sources.
    kwargs
        Forwarded (filtered) to each underlying metric function.

    Returns
    -------
    scores : collections.OrderedDict
        Metric name -> value lists.
    """
    scores = collections.OrderedDict()

    # Whole-signal image metrics.
    sdr, isr, sir, sar, perm = util.filter_kwargs(
        bss_eval_images, reference_sources, estimated_sources, **kwargs)
    scores['Images - Source to Distortion'] = sdr.tolist()
    scores['Images - Image to Spatial'] = isr.tolist()
    scores['Images - Source to Interference'] = sir.tolist()
    scores['Images - Source to Artifact'] = sar.tolist()
    scores['Images - Source permutation'] = perm.tolist()

    # Framewise image metrics.
    sdr, isr, sir, sar, perm = util.filter_kwargs(
        bss_eval_images_framewise, reference_sources,
        estimated_sources, **kwargs)
    scores['Images Frames - Source to Distortion'] = sdr.tolist()
    scores['Images Frames - Image to Spatial'] = isr.tolist()
    scores['Images Frames - Source to Interference'] = sir.tolist()
    scores['Images Frames - Source to Artifact'] = sar.tolist()
    scores['Images Frames - Source permutation'] = perm.tolist()

    # Source metrics only apply to single-channel (2-d) inputs.
    if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
        sdr, sir, sar, perm = util.filter_kwargs(
            bss_eval_sources_framewise, reference_sources,
            estimated_sources, **kwargs)
        scores['Sources Frames - Source to Distortion'] = sdr.tolist()
        scores['Sources Frames - Source to Interference'] = sir.tolist()
        scores['Sources Frames - Source to Artifact'] = sar.tolist()
        scores['Sources Frames - Source permutation'] = perm.tolist()

        sdr, sir, sar, perm = util.filter_kwargs(
            bss_eval_sources, reference_sources,
            estimated_sources, **kwargs)
        scores['Sources - Source to Distortion'] = sdr.tolist()
        scores['Sources - Source to Interference'] = sir.tolist()
        scores['Sources - Source to Artifact'] = sar.tolist()
        scores['Sources - Source permutation'] = perm.tolist()

    return scores
def clicks(times, fs, click=None, length=None):
    """Returns a signal with the signal `click` placed at each specified
    time.

    Parameters
    ----------
    times : np.ndarray
        Onset times (seconds) at which to place clicks; assumed sorted.
    fs : int
        Sampling rate.
    click : np.ndarray or None
        Click waveform; defaults to a decaying 1 kHz tone, 100 ms long.
    length : int or None
        Output length in samples; defaults to just past the last click.

    Returns
    -------
    np.ndarray
        Synthesized click track.
    """
    if click is None:
        # Default click: 1 kHz sinusoid with an exponential decay envelope.
        click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs))
        click *= np.exp(-np.arange(fs * .1) / (fs * .01))
    if length is None:
        length = int(times.max() * fs + click.shape[0] + 1)
    output = np.zeros(length)
    for onset in times:
        begin = int(onset * fs)
        finish = begin + click.shape[0]
        # Times are assumed sorted, so once one falls past the end, stop.
        if begin >= length:
            break
        if finish >= length:
            # Truncate the final click to fit in the output buffer.
            output[begin:] = click[:length - begin]
            break
        output[begin:finish] = click
    return output
def time_frequency(gram, frequencies, times, fs, function=np.sin,
                   length=None, n_dec=1):
    """Reverse synthesis of a time-frequency representation of a signal.

    Parameters
    ----------
    gram : np.ndarray, shape=(n_freqs, n_times)
        Magnitudes per (frequency, time) bin; negatives are clipped to 0.
    frequencies : np.ndarray, shape=(n_freqs,)
        Frequency (Hz) of each row of gram.
    times : np.ndarray, shape=(n_times, 2) or (n_times,)
        Bin intervals, or boundaries converted to intervals.
    fs : int
        Sampling rate.
    function : callable
        Periodic waveform generator (default np.sin).
    length : int or None
        Output length in samples.
    n_dec : int
        Decimals to which frequencies are rounded (controls the period of
        the cached waveform used for fast synthesis).

    Returns
    -------
    output : np.ndarray
        Normalized synthesized signal.
    """
    if times.ndim == 1:
        times = util.boundaries_to_intervals(times)
    if length is None:
        length = int(times[-1, 1] * fs)
    # NOTE(review): t_max is passed in samples here while times are in
    # seconds -- confirm against util.adjust_intervals' expectations.
    times, _ = util.adjust_intervals(times, t_max=length)
    # Only keep as many intervals as there are gram columns.
    n_times = gram.shape[1]
    times = times[:n_times]

    def _fast_synthesize(frequency):
        # Synthesize one period-aligned chunk, then tile it virtually via
        # a zero-stride view to cover the full output length without
        # allocating the repeats.
        frequency = np.round(frequency, n_dec)
        n_samples = int(10.0 ** n_dec * fs)
        short_signal = function(2.0 * np.pi * np.arange(n_samples) *
                                frequency / fs)
        n_repeats = int(np.ceil(length / float(short_signal.shape[0])))
        long_signal = as_strided(short_signal,
                                 shape=(n_repeats, len(short_signal)),
                                 strides=(0, short_signal.itemsize))
        return long_signal.flat

    def _const_interpolator(value):
        # Fallback interpolator when there is only one time frame.
        def __interpolator(x):
            return value
        return __interpolator

    # Clip negative magnitudes; they are not meaningful amplitudes.
    gram = np.maximum(gram, 0)
    output = np.zeros(length)
    time_centers = np.mean(times, axis=1) * float(fs)
    for n, frequency in enumerate(frequencies):
        wave = _fast_synthesize(frequency)
        if len(time_centers) > 1:
            # Linear amplitude envelope between frame centers, 0 outside.
            gram_interpolator = interp1d(
                time_centers, gram[n, :],
                kind='linear', bounds_error=False, fill_value=0.0)
        else:
            gram_interpolator = _const_interpolator(gram[n, 0])
        for m, (start, end) in enumerate((times * fs).astype(int)):
            start, end = max(start, 0), min(end, length)
            output[start:end] += (wave[start:end] *
                                  gram_interpolator(np.arange(start, end)))
    # Normalize to unit peak, unless the signal is essentially silent.
    norm = np.abs(output).max()
    if norm >= np.finfo(output.dtype).tiny:
        output /= norm
    return output
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
                  length=None, kind='linear'):
    """Sonify a pitch contour.

    Parameters
    ----------
    times : np.ndarray
        Sample times (seconds) of the contour points.
    frequencies : np.ndarray
        Frequencies (Hz) at each time; negatives are clipped to 0.
    fs : int or float
        Sampling rate.
    amplitudes : np.ndarray or None
        Optional per-point amplitudes (default: constant 1).
    function : callable
        Periodic waveform generator (default np.sin).
    length : int or None
        Output length in samples (default: past the last time).
    kind : str
        Interpolation kind passed to scipy's interp1d.

    Returns
    -------
    np.ndarray
        Synthesized contour signal.
    """
    fs = float(fs)
    if length is None:
        length = int(times.max() * fs)
    # Negative frequencies (unvoiced markers) are silenced.
    frequencies = np.maximum(frequencies, 0.0)
    # Interpolate instantaneous angular frequency per output sample;
    # outside the annotated range the contour is silent (fill 0).
    freq_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs,
                           kind=kind, fill_value=0.0,
                           bounds_error=False, copy=False)
    inst_freq = freq_interp(np.arange(length))
    if amplitudes is None:
        envelope = np.ones((length,))
    else:
        amp_interp = interp1d(times * fs, amplitudes, kind=kind,
                              fill_value=0.0, bounds_error=False, copy=False)
        envelope = amp_interp(np.arange(length))
    # Integrate frequency to phase, then synthesize.
    return envelope * function(np.cumsum(inst_freq))
def chords(chord_labels, intervals, fs, **kwargs):
    """Synthesizes chord labels.

    Parameters
    ----------
    chord_labels : list of str
        Chord label per interval.
    intervals : np.ndarray, shape=(n, 2)
        Start/end time of each chord.
    fs : int
        Sampling rate.
    kwargs
        Forwarded to the chroma sonifier.

    Returns
    -------
    np.ndarray
        Synthesized audio.
    """
    util.validate_intervals(intervals)
    # Encode each label as a root + interval bitmap, rotate the bitmap to
    # the root to get absolute pitch classes, and stack into a chromagram.
    roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
    pitch_classes = [np.roll(bitmap, root)
                     for (bitmap, root) in zip(interval_bitmaps, roots)]
    chromagram = np.array(pitch_classes).T
    return chroma(chromagram, intervals, fs, **kwargs)
def validate(reference_onsets, estimated_onsets):
    """Checks that the input annotations to a metric look like valid onset
    time arrays, and throws helpful errors if not.

    Parameters
    ----------
    reference_onsets : np.ndarray
        Reference onset times, in seconds.
    estimated_onsets : np.ndarray
        Estimated onset times, in seconds.
    """
    # Empty annotations are legal but almost certainly unintended.
    if reference_onsets.size == 0:
        warnings.warn("Reference onsets are empty.")
    if estimated_onsets.size == 0:
        warnings.warn("Estimated onsets are empty.")
    # Both arrays must satisfy the generic event-array checks.
    for onsets in [reference_onsets, estimated_onsets]:
        util.validate_events(onsets, MAX_TIME)
def f_measure(reference_onsets, estimated_onsets, window=.05):
    """Compute the F-measure of correct vs incorrectly predicted onsets.
    "Correctness" is determined over a small window.

    Parameters
    ----------
    reference_onsets : np.ndarray
        Reference onset times, in seconds.
    estimated_onsets : np.ndarray
        Estimated onset times, in seconds.
    window : float
        Matching tolerance, in seconds.

    Returns
    -------
    f_measure, precision, recall : float
    """
    validate(reference_onsets, estimated_onsets)
    # With no onsets on either side there is nothing to score.
    if reference_onsets.size == 0 or estimated_onsets.size == 0:
        return 0., 0., 0.
    # Maximum bipartite matching within the tolerance window.
    matching = util.match_events(reference_onsets, estimated_onsets, window)
    n_matched = float(len(matching))
    precision = n_matched / len(estimated_onsets)
    recall = n_matched / len(reference_onsets)
    return util.f_measure(precision, recall), precision, recall
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
    """Checks that the input annotations to a metric look like time
    intervals and a pitch list, and throws helpful errors if not.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
    ref_pitches : np.ndarray, shape=(n,)
    est_intervals : np.ndarray, shape=(m, 2)
    est_pitches : np.ndarray, shape=(m,)
    """
    validate_intervals(ref_intervals, est_intervals)
    # Each interval needs exactly one pitch.
    if not ref_intervals.shape[0] == ref_pitches.shape[0]:
        raise ValueError('Reference intervals and pitches have different '
                         'lengths.')
    if not est_intervals.shape[0] == est_pitches.shape[0]:
        raise ValueError('Estimated intervals and pitches have different '
                         'lengths.')
    # Pitches must be strictly positive (they are frequencies in Hz).
    if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
        raise ValueError("Reference contains at least one non-positive pitch "
                         "value")
    if est_pitches.size > 0 and np.min(est_pitches) <= 0:
        raise ValueError("Estimate contains at least one non-positive pitch "
                         "value")
def validate_intervals(ref_intervals, est_intervals):
    """Checks that the input annotations to a metric look like time
    intervals, and throws helpful errors if not.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference note intervals.
    est_intervals : np.ndarray, shape=(m, 2)
        Estimated note intervals.
    """
    # Empty annotations are legal but almost certainly unintended.
    if ref_intervals.size == 0:
        warnings.warn("Reference notes are empty.")
    if est_intervals.size == 0:
        warnings.warn("Estimated notes are empty.")
    # Defer the structural checks to the shared util validator.
    util.validate_intervals(ref_intervals)
    util.validate_intervals(est_intervals)
def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
                       offset_min_tolerance=0.05, strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note offsets into account.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference note intervals.
    est_intervals : np.ndarray, shape=(m, 2)
        Estimated note intervals.
    offset_ratio : float
        Tolerance as a fraction of the reference note duration.
    offset_min_tolerance : float
        Lower bound on the offset tolerance, in seconds.
    strict : bool
        If True, require strictly-less-than the tolerance.

    Returns
    -------
    matching : list of (ref_index, est_index) tuples
    """
    # Strict mode excludes distances exactly equal to the tolerance.
    cmp_func = np.less if strict else np.less_equal
    # All pairwise |ref_offset - est_offset| distances, rounded to avoid
    # spurious mismatches from floating-point noise.
    offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
                                                est_intervals[:, 1]))
    offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
    # Per-reference tolerance: proportional to duration, with a floor.
    ref_durations = util.intervals_to_durations(ref_intervals)
    offset_tolerances = np.maximum(offset_ratio * ref_durations,
                                   offset_min_tolerance)
    offset_hit_matrix = (
        cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
    # Build the bipartite graph est -> candidate refs and solve it.
    hits = np.where(offset_hit_matrix)
    G = {}
    for ref_i, est_i in zip(*hits):
        G.setdefault(est_i, []).append(ref_i)
    return sorted(util._bipartite_match(G).items())
def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
                      strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note onsets into account.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference note intervals.
    est_intervals : np.ndarray, shape=(m, 2)
        Estimated note intervals.
    onset_tolerance : float
        Matching tolerance, in seconds.
    strict : bool
        If True, require strictly-less-than the tolerance.

    Returns
    -------
    matching : list of (ref_index, est_index) tuples
    """
    # Strict mode excludes distances exactly equal to the tolerance.
    cmp_func = np.less if strict else np.less_equal
    # All pairwise |ref_onset - est_onset| distances, rounded to avoid
    # spurious mismatches from floating-point noise.
    onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
                                               est_intervals[:, 0]))
    onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
    onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
    # Build the bipartite graph est -> candidate refs and solve it.
    hits = np.where(onset_hit_matrix)
    G = {}
    for ref_i, est_i in zip(*hits):
        G.setdefault(est_i, []).append(ref_i)
    return sorted(util._bipartite_match(G).items())
def validate_voicing(ref_voicing, est_voicing):
    """Checks that voicing inputs to a metric are in the correct format.

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array (0/1 per frame).
    est_voicing : np.ndarray
        Estimated boolean voicing array (0/1 per frame).

    Raises
    ------
    ValueError
        If lengths differ or values are not boolean.
    """
    # Degenerate inputs only warn; they may be intentional.
    if ref_voicing.size == 0:
        warnings.warn("Reference voicing array is empty.")
    if est_voicing.size == 0:
        warnings.warn("Estimated voicing array is empty.")
    if ref_voicing.sum() == 0:
        warnings.warn("Reference melody has no voiced frames.")
    if est_voicing.sum() == 0:
        warnings.warn("Estimated melody has no voiced frames.")
    # Frame-level comparison requires equal lengths.
    if ref_voicing.shape[0] != est_voicing.shape[0]:
        raise ValueError('Reference and estimated voicing arrays should '
                         'be the same length.')
    # Every entry must be exactly 0 or 1.
    for voicing in [ref_voicing, est_voicing]:
        if np.logical_and(voicing != 0, voicing != 1).any():
            raise ValueError('Voicing arrays must be boolean.')
def hz2cents(freq_hz, base_frequency=10.0):
    """Convert an array of frequency values in Hz to cents.
    0 values are left in place.

    Parameters
    ----------
    freq_hz : np.ndarray
        Frequencies in Hz (sign is ignored; 0 passes through as 0).
    base_frequency : float
        Reference frequency mapped to 0 cents.

    Returns
    -------
    np.ndarray
        Frequencies in cents relative to ``base_frequency``.
    """
    cents = np.zeros(freq_hz.shape[0])
    # Only convert nonzero entries -- zero marks "no pitch" and must stay 0.
    nonzero = np.flatnonzero(freq_hz)
    ratio = np.abs(freq_hz[nonzero]) / base_frequency
    cents[nonzero] = 1200 * np.log2(ratio)
    return cents
def constant_hop_timebase(hop, end_time):
    """Generates a time series from 0 to ``end_time`` with times spaced
    ``hop`` apart.

    Parameters
    ----------
    hop : float
        Spacing between consecutive times, in seconds.
    end_time : float
        Time up to which the series extends.

    Returns
    -------
    times : np.ndarray
        Array ``[0, hop, 2*hop, ...]`` not exceeding ``end_time``.
    """
    # Round to 10 decimals to keep floating-point drift from adding or
    # dropping a step at the boundary.
    end_time = np.round(end_time, 10)
    n_steps = int(np.floor(end_time / hop))
    times = np.linspace(0, hop * n_steps, n_steps + 1)
    return np.round(times, 10)
def detection(reference_intervals, estimated_intervals, window=0.5,
              beta=1.0, trim=False):
    """Boundary detection hit-rate.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals.
    window : float
        Matching tolerance for boundaries, in seconds.
    beta : float
        F-measure beta weighting.
    trim : bool
        If True, ignore the first and last boundary of each annotation.

    Returns
    -------
    precision, recall, f_measure : float
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        # Drop trivially-matching start/end boundaries.
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return 0.0, 0.0, 0.0
    matching = util.match_events(ref_bounds, est_bounds, window)
    n_matched = float(len(matching))
    precision = n_matched / len(est_bounds)
    recall = n_matched / len(ref_bounds)
    f_measure = util.f_measure(precision, recall, beta=beta)
    return precision, recall, f_measure
def deviation(reference_intervals, estimated_intervals, trim=False):
    """Compute the median deviations between reference and estimated
    boundary times.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals.
    trim : bool
        If True, ignore the first and last boundary of each annotation.

    Returns
    -------
    reference_to_estimated, estimated_to_reference : float
        Median nearest-boundary distances in each direction (NaN when
        either boundary list is empty).
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return np.nan, np.nan
    # Pairwise |ref - est| distances; medians of row/column minima give
    # the two directed deviations.
    dist = np.abs(np.subtract.outer(ref_bounds, est_bounds))
    estimated_to_reference = np.median(dist.min(axis=0))
    reference_to_estimated = np.median(dist.min(axis=1))
    return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels, estimated_intervals,
             estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation evaluation by pair-wise agreement.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals.
    reference_labels : list
        Reference segment labels.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals.
    estimated_labels : list
        Estimated segment labels.
    frame_size : float
        Sampling period for frame labels, in seconds.
    beta : float
        F-measure beta weighting.

    Returns
    -------
    precision, recall, f_measure : float
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Sample both annotations onto a common frame grid of label indices.
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # Count same-label frame pairs in each annotation; subtract the
    # diagonal and halve to count unordered pairs only.
    agree_ref = np.equal.outer(y_ref, y_ref)
    n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
    agree_est = np.equal.outer(y_est, y_est)
    n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
    # Pairs that agree in both annotations.
    matches = np.logical_and(agree_ref, agree_est)
    n_matches = (matches.sum() - len(y_ref)) / 2.0
    precision = n_matches / n_agree_est
    recall = n_matches / n_agree_ref
    f_measure = util.f_measure(precision, recall, beta=beta)
    return precision, recall, f_measure
20,029 | def _contingency_matrix ( reference_indices , estimated_indices ) : ref_classes , ref_class_idx = np . unique ( reference_indices , return_inverse = True ) est_classes , est_class_idx = np . unique ( estimated_indices , return_inverse = True ) n_ref_classes = ref_classes . shape [ 0 ] n_est_classes = est_classes . shape [ 0 ] return scipy . sparse . coo_matrix ( ( np . ones ( ref_class_idx . shape [ 0 ] ) , ( ref_class_idx , est_class_idx ) ) , shape = ( n_ref_classes , n_est_classes ) , dtype = np . int ) . toarray ( ) | Computes the contingency matrix of a true labeling vs an estimated one . |
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Reference cluster index per sample.
    estimated_indices : np.ndarray
        Estimated cluster index per sample.

    Returns
    -------
    float
        Adjusted Rand index in [-1, 1]; 1 for identical clusterings.
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Degenerate cases where both clusterings are trivially identical.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # Pair counts within reference classes, estimated classes, and cells.
    sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1)
                     for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1)
                     for n_k in contingency.sum(axis=0))
    sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1)
                    for n_ij in contingency.flatten()))
    # Expected agreement under the permutation null model.
    prod_comb = ((sum_comb_c * sum_comb_k) /
                 float(scipy.special.comb(n_samples, 2)))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)
20,031 | def _mutual_info_score ( reference_indices , estimated_indices , contingency = None ) : if contingency is None : contingency = _contingency_matrix ( reference_indices , estimated_indices ) . astype ( float ) contingency_sum = np . sum ( contingency ) pi = np . sum ( contingency , axis = 1 ) pj = np . sum ( contingency , axis = 0 ) outer = np . outer ( pi , pj ) nnz = contingency != 0.0 contingency_nm = contingency [ nnz ] log_contingency_nm = np . log ( contingency_nm ) contingency_nm /= contingency_sum log_outer = - np . log ( outer [ nnz ] ) + np . log ( pi . sum ( ) ) + np . log ( pj . sum ( ) ) mi = ( contingency_nm * ( log_contingency_nm - np . log ( contingency_sum ) ) + contingency_nm * log_outer ) return mi . sum ( ) | Compute the mutual information between two sequence labelings . |
20,032 | def _entropy ( labels ) : if len ( labels ) == 0 : return 1.0 label_idx = np . unique ( labels , return_inverse = True ) [ 1 ] pi = np . bincount ( label_idx ) . astype ( np . float ) pi = pi [ pi > 0 ] pi_sum = np . sum ( pi ) return - np . sum ( ( pi / pi_sum ) * ( np . log ( pi ) - np . log ( pi_sum ) ) ) | Calculates the entropy for a labeling . |
def validate_tempi(tempi, reference=True):
    """Checks that there are two non-negative tempi.
    For a reference value, at least one tempo has to be greater than zero.

    Parameters
    ----------
    tempi : np.ndarray
        Length-2 array of tempo values (BPM).
    reference : bool
        If True, additionally require at least one nonzero tempo.

    Raises
    ------
    ValueError
        If any check fails.
    """
    if tempi.size != 2:
        raise ValueError('tempi must have exactly two values')
    if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
        raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
    # An all-zero reference would make the detection metric meaningless.
    if reference and np.all(tempi == 0):
        raise ValueError('reference tempi={} must have one'
                         ' value greater than zero'.format(tempi))
def validate(reference_tempi, reference_weight, estimated_tempi):
    """Checks that the input annotations to a metric look like valid tempo
    annotations.

    Parameters
    ----------
    reference_tempi : np.ndarray
        Length-2 array of reference tempi (BPM).
    reference_weight : float
        Relative weight of the first reference tempo, in [0, 1].
    estimated_tempi : np.ndarray
        Length-2 array of estimated tempi (BPM).
    """
    # References must contain at least one nonzero tempo; estimates may
    # legitimately be all-zero.
    validate_tempi(reference_tempi, reference=True)
    validate_tempi(estimated_tempi, reference=False)
    if reference_weight < 0 or reference_weight > 1:
        raise ValueError('Reference weight must lie in range [0, 1]')
def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
    """Compute the tempo detection accuracy metric.

    Parameters
    ----------
    reference_tempi : np.ndarray
        Length-2 array of reference tempi (BPM).
    reference_weight : float
        Relative weight of the first reference tempo.
    estimated_tempi : np.ndarray
        Length-2 array of estimated tempi (BPM).
    tol : float
        Maximum allowed relative error for a reference tempo to count as
        detected.

    Returns
    -------
    p_score : float
        Weighted fraction of detected reference tempi.
    one_correct : bool
        True if at least one reference tempo was detected.
    both_correct : bool
        True if both reference tempi were detected.
    """
    validate(reference_tempi, reference_weight, estimated_tempi)
    if tol < 0 or tol > 1:
        raise ValueError('invalid tolerance {}: must lie in the range '
                         '[0, 1]'.format(tol))
    if tol == 0.:
        warnings.warn('A tolerance of 0.0 may not '
                      'lead to the results you expect.')
    # A reference tempo is "hit" when some estimate is within the relative
    # tolerance; zero reference tempi are skipped (never hit).
    hits = [False, False]
    for i, ref_t in enumerate(reference_tempi):
        if ref_t > 0:
            relative_error = np.min(np.abs(ref_t - estimated_tempi) /
                                    float(ref_t))
            hits[i] = relative_error <= tol
    p_score = reference_weight * hits[0] + (1.0 - reference_weight) * hits[1]
    one_correct = bool(np.max(hits))
    both_correct = bool(np.min(hits))
    return p_score, one_correct, both_correct
def validate(ref_time, ref_freqs, est_time, est_freqs):
    """Checks that the time and frequency inputs are well-formed.

    Parameters
    ----------
    ref_time : np.ndarray
        Reference frame times.
    ref_freqs : list of np.ndarray
        Reference frequencies active in each frame.
    est_time : np.ndarray
        Estimated frame times.
    est_freqs : list of np.ndarray
        Estimated frequencies active in each frame.
    """
    util.validate_events(ref_time, max_time=MAX_TIME)
    util.validate_events(est_time, max_time=MAX_TIME)
    # Degenerate annotations warn; structural problems raise.
    if ref_time.size == 0:
        warnings.warn("Reference times are empty.")
    if ref_time.ndim != 1:
        raise ValueError("Reference times have invalid dimension")
    if len(ref_freqs) == 0:
        warnings.warn("Reference frequencies are empty.")
    if est_time.size == 0:
        warnings.warn("Estimated times are empty.")
    if est_time.ndim != 1:
        raise ValueError("Estimated times have invalid dimension")
    if len(est_freqs) == 0:
        warnings.warn("Estimated frequencies are empty.")
    # One frequency list per frame time.
    if ref_time.size != len(ref_freqs):
        raise ValueError('Reference times and frequencies have unequal '
                         'lengths.')
    if est_time.size != len(est_freqs):
        raise ValueError('Estimate times and frequencies have unequal '
                         'lengths.')
    # Each frame's frequencies must individually be in the valid range.
    for freq in ref_freqs:
        util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
                                  allow_negatives=False)
    for freq in est_freqs:
        util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
                                  allow_negatives=False)
20,037 | def resample_multipitch ( times , frequencies , target_times ) : if target_times . size == 0 : return [ ] if times . size == 0 : return [ np . array ( [ ] ) ] * len ( target_times ) n_times = len ( frequencies ) frequency_index = np . arange ( 0 , n_times ) new_frequency_index = scipy . interpolate . interp1d ( times , frequency_index , kind = 'nearest' , bounds_error = False , assume_sorted = True , fill_value = n_times ) ( target_times ) freq_vals = frequencies + [ np . array ( [ ] ) ] frequencies_resampled = [ freq_vals [ i ] for i in new_frequency_index . astype ( int ) ] return frequencies_resampled | Resamples multipitch time series to a new timescale . Values in target_times outside the range of times return no pitch estimate . |
20,038 | def compute_num_true_positives ( ref_freqs , est_freqs , window = 0.5 , chroma = False ) : n_frames = len ( ref_freqs ) true_positives = np . zeros ( ( n_frames , ) ) for i , ( ref_frame , est_frame ) in enumerate ( zip ( ref_freqs , est_freqs ) ) : if chroma : matching = util . match_events ( ref_frame , est_frame , window , distance = util . _outer_distance_mod_n ) else : matching = util . match_events ( ref_frame , est_frame , window ) true_positives [ i ] = len ( matching ) return true_positives | Compute the number of true positives in an estimate given a reference . A frequency is correct if it is within a quartertone of the correct frequency . |
20,039 | def compute_accuracy ( true_positives , n_ref , n_est ) : true_positive_sum = float ( true_positives . sum ( ) ) n_est_sum = n_est . sum ( ) if n_est_sum > 0 : precision = true_positive_sum / n_est . sum ( ) else : warnings . warn ( "Estimate frequencies are all empty." ) precision = 0.0 n_ref_sum = n_ref . sum ( ) if n_ref_sum > 0 : recall = true_positive_sum / n_ref . sum ( ) else : warnings . warn ( "Reference frequencies are all empty." ) recall = 0.0 acc_denom = ( n_est + n_ref - true_positives ) . sum ( ) if acc_denom > 0 : acc = true_positive_sum / acc_denom else : acc = 0.0 return precision , recall , acc | Compute accuracy metrics . |
20,040 | def compute_err_score ( true_positives , n_ref , n_est ) : n_ref_sum = float ( n_ref . sum ( ) ) if n_ref_sum == 0 : warnings . warn ( "Reference frequencies are all empty." ) return 0. , 0. , 0. , 0. e_sub = ( np . min ( [ n_ref , n_est ] , axis = 0 ) - true_positives ) . sum ( ) / n_ref_sum e_miss_numerator = n_ref - n_est e_miss_numerator [ e_miss_numerator < 0 ] = 0 e_miss = e_miss_numerator . sum ( ) / n_ref_sum e_fa_numerator = n_est - n_ref e_fa_numerator [ e_fa_numerator < 0 ] = 0 e_fa = e_fa_numerator . sum ( ) / n_ref_sum e_tot = ( np . max ( [ n_ref , n_est ] , axis = 0 ) - true_positives ) . sum ( ) / n_ref_sum return e_sub , e_miss , e_fa , e_tot | Compute error score metrics . |
20,041 | def _hierarchy_bounds ( intervals_hier ) : boundaries = list ( itertools . chain ( * list ( itertools . chain ( * intervals_hier ) ) ) ) return min ( boundaries ) , max ( boundaries ) | Compute the covered time range of a hierarchical segmentation . |
20,042 | def _align_intervals ( int_hier , lab_hier , t_min = 0.0 , t_max = None ) : return [ list ( _ ) for _ in zip ( * [ util . adjust_intervals ( np . asarray ( ival ) , labels = lab , t_min = t_min , t_max = t_max ) for ival , lab in zip ( int_hier , lab_hier ) ] ) ] | Align a hierarchical annotation to span a fixed start and end time . |
20,043 | def _compare_frame_rankings ( ref , est , transitive = False ) : idx = np . argsort ( ref ) ref_sorted = ref [ idx ] est_sorted = est [ idx ] levels , positions , counts = np . unique ( ref_sorted , return_index = True , return_counts = True ) positions = list ( positions ) positions . append ( len ( ref_sorted ) ) index = collections . defaultdict ( lambda : slice ( 0 ) ) ref_map = collections . defaultdict ( lambda : 0 ) for level , cnt , start , end in zip ( levels , counts , positions [ : - 1 ] , positions [ 1 : ] ) : index [ level ] = slice ( start , end ) ref_map [ level ] = cnt if transitive : level_pairs = itertools . combinations ( levels , 2 ) else : level_pairs = [ ( i , i + 1 ) for i in levels ] level_pairs , lcounter = itertools . tee ( level_pairs ) normalizer = float ( sum ( [ ref_map [ i ] * ref_map [ j ] for ( i , j ) in lcounter ] ) ) if normalizer == 0 : return 0 , 0.0 inversions = 0 for level_1 , level_2 in level_pairs : inversions += _count_inversions ( est_sorted [ index [ level_1 ] ] , est_sorted [ index [ level_2 ] ] ) return inversions , float ( normalizer ) | Compute the number of ranking disagreements in two lists . |
20,044 | def validate_hier_intervals ( intervals_hier ) : label_top = util . generate_labels ( intervals_hier [ 0 ] ) boundaries = set ( util . intervals_to_boundaries ( intervals_hier [ 0 ] ) ) for level , intervals in enumerate ( intervals_hier [ 1 : ] , 1 ) : label_current = util . generate_labels ( intervals ) validate_structure ( intervals_hier [ 0 ] , label_top , intervals , label_current ) new_bounds = set ( util . intervals_to_boundaries ( intervals ) ) if boundaries - new_bounds : warnings . warn ( 'Segment hierarchy is inconsistent ' 'at level {:d}' . format ( level ) ) boundaries |= new_bounds | Validate a hierarchical segment annotation . |
20,045 | def evaluate ( ref_intervals_hier , ref_labels_hier , est_intervals_hier , est_labels_hier , ** kwargs ) : _ , t_end = _hierarchy_bounds ( ref_intervals_hier ) ref_intervals_hier , ref_labels_hier = _align_intervals ( ref_intervals_hier , ref_labels_hier , t_min = 0.0 , t_max = None ) est_intervals_hier , est_labels_hier = _align_intervals ( est_intervals_hier , est_labels_hier , t_min = 0.0 , t_max = t_end ) scores = collections . OrderedDict ( ) kwargs [ 'transitive' ] = False ( scores [ 'T-Precision reduced' ] , scores [ 'T-Recall reduced' ] , scores [ 'T-Measure reduced' ] ) = util . filter_kwargs ( tmeasure , ref_intervals_hier , est_intervals_hier , ** kwargs ) kwargs [ 'transitive' ] = True ( scores [ 'T-Precision full' ] , scores [ 'T-Recall full' ] , scores [ 'T-Measure full' ] ) = util . filter_kwargs ( tmeasure , ref_intervals_hier , est_intervals_hier , ** kwargs ) ( scores [ 'L-Precision' ] , scores [ 'L-Recall' ] , scores [ 'L-Measure' ] ) = util . filter_kwargs ( lmeasure , ref_intervals_hier , ref_labels_hier , est_intervals_hier , est_labels_hier , ** kwargs ) return scores | Compute all hierarchical structure metrics for the given reference and estimated annotations . |
20,046 | def __expand_limits ( ax , limits , which = 'x' ) : if which == 'x' : getter , setter = ax . get_xlim , ax . set_xlim elif which == 'y' : getter , setter = ax . get_ylim , ax . set_ylim else : raise ValueError ( 'invalid axis: {}' . format ( which ) ) old_lims = getter ( ) new_lims = list ( limits ) if np . isfinite ( old_lims [ 0 ] ) : new_lims [ 0 ] = min ( old_lims [ 0 ] , limits [ 0 ] ) if np . isfinite ( old_lims [ 1 ] ) : new_lims [ 1 ] = max ( old_lims [ 1 ] , limits [ 1 ] ) setter ( new_lims ) | Helper function to expand axis limits |
20,047 | def __get_axes ( ax = None , fig = None ) : new_axes = False if ax is not None : return ax , new_axes if fig is None : import matplotlib . pyplot as plt fig = plt . gcf ( ) if not fig . get_axes ( ) : new_axes = True return fig . gca ( ) , new_axes | Get or construct the target axes object for a new plot . |
20,048 | def segments ( intervals , labels , base = None , height = None , text = False , text_kw = None , ax = None , ** kwargs ) : if text_kw is None : text_kw = dict ( ) text_kw . setdefault ( 'va' , 'top' ) text_kw . setdefault ( 'clip_on' , True ) text_kw . setdefault ( 'bbox' , dict ( boxstyle = 'round' , facecolor = 'white' ) ) intervals = np . atleast_2d ( intervals ) seg_def_style = dict ( linewidth = 1 ) ax , new_axes = __get_axes ( ax = ax ) if new_axes : ax . set_ylim ( [ 0 , 1 ] ) if base is None : base = ax . get_ylim ( ) [ 0 ] if height is None : height = ax . get_ylim ( ) [ 1 ] cycler = ax . _get_patches_for_fill . prop_cycler seg_map = dict ( ) for lab in labels : if lab in seg_map : continue style = next ( cycler ) seg_map [ lab ] = seg_def_style . copy ( ) seg_map [ lab ] . update ( style ) seg_map [ lab ] [ 'facecolor' ] = seg_map [ lab ] . pop ( 'color' ) seg_map [ lab ] . update ( kwargs ) seg_map [ lab ] [ 'label' ] = lab for ival , lab in zip ( intervals , labels ) : rect = Rectangle ( ( ival [ 0 ] , base ) , ival [ 1 ] - ival [ 0 ] , height , ** seg_map [ lab ] ) ax . add_patch ( rect ) seg_map [ lab ] . pop ( 'label' , None ) if text : ann = ax . annotate ( lab , xy = ( ival [ 0 ] , height ) , xycoords = 'data' , xytext = ( 8 , - 10 ) , textcoords = 'offset points' , ** text_kw ) ann . set_clip_path ( rect ) if new_axes : ax . set_yticks ( [ ] ) if intervals . size : __expand_limits ( ax , [ intervals . min ( ) , intervals . max ( ) ] , which = 'x' ) return ax | Plot a segmentation as a set of disjoint rectangles . |
20,049 | def labeled_intervals ( intervals , labels , label_set = None , base = None , height = None , extend_labels = True , ax = None , tick = True , ** kwargs ) : ax , _ = __get_axes ( ax = ax ) intervals = np . atleast_2d ( intervals ) if label_set is None : label_set = [ _ . get_text ( ) for _ in ax . get_yticklabels ( ) ] if not any ( label_set ) : label_set = [ ] else : label_set = list ( label_set ) if extend_labels : ticks = label_set + sorted ( set ( labels ) - set ( label_set ) ) elif label_set : ticks = label_set else : ticks = sorted ( set ( labels ) ) style = dict ( linewidth = 1 ) style . update ( next ( ax . _get_patches_for_fill . prop_cycler ) ) style [ 'facecolor' ] = style . pop ( 'color' ) style . update ( kwargs ) if base is None : base = np . arange ( len ( ticks ) ) if height is None : height = 1 if np . isscalar ( height ) : height = height * np . ones_like ( base ) seg_y = dict ( ) for ybase , yheight , lab in zip ( base , height , ticks ) : seg_y [ lab ] = ( ybase , yheight ) xvals = defaultdict ( list ) for ival , lab in zip ( intervals , labels ) : if lab not in seg_y : continue xvals [ lab ] . append ( ( ival [ 0 ] , ival [ 1 ] - ival [ 0 ] ) ) for lab in seg_y : ax . add_collection ( BrokenBarHCollection ( xvals [ lab ] , seg_y [ lab ] , ** style ) ) style . pop ( 'label' , None ) if label_set != ticks : ax . axhline ( len ( label_set ) , color = 'k' , alpha = 0.5 ) if tick : ax . grid ( True , axis = 'y' ) ax . set_yticks ( [ ] ) ax . set_yticks ( base ) ax . set_yticklabels ( ticks , va = 'bottom' ) ax . yaxis . set_major_formatter ( IntervalFormatter ( base , ticks ) ) if base . size : __expand_limits ( ax , [ base . min ( ) , ( base + height ) . max ( ) ] , which = 'y' ) if intervals . size : __expand_limits ( ax , [ intervals . min ( ) , intervals . max ( ) ] , which = 'x' ) return ax | Plot labeled intervals with each label on its own row . |
20,050 | def hierarchy ( intervals_hier , labels_hier , levels = None , ax = None , ** kwargs ) : if levels is None : levels = list ( range ( len ( intervals_hier ) ) ) ax , _ = __get_axes ( ax = ax ) n_patches = len ( ax . patches ) for ints , labs , key in zip ( intervals_hier [ : : - 1 ] , labels_hier [ : : - 1 ] , levels [ : : - 1 ] ) : labeled_intervals ( ints , labs , label = key , ax = ax , ** kwargs ) ax . patches [ n_patches : ] = ax . patches [ n_patches : ] [ : : - 1 ] return ax | Plot a hierarchical segmentation |
20,051 | def events ( times , labels = None , base = None , height = None , ax = None , text_kw = None , ** kwargs ) : if text_kw is None : text_kw = dict ( ) text_kw . setdefault ( 'va' , 'top' ) text_kw . setdefault ( 'clip_on' , True ) text_kw . setdefault ( 'bbox' , dict ( boxstyle = 'round' , facecolor = 'white' ) ) times = np . asarray ( times ) ax , new_axes = __get_axes ( ax = ax ) if new_axes : if base is None : base = 0 if height is None : height = 1 ax . set_ylim ( [ base , height ] ) else : if base is None : base = ax . get_ylim ( ) [ 0 ] if height is None : height = ax . get_ylim ( ) [ 1 ] cycler = ax . _get_patches_for_fill . prop_cycler style = next ( cycler ) . copy ( ) style . update ( kwargs ) if 'colors' in style : style . pop ( 'color' , None ) lines = ax . vlines ( times , base , base + height , ** style ) if labels : for path , lab in zip ( lines . get_paths ( ) , labels ) : ax . annotate ( lab , xy = ( path . vertices [ 0 ] [ 0 ] , height ) , xycoords = 'data' , xytext = ( 8 , - 10 ) , textcoords = 'offset points' , ** text_kw ) if new_axes : ax . set_yticks ( [ ] ) __expand_limits ( ax , [ base , base + height ] , which = 'y' ) if times . size : __expand_limits ( ax , [ times . min ( ) , times . max ( ) ] , which = 'x' ) return ax | Plot event times as a set of vertical lines |
20,052 | def pitch ( times , frequencies , midi = False , unvoiced = False , ax = None , ** kwargs ) : ax , _ = __get_axes ( ax = ax ) times = np . asarray ( times ) frequencies , voicings = freq_to_voicing ( np . asarray ( frequencies , dtype = np . float ) ) v_changes = 1 + np . flatnonzero ( voicings [ 1 : ] != voicings [ : - 1 ] ) v_changes = np . unique ( np . concatenate ( [ [ 0 ] , v_changes , [ len ( voicings ) ] ] ) ) v_slices , u_slices = [ ] , [ ] for start , end in zip ( v_changes , v_changes [ 1 : ] ) : idx = slice ( start , end ) if voicings [ start ] : v_slices . append ( idx ) elif frequencies [ idx ] . all ( ) : u_slices . append ( idx ) style = dict ( ) style . update ( next ( ax . _get_lines . prop_cycler ) ) style . update ( kwargs ) if midi : idx = frequencies > 0 frequencies [ idx ] = hz_to_midi ( frequencies [ idx ] ) ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) for idx in v_slices : ax . plot ( times [ idx ] , frequencies [ idx ] , ** style ) style . pop ( 'label' , None ) if unvoiced : style [ 'alpha' ] = style . get ( 'alpha' , 1.0 ) * 0.5 for idx in u_slices : ax . plot ( times [ idx ] , frequencies [ idx ] , ** style ) return ax | Visualize pitch contours |
20,053 | def multipitch ( times , frequencies , midi = False , unvoiced = False , ax = None , ** kwargs ) : ax , _ = __get_axes ( ax = ax ) style_voiced = dict ( ) style_voiced . update ( next ( ax . _get_lines . prop_cycler ) ) style_voiced . update ( kwargs ) style_unvoiced = style_voiced . copy ( ) style_unvoiced . pop ( 'label' , None ) style_unvoiced [ 'alpha' ] = style_unvoiced . get ( 'alpha' , 1.0 ) * 0.5 voiced_times = [ ] voiced_freqs = [ ] unvoiced_times = [ ] unvoiced_freqs = [ ] for t , freqs in zip ( times , frequencies ) : if not len ( freqs ) : continue freqs , voicings = freq_to_voicing ( np . asarray ( freqs , dtype = np . float ) ) idx = freqs > 0 freqs = freqs [ idx ] voicings = voicings [ idx ] if midi : freqs = hz_to_midi ( freqs ) n_voiced = sum ( voicings ) voiced_times . extend ( [ t ] * n_voiced ) voiced_freqs . extend ( freqs [ voicings ] ) unvoiced_times . extend ( [ t ] * ( len ( freqs ) - n_voiced ) ) unvoiced_freqs . extend ( freqs [ ~ voicings ] ) ax . scatter ( voiced_times , voiced_freqs , ** style_voiced ) if unvoiced : ax . scatter ( unvoiced_times , unvoiced_freqs , ** style_unvoiced ) if midi : ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) return ax | Visualize multiple f0 measurements |
20,054 | def piano_roll ( intervals , pitches = None , midi = None , ax = None , ** kwargs ) : if midi is None : if pitches is None : raise ValueError ( 'At least one of `midi` or `pitches` ' 'must be provided.' ) midi = hz_to_midi ( pitches ) scale = np . arange ( 128 ) ax = labeled_intervals ( intervals , np . round ( midi ) . astype ( int ) , label_set = scale , tick = False , ax = ax , ** kwargs ) ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) ax . axis ( 'auto' ) return ax | Plot a quantized piano roll as intervals |
20,055 | def separation ( sources , fs = 22050 , labels = None , alpha = 0.75 , ax = None , ** kwargs ) : ax , new_axes = __get_axes ( ax = ax ) sources = np . atleast_2d ( sources ) if labels is None : labels = [ 'Source {:d}' . format ( _ ) for _ in range ( len ( sources ) ) ] kwargs . setdefault ( 'scaling' , 'spectrum' ) cumspec = None specs = [ ] for i , src in enumerate ( sources ) : freqs , times , spec = spectrogram ( src , fs = fs , ** kwargs ) specs . append ( spec ) if cumspec is None : cumspec = spec . copy ( ) else : cumspec += spec ref_max = cumspec . max ( ) ref_min = ref_max * 1e-6 color_conv = ColorConverter ( ) for i , spec in enumerate ( specs ) : color = next ( ax . _get_lines . prop_cycler ) [ 'color' ] color = color_conv . to_rgba ( color , alpha = alpha ) cmap = LinearSegmentedColormap . from_list ( labels [ i ] , [ ( 1.0 , 1.0 , 1.0 , 0.0 ) , color ] ) ax . pcolormesh ( times , freqs , spec , cmap = cmap , norm = LogNorm ( vmin = ref_min , vmax = ref_max ) , shading = 'gouraud' , label = labels [ i ] ) ax . add_patch ( Rectangle ( ( 0 , 0 ) , 0 , 0 , color = color , label = labels [ i ] ) ) if new_axes : ax . axis ( 'tight' ) return ax | Source - separation visualization |
20,056 | def __ticker_midi_note ( x , pos ) : NOTES = [ 'C' , 'C#' , 'D' , 'D#' , 'E' , 'F' , 'F#' , 'G' , 'G#' , 'A' , 'A#' , 'B' ] cents = float ( np . mod ( x , 1.0 ) ) if cents >= 0.5 : cents = cents - 1.0 x = x + 0.5 idx = int ( x % 12 ) octave = int ( x / 12 ) - 1 if cents == 0 : return '{:s}{:2d}' . format ( NOTES [ idx ] , octave ) return '{:s}{:2d}{:+02d}' . format ( NOTES [ idx ] , octave , int ( cents * 100 ) ) | A ticker function for midi notes . |
20,057 | def ticker_notes ( ax = None ) : ax , _ = __get_axes ( ax = ax ) ax . yaxis . set_major_formatter ( FMT_MIDI_NOTE ) for tick in ax . yaxis . get_ticklabels ( ) : tick . set_verticalalignment ( 'baseline' ) | Set the y - axis of the given axes to MIDI notes |
20,058 | def ticker_pitch ( ax = None ) : ax , _ = __get_axes ( ax = ax ) ax . yaxis . set_major_formatter ( FMT_MIDI_HZ ) | Set the y - axis of the given axes to MIDI frequencies |
20,059 | def run ( self , ** import_params ) : if self . file : import_params [ "url" ] = self . file self . id_field = "id" if "connection" in import_params : self . fields . append ( "connector" ) self . update_from_dict ( import_params [ "connection" ] ) self . save ( force_create = True ) else : super ( FileImportJob , self ) . run ( params = import_params , files = self . files ) | Actually creates the import job on the CARTO server |
20,060 | def filter ( self ) : try : response = self . send ( self . get_collection_endpoint ( ) , "get" ) if self . json_collection_attribute is not None : resource_ids = self . client . get_response_data ( response , self . Meta . parse_json ) [ self . json_collection_attribute ] else : resource_ids = self . client . get_response_data ( response , self . Meta . parse_json ) except Exception as e : raise CartoException ( e ) resources = [ ] for resource_id in resource_ids : try : resource = self . resource_class ( self . client ) except ( ValueError , TypeError ) : continue else : setattr ( resource , resource . Meta . id_field , resource_id ) resources . append ( resource ) return resources | Get a filtered list of file imports |
20,061 | def send ( self , relative_path , http_method , ** requests_args ) : try : http_method , requests_args = self . prepare_send ( http_method , ** requests_args ) response = super ( APIKeyAuthClient , self ) . send ( relative_path , http_method , ** requests_args ) except Exception as e : raise CartoException ( e ) if CartoRateLimitException . is_rate_limited ( response ) : raise CartoRateLimitException ( response ) return response | Makes an API - key - authorized request |
20,062 | def is_valid_api_key ( self ) : res = self . send ( 'api/v3/api_keys' , 'get' ) return res . ok and self . api_key in ( ak [ 'token' ] for ak in res . json ( ) [ 'result' ] ) | Checks validity . Right now an API key is considered valid if it can list user API keys and the result contains that API key . This might change in the future . |
20,063 | def send ( self , url , http_method , ** client_args ) : try : client_args = client_args or { } if "params" not in client_args : client_args [ "params" ] = { } client_args [ "params" ] . update ( { "type" : "table" , "exclude_shared" : "true" } ) return super ( DatasetManager , self ) . send ( url , http_method , ** client_args ) except Exception as e : raise CartoException ( e ) | Sends an API request taking into account that datasets are part of the visualization endpoint . |
20,064 | def is_sync_table ( self , archive , interval , ** import_args ) : return ( hasattr ( archive , "startswith" ) and archive . startswith ( "http" ) or "connection" in import_args ) and interval is not None | Checks if this is a request for a sync dataset . |
20,065 | def create ( self , archive , interval = None , ** import_args ) : archive = archive . lower ( ) if hasattr ( archive , "lower" ) else archive if self . is_sync_table ( archive , interval , ** import_args ) : manager = SyncTableJobManager ( self . client ) else : manager = FileImportJobManager ( self . client ) import_job = manager . create ( archive ) if interval is None else manager . create ( archive , interval ) import_job . run ( ** import_args ) if import_job . get_id ( ) is None : raise CartoException ( _ ( "Import API returned corrupt job details \ when creating dataset" ) ) import_job . refresh ( ) count = 0 while import_job . state in ( "enqueued" , "queued" , "pending" , "uploading" , "unpacking" , "importing" , "guessing" ) or ( isinstance ( manager , SyncTableJobManager ) and import_job . state == "created" ) : if count >= MAX_NUMBER_OF_RETRIES : raise CartoException ( _ ( "Maximum number of retries exceeded \ when polling the import API for \ dataset creation" ) ) time . sleep ( INTERVAL_BETWEEN_RETRIES_S ) import_job . refresh ( ) count += 1 if import_job . state == "failure" : raise CartoException ( _ ( "Dataset creation was not successful \ because of failed import (error: {error}" ) . format ( error = json . dumps ( import_job . get_error_text ) ) ) if ( import_job . state != "complete" and import_job . state != "created" and import_job . state != "success" ) or import_job . success is False : raise CartoException ( _ ( "Dataset creation was not successful \ because of unknown import error" ) ) if hasattr ( import_job , "visualization_id" ) and import_job . visualization_id is not None : visualization_id = import_job . visualization_id else : table = TableManager ( self . client ) . get ( import_job . table_id ) visualization_id = table . table_visualization . get_id ( ) if table is not None else None try : return self . get ( visualization_id ) if visualization_id is not None else None except AttributeError : raise CartoException ( _ ( "Dataset creation was not successful \ because of unknown error" ) ) | Creating a table means uploading a file or setting up a sync table
20,066 | def send ( self , url , http_method , ** client_args ) : try : client_args . setdefault ( 'params' , { } ) client_args [ "params" ] . update ( { "type" : "derived" , "exclude_shared" : "true" } ) return super ( VisualizationManager , self ) . send ( url , http_method , ** client_args ) except Exception as e : raise CartoException ( e ) | Sends API request taking into account that visualizations are only a subset of the resources available at the visualization endpoint |
20,067 | def is_rate_limited ( response ) : if ( response . status_code == codes . too_many_requests and 'Retry-After' in response . headers and int ( response . headers [ 'Retry-After' ] ) >= 0 ) : return True return False | Checks if the response has been rate limited by CARTO APIs |
20,068 | def update_from_dict ( self , attribute_dict ) : if 'template' in attribute_dict : self . update_from_dict ( attribute_dict [ 'template' ] ) setattr ( self , self . Meta . id_field , attribute_dict [ 'template' ] [ 'name' ] ) return try : for k , v in attribute_dict . items ( ) : setattr ( self , k , v ) except Exception : setattr ( self , self . Meta . id_field , attribute_dict ) | Method overriden from the base class |
20,069 | def run ( self , ** client_params ) : try : self . send ( self . get_collection_endpoint ( ) , http_method = "POST" , ** client_params ) except Exception as e : raise CartoException ( e ) | Actually creates the async job on the CARTO server |
20,070 | def send ( self , sql , parse_json = True , do_post = True , format = None , ** request_args ) : try : params = { 'q' : sql } if format : params [ 'format' ] = format if format not in [ 'json' , 'geojson' ] : parse_json = False if request_args is not None : for attr in request_args : params [ attr ] = request_args [ attr ] if len ( sql ) < MAX_GET_QUERY_LEN and do_post is False : resp = self . auth_client . send ( self . api_url , 'GET' , params = params ) else : resp = self . auth_client . send ( self . api_url , 'POST' , data = params ) return self . auth_client . get_response_data ( resp , parse_json ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) | Executes SQL query in a CARTO server |
20,071 | def send ( self , url , http_method , json_body = None , http_header = None ) : try : data = self . client . send ( url , http_method = http_method , headers = http_header , json = json_body ) data_json = self . client . get_response_data ( data ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) return data_json | Executes Batch SQL query in a CARTO server |
20,072 | def create ( self , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url , http_method = "POST" , json_body = { "query" : sql_query } , http_header = header ) return data | Creates a new batch SQL query . |
20,073 | def create_and_wait_for_completion ( self , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url , http_method = "POST" , json_body = { "query" : sql_query } , http_header = header ) warnings . warn ( 'Batch SQL job created with job_id: {job_id}' . format ( job_id = data [ 'job_id' ] ) ) while data and data [ 'status' ] in BATCH_JOBS_PENDING_STATUSES : time . sleep ( BATCH_READ_STATUS_AFTER_SECONDS ) data = self . read ( data [ 'job_id' ] ) if data [ 'status' ] in BATCH_JOBS_FAILED_STATUSES : raise CartoException ( _ ( "Batch SQL job failed with result: {data}" . format ( data = data ) ) ) return data | Creates a new batch SQL query and waits for its completion or failure |
20,074 | def read ( self , job_id ) : data = self . send ( self . api_url + job_id , http_method = "GET" ) return data | Reads the information for a specific Batch API request |
20,075 | def update ( self , job_id , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url + job_id , http_method = "PUT" , json_body = { "query" : sql_query } , http_header = header ) return data | Updates the sql query of a specific job |
20,076 | def cancel ( self , job_id ) : try : confirmation = self . send ( self . api_url + job_id , http_method = "DELETE" ) except CartoException as e : if 'Cannot set status from done to cancelled' in e . args [ 0 ] . args [ 0 ] : return 'done' else : raise e return confirmation [ 'status' ] | Cancels a job |
20,077 | def copyfrom ( self , query , iterable_data , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : url = self . api_url + '/copyfrom' headers = { 'Content-Type' : 'application/octet-stream' , 'Transfer-Encoding' : 'chunked' } params = { 'api_key' : self . api_key , 'q' : query } if compress : headers [ 'Content-Encoding' ] = 'gzip' _iterable_data = self . _compress_chunks ( iterable_data , compression_level ) else : _iterable_data = iterable_data try : response = self . client . send ( url , http_method = 'POST' , params = params , data = _iterable_data , headers = headers , stream = True ) response_json = self . client . get_response_data ( response ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) return response_json | Gets data from an iterable object into a table |
20,078 | def copyfrom_file_object ( self , query , file_object , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : chunk_generator = self . _read_in_chunks ( file_object ) return self . copyfrom ( query , chunk_generator , compress , compression_level ) | Gets data from a readable file object into a table |
20,079 | def copyfrom_file_path ( self , query , path , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : with open ( path , 'rb' ) as f : result = self . copyfrom_file_object ( query , f , compress , compression_level ) return result | Gets data from a readable file into a table |
20,080 | def copyto ( self , query ) : url = self . api_url + '/copyto' params = { 'api_key' : self . api_key , 'q' : query } try : response = self . client . send ( url , http_method = 'GET' , params = params , stream = True ) response . raise_for_status ( ) except CartoRateLimitException as e : raise e except HTTPError as e : if 400 <= response . status_code < 500 : reason = response . json ( ) [ 'error' ] [ 0 ] error_msg = u'%s Client Error: %s' % ( response . status_code , reason ) raise CartoException ( error_msg ) else : raise CartoException ( e ) except Exception as e : raise CartoException ( e ) return response | Gets data from a table into a Response object that can be iterated |
20,081 | def copyto_file_object ( self , query , file_object ) : response = self . copyto ( query ) for block in response . iter_content ( DEFAULT_CHUNK_SIZE ) : file_object . write ( block ) | Gets data from a table into a writable file object |
20,082 | def copyto_file_path ( self , query , path , append = False ) : file_mode = 'wb' if not append else 'ab' with open ( path , file_mode ) as f : self . copyto_file_object ( query , f ) | Gets data from a table into a writable file |
20,083 | def run ( self , ** import_params ) : import_params [ "url" ] = self . url import_params [ "interval" ] = self . interval if "connection" in import_params : self . fields . append ( "connector" ) import_params [ "connection" ] [ "interval" ] = self . interval self . update_from_dict ( import_params [ "connection" ] ) self . save ( force_create = True ) else : return super ( SyncTableJob , self ) . run ( params = import_params ) | Actually creates the job import on the CARTO server |
20,084 | def force_sync ( self ) : try : self . send ( self . get_resource_endpoint ( ) , "put" ) except Exception as e : raise CartoException ( e ) | Forces to sync the SyncTableJob |
20,085 | def set_prob_type ( cls , problem_type , classification_type , eval_type ) : assert problem_type in problem_type_list , 'Need to set Problem Type' if problem_type == 'classification' : assert classification_type in classification_type_list , 'Need to set Classification Type' assert eval_type in eval_type_list , 'Need to set Evaluation Type' cls . problem_type = problem_type cls . classification_type = classification_type cls . eval_type = eval_type if cls . problem_type == 'classification' : print 'Setting Problem:{}, Type:{}, Eval:{}' . format ( cls . problem_type , cls . classification_type , cls . eval_type ) elif cls . problem_type == 'regression' : print 'Setting Problem:{}, Eval:{}' . format ( cls . problem_type , cls . eval_type ) return | Set problem type |
20,086 | def make_multi_cols ( self , num_class , name ) : cols = [ 'c' + str ( i ) + '_' for i in xrange ( num_class ) ] cols = map ( lambda x : x + name , cols ) return cols | make cols for multi - class predictions |
20,087 | def parse ( self , data ) : ( self . len_di , self . xattr_length , self . extent_location , self . parent_directory_num ) = struct . unpack_from ( self . FMT , data [ : 8 ] , 0 ) if self . len_di % 2 != 0 : self . directory_identifier = data [ 8 : - 1 ] else : self . directory_identifier = data [ 8 : ] self . dirrecord = None self . _initialized = True | A method to parse an ISO9660 Path Table Record out of a string . |
20,088 | def _record ( self , ext_loc , parent_dir_num ) : return struct . pack ( self . FMT , self . len_di , self . xattr_length , ext_loc , parent_dir_num ) + self . directory_identifier + b'\x00' * ( self . len_di % 2 ) | An internal method to generate a string representing this Path Table Record . |
def record_little_endian(self):
    """
    Generate the little-endian representation of this Path Table Record.

    Returns:
     The little-endian record as a byte string.
    Raises:
     pycdlibexception.PyCdlibInternalError - If the record has not been
     initialized via parse() or new_dir().
    """
    if self._initialized:
        # Little-endian is the native layout: no byte swapping needed.
        return self._record(self.extent_location, self.parent_directory_num)
    raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
def record_big_endian(self):
    """
    Generate the big-endian representation of this Path Table Record.

    Returns:
     The big-endian record as a byte string.
    Raises:
     pycdlibexception.PyCdlibInternalError - If the record has not been
     initialized via parse() or new_dir().
    """
    if self._initialized:
        # Swap the multi-byte fields before packing so the record comes out
        # big-endian.
        return self._record(utils.swab_32bit(self.extent_location),
                            utils.swab_16bit(self.parent_directory_num))
    raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
def _new(self, name, parent_dir_num):
    """
    Internal helper to initialize a brand-new Path Table Record.

    Parameters:
     name - The directory identifier for this record.
     parent_dir_num - The parent directory number for this record.
    Returns:
     Nothing.
    """
    self.directory_identifier = name
    self.len_di = len(name)
    # New records never carry extended attributes.
    self.xattr_length = 0
    self.parent_directory_num = parent_dir_num
    self._initialized = True
def new_dir(self, name):
    """
    Create a new Path Table Record for a directory.

    Parameters:
     name - The directory identifier for the new record.
    Returns:
     Nothing.
    Raises:
     pycdlibexception.PyCdlibInternalError - If the record was already
     initialized.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Path Table Record already initialized')
    # The parent directory number starts at 0; it is filled in later.
    self._new(name, 0)
def update_extent_location(self, extent_loc):
    """
    Update the extent location for this Path Table Record.

    Parameters:
     extent_loc - The new extent location.
    Returns:
     Nothing.
    Raises:
     pycdlibexception.PyCdlibInternalError - If the record has not been
     initialized.
    """
    if self._initialized:
        self.extent_location = extent_loc
    else:
        raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
def update_parent_directory_number(self, parent_dir_num):
    """
    Update the parent directory number for this Path Table Record, as taken
    from the directory record.

    Parameters:
     parent_dir_num - The new parent directory number.
    Returns:
     Nothing.
    Raises:
     pycdlibexception.PyCdlibInternalError - If the record has not been
     initialized.
    """
    if self._initialized:
        self.parent_directory_num = parent_dir_num
    else:
        raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
def equal_to_be(self, be_record):
    """
    Compare this (little-endian) Path Table Record to its big-endian
    counterpart, used as a sanity check on the ISO.

    Parameters:
     be_record - The big-endian record to compare against.
    Returns:
     True if the records agree (after byte-swapping the big-endian
     multi-byte fields), False otherwise.
    Raises:
     pycdlibexception.PyCdlibInternalError - If this record has not been
     initialized.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')
    # Single-byte and string fields compare directly; the multi-byte fields
    # must be swabbed from big-endian first.
    return (be_record.len_di == self.len_di and
            be_record.xattr_length == self.xattr_length and
            utils.swab_32bit(be_record.extent_location) == self.extent_location and
            utils.swab_16bit(be_record.parent_directory_num) == self.parent_directory_num and
            be_record.directory_identifier == self.directory_identifier)
def copy_data(data_length, blocksize, infp, outfp):
    """
    Copy data_length bytes from the input file object to the output file
    object, using sendfile() (a zero-copy kernel transfer) when both objects
    are backed by real file descriptors, and a buffered read/write loop
    otherwise.

    Parameters:
     data_length - The number of bytes to copy.
     blocksize - The chunk size for the fallback read/write loop.
     infp - The file object to copy from.
     outfp - The file object to copy to.
    Returns:
     Nothing.

    NOTE(review): `have_sendfile` and `sendfile` are module-level names set
    up elsewhere in this file (presumably a guarded import) -- confirm.
    """
    use_sendfile = False
    if have_sendfile:
        # sendfile() needs real OS-level descriptors on both sides; probe
        # with fileno() and fall back if either object lacks one.
        try:
            x_unused = infp.fileno()
            y_unused = outfp.fileno()
            use_sendfile = True
        except (AttributeError, io.UnsupportedOperation):
            pass

    if use_sendfile:
        # sendfile() transfers in kernel space and does not advance the
        # Python-level file positions, so record them and seek past the
        # copied region ourselves.
        in_offset = infp.tell()
        out_offset = outfp.tell()
        sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
        infp.seek(in_offset + data_length)
        outfp.seek(out_offset + data_length)
    else:
        left = data_length
        readsize = blocksize
        while left > 0:
            if left < readsize:
                readsize = left
            data = infp.read(readsize)
            data_len = len(data)
            if data_len != readsize:
                # Short read: treated as end-of-data.  Claiming the full
                # remainder forces the loop to terminate after writing what
                # was actually read.
                data_len = left
            outfp.write(data)
            left -= data_len
def encode_space_pad(instr, length, encoding):
    """
    Pad an input string out to the given length with encoded spaces.

    The UTF-8 input is first transcoded into the requested encoding; encoded
    spaces are then appended until the target length is reached.  If the
    encoded space is multi-byte and overshoots, the result is trimmed back
    to exactly *length* bytes.

    Parameters:
     instr - The UTF-8 byte string to pad.
     length - The target length in bytes.
     encoding - The name of the encoding to transcode into.
    Returns:
     The padded byte string of exactly *length* bytes.
    Raises:
     pycdlibexception.PyCdlibInvalidInput - If the encoded input is already
     longer than *length*.
    """
    output = instr.decode('utf-8').encode(encoding)
    if len(output) > length:
        raise pycdlibexception.PyCdlibInvalidInput('Input string too long!')

    pad_needed = length - len(output)
    if pad_needed > 0:
        encoded_space = ' '.encode(encoding)
        # Round up to a whole number of encoded spaces, then trim the
        # overshoot (possible when the encoded space is multi-byte).
        reps = -(-pad_needed // len(encoded_space))
        output = (output + encoded_space * reps)[:length]
    return output
def gmtoffset_from_tm(tm, local):
    """
    Compute the GMT offset, in 15-minute intervals, between a time in
    seconds since the epoch and the corresponding local time object.

    Parameters:
     tm - The time in seconds since the epoch.
     local - A time.struct_time for the same instant in local time.
    Returns:
     The offset of local time from GMT in 15-minute intervals (negative
     when GMT is ahead of local time).
    """
    gmt = time.gmtime(tm)
    dyear = gmt.tm_year - local.tm_year
    dday = gmt.tm_yday - local.tm_yday
    dhour = gmt.tm_hour - local.tm_hour
    dmin = gmt.tm_min - local.tm_min
    # Local and GMT can only ever be one calendar day apart, so clamp any
    # day delta to +/-1 (a year rollover shows up as dyear instead).
    if dday < 0:
        dday = -1
    elif dyear > 0:
        dday = 1
    return -(dmin + 60 * (dhour + 24 * dday)) // 15
def zero_pad(fp, data_size, pad_size):
    """
    Efficiently write zero padding from data_size up to the next multiple
    of pad_size.

    Parameters:
     fp - The file object to pad; must be positioned at the end of the data.
     data_size - The size of the data written so far.
     pad_size - The alignment to pad out to.
    Returns:
     Nothing.
    """
    remainder = -data_size % pad_size
    if remainder == 0:
        # Already aligned; nothing to write.
        return
    # Seek forward and write a single NUL: the intervening gap is
    # zero-filled without writing every byte.
    fp.seek(remainder - 1, os.SEEK_CUR)
    fp.write(b'\x00')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.