idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
def interpolate_intervals(intervals, labels, time_points, fill_value=None):
    """Assign labels to a set of points in time given a set of intervals.

    Points outside every interval receive ``fill_value``.
    """
    time_points = np.asarray(time_points)
    # Query points must be sorted for searchsorted to be meaningful.
    if np.any(time_points[1:] < time_points[:-1]):
        raise ValueError('time_points must be in non-decreasing order')

    aligned_labels = [fill_value] * len(time_points)

    # For each interval, locate the span of query points it covers.
    span_starts = np.searchsorted(time_points, intervals[:, 0], side='left')
    span_ends = np.searchsorted(time_points, intervals[:, 1], side='right')

    for span_start, span_end, label in zip(span_starts, span_ends, labels):
        for position in range(span_start, span_end):
            aligned_labels[position] = label

    return aligned_labels
def sort_labeled_intervals(intervals, labels=None):
    """Sort intervals, and optionally their corresponding labels,
    according to start time."""
    order = np.argsort(intervals[:, 0])
    sorted_intervals = intervals[order]

    if labels is None:
        return sorted_intervals
    # Reorder labels to follow the sorted intervals.
    return sorted_intervals, [labels[i] for i in order]
def f_measure(precision, recall, beta=1.0):
    """Compute the f-measure from precision and recall scores."""
    if precision == 0 and recall == 0:
        # Both terms vanish; avoid a zero denominator.
        return 0.0
    weight = beta ** 2
    return (1 + weight) * precision * recall / (weight * precision + recall)
def intervals_to_boundaries(intervals, q=5):
    """Convert interval times into boundaries.

    Times are rounded to ``q`` decimals and deduplicated.
    """
    rounded = np.round(intervals, decimals=q)
    return np.unique(np.ravel(rounded))
def boundaries_to_intervals(boundaries):
    """Convert an array of event times into contiguous intervals."""
    # np.unique both sorts and deduplicates; mismatch means bad input.
    if not np.allclose(boundaries, np.unique(boundaries)):
        raise ValueError('Boundary times are not unique or not ascending.')

    # Pair each boundary with its successor.
    pairs = zip(boundaries[:-1], boundaries[1:])
    return np.asarray(list(pairs))
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
    """Merge the time intervals of two labeled sequences.

    Both sequences must span the same overall time range.
    """
    starts_align = x_intervals[0, 0] == y_intervals[0, 0]
    ends_align = x_intervals[-1, 1] == y_intervals[-1, 1]
    if not (starts_align and ends_align):
        raise ValueError("Time intervals do not align; did you mean to call "
                         "'adjust_intervals()' first?")

    # Union of all boundary times from both sequences, sorted & deduped.
    time_boundaries = np.unique(
        np.concatenate([x_intervals, y_intervals], axis=0))
    output_intervals = np.array(
        [time_boundaries[:-1], time_boundaries[1:]]).T

    x_labels_out, y_labels_out = [], []
    for t0 in output_intervals[:, 0]:
        # Last source interval whose start is at or before t0 owns t0.
        x_pos = np.flatnonzero(t0 >= x_intervals[:, 0])[-1]
        x_labels_out.append(x_labels[x_pos])
        y_pos = np.flatnonzero(t0 >= y_intervals[:, 0])[-1]
        y_labels_out.append(y_labels[y_pos])

    return output_intervals, x_labels_out, y_labels_out
def match_events(ref, est, window, distance=None):
    """Compute a maximum matching between reference and estimated event
    times, subject to a window constraint."""
    if distance is None:
        hits = _fast_hit_windows(ref, est, window)
    else:
        # Compute the indices of feasible pairings under `distance`.
        hits = np.where(distance(ref, est) <= window)

    # Bipartite graph: estimated index -> list of feasible reference indices.
    graph = {}
    for ref_idx, est_idx in zip(*hits):
        graph.setdefault(est_idx, []).append(ref_idx)

    # Maximum matching, sorted for a deterministic output order.
    return sorted(_bipartite_match(graph).items())
def _fast_hit_windows(ref, est, window):
    """Fast calculation of windowed hits for time events."""
    ref = np.asarray(ref)
    est = np.asarray(est)

    # Sort reference times so we can binary-search the window edges.
    sort_order = np.argsort(ref)
    ref_sorted = ref[sort_order]

    window_left = np.searchsorted(ref_sorted, est - window, side='left')
    window_right = np.searchsorted(ref_sorted, est + window, side='right')

    hit_ref, hit_est = [], []
    for est_idx, (lo, hi) in enumerate(zip(window_left, window_right)):
        # Map sorted positions back to original reference indices.
        hit_ref.extend(sort_order[lo:hi])
        hit_est.extend([est_idx] * (hi - lo))

    return hit_ref, hit_est
def validate_events(events, max_time=30000.):
    """Check that a 1-d event location ndarray is well-formed, and raise
    errors if not."""
    # Huge times usually mean the caller passed samples, not seconds.
    if (events > max_time).any():
        raise ValueError('An event at time {} was found which is greater than '
                         'the maximum allowable time of max_time = {} (did you'
                         ' supply event times in '
                         'seconds?)'.format(events.max(), max_time))
    # Only flat arrays of event times are accepted.
    if events.ndim != 1:
        raise ValueError('Event times should be 1-d numpy ndarray, '
                         'but shape={}'.format(events.shape))
    # Events must be non-decreasing.
    if (np.diff(events) < 0).any():
        raise ValueError('Events should be in increasing order.')
def validate_frequencies(frequencies, max_freq, min_freq,
                         allow_negatives=False):
    """Check that a 1-d frequency ndarray is well-formed, and raise errors
    if not."""
    # If flag is true, map frequencies to their absolute value.
    if allow_negatives:
        frequencies = np.abs(frequencies)
    # Huge values usually mean the caller did not pass Hz.
    if (np.abs(frequencies) > max_freq).any():
        raise ValueError('A frequency of {} was found which is greater than '
                         'the maximum allowable value of max_freq = {} (did '
                         'you supply frequency values in '
                         'Hz?)'.format(frequencies.max(), max_freq))
    # Tiny values are equally suspect.
    if (np.abs(frequencies) < min_freq).any():
        raise ValueError('A frequency of {} was found which is less than the '
                         'minimum allowable value of min_freq = {} (did you '
                         'supply frequency values in '
                         'Hz?)'.format(frequencies.min(), min_freq))
    # Only flat arrays of frequencies are accepted.
    if frequencies.ndim != 1:
        raise ValueError('Frequencies should be 1-d numpy ndarray, '
                         'but shape={}'.format(frequencies.shape))
def intervals_to_durations(intervals):
    """Convert an array of n intervals to their n durations."""
    validate_intervals(intervals)
    # Duration of each row is |end - start|.
    per_interval = np.diff(intervals, axis=-1)
    return np.abs(per_interval).flatten()
def validate(reference_sources, estimated_sources):
    """Checks that the input data to a metric are valid, and throws helpful
    errors if not.

    Both arrays must have identical shapes and at most 3 dimensions
    (presumably (nsrc, nsampl) or (nsrc, nsampl, nchan) -- confirm against
    callers). Empty inputs only warn; silent sources and too many sources
    raise.
    """
    # Shapes must match exactly; every metric compares element-wise.
    if reference_sources.shape != estimated_sources.shape:
        raise ValueError('The shape of estimated sources and the true '
                         'sources should match. reference_sources.shape '
                         '= {}, estimated_sources.shape '
                         '= {}'.format(reference_sources.shape,
                                       estimated_sources.shape))

    if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
        raise ValueError('The number of dimensions is too high (must be less '
                         'than 3). reference_sources.ndim = {}, '
                         'estimated_sources.ndim '
                         '= {}'.format(reference_sources.ndim,
                                       estimated_sources.ndim))

    if reference_sources.size == 0:
        # Empty input is allowed: metrics return empty arrays downstream.
        warnings.warn("reference_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(reference_sources):
        # A silent reference makes the decomposition ambiguous.
        raise ValueError('All the reference sources should be non-silent (not '
                         'all-zeros), but at least one of the reference '
                         'sources is all 0s, which introduces ambiguity to the'
                         ' evaluation. (Otherwise we can add infinitely many '
                         'all-zero sources.)')

    if estimated_sources.size == 0:
        warnings.warn("estimated_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(estimated_sources):
        # A silent estimate makes the projection system underdetermined.
        raise ValueError('All the estimated sources should be non-silent (not '
                         'all-zeros), but at least one of the estimated '
                         'sources is all 0s. Since we require each reference '
                         'source to be non-silent, having a silent estimated '
                         'source will result in an underdetermined system.')

    # Cap nsrc: the permutation search downstream is factorial in nsrc.
    if (estimated_sources.shape[0] > MAX_SOURCES or
            reference_sources.shape[0] > MAX_SOURCES):
        raise ValueError('The supplied matrices should be of shape (nsrc,'
                         ' nsampl) but reference_sources.shape[0] = {} and '
                         'estimated_sources.shape[0] = {} which is greater '
                         'than mir_eval.separation.MAX_SOURCES = {}. To '
                         'override this check, set '
                         'mir_eval.separation.MAX_SOURCES to a '
                         'larger value.'.format(reference_sources.shape[0],
                                                estimated_sources.shape[0],
                                                MAX_SOURCES))
def _any_source_silent(sources):
    """Returns true if the parameter sources has any silent first
    dimensions."""
    # Collapse any axes beyond the second (e.g. channels), then check
    # whether some source is zero at every sample.
    collapsed = np.sum(sources, axis=tuple(range(2, sources.ndim)))
    per_source_silent = np.all(collapsed == 0, axis=1)
    return np.any(per_source_silent)
def bss_eval_sources(reference_sources, estimated_sources,
                     compute_permutation=True):
    """Ordering and measurement of the separation quality for estimated
    source signals in terms of filtered true source, interference and
    artifacts.

    Returns (sdr, sir, sar, perm). When ``compute_permutation`` is True the
    metrics are reported under the source ordering that maximizes mean SIR;
    otherwise estimate j is scored against reference j.
    """
    # make sure the input is of shape (nsrc, nsampl)
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])

    nsrc = estimated_sources.shape[0]

    # does user desire permutations?
    if compute_permutation:
        # compute criteria for all possible pair matches
        sdr = np.empty((nsrc, nsrc))
        sir = np.empty((nsrc, nsrc))
        sar = np.empty((nsrc, nsrc))
        for jest in range(nsrc):
            for jtrue in range(nsrc):
                # 512-tap distortion filter length (fixed by this
                # implementation).
                s_true, e_spat, e_interf, e_artif = \
                    _bss_decomp_mtifilt(reference_sources,
                                        estimated_sources[jest],
                                        jtrue, 512)
                sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
                    _bss_source_crit(s_true, e_spat, e_interf, e_artif)

        # select the best ordering: the permutation maximizing mean SIR
        perms = list(itertools.permutations(list(range(nsrc))))
        mean_sir = np.empty(len(perms))
        dum = np.arange(nsrc)
        for (i, perm) in enumerate(perms):
            mean_sir[i] = np.mean(sir[perm, dum])
        popt = perms[np.argmax(mean_sir)]
        idx = (popt, dum)
        return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
    else:
        # compute criteria for only the simple correspondence
        # (estimate 1 is estimate corresponding to reference source 1, etc.)
        sdr = np.empty(nsrc)
        sir = np.empty(nsrc)
        sar = np.empty(nsrc)
        for j in range(nsrc):
            s_true, e_spat, e_interf, e_artif = \
                _bss_decomp_mtifilt(reference_sources,
                                    estimated_sources[j],
                                    j, 512)
            sdr[j], sir[j], sar[j] = \
                _bss_source_crit(s_true, e_spat, e_interf, e_artif)

        # return the default permutation for compatibility
        popt = np.arange(nsrc)
        return (sdr, sir, sar, popt)
def bss_eval_sources_framewise(reference_sources, estimated_sources,
                               window=30 * 44100, hop=15 * 44100,
                               compute_permutation=False):
    """Framewise computation of bss_eval_sources.

    Slides a window of ``window`` samples with hop ``hop`` over the inputs
    and scores each frame; frames containing a silent source yield NaN.
    Defaults correspond to 30 s / 15 s at 44.1 kHz.
    """
    # make sure the input is of shape (nsrc, nsampl)
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])

    nsrc = reference_sources.shape[0]

    nwin = int(np.floor((reference_sources.shape[1] - window + hop) / hop))
    # if fewer than 2 windows would be evaluated, return the sources result
    if nwin < 2:
        result = bss_eval_sources(reference_sources,
                                  estimated_sources,
                                  compute_permutation)
        return [np.expand_dims(score, -1) for score in result]

    # compute the criteria across all windows
    sdr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))

    # k iterates across all the windows
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice]
        est_slice = estimated_sources[:, win_slice]
        # check for a silent frame
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
                ref_slice, est_slice, compute_permutation)
        else:
            # if we have a silent frame set results as np.nan
            sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan

    return sdr, sir, sar, perm
def bss_eval_images_framewise(reference_sources, estimated_sources,
                              window=30 * 44100, hop=15 * 44100,
                              compute_permutation=False):
    """Framewise computation of bss_eval_images.

    Slides a window of ``window`` samples with hop ``hop`` over the inputs
    and scores each frame with ``bss_eval_images``; frames containing a
    silent source yield NaN. Returns (sdr, isr, sir, sar, perm), each of
    shape (nsrc, nwin). Defaults correspond to 30 s / 15 s at 44.1 kHz.
    """
    # make sure the input has 3 dimensions
    # assuming input is in shape (nsampl) or (nsrc, nsampl)
    estimated_sources = np.atleast_3d(estimated_sources)
    reference_sources = np.atleast_3d(reference_sources)
    # we will ensure input doesn't have more than 3 dimensions in validate
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])

    nsrc = reference_sources.shape[0]

    nwin = int(np.floor((reference_sources.shape[1] - window + hop) / hop))
    # if fewer than 2 windows would be evaluated, return the images result
    if nwin < 2:
        result = bss_eval_images(reference_sources,
                                 estimated_sources,
                                 compute_permutation)
        return [np.expand_dims(score, -1) for score in result]

    # compute the criteria across all windows
    sdr = np.empty((nsrc, nwin))
    isr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))

    # k iterates across all the windows
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice, :]
        est_slice = estimated_sources[:, win_slice, :]
        # check for a silent frame
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
                bss_eval_images(ref_slice, est_slice, compute_permutation)
        else:
            # if we have a silent frame set results as np.nan
            # Bug fix: ``isr`` was previously omitted from this assignment,
            # leaving uninitialized np.empty garbage in isr for silent
            # frames (the sister function in mir_eval sets all five).
            sdr[:, k] = isr[:, k] = sir[:, k] = sar[:, k] = \
                perm[:, k] = np.nan

    return sdr, isr, sir, sar, perm
def _project(reference_sources, estimated_source, flen):
    """Least-squares projection of estimated source on the subspace spanned
    by delayed versions of reference sources, with delays between 0 and
    flen-1.

    Returns the projected signal of length ``nsampl + flen - 1``.
    """
    nsrc = reference_sources.shape[0]
    nsampl = reference_sources.shape[1]

    # computing coefficients of least squares problem via FFT ##
    # zero padding and FFT of input data
    reference_sources = np.hstack((reference_sources,
                                   np.zeros((nsrc, flen - 1))))
    estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
    n_fft = int(2 ** np.ceil(np.log2(nsampl + flen - 1.)))
    sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)

    # inner products between delayed versions of reference_sources
    G = np.zeros((nsrc * flen, nsrc * flen))
    for i in range(nsrc):
        for j in range(nsrc):
            # Cross-correlation of sources i and j via the FFT.
            ssf = sf[i] * np.conj(sf[j])
            ssf = np.real(scipy.fftpack.ifft(ssf))
            ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
                          r=ssf[:flen])
            G[i * flen: (i + 1) * flen, j * flen: (j + 1) * flen] = ss
            G[j * flen: (j + 1) * flen, i * flen: (i + 1) * flen] = ss.T

    # inner products between estimated_source and delayed versions of
    # reference_sources
    D = np.zeros(nsrc * flen)
    for i in range(nsrc):
        ssef = sf[i] * np.conj(sef)
        ssef = np.real(scipy.fftpack.ifft(ssef))
        D[i * flen: (i + 1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))

    # Computing projection
    # Distortion filters
    try:
        C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
    except np.linalg.LinAlgError:
        # Fix: np.linalg.linalg was a private alias removed in NumPy 2.0;
        # LinAlgError is exposed directly on np.linalg.
        # rcond=None selects the non-deprecated lstsq default.
        C = np.linalg.lstsq(G, D, rcond=None)[0].reshape(flen, nsrc,
                                                         order='F')

    # Filtering
    sproj = np.zeros(nsampl + flen - 1)
    for i in range(nsrc):
        sproj += fftconvolve(C[:, i],
                             reference_sources[i])[:nsampl + flen - 1]
    return sproj
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
    """Measurement of the separation quality for a given image in terms of
    filtered true source, spatial error, interference and artifacts."""
    # Energy ratios, expressed in dB via _safe_db.
    true_energy = np.sum(s_true ** 2)
    total_error = e_spat + e_interf + e_artif

    sdr = _safe_db(true_energy, np.sum(total_error ** 2))
    isr = _safe_db(true_energy, np.sum(e_spat ** 2))
    sir = _safe_db(np.sum((s_true + e_spat) ** 2), np.sum(e_interf ** 2))
    sar = _safe_db(np.sum((s_true + e_spat + e_interf) ** 2),
                   np.sum(e_artif ** 2))
    return (sdr, isr, sir, sar)
def _safe_db(num, den):
    """Properly handle the potential +Inf db SIR, instead of raising a
    RuntimeWarning. Only denominator is checked because the numerator can
    never be 0.
    """
    if den == 0:
        # Fix: np.Inf was removed in NumPy 2.0; np.inf is the canonical name.
        return np.inf
    return 10 * np.log10(num / den)
def evaluate(reference_sources, estimated_sources, **kwargs):
    """Compute all metrics for the given reference and estimated signals.

    Runs the image-based metrics (whole-signal and framewise) always, and
    the source-based metrics only when both inputs have fewer than 3
    dimensions. Extra kwargs are forwarded to each metric via
    util.filter_kwargs. Returns an OrderedDict of metric name -> list.
    """
    # Compute all the metrics
    scores = collections.OrderedDict()

    sdr, isr, sir, sar, perm = util.filter_kwargs(
        bss_eval_images,
        reference_sources,
        estimated_sources,
        **kwargs
    )
    scores['Images - Source to Distortion'] = sdr.tolist()
    scores['Images - Image to Spatial'] = isr.tolist()
    scores['Images - Source to Interference'] = sir.tolist()
    scores['Images - Source to Artifact'] = sar.tolist()
    scores['Images - Source permutation'] = perm.tolist()

    sdr, isr, sir, sar, perm = util.filter_kwargs(
        bss_eval_images_framewise,
        reference_sources,
        estimated_sources,
        **kwargs
    )
    scores['Images Frames - Source to Distortion'] = sdr.tolist()
    scores['Images Frames - Image to Spatial'] = isr.tolist()
    scores['Images Frames - Source to Interference'] = sir.tolist()
    scores['Images Frames - Source to Artifact'] = sar.tolist()
    scores['Images Frames - Source permutation'] = perm.tolist()

    # Verify we can compute sources on this input
    if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
        sdr, sir, sar, perm = util.filter_kwargs(
            bss_eval_sources_framewise,
            reference_sources,
            estimated_sources,
            **kwargs
        )
        scores['Sources Frames - Source to Distortion'] = sdr.tolist()
        scores['Sources Frames - Source to Interference'] = sir.tolist()
        scores['Sources Frames - Source to Artifact'] = sar.tolist()
        scores['Sources Frames - Source permutation'] = perm.tolist()

        sdr, sir, sar, perm = util.filter_kwargs(
            bss_eval_sources,
            reference_sources,
            estimated_sources,
            **kwargs
        )
        scores['Sources - Source to Distortion'] = sdr.tolist()
        scores['Sources - Source to Interference'] = sir.tolist()
        scores['Sources - Source to Artifact'] = sar.tolist()
        scores['Sources - Source permutation'] = perm.tolist()

    return scores
def clicks(times, fs, click=None, length=None):
    """Returns a signal with the signal `click` placed at each specified
    time."""
    if click is None:
        # Default click: 1 kHz tone lasting 100 ms...
        sample_idx = np.arange(fs * .1)
        click = np.sin(2 * np.pi * sample_idx * 1000 / (1. * fs))
        # ...with an exponential decay envelope.
        click *= np.exp(-sample_idx / (fs * .01))

    if length is None:
        # Long enough to hold the final click in full.
        length = int(times.max() * fs + click.shape[0] + 1)

    click_signal = np.zeros(length)
    for time in times:
        start = int(time * fs)
        end = start + click.shape[0]
        # Stop once clicks would begin past the end of the signal.
        if start >= length:
            break
        if end >= length:
            # Truncate the final click at the signal boundary.
            click_signal[start:] = click[:length - start]
            break
        click_signal[start:end] = click
    return click_signal
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
                   n_dec=1):
    """Reverse synthesis of a time-frequency representation of a signal.

    Each row of ``gram`` is a frequency bin (magnitude over time); the
    output is the normalized sum of per-bin waveforms scaled by the
    interpolated magnitudes.
    """
    # Default value for length
    if times.ndim == 1:
        # Convert to intervals
        times = util.boundaries_to_intervals(times)
    if length is None:
        length = int(times[-1, 1] * fs)
    times, _ = util.adjust_intervals(times, t_max=length)
    # Truncate times so that the shape matches gram
    n_times = gram.shape[1]
    times = times[:n_times]

    def _fast_synthesize(frequency):
        """A faster way to synthesize a signal.
        Generate one cycle, and simulate arbitrary repetitions
        using array indexing tricks.
        """
        # hack so that we can ensure an integer number of periods and samples
        # rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
        frequency = np.round(frequency, n_dec)

        # Generate 10*frequency periods at this frequency
        # Equivalent to n_samples = int(n_periods * fs / frequency)
        # n_periods = 10*frequency is the smallest integer that guarantees
        # that n_samples will be an integer, since assuming 10*frequency
        # is an integer
        n_samples = int(10.0 ** n_dec * fs)

        short_signal = function(2.0 * np.pi * np.arange(n_samples) *
                                frequency / fs)

        # Calculate the number of loops we need to fill the duration
        n_repeats = int(np.ceil(length / float(short_signal.shape[0])))

        # Simulate tiling the short buffer by using stride tricks
        long_signal = as_strided(short_signal,
                                 shape=(n_repeats, len(short_signal)),
                                 strides=(0, short_signal.itemsize))

        # Use a flatiter to simulate a long 1D buffer
        return long_signal.flat

    def _const_interpolator(value):
        """Return a function that returns `value`
        no matter the input.
        """
        def __interpolator(x):
            return value
        return __interpolator

    # Threshold the tfgram to remove non-positive values
    gram = np.maximum(gram, 0)

    # Pre-allocate output signal
    output = np.zeros(length)
    time_centers = np.mean(times, axis=1) * float(fs)

    for n, frequency in enumerate(frequencies):
        # Get a waveform of length samples at this frequency
        wave = _fast_synthesize(frequency)

        # Interpolate the values in gram over the time grid
        if len(time_centers) > 1:
            gram_interpolator = interp1d(
                time_centers, gram[n, :],
                kind='linear', bounds_error=False,
                fill_value=0.0)
        # If only one time point, create constant interpolator
        else:
            gram_interpolator = _const_interpolator(gram[n, 0])

        # Scale each time interval by the piano roll magnitude
        for m, (start, end) in enumerate((times * fs).astype(int)):
            # Clip the timings to make sure the indices are valid
            start, end = max(start, 0), min(end, length)
            # add to waveform
            output[start:end] += (
                wave[start:end] * gram_interpolator(np.arange(start, end)))

    # Normalize, but only if there's non-zero values
    norm = np.abs(output).max()
    if norm >= np.finfo(output.dtype).tiny:
        output /= norm

    return output
19,922 | def pitch_contour ( times , frequencies , fs , amplitudes = None , function = np . sin , length = None , kind = 'linear' ) : fs = float ( fs ) if length is None : length = int ( times . max ( ) * fs ) # Squash the negative frequencies. # wave(0) = 0, so clipping here will un-voice the corresponding instants frequencies = np . maximum ( frequencies , 0.0 ) # Build a frequency interpolator f_interp = interp1d ( times * fs , 2 * np . pi * frequencies / fs , kind = kind , fill_value = 0.0 , bounds_error = False , copy = False ) # Estimate frequency at sample points f_est = f_interp ( np . arange ( length ) ) if amplitudes is None : a_est = np . ones ( ( length , ) ) else : # build an amplitude interpolator a_interp = interp1d ( times * fs , amplitudes , kind = kind , fill_value = 0.0 , bounds_error = False , copy = False ) a_est = a_interp ( np . arange ( length ) ) # Sonify the waveform return a_est * function ( np . cumsum ( f_est ) ) | Sonify a pitch contour . | 278 | 7 |
def chords(chord_labels, intervals, fs, **kwargs):
    """Synthesizes chord labels."""
    util.validate_intervals(intervals)

    # Convert from labels to chroma: rotate each bitmap to its root.
    encoded = chord.encode_many(chord_labels)
    roots, interval_bitmaps = encoded[0], encoded[1]
    rolled = [np.roll(bitmap, root)
              for bitmap, root in zip(interval_bitmaps, roots)]
    chromagram = np.array(rolled).T

    return chroma(chromagram, intervals, fs, **kwargs)
def validate(reference_onsets, estimated_onsets):
    """Checks that the input annotations to a metric look like valid onset
    time arrays, and throws helpful errors if not."""
    # If reference or estimated onsets are empty, warn because metric will
    # be 0.
    if reference_onsets.size == 0:
        warnings.warn("Reference onsets are empty.")
    if estimated_onsets.size == 0:
        warnings.warn("Estimated onsets are empty.")
    # Both arrays must be well-formed 1-d event lists.
    for onsets in (reference_onsets, estimated_onsets):
        util.validate_events(onsets, MAX_TIME)
def f_measure(reference_onsets, estimated_onsets, window=.05):
    """Compute the F-measure of correct vs incorrectly predicted onsets.
    Correctness is determined over a small window."""
    validate(reference_onsets, estimated_onsets)
    # If either list is empty, return 0s
    if reference_onsets.size == 0 or estimated_onsets.size == 0:
        return 0., 0., 0.

    # Best-case matching between reference and estimated onset locations.
    matching = util.match_events(reference_onsets, estimated_onsets, window)

    n_matched = float(len(matching))
    precision = n_matched / len(estimated_onsets)
    recall = n_matched / len(reference_onsets)
    # Compute F-measure and return all statistics
    return util.f_measure(precision, recall), precision, recall
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
    """Checks that the input annotations to a metric look like time
    intervals and a pitch list, and throws helpful errors if not."""
    # Validate intervals
    validate_intervals(ref_intervals, est_intervals)

    # Each interval needs exactly one pitch, and vice versa.
    if ref_intervals.shape[0] != ref_pitches.shape[0]:
        raise ValueError('Reference intervals and pitches have different '
                         'lengths.')
    if est_intervals.shape[0] != est_pitches.shape[0]:
        raise ValueError('Estimated intervals and pitches have different '
                         'lengths.')

    # Pitch values must be strictly positive.
    if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
        raise ValueError("Reference contains at least one non-positive pitch "
                         "value")
    if est_pitches.size > 0 and np.min(est_pitches) <= 0:
        raise ValueError("Estimate contains at least one non-positive pitch "
                         "value")
def validate_intervals(ref_intervals, est_intervals):
    """Checks that the input annotations to a metric look like time
    intervals, and throws helpful errors if not."""
    # If reference or estimated notes are empty, warn
    if ref_intervals.size == 0:
        warnings.warn("Reference notes are empty.")
    if est_intervals.size == 0:
        warnings.warn("Estimated notes are empty.")

    # Delegate structural checks to the shared utility.
    for intervals in (ref_intervals, est_intervals):
        util.validate_intervals(intervals)
def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
                       offset_min_tolerance=0.05, strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note offsets into account."""
    # Strict matching uses < rather than <=.
    compare = np.less if strict else np.less_equal

    # Pairwise distances between all reference and estimated offsets.
    offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
                                                est_intervals[:, 1]))
    # Round distances to a target precision to avoid the situation where
    # if the distance is exactly 50ms (and strict=False) it erroneously
    # doesn't match the notes because of precision issues.
    offset_distances = np.around(offset_distances, decimals=N_DECIMALS)

    # Tolerance scales with reference duration, floored at
    # offset_min_tolerance.
    ref_durations = util.intervals_to_durations(ref_intervals)
    offset_tolerances = np.maximum(offset_ratio * ref_durations,
                                   offset_min_tolerance)
    offset_hit_matrix = compare(offset_distances,
                                offset_tolerances.reshape(-1, 1))

    # Build the bipartite graph keyed by estimated note index, so that the
    # matching comes out as (reference index, estimated index) pairs.
    graph = {}
    for ref_idx, est_idx in zip(*np.where(offset_hit_matrix)):
        graph.setdefault(est_idx, []).append(ref_idx)

    # Compute the maximum matching
    return sorted(util._bipartite_match(graph).items())
def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
                      strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note onsets into account."""
    # Strict matching uses < rather than <=.
    compare = np.less if strict else np.less_equal

    # Pairwise distances between all reference and estimated onsets.
    onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
                                               est_intervals[:, 0]))
    # Round distances to a target precision to avoid the situation where
    # if the distance is exactly 50ms (and strict=False) it erroneously
    # doesn't match the notes because of precision issues.
    onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
    onset_hit_matrix = compare(onset_distances, onset_tolerance)

    # Build the bipartite graph keyed by estimated note index, so that the
    # matching comes out as (reference index, estimated index) pairs.
    graph = {}
    for ref_idx, est_idx in zip(*np.where(onset_hit_matrix)):
        graph.setdefault(est_idx, []).append(ref_idx)

    # Compute the maximum matching
    return sorted(util._bipartite_match(graph).items())
def validate_voicing(ref_voicing, est_voicing):
    """Checks that voicing inputs to a metric are in the correct format."""
    # Empty or fully-unvoiced inputs are allowed but suspicious.
    if ref_voicing.size == 0:
        warnings.warn("Reference voicing array is empty.")
    if est_voicing.size == 0:
        warnings.warn("Estimated voicing array is empty.")
    if ref_voicing.sum() == 0:
        warnings.warn("Reference melody has no voiced frames.")
    if est_voicing.sum() == 0:
        warnings.warn("Estimated melody has no voiced frames.")

    # Make sure they're the same length
    if ref_voicing.shape[0] != est_voicing.shape[0]:
        raise ValueError('Reference and estimated voicing arrays should '
                         'be the same length.')

    # Make sure they're (effectively) boolean
    for voicing in (ref_voicing, est_voicing):
        if np.logical_and(voicing != 0, voicing != 1).any():
            raise ValueError('Voicing arrays must be boolean.')
def hz2cents(freq_hz, base_frequency=10.0):
    """Convert an array of frequency values in Hz to cents.
    0 values are left in place."""
    freq_cent = np.zeros(freq_hz.shape[0])
    nonzero = np.flatnonzero(freq_hz)
    # |f| / base, so negative frequencies map like their positive twins.
    ratio = np.abs(freq_hz[nonzero]) / base_frequency
    freq_cent[nonzero] = 1200 * np.log2(ratio)
    return freq_cent
19,932 | def constant_hop_timebase ( hop , end_time ) : # Compute new timebase. Rounding/linspace is to avoid float problems. end_time = np . round ( end_time , 10 ) times = np . linspace ( 0 , hop * int ( np . floor ( end_time / hop ) ) , int ( np . floor ( end_time / hop ) ) + 1 ) times = np . round ( times , 10 ) return times | Generates a time series from 0 to end_time with times spaced hop apart | 101 | 16 |
19,933 | def detection ( reference_intervals , estimated_intervals , window = 0.5 , beta = 1.0 , trim = False ) : validate_boundary ( reference_intervals , estimated_intervals , trim ) # Convert intervals to boundaries reference_boundaries = util . intervals_to_boundaries ( reference_intervals ) estimated_boundaries = util . intervals_to_boundaries ( estimated_intervals ) # Suppress the first and last intervals if trim : reference_boundaries = reference_boundaries [ 1 : - 1 ] estimated_boundaries = estimated_boundaries [ 1 : - 1 ] # If we have no boundaries, we get no score. if len ( reference_boundaries ) == 0 or len ( estimated_boundaries ) == 0 : return 0.0 , 0.0 , 0.0 matching = util . match_events ( reference_boundaries , estimated_boundaries , window ) precision = float ( len ( matching ) ) / len ( estimated_boundaries ) recall = float ( len ( matching ) ) / len ( reference_boundaries ) f_measure = util . f_measure ( precision , recall , beta = beta ) return precision , recall , f_measure | Boundary detection hit - rate . | 259 | 7 |
19,934 | def deviation ( reference_intervals , estimated_intervals , trim = False ) : validate_boundary ( reference_intervals , estimated_intervals , trim ) # Convert intervals to boundaries reference_boundaries = util . intervals_to_boundaries ( reference_intervals ) estimated_boundaries = util . intervals_to_boundaries ( estimated_intervals ) # Suppress the first and last intervals if trim : reference_boundaries = reference_boundaries [ 1 : - 1 ] estimated_boundaries = estimated_boundaries [ 1 : - 1 ] # If we have no boundaries, we get no score. if len ( reference_boundaries ) == 0 or len ( estimated_boundaries ) == 0 : return np . nan , np . nan dist = np . abs ( np . subtract . outer ( reference_boundaries , estimated_boundaries ) ) estimated_to_reference = np . median ( dist . min ( axis = 0 ) ) reference_to_estimated = np . median ( dist . min ( axis = 1 ) ) return reference_to_estimated , estimated_to_reference | Compute the median deviations between reference and estimated boundary times . | 235 | 12 |
19,935 | def pairwise ( reference_intervals , reference_labels , estimated_intervals , estimated_labels , frame_size = 0.1 , beta = 1.0 ) : validate_structure ( reference_intervals , reference_labels , estimated_intervals , estimated_labels ) # Check for empty annotations. Don't need to check labels because # validate_structure makes sure they're the same size as intervals if reference_intervals . size == 0 or estimated_intervals . size == 0 : return 0. , 0. , 0. # Generate the cluster labels y_ref = util . intervals_to_samples ( reference_intervals , reference_labels , sample_size = frame_size ) [ - 1 ] y_ref = util . index_labels ( y_ref ) [ 0 ] # Map to index space y_est = util . intervals_to_samples ( estimated_intervals , estimated_labels , sample_size = frame_size ) [ - 1 ] y_est = util . index_labels ( y_est ) [ 0 ] # Build the reference label agreement matrix agree_ref = np . equal . outer ( y_ref , y_ref ) # Count the unique pairs n_agree_ref = ( agree_ref . sum ( ) - len ( y_ref ) ) / 2.0 # Repeat for estimate agree_est = np . equal . outer ( y_est , y_est ) n_agree_est = ( agree_est . sum ( ) - len ( y_est ) ) / 2.0 # Find where they agree matches = np . logical_and ( agree_ref , agree_est ) n_matches = ( matches . sum ( ) - len ( y_ref ) ) / 2.0 precision = n_matches / n_agree_est recall = n_matches / n_agree_ref f_measure = util . f_measure ( precision , recall , beta = beta ) return precision , recall , f_measure | Frame - clustering segmentation evaluation by pair - wise agreement . | 437 | 13 |
19,936 | def _contingency_matrix ( reference_indices , estimated_indices ) : ref_classes , ref_class_idx = np . unique ( reference_indices , return_inverse = True ) est_classes , est_class_idx = np . unique ( estimated_indices , return_inverse = True ) n_ref_classes = ref_classes . shape [ 0 ] n_est_classes = est_classes . shape [ 0 ] # Using coo_matrix is faster than histogram2d return scipy . sparse . coo_matrix ( ( np . ones ( ref_class_idx . shape [ 0 ] ) , ( ref_class_idx , est_class_idx ) ) , shape = ( n_ref_classes , n_est_classes ) , dtype = np . int ) . toarray ( ) | Computes the contingency matrix of a true labeling vs an estimated one . | 192 | 14 |
19,937 | def _adjusted_rand_index ( reference_indices , estimated_indices ) : n_samples = len ( reference_indices ) ref_classes = np . unique ( reference_indices ) est_classes = np . unique ( estimated_indices ) # Special limit cases: no clustering since the data is not split; # or trivial clustering where each document is assigned a unique cluster. # These are perfect matches hence return 1.0. if ( ref_classes . shape [ 0 ] == est_classes . shape [ 0 ] == 1 or ref_classes . shape [ 0 ] == est_classes . shape [ 0 ] == 0 or ( ref_classes . shape [ 0 ] == est_classes . shape [ 0 ] == len ( reference_indices ) ) ) : return 1.0 contingency = _contingency_matrix ( reference_indices , estimated_indices ) # Compute the ARI using the contingency data sum_comb_c = sum ( scipy . special . comb ( n_c , 2 , exact = 1 ) for n_c in contingency . sum ( axis = 1 ) ) sum_comb_k = sum ( scipy . special . comb ( n_k , 2 , exact = 1 ) for n_k in contingency . sum ( axis = 0 ) ) sum_comb = sum ( ( scipy . special . comb ( n_ij , 2 , exact = 1 ) for n_ij in contingency . flatten ( ) ) ) prod_comb = ( sum_comb_c * sum_comb_k ) / float ( scipy . special . comb ( n_samples , 2 ) ) mean_comb = ( sum_comb_k + sum_comb_c ) / 2. return ( sum_comb - prod_comb ) / ( mean_comb - prod_comb ) | Compute the Rand index adjusted for change . | 396 | 9 |
19,938 | def _mutual_info_score ( reference_indices , estimated_indices , contingency = None ) : if contingency is None : contingency = _contingency_matrix ( reference_indices , estimated_indices ) . astype ( float ) contingency_sum = np . sum ( contingency ) pi = np . sum ( contingency , axis = 1 ) pj = np . sum ( contingency , axis = 0 ) outer = np . outer ( pi , pj ) nnz = contingency != 0.0 # normalized contingency contingency_nm = contingency [ nnz ] log_contingency_nm = np . log ( contingency_nm ) contingency_nm /= contingency_sum # log(a / b) should be calculated as log(a) - log(b) for # possible loss of precision log_outer = - np . log ( outer [ nnz ] ) + np . log ( pi . sum ( ) ) + np . log ( pj . sum ( ) ) mi = ( contingency_nm * ( log_contingency_nm - np . log ( contingency_sum ) ) + contingency_nm * log_outer ) return mi . sum ( ) | Compute the mutual information between two sequence labelings . | 249 | 11 |
19,939 | def _entropy ( labels ) : if len ( labels ) == 0 : return 1.0 label_idx = np . unique ( labels , return_inverse = True ) [ 1 ] pi = np . bincount ( label_idx ) . astype ( np . float ) pi = pi [ pi > 0 ] pi_sum = np . sum ( pi ) # log(a / b) should be calculated as log(a) - log(b) for # possible loss of precision return - np . sum ( ( pi / pi_sum ) * ( np . log ( pi ) - np . log ( pi_sum ) ) ) | Calculates the entropy for a labeling . | 138 | 9 |
19,940 | def validate_tempi ( tempi , reference = True ) : if tempi . size != 2 : raise ValueError ( 'tempi must have exactly two values' ) if not np . all ( np . isfinite ( tempi ) ) or np . any ( tempi < 0 ) : raise ValueError ( 'tempi={} must be non-negative numbers' . format ( tempi ) ) if reference and np . all ( tempi == 0 ) : raise ValueError ( 'reference tempi={} must have one' ' value greater than zero' . format ( tempi ) ) | Checks that there are two non - negative tempi . For a reference value at least one tempo has to be greater than zero . | 126 | 27 |
19,941 | def validate ( reference_tempi , reference_weight , estimated_tempi ) : validate_tempi ( reference_tempi , reference = True ) validate_tempi ( estimated_tempi , reference = False ) if reference_weight < 0 or reference_weight > 1 : raise ValueError ( 'Reference weight must lie in range [0, 1]' ) | Checks that the input annotations to a metric look like valid tempo annotations . | 76 | 15 |
19,942 | def detection ( reference_tempi , reference_weight , estimated_tempi , tol = 0.08 ) : validate ( reference_tempi , reference_weight , estimated_tempi ) if tol < 0 or tol > 1 : raise ValueError ( 'invalid tolerance {}: must lie in the range ' '[0, 1]' . format ( tol ) ) if tol == 0. : warnings . warn ( 'A tolerance of 0.0 may not ' 'lead to the results you expect.' ) hits = [ False , False ] for i , ref_t in enumerate ( reference_tempi ) : if ref_t > 0 : # Compute the relative error for this reference tempo f_ref_t = float ( ref_t ) relative_error = np . min ( np . abs ( ref_t - estimated_tempi ) / f_ref_t ) # Count the hits hits [ i ] = relative_error <= tol p_score = reference_weight * hits [ 0 ] + ( 1.0 - reference_weight ) * hits [ 1 ] one_correct = bool ( np . max ( hits ) ) both_correct = bool ( np . min ( hits ) ) return p_score , one_correct , both_correct | Compute the tempo detection accuracy metric . | 270 | 8 |
19,943 | def validate ( ref_time , ref_freqs , est_time , est_freqs ) : util . validate_events ( ref_time , max_time = MAX_TIME ) util . validate_events ( est_time , max_time = MAX_TIME ) if ref_time . size == 0 : warnings . warn ( "Reference times are empty." ) if ref_time . ndim != 1 : raise ValueError ( "Reference times have invalid dimension" ) if len ( ref_freqs ) == 0 : warnings . warn ( "Reference frequencies are empty." ) if est_time . size == 0 : warnings . warn ( "Estimated times are empty." ) if est_time . ndim != 1 : raise ValueError ( "Estimated times have invalid dimension" ) if len ( est_freqs ) == 0 : warnings . warn ( "Estimated frequencies are empty." ) if ref_time . size != len ( ref_freqs ) : raise ValueError ( 'Reference times and frequencies have unequal ' 'lengths.' ) if est_time . size != len ( est_freqs ) : raise ValueError ( 'Estimate times and frequencies have unequal ' 'lengths.' ) for freq in ref_freqs : util . validate_frequencies ( freq , max_freq = MAX_FREQ , min_freq = MIN_FREQ , allow_negatives = False ) for freq in est_freqs : util . validate_frequencies ( freq , max_freq = MAX_FREQ , min_freq = MIN_FREQ , allow_negatives = False ) | Checks that the time and frequency inputs are well - formed . | 352 | 13 |
19,944 | def resample_multipitch ( times , frequencies , target_times ) : if target_times . size == 0 : return [ ] if times . size == 0 : return [ np . array ( [ ] ) ] * len ( target_times ) n_times = len ( frequencies ) # scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate # the frequency index and then map back to the frequency values. # This only works because we're using a nearest neighbor interpolator! frequency_index = np . arange ( 0 , n_times ) # times are already ordered so assume_sorted=True for efficiency # since we're interpolating the index, fill_value is set to the first index # that is out of range. We handle this in the next line. new_frequency_index = scipy . interpolate . interp1d ( times , frequency_index , kind = 'nearest' , bounds_error = False , assume_sorted = True , fill_value = n_times ) ( target_times ) # create array of frequencies plus additional empty element at the end for # target time stamps that are out of the interpolation range freq_vals = frequencies + [ np . array ( [ ] ) ] # map interpolated indices back to frequency values frequencies_resampled = [ freq_vals [ i ] for i in new_frequency_index . astype ( int ) ] return frequencies_resampled | Resamples multipitch time series to a new timescale . Values in target_times outside the range of times return no pitch estimate . | 311 | 27 |
19,945 | def compute_num_true_positives ( ref_freqs , est_freqs , window = 0.5 , chroma = False ) : n_frames = len ( ref_freqs ) true_positives = np . zeros ( ( n_frames , ) ) for i , ( ref_frame , est_frame ) in enumerate ( zip ( ref_freqs , est_freqs ) ) : if chroma : # match chroma-wrapped frequency events matching = util . match_events ( ref_frame , est_frame , window , distance = util . _outer_distance_mod_n ) else : # match frequency events within tolerance window in semitones matching = util . match_events ( ref_frame , est_frame , window ) true_positives [ i ] = len ( matching ) return true_positives | Compute the number of true positives in an estimate given a reference . A frequency is correct if it is within a quartertone of the correct frequency . | 182 | 30 |
19,946 | def compute_accuracy ( true_positives , n_ref , n_est ) : true_positive_sum = float ( true_positives . sum ( ) ) n_est_sum = n_est . sum ( ) if n_est_sum > 0 : precision = true_positive_sum / n_est . sum ( ) else : warnings . warn ( "Estimate frequencies are all empty." ) precision = 0.0 n_ref_sum = n_ref . sum ( ) if n_ref_sum > 0 : recall = true_positive_sum / n_ref . sum ( ) else : warnings . warn ( "Reference frequencies are all empty." ) recall = 0.0 acc_denom = ( n_est + n_ref - true_positives ) . sum ( ) if acc_denom > 0 : acc = true_positive_sum / acc_denom else : acc = 0.0 return precision , recall , acc | Compute accuracy metrics . | 206 | 5 |
19,947 | def compute_err_score ( true_positives , n_ref , n_est ) : n_ref_sum = float ( n_ref . sum ( ) ) if n_ref_sum == 0 : warnings . warn ( "Reference frequencies are all empty." ) return 0. , 0. , 0. , 0. # Substitution error e_sub = ( np . min ( [ n_ref , n_est ] , axis = 0 ) - true_positives ) . sum ( ) / n_ref_sum # compute the max of (n_ref - n_est) and 0 e_miss_numerator = n_ref - n_est e_miss_numerator [ e_miss_numerator < 0 ] = 0 # Miss error e_miss = e_miss_numerator . sum ( ) / n_ref_sum # compute the max of (n_est - n_ref) and 0 e_fa_numerator = n_est - n_ref e_fa_numerator [ e_fa_numerator < 0 ] = 0 # False alarm error e_fa = e_fa_numerator . sum ( ) / n_ref_sum # total error e_tot = ( np . max ( [ n_ref , n_est ] , axis = 0 ) - true_positives ) . sum ( ) / n_ref_sum return e_sub , e_miss , e_fa , e_tot | Compute error score metrics . | 324 | 6 |
19,948 | def _hierarchy_bounds ( intervals_hier ) : boundaries = list ( itertools . chain ( * list ( itertools . chain ( * intervals_hier ) ) ) ) return min ( boundaries ) , max ( boundaries ) | Compute the covered time range of a hierarchical segmentation . | 53 | 12 |
19,949 | def _align_intervals ( int_hier , lab_hier , t_min = 0.0 , t_max = None ) : return [ list ( _ ) for _ in zip ( * [ util . adjust_intervals ( np . asarray ( ival ) , labels = lab , t_min = t_min , t_max = t_max ) for ival , lab in zip ( int_hier , lab_hier ) ] ) ] | Align a hierarchical annotation to span a fixed start and end time . | 102 | 14 |
19,950 | def _compare_frame_rankings ( ref , est , transitive = False ) : idx = np . argsort ( ref ) ref_sorted = ref [ idx ] est_sorted = est [ idx ] # Find the break-points in ref_sorted levels , positions , counts = np . unique ( ref_sorted , return_index = True , return_counts = True ) positions = list ( positions ) positions . append ( len ( ref_sorted ) ) index = collections . defaultdict ( lambda : slice ( 0 ) ) ref_map = collections . defaultdict ( lambda : 0 ) for level , cnt , start , end in zip ( levels , counts , positions [ : - 1 ] , positions [ 1 : ] ) : index [ level ] = slice ( start , end ) ref_map [ level ] = cnt # Now that we have values sorted, apply the inversion-counter to # pairs of reference values if transitive : level_pairs = itertools . combinations ( levels , 2 ) else : level_pairs = [ ( i , i + 1 ) for i in levels ] level_pairs , lcounter = itertools . tee ( level_pairs ) normalizer = float ( sum ( [ ref_map [ i ] * ref_map [ j ] for ( i , j ) in lcounter ] ) ) if normalizer == 0 : return 0 , 0.0 inversions = 0 for level_1 , level_2 in level_pairs : inversions += _count_inversions ( est_sorted [ index [ level_1 ] ] , est_sorted [ index [ level_2 ] ] ) return inversions , float ( normalizer ) | Compute the number of ranking disagreements in two lists . | 369 | 11 |
19,951 | def validate_hier_intervals ( intervals_hier ) : # Synthesize a label array for the top layer. label_top = util . generate_labels ( intervals_hier [ 0 ] ) boundaries = set ( util . intervals_to_boundaries ( intervals_hier [ 0 ] ) ) for level , intervals in enumerate ( intervals_hier [ 1 : ] , 1 ) : # Make sure this level is consistent with the root label_current = util . generate_labels ( intervals ) validate_structure ( intervals_hier [ 0 ] , label_top , intervals , label_current ) # Make sure all previous boundaries are accounted for new_bounds = set ( util . intervals_to_boundaries ( intervals ) ) if boundaries - new_bounds : warnings . warn ( 'Segment hierarchy is inconsistent ' 'at level {:d}' . format ( level ) ) boundaries |= new_bounds | Validate a hierarchical segment annotation . | 201 | 7 |
19,952 | def evaluate ( ref_intervals_hier , ref_labels_hier , est_intervals_hier , est_labels_hier , * * kwargs ) : # First, find the maximum length of the reference _ , t_end = _hierarchy_bounds ( ref_intervals_hier ) # Pre-process the intervals to match the range of the reference, # and start at 0 ref_intervals_hier , ref_labels_hier = _align_intervals ( ref_intervals_hier , ref_labels_hier , t_min = 0.0 , t_max = None ) est_intervals_hier , est_labels_hier = _align_intervals ( est_intervals_hier , est_labels_hier , t_min = 0.0 , t_max = t_end ) scores = collections . OrderedDict ( ) # Force the transitivity setting kwargs [ 'transitive' ] = False ( scores [ 'T-Precision reduced' ] , scores [ 'T-Recall reduced' ] , scores [ 'T-Measure reduced' ] ) = util . filter_kwargs ( tmeasure , ref_intervals_hier , est_intervals_hier , * * kwargs ) kwargs [ 'transitive' ] = True ( scores [ 'T-Precision full' ] , scores [ 'T-Recall full' ] , scores [ 'T-Measure full' ] ) = util . filter_kwargs ( tmeasure , ref_intervals_hier , est_intervals_hier , * * kwargs ) ( scores [ 'L-Precision' ] , scores [ 'L-Recall' ] , scores [ 'L-Measure' ] ) = util . filter_kwargs ( lmeasure , ref_intervals_hier , ref_labels_hier , est_intervals_hier , est_labels_hier , * * kwargs ) return scores | Compute all hierarchical structure metrics for the given reference and estimated annotations . | 453 | 14 |
19,953 | def __expand_limits ( ax , limits , which = 'x' ) : if which == 'x' : getter , setter = ax . get_xlim , ax . set_xlim elif which == 'y' : getter , setter = ax . get_ylim , ax . set_ylim else : raise ValueError ( 'invalid axis: {}' . format ( which ) ) old_lims = getter ( ) new_lims = list ( limits ) # infinite limits occur on new axis objects with no data if np . isfinite ( old_lims [ 0 ] ) : new_lims [ 0 ] = min ( old_lims [ 0 ] , limits [ 0 ] ) if np . isfinite ( old_lims [ 1 ] ) : new_lims [ 1 ] = max ( old_lims [ 1 ] , limits [ 1 ] ) setter ( new_lims ) | Helper function to expand axis limits | 205 | 6 |
19,954 | def __get_axes ( ax = None , fig = None ) : new_axes = False if ax is not None : return ax , new_axes if fig is None : import matplotlib . pyplot as plt fig = plt . gcf ( ) if not fig . get_axes ( ) : new_axes = True return fig . gca ( ) , new_axes | Get or construct the target axes object for a new plot . | 88 | 12 |
19,955 | def segments ( intervals , labels , base = None , height = None , text = False , text_kw = None , ax = None , * * kwargs ) : if text_kw is None : text_kw = dict ( ) text_kw . setdefault ( 'va' , 'top' ) text_kw . setdefault ( 'clip_on' , True ) text_kw . setdefault ( 'bbox' , dict ( boxstyle = 'round' , facecolor = 'white' ) ) # Make sure we have a numpy array intervals = np . atleast_2d ( intervals ) seg_def_style = dict ( linewidth = 1 ) ax , new_axes = __get_axes ( ax = ax ) if new_axes : ax . set_ylim ( [ 0 , 1 ] ) # Infer height if base is None : base = ax . get_ylim ( ) [ 0 ] if height is None : height = ax . get_ylim ( ) [ 1 ] cycler = ax . _get_patches_for_fill . prop_cycler seg_map = dict ( ) for lab in labels : if lab in seg_map : continue style = next ( cycler ) seg_map [ lab ] = seg_def_style . copy ( ) seg_map [ lab ] . update ( style ) # Swap color -> facecolor here so we preserve edgecolor on rects seg_map [ lab ] [ 'facecolor' ] = seg_map [ lab ] . pop ( 'color' ) seg_map [ lab ] . update ( kwargs ) seg_map [ lab ] [ 'label' ] = lab for ival , lab in zip ( intervals , labels ) : rect = Rectangle ( ( ival [ 0 ] , base ) , ival [ 1 ] - ival [ 0 ] , height , * * seg_map [ lab ] ) ax . add_patch ( rect ) seg_map [ lab ] . pop ( 'label' , None ) if text : ann = ax . annotate ( lab , xy = ( ival [ 0 ] , height ) , xycoords = 'data' , xytext = ( 8 , - 10 ) , textcoords = 'offset points' , * * text_kw ) ann . set_clip_path ( rect ) if new_axes : ax . set_yticks ( [ ] ) # Only expand if we have data if intervals . size : __expand_limits ( ax , [ intervals . min ( ) , intervals . max ( ) ] , which = 'x' ) return ax | Plot a segmentation as a set of disjoint rectangles . | 575 | 14 |
19,956 | def labeled_intervals ( intervals , labels , label_set = None , base = None , height = None , extend_labels = True , ax = None , tick = True , * * kwargs ) : # Get the axes handle ax , _ = __get_axes ( ax = ax ) # Make sure we have a numpy array intervals = np . atleast_2d ( intervals ) if label_set is None : # If we have non-empty pre-existing tick labels, use them label_set = [ _ . get_text ( ) for _ in ax . get_yticklabels ( ) ] # If none of the label strings have content, treat it as empty if not any ( label_set ) : label_set = [ ] else : label_set = list ( label_set ) # Put additional labels at the end, in order if extend_labels : ticks = label_set + sorted ( set ( labels ) - set ( label_set ) ) elif label_set : ticks = label_set else : ticks = sorted ( set ( labels ) ) style = dict ( linewidth = 1 ) style . update ( next ( ax . _get_patches_for_fill . prop_cycler ) ) # Swap color -> facecolor here so we preserve edgecolor on rects style [ 'facecolor' ] = style . pop ( 'color' ) style . update ( kwargs ) if base is None : base = np . arange ( len ( ticks ) ) if height is None : height = 1 if np . isscalar ( height ) : height = height * np . ones_like ( base ) seg_y = dict ( ) for ybase , yheight , lab in zip ( base , height , ticks ) : seg_y [ lab ] = ( ybase , yheight ) xvals = defaultdict ( list ) for ival , lab in zip ( intervals , labels ) : if lab not in seg_y : continue xvals [ lab ] . append ( ( ival [ 0 ] , ival [ 1 ] - ival [ 0 ] ) ) for lab in seg_y : ax . add_collection ( BrokenBarHCollection ( xvals [ lab ] , seg_y [ lab ] , * * style ) ) # Pop the label after the first time we see it, so we only get # one legend entry style . pop ( 'label' , None ) # Draw a line separating the new labels from pre-existing labels if label_set != ticks : ax . axhline ( len ( label_set ) , color = 'k' , alpha = 0.5 ) if tick : ax . grid ( True , axis = 'y' ) ax . set_yticks ( [ ] ) ax . set_yticks ( base ) ax . set_yticklabels ( ticks , va = 'bottom' ) ax . 
yaxis . set_major_formatter ( IntervalFormatter ( base , ticks ) ) if base . size : __expand_limits ( ax , [ base . min ( ) , ( base + height ) . max ( ) ] , which = 'y' ) if intervals . size : __expand_limits ( ax , [ intervals . min ( ) , intervals . max ( ) ] , which = 'x' ) return ax | Plot labeled intervals with each label on its own row . | 712 | 11 |
19,957 | def hierarchy ( intervals_hier , labels_hier , levels = None , ax = None , * * kwargs ) : # This will break if a segment label exists in multiple levels if levels is None : levels = list ( range ( len ( intervals_hier ) ) ) # Get the axes handle ax , _ = __get_axes ( ax = ax ) # Count the pre-existing patches n_patches = len ( ax . patches ) for ints , labs , key in zip ( intervals_hier [ : : - 1 ] , labels_hier [ : : - 1 ] , levels [ : : - 1 ] ) : labeled_intervals ( ints , labs , label = key , ax = ax , * * kwargs ) # Reverse the patch ordering for anything we've added. # This way, intervals are listed in the legend from top to bottom ax . patches [ n_patches : ] = ax . patches [ n_patches : ] [ : : - 1 ] return ax | Plot a hierarchical segmentation | 216 | 5 |
19,958 | def events ( times , labels = None , base = None , height = None , ax = None , text_kw = None , * * kwargs ) : if text_kw is None : text_kw = dict ( ) text_kw . setdefault ( 'va' , 'top' ) text_kw . setdefault ( 'clip_on' , True ) text_kw . setdefault ( 'bbox' , dict ( boxstyle = 'round' , facecolor = 'white' ) ) # make sure we have an array for times times = np . asarray ( times ) # Get the axes handle ax , new_axes = __get_axes ( ax = ax ) # If we have fresh axes, set the limits if new_axes : # Infer base and height if base is None : base = 0 if height is None : height = 1 ax . set_ylim ( [ base , height ] ) else : if base is None : base = ax . get_ylim ( ) [ 0 ] if height is None : height = ax . get_ylim ( ) [ 1 ] cycler = ax . _get_patches_for_fill . prop_cycler style = next ( cycler ) . copy ( ) style . update ( kwargs ) # If the user provided 'colors', don't override it with 'color' if 'colors' in style : style . pop ( 'color' , None ) lines = ax . vlines ( times , base , base + height , * * style ) if labels : for path , lab in zip ( lines . get_paths ( ) , labels ) : ax . annotate ( lab , xy = ( path . vertices [ 0 ] [ 0 ] , height ) , xycoords = 'data' , xytext = ( 8 , - 10 ) , textcoords = 'offset points' , * * text_kw ) if new_axes : ax . set_yticks ( [ ] ) __expand_limits ( ax , [ base , base + height ] , which = 'y' ) if times . size : __expand_limits ( ax , [ times . min ( ) , times . max ( ) ] , which = 'x' ) return ax | Plot event times as a set of vertical lines | 483 | 9 |
19,959 | def pitch ( times , frequencies , midi = False , unvoiced = False , ax = None , * * kwargs ) : ax , _ = __get_axes ( ax = ax ) times = np . asarray ( times ) # First, segment into contiguously voiced contours frequencies , voicings = freq_to_voicing ( np . asarray ( frequencies , dtype = np . float ) ) # Here are all the change-points v_changes = 1 + np . flatnonzero ( voicings [ 1 : ] != voicings [ : - 1 ] ) v_changes = np . unique ( np . concatenate ( [ [ 0 ] , v_changes , [ len ( voicings ) ] ] ) ) # Set up arrays of slices for voiced and unvoiced regions v_slices , u_slices = [ ] , [ ] for start , end in zip ( v_changes , v_changes [ 1 : ] ) : idx = slice ( start , end ) # A region is voiced if its starting sample is voiced # It's unvoiced if none of the samples in the region are voiced. if voicings [ start ] : v_slices . append ( idx ) elif frequencies [ idx ] . all ( ) : u_slices . append ( idx ) # Now we just need to plot the contour style = dict ( ) style . update ( next ( ax . _get_lines . prop_cycler ) ) style . update ( kwargs ) if midi : idx = frequencies > 0 frequencies [ idx ] = hz_to_midi ( frequencies [ idx ] ) # Tick at integer midi notes ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) for idx in v_slices : ax . plot ( times [ idx ] , frequencies [ idx ] , * * style ) style . pop ( 'label' , None ) # Plot the unvoiced portions if unvoiced : style [ 'alpha' ] = style . get ( 'alpha' , 1.0 ) * 0.5 for idx in u_slices : ax . plot ( times [ idx ] , frequencies [ idx ] , * * style ) return ax | Visualize pitch contours | 493 | 5 |
19,960 | def multipitch ( times , frequencies , midi = False , unvoiced = False , ax = None , * * kwargs ) : # Get the axes handle ax , _ = __get_axes ( ax = ax ) # Set up a style for the plot style_voiced = dict ( ) style_voiced . update ( next ( ax . _get_lines . prop_cycler ) ) style_voiced . update ( kwargs ) style_unvoiced = style_voiced . copy ( ) style_unvoiced . pop ( 'label' , None ) style_unvoiced [ 'alpha' ] = style_unvoiced . get ( 'alpha' , 1.0 ) * 0.5 # We'll collect all times and frequencies first, then plot them voiced_times = [ ] voiced_freqs = [ ] unvoiced_times = [ ] unvoiced_freqs = [ ] for t , freqs in zip ( times , frequencies ) : if not len ( freqs ) : continue freqs , voicings = freq_to_voicing ( np . asarray ( freqs , dtype = np . float ) ) # Discard all 0-frequency measurements idx = freqs > 0 freqs = freqs [ idx ] voicings = voicings [ idx ] if midi : freqs = hz_to_midi ( freqs ) n_voiced = sum ( voicings ) voiced_times . extend ( [ t ] * n_voiced ) voiced_freqs . extend ( freqs [ voicings ] ) unvoiced_times . extend ( [ t ] * ( len ( freqs ) - n_voiced ) ) unvoiced_freqs . extend ( freqs [ ~ voicings ] ) # Plot the voiced frequencies ax . scatter ( voiced_times , voiced_freqs , * * style_voiced ) # Plot the unvoiced frequencies if unvoiced : ax . scatter ( unvoiced_times , unvoiced_freqs , * * style_unvoiced ) # Tick at integer midi notes if midi : ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) return ax | Visualize multiple f0 measurements | 479 | 6 |
19,961 | def piano_roll ( intervals , pitches = None , midi = None , ax = None , * * kwargs ) : if midi is None : if pitches is None : raise ValueError ( 'At least one of `midi` or `pitches` ' 'must be provided.' ) midi = hz_to_midi ( pitches ) scale = np . arange ( 128 ) ax = labeled_intervals ( intervals , np . round ( midi ) . astype ( int ) , label_set = scale , tick = False , ax = ax , * * kwargs ) # Minor tick at each semitone ax . yaxis . set_minor_locator ( MultipleLocator ( 1 ) ) ax . axis ( 'auto' ) return ax | Plot a quantized piano roll as intervals | 165 | 8 |
19,962 | def separation ( sources , fs = 22050 , labels = None , alpha = 0.75 , ax = None , * * kwargs ) : # Get the axes handle ax , new_axes = __get_axes ( ax = ax ) # Make sure we have at least two dimensions sources = np . atleast_2d ( sources ) if labels is None : labels = [ 'Source {:d}' . format ( _ ) for _ in range ( len ( sources ) ) ] kwargs . setdefault ( 'scaling' , 'spectrum' ) # The cumulative spectrogram across sources # is used to establish the reference power # for each individual source cumspec = None specs = [ ] for i , src in enumerate ( sources ) : freqs , times , spec = spectrogram ( src , fs = fs , * * kwargs ) specs . append ( spec ) if cumspec is None : cumspec = spec . copy ( ) else : cumspec += spec ref_max = cumspec . max ( ) ref_min = ref_max * 1e-6 color_conv = ColorConverter ( ) for i , spec in enumerate ( specs ) : # For each source, grab a new color from the cycler # Then construct a colormap that interpolates from # [transparent white -> new color] color = next ( ax . _get_lines . prop_cycler ) [ 'color' ] color = color_conv . to_rgba ( color , alpha = alpha ) cmap = LinearSegmentedColormap . from_list ( labels [ i ] , [ ( 1.0 , 1.0 , 1.0 , 0.0 ) , color ] ) ax . pcolormesh ( times , freqs , spec , cmap = cmap , norm = LogNorm ( vmin = ref_min , vmax = ref_max ) , shading = 'gouraud' , label = labels [ i ] ) # Attach a 0x0 rect to the axis with the corresponding label # This way, it will show up in the legend ax . add_patch ( Rectangle ( ( 0 , 0 ) , 0 , 0 , color = color , label = labels [ i ] ) ) if new_axes : ax . axis ( 'tight' ) return ax | Source - separation visualization | 492 | 4 |
19,963 | def __ticker_midi_note ( x , pos ) : NOTES = [ 'C' , 'C#' , 'D' , 'D#' , 'E' , 'F' , 'F#' , 'G' , 'G#' , 'A' , 'A#' , 'B' ] cents = float ( np . mod ( x , 1.0 ) ) if cents >= 0.5 : cents = cents - 1.0 x = x + 0.5 idx = int ( x % 12 ) octave = int ( x / 12 ) - 1 if cents == 0 : return '{:s}{:2d}' . format ( NOTES [ idx ] , octave ) return '{:s}{:2d}{:+02d}' . format ( NOTES [ idx ] , octave , int ( cents * 100 ) ) | A ticker function for midi notes . | 193 | 9 |
19,964 | def ticker_notes ( ax = None ) : ax , _ = __get_axes ( ax = ax ) ax . yaxis . set_major_formatter ( FMT_MIDI_NOTE ) # Get the tick labels and reset the vertical alignment for tick in ax . yaxis . get_ticklabels ( ) : tick . set_verticalalignment ( 'baseline' ) | Set the y - axis of the given axes to MIDI notes | 86 | 12 |
19,965 | def ticker_pitch ( ax = None ) : ax , _ = __get_axes ( ax = ax ) ax . yaxis . set_major_formatter ( FMT_MIDI_HZ ) | Set the y - axis of the given axes to MIDI frequencies | 48 | 12 |
19,966 | def run ( self , * * import_params ) : if self . file : import_params [ "url" ] = self . file self . id_field = "id" if "connection" in import_params : self . fields . append ( "connector" ) self . update_from_dict ( import_params [ "connection" ] ) self . save ( force_create = True ) else : super ( FileImportJob , self ) . run ( params = import_params , files = self . files ) | Actually creates the import job on the CARTO server | 110 | 11 |
19,967 | def filter ( self ) : try : response = self . send ( self . get_collection_endpoint ( ) , "get" ) if self . json_collection_attribute is not None : resource_ids = self . client . get_response_data ( response , self . Meta . parse_json ) [ self . json_collection_attribute ] else : resource_ids = self . client . get_response_data ( response , self . Meta . parse_json ) except Exception as e : raise CartoException ( e ) resources = [ ] for resource_id in resource_ids : try : resource = self . resource_class ( self . client ) except ( ValueError , TypeError ) : continue else : setattr ( resource , resource . Meta . id_field , resource_id ) resources . append ( resource ) return resources | Get a filtered list of file imports | 176 | 7 |
19,968 | def send ( self , relative_path , http_method , * * requests_args ) : try : http_method , requests_args = self . prepare_send ( http_method , * * requests_args ) response = super ( APIKeyAuthClient , self ) . send ( relative_path , http_method , * * requests_args ) except Exception as e : raise CartoException ( e ) if CartoRateLimitException . is_rate_limited ( response ) : raise CartoRateLimitException ( response ) return response | Makes an API - key - authorized request | 113 | 9 |
19,969 | def is_valid_api_key ( self ) : res = self . send ( 'api/v3/api_keys' , 'get' ) return res . ok and self . api_key in ( ak [ 'token' ] for ak in res . json ( ) [ 'result' ] ) | Checks validity . Right now an API key is considered valid if it can list user API keys and the result contains that API key . This might change in the future . | 65 | 34 |
19,970 | def send ( self , url , http_method , * * client_args ) : try : client_args = client_args or { } if "params" not in client_args : client_args [ "params" ] = { } client_args [ "params" ] . update ( { "type" : "table" , "exclude_shared" : "true" } ) return super ( DatasetManager , self ) . send ( url , http_method , * * client_args ) except Exception as e : raise CartoException ( e ) | Sends an API request taking into account that datasets are part of the visualization endpoint . | 120 | 17 |
19,971 | def is_sync_table ( self , archive , interval , * * import_args ) : return ( hasattr ( archive , "startswith" ) and archive . startswith ( "http" ) or "connection" in import_args ) and interval is not None | Checks if this is a request for a sync dataset . | 58 | 12 |
19,972 | def create ( self , archive , interval = None , * * import_args ) : archive = archive . lower ( ) if hasattr ( archive , "lower" ) else archive if self . is_sync_table ( archive , interval , * * import_args ) : manager = SyncTableJobManager ( self . client ) else : manager = FileImportJobManager ( self . client ) import_job = manager . create ( archive ) if interval is None else manager . create ( archive , interval ) import_job . run ( * * import_args ) if import_job . get_id ( ) is None : raise CartoException ( _ ( "Import API returned corrupt job details \ when creating dataset" ) ) import_job . refresh ( ) count = 0 while import_job . state in ( "enqueued" , "queued" , "pending" , "uploading" , "unpacking" , "importing" , "guessing" ) or ( isinstance ( manager , SyncTableJobManager ) and import_job . state == "created" ) : if count >= MAX_NUMBER_OF_RETRIES : raise CartoException ( _ ( "Maximum number of retries exceeded \ when polling the import API for \ dataset creation" ) ) time . sleep ( INTERVAL_BETWEEN_RETRIES_S ) import_job . refresh ( ) count += 1 if import_job . state == "failure" : raise CartoException ( _ ( "Dataset creation was not successful \ because of failed import (error: {error}" ) . format ( error = json . dumps ( import_job . get_error_text ) ) ) if ( import_job . state != "complete" and import_job . state != "created" and import_job . state != "success" ) or import_job . success is False : raise CartoException ( _ ( "Dataset creation was not successful \ because of unknown import error" ) ) if hasattr ( import_job , "visualization_id" ) and import_job . visualization_id is not None : visualization_id = import_job . visualization_id else : table = TableManager ( self . client ) . get ( import_job . table_id ) visualization_id = table . table_visualization . get_id ( ) if table is not None else None try : return self . 
get ( visualization_id ) if visualization_id is not None else None except AttributeError : raise CartoException ( _ ( "Dataset creation was not successful \ because of unknown error" ) ) | Creating a table means uploading a file or setting up a sync table | 553 | 13 |
19,973 | def send ( self , url , http_method , * * client_args ) : try : client_args . setdefault ( 'params' , { } ) client_args [ "params" ] . update ( { "type" : "derived" , "exclude_shared" : "true" } ) return super ( VisualizationManager , self ) . send ( url , http_method , * * client_args ) except Exception as e : raise CartoException ( e ) | Sends API request taking into account that visualizations are only a subset of the resources available at the visualization endpoint | 102 | 22 |
19,974 | def is_rate_limited ( response ) : if ( response . status_code == codes . too_many_requests and 'Retry-After' in response . headers and int ( response . headers [ 'Retry-After' ] ) >= 0 ) : return True return False | Checks if the response has been rate limited by CARTO APIs | 60 | 14 |
19,975 | def update_from_dict ( self , attribute_dict ) : if 'template' in attribute_dict : self . update_from_dict ( attribute_dict [ 'template' ] ) setattr ( self , self . Meta . id_field , attribute_dict [ 'template' ] [ 'name' ] ) return try : for k , v in attribute_dict . items ( ) : setattr ( self , k , v ) except Exception : setattr ( self , self . Meta . id_field , attribute_dict ) | Method overriden from the base class | 112 | 8 |
19,976 | def run ( self , * * client_params ) : try : self . send ( self . get_collection_endpoint ( ) , http_method = "POST" , * * client_params ) except Exception as e : raise CartoException ( e ) | Actually creates the async job on the CARTO server | 55 | 11 |
19,977 | def send ( self , sql , parse_json = True , do_post = True , format = None , * * request_args ) : try : params = { 'q' : sql } if format : params [ 'format' ] = format if format not in [ 'json' , 'geojson' ] : parse_json = False if request_args is not None : for attr in request_args : params [ attr ] = request_args [ attr ] if len ( sql ) < MAX_GET_QUERY_LEN and do_post is False : resp = self . auth_client . send ( self . api_url , 'GET' , params = params ) else : resp = self . auth_client . send ( self . api_url , 'POST' , data = params ) return self . auth_client . get_response_data ( resp , parse_json ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) | Executes SQL query in a CARTO server | 216 | 10 |
19,978 | def send ( self , url , http_method , json_body = None , http_header = None ) : try : data = self . client . send ( url , http_method = http_method , headers = http_header , json = json_body ) data_json = self . client . get_response_data ( data ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) return data_json | Executes Batch SQL query in a CARTO server | 99 | 12 |
19,979 | def create ( self , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url , http_method = "POST" , json_body = { "query" : sql_query } , http_header = header ) return data | Creates a new batch SQL query . | 67 | 8 |
19,980 | def create_and_wait_for_completion ( self , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url , http_method = "POST" , json_body = { "query" : sql_query } , http_header = header ) warnings . warn ( 'Batch SQL job created with job_id: {job_id}' . format ( job_id = data [ 'job_id' ] ) ) while data and data [ 'status' ] in BATCH_JOBS_PENDING_STATUSES : time . sleep ( BATCH_READ_STATUS_AFTER_SECONDS ) data = self . read ( data [ 'job_id' ] ) if data [ 'status' ] in BATCH_JOBS_FAILED_STATUSES : raise CartoException ( _ ( "Batch SQL job failed with result: {data}" . format ( data = data ) ) ) return data | Creates a new batch SQL query and waits for its completion or failure | 222 | 14 |
19,981 | def read ( self , job_id ) : data = self . send ( self . api_url + job_id , http_method = "GET" ) return data | Reads the information for a specific Batch API request | 36 | 11 |
19,982 | def update ( self , job_id , sql_query ) : header = { 'content-type' : 'application/json' } data = self . send ( self . api_url + job_id , http_method = "PUT" , json_body = { "query" : sql_query } , http_header = header ) return data | Updates the sql query of a specific job | 75 | 9 |
19,983 | def cancel ( self , job_id ) : try : confirmation = self . send ( self . api_url + job_id , http_method = "DELETE" ) except CartoException as e : if 'Cannot set status from done to cancelled' in e . args [ 0 ] . args [ 0 ] : return 'done' else : raise e return confirmation [ 'status' ] | Cancels a job | 84 | 5 |
19,984 | def copyfrom ( self , query , iterable_data , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : url = self . api_url + '/copyfrom' headers = { 'Content-Type' : 'application/octet-stream' , 'Transfer-Encoding' : 'chunked' } params = { 'api_key' : self . api_key , 'q' : query } if compress : headers [ 'Content-Encoding' ] = 'gzip' _iterable_data = self . _compress_chunks ( iterable_data , compression_level ) else : _iterable_data = iterable_data try : response = self . client . send ( url , http_method = 'POST' , params = params , data = _iterable_data , headers = headers , stream = True ) response_json = self . client . get_response_data ( response ) except CartoRateLimitException as e : raise e except Exception as e : raise CartoException ( e ) return response_json | Gets data from an iterable object into a table | 232 | 11 |
19,985 | def copyfrom_file_object ( self , query , file_object , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : chunk_generator = self . _read_in_chunks ( file_object ) return self . copyfrom ( query , chunk_generator , compress , compression_level ) | Gets data from a readable file object into a table | 73 | 11 |
19,986 | def copyfrom_file_path ( self , query , path , compress = True , compression_level = DEFAULT_COMPRESSION_LEVEL ) : with open ( path , 'rb' ) as f : result = self . copyfrom_file_object ( query , f , compress , compression_level ) return result | Gets data from a readable file into a table | 68 | 10 |
19,987 | def copyto ( self , query ) : url = self . api_url + '/copyto' params = { 'api_key' : self . api_key , 'q' : query } try : response = self . client . send ( url , http_method = 'GET' , params = params , stream = True ) response . raise_for_status ( ) except CartoRateLimitException as e : raise e except HTTPError as e : if 400 <= response . status_code < 500 : # Client error, provide better reason reason = response . json ( ) [ 'error' ] [ 0 ] error_msg = u'%s Client Error: %s' % ( response . status_code , reason ) raise CartoException ( error_msg ) else : raise CartoException ( e ) except Exception as e : raise CartoException ( e ) return response | Gets data from a table into a Response object that can be iterated | 185 | 15 |
19,988 | def copyto_file_object ( self , query , file_object ) : response = self . copyto ( query ) for block in response . iter_content ( DEFAULT_CHUNK_SIZE ) : file_object . write ( block ) | Gets data from a table into a writable file object | 53 | 12 |
19,989 | def copyto_file_path ( self , query , path , append = False ) : file_mode = 'wb' if not append else 'ab' with open ( path , file_mode ) as f : self . copyto_file_object ( query , f ) | Gets data from a table into a writable file | 58 | 11 |
19,990 | def run ( self , * * import_params ) : import_params [ "url" ] = self . url import_params [ "interval" ] = self . interval if "connection" in import_params : self . fields . append ( "connector" ) import_params [ "connection" ] [ "interval" ] = self . interval self . update_from_dict ( import_params [ "connection" ] ) self . save ( force_create = True ) else : return super ( SyncTableJob , self ) . run ( params = import_params ) | Actually creates the job import on the CARTO server | 122 | 11 |
19,991 | def force_sync ( self ) : try : self . send ( self . get_resource_endpoint ( ) , "put" ) except Exception as e : raise CartoException ( e ) | Forces to sync the SyncTableJob | 41 | 8 |
19,992 | def set_prob_type ( cls , problem_type , classification_type , eval_type ) : assert problem_type in problem_type_list , 'Need to set Problem Type' if problem_type == 'classification' : assert classification_type in classification_type_list , 'Need to set Classification Type' assert eval_type in eval_type_list , 'Need to set Evaluation Type' cls . problem_type = problem_type cls . classification_type = classification_type cls . eval_type = eval_type if cls . problem_type == 'classification' : print 'Setting Problem:{}, Type:{}, Eval:{}' . format ( cls . problem_type , cls . classification_type , cls . eval_type ) elif cls . problem_type == 'regression' : print 'Setting Problem:{}, Eval:{}' . format ( cls . problem_type , cls . eval_type ) return | Set problem type | 210 | 3 |
19,993 | def make_multi_cols ( self , num_class , name ) : cols = [ 'c' + str ( i ) + '_' for i in xrange ( num_class ) ] cols = map ( lambda x : x + name , cols ) return cols | make cols for multi - class predictions | 62 | 8 |
19,994 | def parse ( self , data ) : # type: (bytes) -> None ( self . len_di , self . xattr_length , self . extent_location , self . parent_directory_num ) = struct . unpack_from ( self . FMT , data [ : 8 ] , 0 ) if self . len_di % 2 != 0 : self . directory_identifier = data [ 8 : - 1 ] else : self . directory_identifier = data [ 8 : ] self . dirrecord = None self . _initialized = True | A method to parse an ISO9660 Path Table Record out of a string . | 116 | 16 |
19,995 | def _record ( self , ext_loc , parent_dir_num ) : # type: (int, int) -> bytes return struct . pack ( self . FMT , self . len_di , self . xattr_length , ext_loc , parent_dir_num ) + self . directory_identifier + b'\x00' * ( self . len_di % 2 ) | An internal method to generate a string representing this Path Table Record . | 84 | 13 |
19,996 | def record_little_endian ( self ) : # type: () -> bytes if not self . _initialized : raise pycdlibexception . PyCdlibInternalError ( 'Path Table Record not yet initialized' ) return self . _record ( self . extent_location , self . parent_directory_num ) | A method to generate a string representing the little endian version of this Path Table Record . | 67 | 18 |
19,997 | def record_big_endian ( self ) : # type: () -> bytes if not self . _initialized : raise pycdlibexception . PyCdlibInternalError ( 'Path Table Record not yet initialized' ) return self . _record ( utils . swab_32bit ( self . extent_location ) , utils . swab_16bit ( self . parent_directory_num ) ) | A method to generate a string representing the big endian version of this Path Table Record . | 87 | 18 |
19,998 | def _new ( self , name , parent_dir_num ) : # type: (bytes, int) -> None self . len_di = len ( name ) self . xattr_length = 0 # FIXME: we don't support xattr for now self . parent_directory_num = parent_dir_num self . directory_identifier = name self . _initialized = True | An internal method to create a new Path Table Record . | 82 | 11 |
19,999 | def new_dir ( self , name ) : # type: (bytes) -> None if self . _initialized : raise pycdlibexception . PyCdlibInternalError ( 'Path Table Record already initialized' ) # Zero for the parent dir num is bogus, but that will get fixed later. self . _new ( name , 0 ) | A method to create a new Path Table Record . | 72 | 10 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.