| _id (string, len 2–7) | title (string, len 1–88) | partition (3 classes) | text (string, len 75–19.8k) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
def A_weighting(frequencies, min_db=-80.0):  # pylint: disable=invalid-name
    '''Compute the A-weighting of a set of frequencies.

    Parameters
    ----------
    frequencies : scalar or np.ndarray [shape=(n,)]
        One or more frequencies (in Hz)
    min_db : float [scalar] or None
        Clip weights below this threshold.
        If `None`, no clipping is performed.

    Returns
    -------
    A_weighting : scalar or np.ndarray [shape=(n,)]
        `A_weighting[i]` is the A-weighting of `frequencies[i]`

    See Also
    --------
    perceptual_weighting

    Examples
    --------
    Get the A-weighting for CQT frequencies

    >>> import matplotlib.pyplot as plt
    >>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))
    >>> aw = librosa.A_weighting(freqs)
    >>> plt.plot(freqs, aw)
    >>> plt.xlabel('Frequency (Hz)')
    >>> plt.ylabel('Weighting (log10)')
    >>> plt.title('A-Weighting of CQT frequencies')
    '''
    # Promote to ndarray so scalars and arrays go through one code path
    frequencies = np.asanyarray(frequencies)

    # Squared frequency shows up in every denominator term
    f2 = frequencies ** 2.0

    # Squared pole constants of the analog A-weighting transfer function
    c0, c1, c2, c3 = np.array([12200, 20.6, 107.7, 737.9]) ** 2.0

    # log-domain numerator and denominator of the weighting filter
    log_num = np.log10(c0) + 4 * np.log10(frequencies)
    log_den = (np.log10(f2 + c0)
               + np.log10(f2 + c1)
               + 0.5 * np.log10(f2 + c2)
               + 0.5 * np.log10(f2 + c3))

    weights = 2.0 + 20.0 * (log_num - log_den)

    if min_db is None:
        return weights

    # Clip low-frequency weights from below
    return np.maximum(min_db, weights)
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
    """Return an array of time values matching the time axis of a feature matrix.

    Parameters
    ----------
    X : np.ndarray or scalar
        - If ndarray, a feature matrix (e.g., STFT, chromagram, mel spectrogram).
        - If scalar, the number of frames.
    sr : number > 0 [scalar]
        audio sampling rate
    hop_length : int > 0 [scalar]
        number of samples between successive frames
    n_fft : None or int > 0 [scalar]
        Optional FFT window length.  When given, times are offset by
        `n_fft / 2` to compensate for windowing in non-centered STFTs.
    axis : int [scalar]
        Which axis of `X` is the time axis (default: last).

    Returns
    -------
    times : np.ndarray [shape=(n,)]
        time (in seconds) of each frame of `X`.

    See Also
    --------
    samples_like : Return an array of sample indices to match the time axis from a feature matrix.

    Examples
    --------
    Feature-matrix input:

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> times = librosa.times_like(librosa.stft(y))

    Scalar input:

    >>> times = librosa.times_like(2647)
    """
    # Delegate frame counting to samples_like, then convert samples -> seconds
    frame_samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
    return samples_to_time(frame_samples, sr=sr)
def samples_like(X, hop_length=512, n_fft=None, axis=-1):
    """Return an array of sample indices matching the time axis of a feature matrix.

    Parameters
    ----------
    X : np.ndarray or scalar
        - If ndarray, a feature matrix (e.g., STFT, chromagram, mel spectrogram).
        - If scalar, the number of frames.
    hop_length : int > 0 [scalar]
        number of samples between successive frames
    n_fft : None or int > 0 [scalar]
        Optional FFT window length.  When given, indices are offset by
        `n_fft / 2` to compensate for windowing in non-centered STFTs.
    axis : int [scalar]
        Which axis of `X` is the time axis (default: last).

    Returns
    -------
    samples : np.ndarray [shape=(n,)]
        sample index corresponding to each frame of `X`.

    See Also
    --------
    times_like : Return an array of time values to match the time axis from a feature matrix.

    Examples
    --------
    Feature-matrix input:

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> samples = librosa.samples_like(librosa.stft(y))

    Scalar input:

    >>> samples = librosa.samples_like(2647)
    """
    # Either X is the frame count itself, or we read it off the time axis
    n_frames = X if np.isscalar(X) else X.shape[axis]
    frame_index = np.arange(n_frames)
    return frames_to_samples(frame_index, hop_length=hop_length, n_fft=n_fft)
def hybrid_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
               bins_per_octave=12, tuning=0.0, filter_scale=1,
               norm=1, sparsity=0.01, window='hann', scale=True,
               pad_mode='reflect', res_type=None):
    '''Compute the hybrid constant-Q transform of an audio signal.

    The hybrid CQT uses the pseudo CQT for the high-frequency bins (where
    the hop length exceeds half the filter length) and the full CQT for
    the remaining low-frequency bins.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    hop_length : int > 0 [scalar]
        number of samples between successive CQT columns.
    fmin : float > 0 [scalar]
        Minimum frequency. Defaults to C1 ~= 32.70 Hz
    n_bins : int > 0 [scalar]
        Number of frequency bins, starting at `fmin`
    bins_per_octave : int > 0 [scalar]
        Number of bins per octave
    tuning : None or float in `[-0.5, 0.5)`
        Tuning offset in fractions of a bin (cents).
        If `None`, tuning will be automatically estimated from the signal.
    filter_scale : float > 0
        Filter filter_scale factor. Larger values use longer windows.
    sparsity : float in [0, 1)
        Sparsify the CQT basis by discarding up to `sparsity`
        fraction of the energy in each basis.
        Set `sparsity=0` to disable sparsification.
    window : str, tuple, number, or function
        Window specification for the basis filters.
        See `filters.get_window` for details.
    pad_mode : string
        Padding mode for centered frame analysis.
        See also: `librosa.core.stft` and `np.pad`.
    res_type : string
        Resampling mode. See `librosa.core.cqt` for details.

    Returns
    -------
    CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
        Constant-Q energy for each frequency at each time.

    Raises
    ------
    ParameterError
        If `hop_length` is not an integer multiple of
        `2**(n_bins / bins_per_octave)`, or if `y` is too short to
        support the frequency range of the CQT.

    See Also
    --------
    cqt
    pseudo_cqt

    Notes
    -----
    This function caches at level 20.
    '''
    if fmin is None:
        fmin = note_to_hz('C1')  # default to C1 ~= 32.70 Hz

    if tuning is None:
        tuning = estimate_tuning(y=y, sr=sr)

    # Center frequency for every requested bin
    freqs = cqt_frequencies(n_bins, fmin,
                            bins_per_octave=bins_per_octave,
                            tuning=tuning)

    # Time-domain length of each constant-Q filter
    lengths = filters.constant_q_lengths(sr, fmin,
                                         n_bins=n_bins,
                                         bins_per_octave=bins_per_octave,
                                         tuning=tuning,
                                         filter_scale=filter_scale,
                                         window=window)

    # A bin is handled by the pseudo CQT when its power-of-two padded
    # filter fits within two hop lengths
    is_pseudo = 2.0 ** np.ceil(np.log2(lengths)) < 2 * hop_length
    n_bins_pseudo = int(np.sum(is_pseudo))
    n_bins_full = n_bins - n_bins_pseudo

    responses = []

    if n_bins_pseudo > 0:
        # Pseudo CQT over the high-frequency bins
        responses.append(pseudo_cqt(y, sr,
                                    hop_length=hop_length,
                                    fmin=np.min(freqs[is_pseudo]),
                                    n_bins=n_bins_pseudo,
                                    bins_per_octave=bins_per_octave,
                                    tuning=tuning,
                                    filter_scale=filter_scale,
                                    norm=norm,
                                    sparsity=sparsity,
                                    window=window,
                                    scale=scale,
                                    pad_mode=pad_mode))

    if n_bins_full > 0:
        # Full CQT (magnitude only) over the remaining low-frequency bins
        responses.append(np.abs(cqt(y, sr,
                                    hop_length=hop_length,
                                    fmin=fmin,
                                    n_bins=n_bins_full,
                                    bins_per_octave=bins_per_octave,
                                    tuning=tuning,
                                    filter_scale=filter_scale,
                                    norm=norm,
                                    sparsity=sparsity,
                                    window=window,
                                    scale=scale,
                                    pad_mode=pad_mode,
                                    res_type=res_type)))

    return __trim_stack(responses, n_bins)
def pseudo_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
               bins_per_octave=12, tuning=0.0, filter_scale=1,
               norm=1, sparsity=0.01, window='hann', scale=True,
               pad_mode='reflect'):
    '''Compute the pseudo constant-Q transform of an audio signal.

    A single FFT size is used: the smallest power of 2 that is at least
    as large as both the longest CQT filter and twice the hop length.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    hop_length : int > 0 [scalar]
        number of samples between successive CQT columns.
    fmin : float > 0 [scalar]
        Minimum frequency. Defaults to C1 ~= 32.70 Hz
    n_bins : int > 0 [scalar]
        Number of frequency bins, starting at `fmin`
    bins_per_octave : int > 0 [scalar]
        Number of bins per octave
    tuning : None or float in `[-0.5, 0.5)`
        Tuning offset in fractions of a bin (cents).
        If `None`, tuning will be automatically estimated from the signal.
    filter_scale : float > 0
        Filter filter_scale factor. Larger values use longer windows.
    sparsity : float in [0, 1)
        Sparsify the CQT basis by discarding up to `sparsity`
        fraction of the energy in each basis.
        Set `sparsity=0` to disable sparsification.
    window : str, tuple, number, or function
        Window specification for the basis filters.
        See `filters.get_window` for details.
    pad_mode : string
        Padding mode for centered frame analysis.
        See also: `librosa.core.stft` and `np.pad`.

    Returns
    -------
    CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
        Pseudo Constant-Q energy for each frequency at each time.

    Raises
    ------
    ParameterError
        If `hop_length` is not an integer multiple of
        `2**(n_bins / bins_per_octave)`, or if `y` is too short to
        support the frequency range of the CQT.

    Notes
    -----
    This function caches at level 20.
    '''
    if fmin is None:
        fmin = note_to_hz('C1')  # default to C1 ~= 32.70 Hz

    if tuning is None:
        tuning = estimate_tuning(y=y, sr=sr)

    # Frequency-domain filter bank; only magnitudes are needed here
    fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin, n_bins,
                                           bins_per_octave,
                                           tuning, filter_scale,
                                           norm, sparsity,
                                           hop_length=hop_length,
                                           window=window)
    fft_basis = np.abs(fft_basis)

    # Magnitude STFT (Hann window) of the input signal
    mag_spec = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
                           pad_mode=pad_mode))

    # Project the spectrogram onto the pseudo-CQT basis
    response = fft_basis.dot(mag_spec)

    if scale:
        # Orthonormal-style scaling, analogous to norm='ortho' in FFT
        response /= np.sqrt(n_fft)
    else:
        # Rescale each bin by the length of its filter
        lengths = filters.constant_q_lengths(sr, fmin,
                                             n_bins=n_bins,
                                             bins_per_octave=bins_per_octave,
                                             tuning=tuning,
                                             window=window,
                                             filter_scale=filter_scale)
        response *= np.sqrt(lengths[:, np.newaxis] / n_fft)

    return response
def icqt(C, sr=22050, hop_length=512, fmin=None, bins_per_octave=12,
         tuning=0.0, filter_scale=1, norm=1, sparsity=0.01, window='hann',
         scale=True, length=None, amin=util.Deprecated(), res_type='fft'):
    '''Compute the inverse constant-Q transform.
    Given a constant-Q transform representation `C` of an audio signal `y`,
    this function produces an approximation `y_hat`.
    Parameters
    ----------
    C : np.ndarray, [shape=(n_bins, n_frames)]
        Constant-Q representation as produced by `core.cqt`
    hop_length : int > 0 [scalar]
        number of samples between successive frames
    fmin : float > 0 [scalar]
        Minimum frequency. Defaults to C1 ~= 32.70 Hz
    tuning : float in `[-0.5, 0.5)` [scalar]
        Tuning offset in fractions of a bin (cents).
    filter_scale : float > 0 [scalar]
        Filter scale factor. Small values (<1) use shorter windows
        for improved time resolution.
    norm : {inf, -inf, 0, float > 0}
        Type of norm to use for basis function normalization.
        See `librosa.util.normalize`.
    sparsity : float in [0, 1)
        Sparsify the CQT basis by discarding up to `sparsity`
        fraction of the energy in each basis.
        Set `sparsity=0` to disable sparsification.
    window : str, tuple, number, or function
        Window specification for the basis filters.
        See `filters.get_window` for details.
    scale : bool
        If `True`, scale the CQT response by square-root the length
        of each channel's filter. This is analogous to `norm='ortho'` in FFT.
        If `False`, do not scale the CQT. This is analogous to `norm=None`
        in FFT.
    length : int > 0, optional
        If provided, the output `y` is zero-padded or clipped to exactly
        `length` samples.
    amin : float or None [DEPRECATED]
        .. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
    res_type : string
        Resampling mode. By default, this uses `fft` mode for high-quality
        reconstruction, but this may be slow depending on your signal duration.
        See `librosa.resample` for supported modes.
    Returns
    -------
    y : np.ndarray, [shape=(n_samples), dtype=np.float]
        Audio time-series reconstructed from the CQT representation.
    See Also
    --------
    cqt
    core.resample
    Notes
    -----
    This function caches at level 40.
    Examples
    --------
    Using default parameters
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
    >>> C = librosa.cqt(y=y, sr=sr)
    >>> y_hat = librosa.icqt(C=C, sr=sr)
    Or with a different hop length and frequency resolution:
    >>> hop_length = 256
    >>> bins_per_octave = 12 * 3
    >>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
    ...                 bins_per_octave=bins_per_octave)
    >>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
    ...                 bins_per_octave=bins_per_octave)
    '''
    if fmin is None:
        fmin = note_to_hz('C1')
    # Get the top octave of frequencies
    n_bins = len(C)
    freqs = cqt_frequencies(n_bins, fmin,
                            bins_per_octave=bins_per_octave,
                            tuning=tuning)[-bins_per_octave:]
    # One filter bank covers a single octave; lower octaves reuse it at
    # progressively downsampled rates
    n_filters = min(n_bins, bins_per_octave)
    fft_basis, n_fft, lengths = __cqt_filter_fft(sr, np.min(freqs),
                                                 n_filters,
                                                 bins_per_octave,
                                                 tuning,
                                                 filter_scale,
                                                 norm,
                                                 sparsity=sparsity,
                                                 window=window)
    if hop_length > min(lengths):
        warnings.warn('hop_length={} exceeds minimum CQT filter length={:.3f}.\n'
                      'This will probably cause unpleasant acoustic artifacts. '
                      'Consider decreasing your hop length or increasing the frequency resolution of your CQT.'.format(hop_length, min(lengths)))
    # The basis gets renormalized by the effective window length above;
    # This step undoes that
    fft_basis = fft_basis.todense() * n_fft / lengths[:, np.newaxis]
    # This step conjugate-transposes the filter
    inv_basis = fft_basis.H
    # How many octaves do we have?
    n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
    y = None
    # Reconstruct octave by octave, from lowest frequencies to highest,
    # upsampling the running buffer by 2x at each step
    for octave in range(n_octaves - 1, -1, -1):
        # NOTE(review): the extra `- 1` in both slice endpoints looks like an
        # off-by-one (for octave=0 it appears to exclude the top bin of C) —
        # confirm against the reference implementation before changing.
        slice_ = slice(-(octave+1) * bins_per_octave - 1,
                       -(octave) * bins_per_octave - 1)
        # Slice this octave
        C_oct = C[slice_]
        inv_oct = inv_basis[:, -C_oct.shape[0]:]
        # Effective hop at this octave's (downsampled) rate
        oct_hop = hop_length // 2**octave
        # Apply energy corrections
        if scale:
            C_scale = np.sqrt(lengths[-C_oct.shape[0]:, np.newaxis]) / n_fft
        else:
            C_scale = lengths[-C_oct.shape[0]:, np.newaxis] * np.sqrt(2**octave) / n_fft
        # Inverse-project the basis for each octave
        D_oct = inv_oct.dot(C_oct / C_scale)
        # Inverse-STFT that response
        y_oct = istft(D_oct, window='ones', hop_length=oct_hop)
        # Up-sample that octave
        if y is None:
            y = y_oct
        else:
            # Up-sample the previous buffer and add in the new one
            # Scipy-resampling is fast here, since it's a power-of-two relation
            y = audio.resample(y, 1, 2, scale=True, res_type=res_type, fix=False)
            y[:len(y_oct)] += y_oct
    if length:
        y = util.fix_length(y, length)
    return y
def __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,
                     filter_scale, norm, sparsity, hop_length=None,
                     window='hann'):
    '''Build the frequency-domain constant-Q filter basis.'''
    basis, lengths = filters.constant_q(sr,
                                        fmin=fmin,
                                        n_bins=n_bins,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning,
                                        filter_scale=filter_scale,
                                        norm=norm,
                                        pad_fft=True,
                                        window=window)

    # constant_q pads every filter to the nearest power of two
    n_fft = basis.shape[1]

    # When a hop is specified, enforce an FFT of at least twice the hop
    if hop_length is not None:
        min_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))
        if n_fft < min_fft:
            n_fft = min_fft

    # Re-normalize each filter relative to the FFT window length
    basis *= lengths[:, np.newaxis] / float(n_fft)

    # Forward FFT, keeping only the non-negative frequency bins
    fft = get_fftlib()
    fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2) + 1]

    # Discard near-zero entries to sparsify the basis
    fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)

    return fft_basis, n_fft, lengths
def __trim_stack(cqt_resp, n_bins):
    '''Trim a collection of CQT responses to a common frame count and stack them.'''
    # The shortest response determines how many frames survive;
    # this cleans up framing errors at the signal boundaries
    n_frames = min(block.shape[1] for block in cqt_resp)

    # Stack in reverse order so the highest-frequency response comes first
    stacked = np.vstack([block[:, :n_frames] for block in cqt_resp[::-1]])

    # Keep only the bottom n_bins rows; the double transpose
    # forces a column-contiguous layout
    return np.ascontiguousarray(stacked[-n_bins:].T).T
def __cqt_response(y, n_fft, hop_length, fft_basis, mode):
    '''Project the STFT of `y` onto the CQT filter basis.'''
    # Rectangular-windowed STFT at the requested hop
    spectrum = stft(y, n_fft=n_fft, hop_length=hop_length,
                    window='ones', pad_mode=mode)

    # Filter-bank response energy
    return fft_basis.dot(spectrum)
def __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):
    '''Count how many 2x downsampling passes can be applied up front.'''
    # Limit imposed by the anti-aliasing filter bandwidth
    count_bw = int(np.ceil(np.log2(audio.BW_FASTEST * nyquist /
                                   filter_cutoff)) - 1) - 1

    # Limit imposed by the hop length: one factor of two per octave
    count_hop = __num_two_factors(hop_length) - n_octaves + 1

    return min(max(0, count_bw), max(0, count_hop))
def __early_downsample(y, sr, hop_length, res_type, n_octaves,
                       nyquist, filter_cutoff, scale):
    '''Downsample an audio signal ahead of the CQT, when applicable.'''
    downsample_count = __early_downsample_count(nyquist, filter_cutoff,
                                                hop_length, n_octaves)

    # Early downsampling only applies with the fast kaiser resampler
    if res_type != 'kaiser_fast' or downsample_count <= 0:
        return y, sr, hop_length

    factor = 2 ** downsample_count
    hop_length //= factor

    if len(y) < factor:
        raise ParameterError('Input signal length={:d} is too short for '
                             '{:d}-octave CQT'.format(len(y), n_octaves))

    new_sr = sr / float(factor)
    y = audio.resample(y, sr, new_sr,
                       res_type=res_type,
                       scale=True)

    # Without post-CQT length scaling, compensate the energy here
    if not scale:
        y *= np.sqrt(factor)

    return y, new_sr, hop_length
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
                         weights_mul, weights_add, max_0, max_1):  # pragma: no cover
    '''Calculate the accumulated cost matrix D.
    Use dynamic programming to calculate the accumulated costs.

    Written as explicit nested loops for numba compatibility — do not
    vectorize without checking nopython-mode support.

    Parameters
    ----------
    C : np.ndarray [shape=(N, M)]
        pre-computed cost matrix
    D : np.ndarray [shape=(N, M)]
        accumulated cost matrix
    D_steps : np.ndarray [shape=(N, M)]
        steps which were used for calculating D
    step_sizes_sigma : np.ndarray [shape=[n, 2]]
        Specifies allowed step sizes as used by the dtw.
    weights_add : np.ndarray [shape=[n, ]]
        Additive weights to penalize certain step sizes.
    weights_mul : np.ndarray [shape=[n, ]]
        Multiplicative weights to penalize certain step sizes.
    max_0 : int
        maximum number of steps in step_sizes_sigma in dim 0.
    max_1 : int
        maximum number of steps in step_sizes_sigma in dim 1.

    Returns
    -------
    D : np.ndarray [shape=(N,M)]
        accumulated cost matrix.
        D[N,M] is the total alignment cost.
        When doing subsequence DTW, D[N,:] indicates a matching function.
    D_steps : np.ndarray [shape=(N,M)]
        steps which were used for calculating D.

    See Also
    --------
    dtw
    '''
    # D is padded by (max_0, max_1) relative to C, so C is indexed with
    # an offset of (-max_0, -max_1) below
    for cur_n in range(max_0, D.shape[0]):
        for cur_m in range(max_1, D.shape[1]):
            # accumulate costs: try every allowed step and keep the cheapest
            for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
                                                          weights_add, weights_mul):
                # cost accumulated at the predecessor cell for this step
                cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
                          cur_m - step_sizes_sigma[cur_step_idx, 1]]
                # local cost, weighted multiplicatively then additively
                cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
                cur_C += cur_w_add
                cur_cost = cur_D + cur_C
                # check if cur_cost is smaller than the one stored in D
                if cur_cost < D[cur_n, cur_m]:
                    D[cur_n, cur_m] = cur_cost
                    # save step-index for backtracking later
                    D_steps[cur_n, cur_m] = cur_step_idx
    return D, D_steps
def __dtw_backtracking(D_steps, step_sizes_sigma):  # pragma: no cover
    '''Backtrack optimal warping path.
    Uses the saved step sizes from the cost accumulation
    step to backtrack the index pairs for an optimal
    warping path.

    Kept as a plain loop for numba compatibility.

    Parameters
    ----------
    D_steps : np.ndarray [shape=(N, M)]
        Saved indices of the used steps used in the calculation of D.
    step_sizes_sigma : np.ndarray [shape=[n, 2]]
        Specifies allowed step sizes as used by the dtw.

    Returns
    -------
    wp : list [shape=(N,)]
        Warping path with index pairs, ordered from the end of the
        alignment back to the start.
        Each list entry contains an index pair
        (n,m) as a tuple

    See Also
    --------
    dtw
    '''
    wp = []
    # Set starting point D(N,M) and append it to the path
    cur_idx = (D_steps.shape[0] - 1, D_steps.shape[1] - 1)
    wp.append((cur_idx[0], cur_idx[1]))
    # Loop backwards.
    # Stop criteria:
    # Setting it to (0, 0) does not work for the subsequence dtw,
    # so we only ask to reach the first row of the matrix.
    while cur_idx[0] > 0:
        # Which step was taken to arrive at the current cell?
        cur_step_idx = D_steps[(cur_idx[0], cur_idx[1])]
        # save tuple with minimal acc. cost in path
        cur_idx = (cur_idx[0] - step_sizes_sigma[cur_step_idx][0],
                   cur_idx[1] - step_sizes_sigma[cur_step_idx][1])
        # append to warping path
        wp.append((cur_idx[0], cur_idx[1]))
    return wp
def _viterbi(log_prob, log_trans, log_p_init, state, value, ptr):  # pragma: no cover
    '''Core Viterbi algorithm.
    This is intended for internal use only.

    Written with explicit loops (rather than vectorized max/argmax) so it
    can be compiled by numba in nopython mode.

    Parameters
    ----------
    log_prob : np.ndarray [shape=(T, m)]
        `log_prob[t, s]` is the conditional log-likelihood
        log P[X = X(t) | State(t) = s]
    log_trans : np.ndarray [shape=(m, m)]
        The log transition matrix
        `log_trans[i, j]` = log P[State(t+1) = j | State(t) = i]
    log_p_init : np.ndarray [shape=(m,)]
        log of the initial state distribution
    state : np.ndarray [shape=(T,), dtype=int]
        Pre-allocated state index array
    value : np.ndarray [shape=(T, m)] float
        Pre-allocated value array
    ptr : np.ndarray [shape=(T, m), dtype=int]
        Pre-allocated pointer array

    Returns
    -------
    None
        All computations are performed in-place on `state, value, ptr`.
    '''
    n_steps, n_states = log_prob.shape
    # factor in initial state distribution
    value[0] = log_prob[0] + log_p_init
    # Forward pass: fill in the value and back-pointer tables
    for t in range(1, n_steps):
        # Want V[t, j] <- p[t, j] * max_k V[t-1, k] * A[k, j]
        #   assume at time t-1 we were in state k
        #   transition k -> j
        # Broadcast over rows:
        #   Tout[k, j] = V[t-1, k] * A[k, j]
        # then take the max over columns
        # We'll do this in log-space for stability
        trans_out = value[t - 1] + log_trans.T
        # Unroll the max/argmax loop to enable numba support
        for j in range(n_states):
            ptr[t, j] = np.argmax(trans_out[j])
            # value[t, j] = log_prob[t, j] + np.max(trans_out[j])
            value[t, j] = log_prob[t, j] + trans_out[j, ptr[t][j]]
    # Now roll backward
    # Get the last state
    state[-1] = np.argmax(value[-1])
    # Backward pass: follow the stored pointers to recover the state path
    for t in range(n_steps - 2, -1, -1):
        state[t] = ptr[t+1, state[t+1]]
def viterbi_discriminative(prob, transition, p_state=None, p_init=None, return_logp=False):
    '''Viterbi decoding from discriminative state predictions.

    Given a sequence of conditional state predictions `prob[s, t]`,
    indicating the conditional likelihood of state `s` given the
    observation at time `t`, and a transition matrix `transition[i, j]`
    which encodes the conditional probability of moving from state `i`
    to state `j`, compute the most likely sequence of states.

    This uses standard Viterbi decoding under the assumption that
    `P[Obs(t) | State(t) = s]` is proportional to
    `P[State(t) = s | Obs(t)] / P[State(t) = s]`, where the denominator
    is the marginal probability of state `s` as given by `p_state`.

    Parameters
    ----------
    prob : np.ndarray [shape=(n_states, n_steps), non-negative]
        `prob[s, t]` is the probability of state `s` conditional on
        the observation at time `t`.
        Must be non-negative and sum to 1 along each column.

    transition : np.ndarray [shape=(n_states, n_states), non-negative]
        `transition[i, j]` is the probability of a transition from i->j.
        Each row must sum to 1.

    p_state : np.ndarray [shape=(n_states,)] or None
        Optional: marginal probability distribution over states,
        must be non-negative and sum to 1.
        If not provided, a uniform distribution is assumed.

    p_init : np.ndarray [shape=(n_states,)] or None
        Optional: initial state distribution.
        If not provided, it is assumed to be uniform.

    return_logp : bool
        If `True`, return the log-likelihood of the state sequence.

    Returns
    -------
    Either `states` or `(states, logp)`:

    states : np.ndarray [shape=(n_steps,)]
        The most likely state sequence.

    logp : scalar [float]
        If `return_logp=True`, the log probability of `states` given
        the observations.

    Raises
    ------
    ParameterError
        if any of `prob`, `transition`, `p_state`, or `p_init`
        fails validation.

    See Also
    --------
    viterbi : Viterbi decoding from observation likelihoods
    viterbi_binary : Viterbi decoding for multi-label, conditional state likelihoods
    '''

    n_states, n_steps = prob.shape

    if transition.shape != (n_states, n_states):
        raise ParameterError('transition.shape={}, must be '
                             '(n_states, n_states)={}'.format(transition.shape,
                                                              (n_states, n_states)))

    if np.any(transition < 0) or not np.allclose(transition.sum(axis=1), 1):
        raise ParameterError('Invalid transition matrix: must be non-negative '
                             'and sum to 1 on each row.')

    if np.any(prob < 0) or not np.allclose(prob.sum(axis=0), 1):
        raise ParameterError('Invalid probability values: each column must '
                             'sum to 1 and be non-negative')

    # Output buffers filled in by the compiled DP helper
    states = np.zeros(n_steps, dtype=int)
    values = np.zeros((n_steps, n_states), dtype=float)
    ptr = np.zeros((n_steps, n_states), dtype=int)

    # Tiny positive offset to avoid log(0) underflow below
    epsilon = np.finfo(prob.dtype).tiny

    if p_state is None:
        p_state = np.empty(n_states)
        p_state.fill(1./n_states)
    elif p_state.shape != (n_states,):
        raise ParameterError('Marginal distribution p_state must have shape (n_states,). '
                             'Got p_state.shape={}'.format(p_state.shape))
    elif np.any(p_state < 0) or not np.allclose(p_state.sum(axis=-1), 1):
        raise ParameterError('Invalid marginal state distribution: '
                             'p_state={}'.format(p_state))

    log_trans = np.log(transition + epsilon)
    log_marginal = np.log(p_state + epsilon)

    # By Bayes' rule, P[X | Y] * P[Y] = P[Y | X] * P[X]
    # P[X] is constant for the sake of maximum likelihood inference
    # and P[Y] is given by the marginal distribution p_state.
    #
    # So we have P[X | y] \propto P[Y | x] / P[Y]
    # if X = observation and Y = states, this can be done in log space as
    # log P[X | y] \propto \log P[Y | x] - \log P[Y]
    log_prob = np.log(prob.T + epsilon) - log_marginal

    if p_init is None:
        p_init = np.empty(n_states)
        p_init.fill(1./n_states)
    elif p_init.shape != (n_states,):
        # Bug fix: p_init was previously only validated for sign and
        # normalization (unlike p_state); a mis-shaped p_init would
        # fail obscurely inside the decoder instead of raising here.
        raise ParameterError('Initial distribution p_init must have shape (n_states,). '
                             'Got p_init.shape={}'.format(p_init.shape))
    elif np.any(p_init < 0) or not np.allclose(p_init.sum(), 1):
        raise ParameterError('Invalid initial state distribution: '
                             'p_init={}'.format(p_init))

    log_p_init = np.log(p_init + epsilon)

    # The dynamic program itself lives in the compiled helper
    _viterbi(log_prob, log_trans, log_p_init, states, values, ptr)

    if return_logp:
        return states, values[-1, states[-1]]

    return states
def transition_uniform(n_states):
    '''Construct a uniform transition matrix over `n_states`.

    Parameters
    ----------
    n_states : int > 0
        The number of states

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        `transition[i, j] = 1./n_states`

    Raises
    ------
    ParameterError
        if `n_states` is not a positive integer

    Examples
    --------
    >>> librosa.sequence.transition_uniform(3)
    array([[0.333, 0.333, 0.333],
           [0.333, 0.333, 0.333],
           [0.333, 0.333, 0.333]])
    '''
    if not isinstance(n_states, int) or n_states <= 0:
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('n_states={} must be a positive integer'.format(n_states))

    # `float` instead of the deprecated (removed in NumPy 1.24) `np.float` alias
    transition = np.empty((n_states, n_states), dtype=float)
    transition.fill(1./n_states)

    return transition
def transition_loop(n_states, prob):
    '''Construct a self-loop transition matrix over `n_states`.

    The transition matrix will have the following properties:

        - `transition[i, i] = p` for all i
        - `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`

    This type of transition matrix is appropriate when states tend to be
    locally stable, and there is no additional structure between different
    states.  This is primarily useful for de-noising frame-wise predictions.

    Parameters
    ----------
    n_states : int > 1
        The number of states

    prob : float in [0, 1] or iterable, length=n_states
        If a scalar, this is the probability of a self-transition.
        If a vector of length `n_states`, `p[i]` is the probability of
        state `i`'s self-transition.

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        The transition matrix

    Raises
    ------
    ParameterError
        if `n_states` or `prob` fail validation

    Examples
    --------
    >>> librosa.sequence.transition_loop(3, 0.5)
    array([[0.5 , 0.25, 0.25],
           [0.25, 0.5 , 0.25],
           [0.25, 0.25, 0.5 ]])

    >>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
    array([[0.8  , 0.1  , 0.1  ],
           [0.25 , 0.5  , 0.25 ],
           [0.375, 0.375, 0.25 ]])
    '''
    if not isinstance(n_states, int) or n_states <= 1:
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))

    # `float` instead of the deprecated (removed) `np.float` alias
    transition = np.empty((n_states, n_states), dtype=float)

    # if it's a scalar, broadcast it to one value per state
    prob = np.asarray(prob, dtype=float)
    if prob.ndim == 0:
        prob = np.tile(prob, n_states)

    if prob.shape != (n_states,):
        raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))

    for i, prob_i in enumerate(prob):
        # Off-diagonal mass is split evenly among the other states
        transition[i] = (1. - prob_i) / (n_states - 1)
        transition[i, i] = prob_i

    return transition
def transition_cycle(n_states, prob):
    '''Construct a cyclic transition matrix over `n_states`.

    The transition matrix will have the following properties:

        - `transition[i, i] = p`
        - `transition[i, i + 1] = (1 - p)`

    This type of transition matrix is appropriate for state spaces
    with cyclical structure, such as metrical position within a bar.
    For example, a song in 4/4 time has state transitions of the form
    1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.

    Parameters
    ----------
    n_states : int > 1
        The number of states

    prob : float in [0, 1] or iterable, length=n_states
        If a scalar, this is the probability of a self-transition.
        If a vector of length `n_states`, `p[i]` is the probability of
        state `i`'s self-transition.

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        The transition matrix

    Raises
    ------
    ParameterError
        if `n_states` or `prob` fail validation

    Examples
    --------
    >>> librosa.sequence.transition_cycle(4, 0.9)
    array([[0.9, 0.1, 0. , 0. ],
           [0. , 0.9, 0.1, 0. ],
           [0. , 0. , 0.9, 0.1],
           [0.1, 0. , 0. , 0.9]])
    '''
    if not isinstance(n_states, int) or n_states <= 1:
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))

    # `float` instead of the deprecated (removed) `np.float` alias
    transition = np.zeros((n_states, n_states), dtype=float)

    # if it's a scalar, broadcast it to one value per state
    prob = np.asarray(prob, dtype=float)
    if prob.ndim == 0:
        prob = np.tile(prob, n_states)

    if prob.shape != (n_states,):
        raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))

    for i, prob_i in enumerate(prob):
        # All non-self mass goes to the (cyclic) successor state
        transition[i, np.mod(i + 1, n_states)] = 1. - prob_i
        transition[i, i] = prob_i

    return transition
def transition_local(n_states, width, window='triangle', wrap=False):
    '''Construct a localized transition matrix.

    The transition matrix will have the following properties:

        - `transition[i, j] = 0` if `|i - j| > width`
        - `transition[i, i]` is maximal
        - `transition[i, i - width//2 : i + width//2]` has shape `window`

    This type of transition matrix is appropriate for state spaces
    that discretely approximate continuous variables, such as in
    fundamental frequency estimation.

    Parameters
    ----------
    n_states : int > 1
        The number of states

    width : int >= 1 or iterable
        The maximum number of states to treat as "local".
        If iterable, it should have length equal to `n_states`,
        and specify the width independently for each state.

    window : str, callable, or window specification
        The window function to determine the shape of the "local" distribution.
        Any window specification supported by `filters.get_window` will work here.

        .. note:: Certain windows (e.g., 'hann') are identically 0 at the
            boundaries, so effectively have `width-2` non-zero values.  You
            may have to expand `width` to get the desired behavior.

    wrap : bool
        If `True`, then state locality `|i - j|` is computed modulo `n_states`.
        If `False` (default), then locality is absolute.

    See Also
    --------
    filters.get_window

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        The transition matrix

    Raises
    ------
    ParameterError
        if `n_states` or `width` fail validation

    Examples
    --------
    Triangular distributions with and without wrapping

    >>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=False)
    array([[0.667, 0.333, 0.   , 0.   , 0.   ],
           [0.25 , 0.5  , 0.25 , 0.   , 0.   ],
           [0.   , 0.25 , 0.5  , 0.25 , 0.   ],
           [0.   , 0.   , 0.25 , 0.5  , 0.25 ],
           [0.   , 0.   , 0.   , 0.333, 0.667]])

    >>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=True)
    array([[0.5 , 0.25, 0.  , 0.  , 0.25],
           [0.25, 0.5 , 0.25, 0.  , 0.  ],
           [0.  , 0.25, 0.5 , 0.25, 0.  ],
           [0.  , 0.  , 0.25, 0.5 , 0.25],
           [0.25, 0.  , 0.  , 0.25, 0.5 ]])

    Uniform local distributions with variable widths and no wrapping

    >>> librosa.sequence.transition_local(5, [1, 2, 3, 3, 1], window='ones', wrap=False)
    array([[1.   , 0.   , 0.   , 0.   , 0.   ],
           [0.5  , 0.5  , 0.   , 0.   , 0.   ],
           [0.   , 0.333, 0.333, 0.333, 0.   ],
           [0.   , 0.   , 0.333, 0.333, 0.333],
           [0.   , 0.   , 0.   , 0.   , 1.   ]])
    '''
    if not isinstance(n_states, int) or n_states <= 1:
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))

    width = np.asarray(width, dtype=int)
    if width.ndim == 0:
        width = np.tile(width, n_states)

    if width.shape != (n_states,):
        raise ParameterError('width={} must have length equal to n_states={}'.format(width, n_states))

    if np.any(width < 1):
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('width={} must be at least 1'.format(width))

    # `float` instead of the deprecated (removed) `np.float` alias
    transition = np.zeros((n_states, n_states), dtype=float)

    # Fill in the widths.  This is inefficient, but simple
    for i, width_i in enumerate(width):
        trans_row = pad_center(get_window(window, width_i, fftbins=False),
                               n_states)
        trans_row = np.roll(trans_row, n_states//2 + i + 1)

        if not wrap:
            # Knock out the off-diagonal-band elements
            trans_row[min(n_states, i + width_i//2 + 1):] = 0
            trans_row[:max(0, i - width_i//2)] = 0

        transition[i] = trans_row

    # Row-normalize
    transition /= transition.sum(axis=1, keepdims=True)

    return transition
def onset_detect(y=None, sr=22050, onset_envelope=None, hop_length=512,
                 backtrack=False, energy=None,
                 units='frames', **kwargs):
    """Basic onset detector.  Locate note onset events by picking peaks in an
    onset strength envelope.

    The `peak_pick` parameters were chosen by large-scale hyper-parameter
    optimization over the dataset provided by [1]_.

    .. [1] https://github.com/CPJKU/onset_db

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        sampling rate of `y`

    onset_envelope : np.ndarray [shape=(m,)] or None
        (optional) pre-computed onset strength envelope.
        The input array is left unmodified.

    hop_length : int > 0 [scalar]
        hop length (in samples)

    backtrack : bool
        If `True`, detected onset events are backtracked to the nearest
        preceding minimum of `energy`.  This is primarily useful when
        using onsets as slice points for segmentation.

    energy : np.ndarray [shape=(m,)] or None
        An energy function to use for backtracking detected onset events.
        If none is provided, then `onset_envelope` is used.

    units : {'frames', 'samples', 'time'}
        The units to encode detected onset events in.
        By default, 'frames' are used.

    kwargs : additional keyword arguments
        Additional parameters for peak picking.
        See `librosa.util.peak_pick` for details.

    Returns
    -------
    onsets : np.ndarray [shape=(n_onsets,)]
        estimated positions of detected onsets, in whichever units
        are specified.  By default, frame indices.

        .. note::
            If no onset strength could be detected, onset_detect returns
            an empty array.

    Raises
    ------
    ParameterError
        if neither `y` nor `onset_envelope` are provided,
        or if `units` is not one of 'frames', 'samples', or 'time'

    See Also
    --------
    onset_strength : compute onset strength per-frame
    onset_backtrack : backtracking onset events
    librosa.util.peak_pick : pick peaks from a time series
    """

    # First, get the frame->beat strength profile if we don't already have one
    if onset_envelope is None:
        if y is None:
            raise ParameterError('y or onset_envelope must be provided')

        onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)

    # Shift onset envelope up to be non-negative.
    # Bug fix: use an out-of-place subtraction so that a caller-supplied
    # envelope is not silently mutated by the normalization steps below.
    onset_envelope = onset_envelope - onset_envelope.min()

    # Do we have any onsets to grab?
    if not onset_envelope.any():
        # `int` instead of the deprecated (removed) `np.int` alias
        return np.array([], dtype=int)

    # Normalize onset strength function to [0, 1] range.
    # In-place is safe now: we own this copy.
    onset_envelope /= onset_envelope.max()

    # These parameter settings found by large-scale search
    kwargs.setdefault('pre_max', 0.03*sr//hop_length)       # 30ms
    kwargs.setdefault('post_max', 0.00*sr//hop_length + 1)  # 0ms
    kwargs.setdefault('pre_avg', 0.10*sr//hop_length)       # 100ms
    kwargs.setdefault('post_avg', 0.10*sr//hop_length + 1)  # 100ms
    kwargs.setdefault('wait', 0.03*sr//hop_length)          # 30ms
    kwargs.setdefault('delta', 0.07)

    # Peak pick the onset envelope
    onsets = util.peak_pick(onset_envelope, **kwargs)

    # Optionally backtrack the events
    if backtrack:
        if energy is None:
            energy = onset_envelope

        onsets = onset_backtrack(onsets, energy)

    if units == 'frames':
        pass
    elif units == 'samples':
        onsets = core.frames_to_samples(onsets, hop_length=hop_length)
    elif units == 'time':
        onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
    else:
        raise ParameterError('Invalid unit type: {}'.format(units))

    return onsets
def onset_strength(y=None, sr=22050, S=None, lag=1, max_size=1,
                   ref=None,
                   detrend=False, center=True,
                   feature=None, aggregate=None,
                   centering=None,
                   **kwargs):
    """Compute a spectral flux onset strength envelope.

    Onset strength at time `t` is determined by:

        `mean_f max(0, S[f, t] - ref[f, t - lag])`

    where `ref` is `S` after local max filtering along the frequency
    axis [1]_.

    By default, if a time series `y` is provided, S will be the
    log-power Mel spectrogram.

    .. [1] Böck, Sebastian, and Gerhard Widmer.
        "Maximum filter vibrato suppression for onset detection."
        16th International Conference on Digital Audio Effects,
        Maynooth, Ireland. 2013.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time-series

    sr : number > 0 [scalar]
        sampling rate of `y`

    S : np.ndarray [shape=(d, m)] or None
        pre-computed (log-power) spectrogram

    lag : int > 0
        time lag for computing differences

    max_size : int > 0
        size (in frequency bins) of the local max filter.
        set to `1` to disable filtering.

    ref : None or np.ndarray [shape=(d, m)]
        An optional pre-computed reference spectrum, of the same shape
        as `S`.  If not provided, it will be computed from `S`.  If
        provided, it overrides any local max filtering governed by
        `max_size`.

    detrend : bool [scalar]
        Filter the onset strength to remove the DC component

    center : bool [scalar]
        Shift the onset function by `n_fft / (2 * hop_length)` frames

    feature : function
        Function for computing time-series features, eg, scaled
        spectrograms.  By default, uses `librosa.feature.melspectrogram`
        with `fmax=11025.0`

    aggregate : function
        Aggregation function to use when combining onsets at different
        frequency bins.  Default: `np.mean`

    centering : unused
        Not referenced by this implementation; accepted so that existing
        callers passing it keep working.

    kwargs : additional keyword arguments
        Additional parameters to `feature()`, if `S` is not provided.

    Returns
    -------
    onset_envelope : np.ndarray [shape=(m,)]
        vector containing the onset strength envelope

    Raises
    ------
    ParameterError
        if neither `(y, sr)` nor `S` are provided,
        or if `lag` or `max_size` are not positive integers,
        or if `aggregate` is False

    See Also
    --------
    onset_detect
    onset_strength_multi
    """
    if aggregate is False:
        # Bug fix: the '{}' placeholder was previously never filled in
        raise ParameterError('aggregate={} cannot be False when computing full-spectrum onset strength.'.format(aggregate))

    # Delegate to the multi-channel implementation with a single channel
    # spanning all bands, and return that one channel.
    odf_all = onset_strength_multi(y=y,
                                   sr=sr,
                                   S=S,
                                   lag=lag,
                                   max_size=max_size,
                                   ref=ref,
                                   detrend=detrend,
                                   center=center,
                                   feature=feature,
                                   aggregate=aggregate,
                                   channels=None,
                                   **kwargs)

    return odf_all[0]
def onset_backtrack(events, energy):
    '''Backtrack detected onset events to the nearest preceding local
    minimum of an energy function.

    This is most useful when using onsets as slice points for
    segmentation, as described by [1]_: the detected peak time is rolled
    back to the preceding trough of `energy`.

    .. [1] Jehan, Tristan.
        "Creating music by listening"
        Doctoral dissertation
        Massachusetts Institute of Technology, 2005.

    Parameters
    ----------
    events : np.ndarray, dtype=int
        List of onset event frame indices, as computed by `onset_detect`

    energy : np.ndarray, shape=(m,)
        An energy function

    Returns
    -------
    events_backtracked : np.ndarray, shape=events.shape
        The input events matched to nearest preceding minima of `energy`.
    '''
    # An interior frame i is a local minimum when the energy is
    # non-increasing into it and strictly increasing out of it:
    #   energy[i] <= energy[i-1]  and  energy[i] < energy[i+1]
    interior = energy[1:-1]
    is_minimum = (interior <= energy[:-2]) & (interior < energy[2:])

    # +1 compensates for the slicing offset above
    minima = 1 + np.flatnonzero(is_minimum)

    # Force a minimum at frame 0 so every event has a predecessor
    minima = util.fix_frames(minima, x_min=0)

    # Snap each event to the closest minimum at or before it
    return minima[util.match_events(events, minima, right=False)]
def onset_strength_multi(y=None, sr=22050, S=None, lag=1, max_size=1,
                         ref=None, detrend=False, center=True, feature=None,
                         aggregate=None, channels=None, **kwargs):
    """Compute a spectral flux onset strength envelope across multiple channels.

    Onset strength for channel `i` at time `t` is determined by:

        `mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])`

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time-series

    sr : number > 0 [scalar]
        sampling rate of `y`

    S : np.ndarray [shape=(d, m)] or None
        pre-computed (log-power) spectrogram

    lag : int > 0
        time lag for computing differences

    max_size : int > 0
        size (in frequency bins) of the local max filter.
        set to `1` to disable filtering.

    ref : None or np.ndarray [shape=(d, m)]
        An optional pre-computed reference spectrum, of the same shape
        as `S`.  If provided, it overrides any local max filtering
        governed by `max_size`.

    detrend : bool [scalar]
        Filter the onset strength to remove the DC component

    center : bool [scalar]
        Shift the onset function by `n_fft / (2 * hop_length)` frames

    feature : function
        Function for computing time-series features, eg, scaled
        spectrograms.  By default, uses `librosa.feature.melspectrogram`
        with `fmax=11025.0`

    aggregate : function or False
        Aggregation function to use when combining onsets at different
        frequency bins.  If `False`, no aggregation is performed.
        Default: `np.mean`

    channels : list or None
        Array of channel boundaries or slice objects.
        If `None`, then a single channel is generated to span all bands.

    kwargs : additional keyword arguments
        Additional parameters to `feature()`, if `S` is not provided.

    Returns
    -------
    onset_envelope : np.ndarray [shape=(n_channels, m)]
        array containing the onset strength envelope for each
        specified channel

    Raises
    ------
    ParameterError
        if neither `(y, sr)` nor `S` are provided,
        or if `lag` or `max_size` are invalid

    See Also
    --------
    onset_strength

    Notes
    -----
    This function caches at level 30.
    """
    # Resolve feature / aggregation defaults
    if feature is None:
        feature = melspectrogram
        kwargs.setdefault('fmax', 11025.0)

    if aggregate is None:
        aggregate = np.mean

    if lag < 1 or not isinstance(lag, int):
        raise ParameterError('lag must be a positive integer')

    if max_size < 1 or not isinstance(max_size, int):
        raise ParameterError('max_size must be a positive integer')

    # Compute a log-power feature spectrogram if one was not supplied
    if S is None:
        S = np.abs(feature(y=y, sr=sr, **kwargs))
        S = core.power_to_db(S)

    # Framing parameters, falling back to the standard onset defaults
    n_fft = kwargs.get('n_fft', 2048)
    hop_length = kwargs.get('hop_length', 512)

    S = np.atleast_2d(S)

    # Build the reference spectrum.  When max_size == 1 the max filter
    # is a no-op, so we pass S through by reference for efficiency.
    if ref is None:
        if max_size == 1:
            ref = S
        else:
            ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=0)
    elif ref.shape != S.shape:
        raise ParameterError('Reference spectrum shape {} must match input spectrum {}'.format(ref.shape, S.shape))

    # Lagged difference against the reference, rectified to keep only
    # increases in energy
    onset_env = np.maximum(0.0, S[:, lag:] - ref[:, :-lag])

    # Aggregate within channels; a single all-band channel if unspecified
    pad = channels is None
    if pad:
        channels = [slice(None)]

    if aggregate:
        onset_env = util.sync(onset_env, channels,
                              aggregate=aggregate,
                              pad=pad, axis=0)

    # Left-pad to compensate for the lag (and framing, if centering)
    pad_width = lag
    if center:
        pad_width += n_fft // (2 * hop_length)

    onset_env = np.pad(onset_env, ([0, 0], [int(pad_width), 0]),
                       mode='constant')

    # Remove the DC component
    if detrend:
        onset_env = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99],
                                         onset_env, axis=-1)

    # Trim back to the input duration
    if center:
        onset_env = onset_env[:, :S.shape[1]]

    return onset_env
def times_csv(path, times, annotations=None, delimiter=',', fmt='%0.3f'):
    r"""Save time steps in CSV format, e.g. the output of a beat-tracker
    or segmentation algorithm.

    With only `times`, each row of the file holds one formatted value::

        times[0]\n
        times[1]\n
        ...

    With `annotations`, each row holds a delimiter-separated pair::

        times[0],annotations[0]\n
        times[1],annotations[1]\n
        ...

    Parameters
    ----------
    path : string
        path to save the output CSV file

    times : list-like of floats
        list of frame numbers for beat events

    annotations : None or list-like
        optional annotations for each time step

    delimiter : str
        character to separate fields

    fmt : str
        format-string for rendering time

    Raises
    ------
    ParameterError
        if `annotations` is not `None` and its length does not
        match `times`

    Examples
    --------
    Write beat-tracker time to CSV

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time')
    >>> librosa.output.times_csv('beat_times.csv', beats)
    """
    if annotations is not None and len(annotations) != len(times):
        raise ParameterError('len(annotations) != len(times)')

    # Build the rows lazily, then hand the whole stream to the writer
    if annotations is None:
        rows = ([fmt % step] for step in times)
    else:
        rows = ([fmt % step, label]
                for step, label in zip(times, annotations))

    with open(path, 'w') as output_file:
        csv.writer(output_file, delimiter=delimiter).writerows(rows)
def write_wav(path, y, sr, norm=False):
    """Output a time series as a .wav file

    Note: only mono or stereo, floating-point data is supported.
    For more advanced and flexible output options, refer to `soundfile`.

    Parameters
    ----------
    path : str
        path to save the output wav file

    y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
        audio time series (mono or stereo).
        Note that only floating-point values are supported.

    sr : int > 0 [scalar]
        sampling rate of `y`

    norm : boolean [scalar]
        enable amplitude normalization.
        For floating point `y`, scale the data to the range [-1, +1].

    Examples
    --------
    Trim a signal to 5 seconds and save it back

    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      duration=5.0)
    >>> librosa.output.write_wav('file_trim_5s.wav', y, sr)

    See Also
    --------
    soundfile.write
    """
    # Stereo buffers are allowed here, so disable the mono-only check
    util.valid_audio(y, mono=False)

    # Optionally peak-normalize floating-point data into [-1, +1]
    wav = y
    if norm and np.issubdtype(y.dtype, np.floating):
        wav = util.normalize(y, norm=np.inf, axis=None)

    # scipy expects samples along the first axis: (2, n) -> (n, 2)
    if wav.ndim > 1 and wav.shape[0] == 2:
        wav = wav.T

    scipy.io.wavfile.write(path, sr, wav)
def cmap(data, robust=True, cmap_seq='magma', cmap_bool='gray_r', cmap_div='coolwarm'):
    '''Get a default colormap from the given data.

    Boolean data gets a black-and-white map; data containing both
    positive and negative values gets a diverging map; everything
    else gets a sequential map.

    Parameters
    ----------
    data : np.ndarray
        Input data

    robust : bool
        If True, discard the top and bottom 2% of data when
        calculating range.

    cmap_seq : str
        The sequential colormap name

    cmap_bool : str
        The boolean colormap name

    cmap_div : str
        The diverging colormap name

    Returns
    -------
    cmap : matplotlib.colors.Colormap
        The colormap to use for `data`

    See Also
    --------
    matplotlib.pyplot.colormaps
    '''
    data = np.atleast_1d(data)

    if data.dtype == 'bool':
        return get_cmap(cmap_bool)

    # Range estimation ignores non-finite entries
    finite = data[np.isfinite(data)]
    lo_pct, hi_pct = (2, 98) if robust else (0, 100)

    lo = np.percentile(finite, lo_pct)
    hi = np.percentile(finite, hi_pct)

    # One-signed data -> sequential; mixed-sign data -> diverging
    if lo >= 0 or hi <= 0:
        return get_cmap(cmap_seq)

    return get_cmap(cmap_div)
def waveplot(y, sr=22050, max_points=5e4, x_axis='time', offset=0.0,
             max_sr=1000, ax=None, **kwargs):
    '''Plot the amplitude envelope of a waveform.

    If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.

    If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,
    so that the left and right channels are drawn above and below the axis,
    respectively.

    Long signals (`duration >= max_points`) are down-sampled to at
    most `max_sr` before plotting.

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or (2,n)]
        audio time series (mono or stereo)

    sr : number > 0 [scalar]
        sampling rate of `y`

    max_points : postive number or None
        Maximum number of time-points to plot: if `max_points` exceeds
        the duration of `y`, then `y` is downsampled.

        If `None`, no downsampling is performed.

    x_axis : str {'time', 'off', 'none'} or None
        If 'time', the x-axis is given time tick-marks.

    ax : matplotlib.axes.Axes or None
        Axes to plot on instead of the default `plt.gca()`.

    offset : float
        Horizontal offset (in seconds) to start the waveform plot

    max_sr : number > 0 [scalar]
        Maximum sampling rate for the visualization

    kwargs
        Additional keyword arguments to `matplotlib.pyplot.fill_between`

    Returns
    -------
    pc : matplotlib.collections.PolyCollection
        The PolyCollection created by `fill_between`.

    See also
    --------
    librosa.core.resample
    matplotlib.pyplot.fill_between
    '''
    # Stereo input is allowed, so disable the mono-only check
    util.valid_audio(y, mono=False)

    if not (isinstance(max_sr, int) and max_sr > 0):
        raise ParameterError('max_sr must be a non-negative integer')

    target_sr = sr
    hop_length = 1

    if max_points is not None:
        if max_points <= 0:
            raise ParameterError('max_points must be strictly positive')

        if max_points < y.shape[-1]:
            # Downsample so that at most max_points frames are drawn,
            # but never above max_sr
            target_sr = min(max_sr, (sr * y.shape[-1]) // max_points)

        hop_length = sr // target_sr

        # Reduce the signal to its per-frame amplitude envelope
        if y.ndim == 1:
            y = __envelope(y, hop_length)
        else:
            y = np.vstack([__envelope(_, hop_length) for _ in y])

    # Stereo: channel 0 above the axis, channel 1 (negated) below
    if y.ndim > 1:
        y_top = y[0]
        y_bottom = -y[1]
    else:
        y_top = y
        y_bottom = -y

    axes = __check_axes(ax)

    # Default to the next color in the axes' property cycle
    kwargs.setdefault('color', next(axes._get_lines.prop_cycler)['color'])

    # Convert frame indices to seconds, shifted by the requested offset
    locs = offset + core.frames_to_time(np.arange(len(y_top)),
                                        sr=sr,
                                        hop_length=hop_length)
    out = axes.fill_between(locs, y_bottom, y_top, **kwargs)

    axes.set_xlim([locs.min(), locs.max()])
    if x_axis == 'time':
        axes.xaxis.set_major_formatter(TimeFormatter(lag=False))
        axes.xaxis.set_label_text('Time')
    elif x_axis is None or x_axis in ['off', 'none']:
        axes.set_xticks([])
    else:
        raise ParameterError('Unknown x_axis value: {}'.format(x_axis))

    return out
def __set_current_image(ax, img):
    '''Register `img` as pyplot's current image, but only in pyplot mode.

    A non-None `ax` means the caller is using the object-oriented API,
    in which case the global pyplot state is left untouched.
    '''
    if ax is not None:
        return

    import matplotlib.pyplot as plt
    plt.sci(img)
def __mesh_coords(ax_type, coords, n, **kwargs):
    '''Compute mesh coordinates for `n` data points of a given axis type.

    Explicit `coords` take precedence; otherwise coordinates are generated
    by the handler registered for `ax_type`.
    '''
    # User-supplied coordinates win, provided there are enough of them.
    if coords is not None:
        if len(coords) < n:
            raise ParameterError('Coordinate shape mismatch: '
                                 '{}<{}'.format(len(coords), n))
        return coords

    # Dispatch table: groups of axis-type aliases -> coordinate generator
    coord_map = {}
    for aliases, generator in [(('linear', 'hz', 'log'), __coord_fft_hz),
                               (('mel',), __coord_mel_hz),
                               (('cqt', 'cqt_hz', 'cqt_note'), __coord_cqt_hz),
                               (('chroma',), __coord_chroma),
                               (('time', 's', 'ms',
                                 'lag', 'lag_s', 'lag_ms'), __coord_time),
                               (('tonnetz', 'off', 'frames', None), __coord_n),
                               (('tempo',), __coord_tempo)]:
        for alias in aliases:
            coord_map[alias] = generator

    if ax_type not in coord_map:
        raise ParameterError('Unknown axis type: {}'.format(ax_type))

    return coord_map[ax_type](n, **kwargs)
def __check_axes(axes):
    '''Return `axes` if it is a matplotlib Axes; fall back to `plt.gca()` when None.'''
    if axes is None:
        # Lazy import keeps matplotlib optional for non-display use
        import matplotlib.pyplot as plt
        return plt.gca()

    if not isinstance(axes, Axes):
        raise ValueError("`axes` must be an instance of matplotlib.axes.Axes. "
                         "Found type(axes)={}".format(type(axes)))
    return axes
def __scale_axes(axes, ax_type, which):
    '''Set the x- or y-axis scaling of `axes` to match an axis type.

    Parameters
    ----------
    axes : matplotlib.axes.Axes
        The axes object, modified in place.
    ax_type : str
        The axis type (e.g. 'mel', 'log', 'cqt', 'tempo').  Types not
        handled below are left on the default (linear) scale.
    which : str
        'x' to rescale the x-axis; anything else rescales the y-axis.
    '''
    kwargs = dict()
    if which == 'x':
        # NOTE(review): these kwarg spellings (linthreshx/basex/linscalex)
        # are the pre-matplotlib-3.3 names -- confirm against the supported
        # matplotlib version.
        thresh = 'linthreshx'
        base = 'basex'
        scale = 'linscalex'
        scaler = axes.set_xscale
        limit = axes.set_xlim
    else:
        thresh = 'linthreshy'
        base = 'basey'
        scale = 'linscaley'
        scaler = axes.set_yscale
        limit = axes.set_ylim

    # Map ticker scales
    if ax_type == 'mel':
        mode = 'symlog'
        # Linear below 1 kHz, logarithmic above
        kwargs[thresh] = 1000.0
        kwargs[base] = 2
    elif ax_type == 'log':
        mode = 'symlog'
        kwargs[base] = 2
        # Linear below C2, logarithmic above
        kwargs[thresh] = core.note_to_hz('C2')
        kwargs[scale] = 0.5
    elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
        mode = 'log'
        kwargs[base] = 2
    elif ax_type == 'tempo':
        mode = 'log'
        kwargs[base] = 2
        # Restrict the view to a plausible tempo range (in BPM)
        limit(16, 480)
    else:
        # Unrecognized axis types keep the default linear scale
        return

    scaler(mode, **kwargs)
def __coord_fft_hz(n, sr=22050, **_kwargs):
    '''Frequency coordinates (Hz) for `n` FFT bins.

    Bin edges are shifted down by half a bin width so that mesh cells are
    centered on their frequencies, then clipped to [0, nyquist].
    '''
    n_fft = 2 * (n - 1)
    freqs = core.fft_frequencies(sr=sr, n_fft=n_fft)
    nyquist = freqs[-1]

    # Center each cell on its bin frequency
    half_bin = 0.5 * (freqs[1] - freqs[0])
    freqs = freqs - half_bin

    # Clip below at 0 and close the mesh at the nyquist frequency
    return np.append(np.maximum(0, freqs), [nyquist])
def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
    '''Frequency coordinates (Hz) for `n` mel-spaced bins.'''
    # None means "use the default bounds"
    fmin = 0 if fmin is None else fmin
    fmax = 11025.0 if fmax is None else fmax

    freqs = core.mel_frequencies(n, fmin=fmin, fmax=fmax)

    # Shift down by half the local spacing so cells are centered vertically
    freqs[1:] -= 0.5 * np.diff(freqs)

    # Clip below at 0 and close the mesh at fmax
    return np.append(np.maximum(0, freqs), [fmax])
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, **_kwargs):
    '''Frequency coordinates (Hz) for `n` CQT bins.'''
    if fmin is None:
        fmin = core.note_to_hz('C1')

    # Drop by half a bin so CQT cells are centered vertically
    shifted_fmin = fmin / 2.0**(0.5 / bins_per_octave)

    return core.cqt_frequencies(n + 1,
                                fmin=shifted_fmin,
                                bins_per_octave=bins_per_octave)
def __coord_chroma(n, bins_per_octave=12, **_kwargs):
    '''Pitch-class coordinates for `n` chroma bins.'''
    # Scale so that one octave always spans 12 units, regardless of resolution
    return np.linspace(0, (12.0 * n) / bins_per_octave, num=n + 1, endpoint=True)
def __coord_time(n, sr=22050, hop_length=512, **_kwargs):
    '''Time coordinates (seconds) for `n` frames (n+1 boundaries).'''
    frame_indices = np.arange(n + 1)
    return core.frames_to_time(frame_indices, sr=sr, hop_length=hop_length)
def estimate_tuning(y=None, sr=22050, S=None, n_fft=2048,
                    resolution=0.01, bins_per_octave=12, **kwargs):
    '''Estimate the tuning deviation of an audio time series or spectrogram.

    Pitches are detected with `piptrack`; only sufficiently strong pitch
    estimates (at or above the median magnitude of voiced frames) are used
    for the tuning estimate.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio signal

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None
        magnitude or power spectrogram

    n_fft : int > 0 [scalar] or None
        number of FFT bins to use, if `y` is provided.

    resolution : float in `(0, 1)`
        Resolution of the tuning as a fraction of a bin.
        0.01 corresponds to measurements in cents.

    bins_per_octave : int > 0 [scalar]
        How many frequency bins per octave

    kwargs : additional keyword arguments
        Additional arguments passed to `piptrack`

    Returns
    -------
    tuning : float in `[-0.5, 0.5)`
        estimated tuning deviation (fractions of a bin)

    See Also
    --------
    piptrack
        Pitch tracking by parabolic interpolation
    '''
    pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)

    # Frames/bins with a detected (positive) pitch
    has_pitch = pitch > 0

    # Magnitude threshold: the median over voiced bins, or 0 if none exist
    threshold = np.median(mag[has_pitch]) if has_pitch.any() else 0.0

    strong_pitches = pitch[(mag >= threshold) & has_pitch]

    return pitch_tuning(strong_pitches,
                        resolution=resolution,
                        bins_per_octave=bins_per_octave)
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=None,
             fmin=150.0, fmax=4000.0, threshold=0.1,
             win_length=None, window='hann', center=True, pad_mode='reflect',
             ref=None):
    '''Pitch tracking on thresholded parabolically-interpolated STFT.

    This implementation uses the parabolic interpolation method described by [1]_.

    .. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html

    Parameters
    ----------
    y: np.ndarray [shape=(n,)] or None
        audio signal

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S: np.ndarray [shape=(d, t)] or None
        magnitude or power spectrogram

    n_fft : int > 0 [scalar] or None
        number of FFT bins to use, if `y` is provided.

    hop_length : int > 0 [scalar] or None
        number of samples to hop

    threshold : float in `(0, 1)`
        A bin in spectrum `S` is considered a pitch when it is greater than
        `threshold*ref(S)`.

        By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
        each column).

    fmin : float > 0 [scalar]
        lower frequency cutoff.

    fmax : float > 0 [scalar]
        upper frequency cutoff.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.

        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the signal.
        By default, STFT uses reflection padding.

    ref : scalar or callable [default=np.max]
        If scalar, the reference value against which `S` is compared for determining
        pitches.

        If callable, the reference value is computed as `ref(S, axis=0)`.

    .. note::
        One of `S` or `y` must be provided.

        If `S` is not given, it is computed from `y` using
        the default parameters of `librosa.core.stft`.

    Returns
    -------
    pitches : np.ndarray [shape=(d, t)]
    magnitudes : np.ndarray [shape=(d,t)]
        Where `d` is the subset of FFT bins within `fmin` and `fmax`.

        `pitches[f, t]` contains instantaneous frequency at bin
        `f`, time `t`

        `magnitudes[f, t]` contains the corresponding magnitudes.

        Both `pitches` and `magnitudes` take value 0 at bins
        of non-maximal magnitude.

    Notes
    -----
    This function caches at level 30.
    '''
    # Check that we received an audio time series or STFT
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    # Make sure we're dealing with magnitudes
    S = np.abs(S)

    # Truncate to feasible region
    fmin = np.maximum(fmin, 0)
    fmax = np.minimum(fmax, float(sr) / 2)

    fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)

    # Do the parabolic interpolation everywhere,
    # then figure out where the peaks are
    # then restrict to the feasible range (fmin:fmax)
    # avg is the first-order central difference along frequency
    avg = 0.5 * (S[2:] - S[:-2])

    # shift is the negated second-order difference: positive at local maxima
    shift = 2 * S[1:-1] - S[2:] - S[:-2]

    # Suppress divide-by-zeros.
    # Points where shift == 0 will never be selected by localmax anyway
    # After this division, shift holds the fractional bin offset of the
    # interpolated peak relative to the integer bin index
    shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))

    # Pad back up to the same shape as S
    avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
    shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')

    # dskew is the parabolic correction to the peak magnitude
    dskew = 0.5 * avg * shift

    # Pre-allocate output
    pitches = np.zeros_like(S)
    mags = np.zeros_like(S)

    # Clip to the viable frequency range
    freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))

    # Compute the column-wise local max of S after thresholding
    # Find the argmax coordinates
    if ref is None:
        ref = np.max

    if six.callable(ref):
        ref_value = threshold * ref(S, axis=0)
    else:
        ref_value = np.abs(ref)

    idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))

    # Store pitch and magnitude
    # Interpolated frequency = (integer bin + fractional shift) * bin width
    pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
                                     * float(sr) / n_fft)

    mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
                                  + dskew[idx[:, 0], idx[:, 1]])

    return pitches, mags
def hpss(y, **kwargs):
    '''Decompose an audio time series into harmonic and percussive components.

    Runs the STFT -> HPSS -> ISTFT pipeline, trimming both outputs to the
    length of the input waveform.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    kwargs : additional keyword arguments.
        See `librosa.decompose.hpss` for details.

    Returns
    -------
    y_harmonic : np.ndarray [shape=(n,)]
        audio time series of the harmonic elements
    y_percussive : np.ndarray [shape=(n,)]
        audio time series of the percussive elements

    See Also
    --------
    harmonic : Extract only the harmonic component
    percussive : Extract only the percussive component
    librosa.decompose.hpss : HPSS on spectrograms
    '''
    spec = core.stft(y)

    # Split the spectrogram into harmonic and percussive parts
    spec_harm, spec_perc = decompose.hpss(spec, **kwargs)

    # Back to the time domain, trimmed/padded to the input length
    target_len = len(y)
    y_harm = util.fix_length(core.istft(spec_harm, dtype=y.dtype), target_len)
    y_perc = util.fix_length(core.istft(spec_perc, dtype=y.dtype), target_len)

    return y_harm, y_perc
def harmonic(y, **kwargs):
    '''Extract harmonic elements from an audio time-series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    kwargs : additional keyword arguments.
        See `librosa.decompose.hpss` for details.

    Returns
    -------
    y_harmonic : np.ndarray [shape=(n,)]
        audio time series of just the harmonic portion

    See Also
    --------
    hpss : Separate harmonic and percussive components
    percussive : Extract only the percussive component
    librosa.decompose.hpss : HPSS for spectrograms
    '''
    spec = core.stft(y)

    # Keep only the harmonic part of the decomposition
    spec_harm = decompose.hpss(spec, **kwargs)[0]

    # Back to the time domain, trimmed/padded to the input length
    return util.fix_length(core.istft(spec_harm, dtype=y.dtype), len(y))
def percussive(y, **kwargs):
    '''Extract percussive elements from an audio time-series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    kwargs : additional keyword arguments.
        See `librosa.decompose.hpss` for details.

    Returns
    -------
    y_percussive : np.ndarray [shape=(n,)]
        audio time series of just the percussive portion

    See Also
    --------
    hpss : Separate harmonic and percussive components
    harmonic : Extract only the harmonic component
    librosa.decompose.hpss : HPSS for spectrograms
    '''
    spec = core.stft(y)

    # Keep only the percussive part of the decomposition
    spec_perc = decompose.hpss(spec, **kwargs)[1]

    # Back to the time domain, trimmed/padded to the input length
    return util.fix_length(core.istft(spec_perc, dtype=y.dtype), len(y))
def time_stretch(y, rate):
    '''Time-stretch an audio series by a fixed rate.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    rate : float > 0 [scalar]
        Stretch factor.  If `rate > 1`, the signal is sped up;
        if `rate < 1`, it is slowed down.

    Returns
    -------
    y_stretch : np.ndarray [shape=(rate * n,)]
        audio time series stretched by the specified rate

    Raises
    ------
    ParameterError
        if `rate` is not strictly positive

    See Also
    --------
    pitch_shift : pitch shifting
    librosa.core.phase_vocoder : spectrogram phase vocoder
    '''
    if rate <= 0:
        raise ParameterError('rate must be a positive number')

    # STFT -> phase-vocoder stretch -> inverse STFT
    spec = core.stft(y)
    spec_stretched = core.phase_vocoder(spec, rate)
    return core.istft(spec_stretched, dtype=y.dtype)
def pitch_shift(y, sr, n_steps, bins_per_octave=12, res_type='kaiser_best'):
    '''Pitch-shift the waveform by `n_steps` half-steps.

    Implemented as a time-stretch followed by resampling back to the
    original duration.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time-series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    n_steps : float [scalar]
        how many (fractional) half-steps to shift `y`
    bins_per_octave : float > 0 [scalar]
        how many steps per octave
    res_type : string
        Resample type ('kaiser_best', 'kaiser_fast', 'scipy', 'polyphase',
        'fft').  See `core.resample` for more information.

    Returns
    -------
    y_shift : np.ndarray [shape=(n,)]
        The pitch-shifted audio time-series

    See Also
    --------
    time_stretch : time stretching
    librosa.core.phase_vocoder : spectrogram phase vocoder
    '''
    if bins_per_octave < 1 or not np.issubdtype(type(bins_per_octave), np.integer):
        raise ParameterError('bins_per_octave must be a positive integer.')

    # A shift of n_steps corresponds to a stretch by 2^(-n_steps/bins_per_octave)
    rate = 2.0 ** (-float(n_steps) / bins_per_octave)

    # Stretch in time, then resample back to the original sampling rate
    stretched = time_stretch(y, rate)
    shifted = core.resample(stretched, float(sr) / rate, sr,
                            res_type=res_type)

    # Crop to the same dimension as the input
    return util.fix_length(shifted, len(y))
def remix(y, intervals, align_zeros=True):
    '''Remix an audio signal by re-ordering time intervals.

    Parameters
    ----------
    y : np.ndarray [shape=(t,) or (2, t)]
        Audio time series
    intervals : iterable of tuples (start, end)
        An iterable (list-like or generator) where the `i`th item
        `intervals[i]` indicates the start and end (in samples)
        of a slice of `y`.
    align_zeros : boolean
        If `True`, interval boundaries are mapped to the closest
        zero-crossing in `y`.  If `y` is stereo, zero-crossings
        are computed after converting to mono.

    Returns
    -------
    y_remix : np.ndarray [shape=(d,) or (2, d)]
        `y` remixed in the order specified by `intervals`
    '''
    # Validate the audio buffer
    util.valid_audio(y, mono=False)

    y_out = []

    if align_zeros:
        # Zero-crossings are located on a mono mixdown of the signal
        y_mono = core.to_mono(y)
        zeros = np.nonzero(core.zero_crossings(y_mono))[-1]
        # Force end-of-signal onto zeros
        zeros = np.append(zeros, [len(y_mono)])

    # Slice template: all channels, with the last (time) axis replaced
    # per interval inside the loop below
    clip = [slice(None)] * y.ndim

    for interval in intervals:
        if align_zeros:
            # Snap interval boundaries to their nearest zero-crossings
            interval = zeros[util.match_events(interval, zeros)]

        clip[-1] = slice(interval[0], interval[1])

        y_out.append(y[tuple(clip)])

    # Concatenate the reordered slices along the time axis
    return np.concatenate(y_out, axis=-1)
def _signal_to_frame_nonsilent(y, frame_length=2048, hop_length=512, top_db=60,
                               ref=np.max):
    '''Frame-wise non-silent indicator for audio input.

    This is a helper function for `trim` and `split`: a frame is
    non-silent when its power is within `top_db` decibels of `ref`.

    Parameters
    ----------
    y : np.ndarray, shape=(n,) or (2,n)
        Audio signal, mono or stereo
    frame_length : int > 0
        The number of samples per frame
    hop_length : int > 0
        The number of samples between frames
    top_db : number > 0
        The threshold (in decibels) below reference to consider as
        silence
    ref : callable or float
        The reference power

    Returns
    -------
    non_silent : np.ndarray, shape=(m,), dtype=bool
        Indicator of non-silent frames
    '''
    # Energy is measured on a mono mixdown
    y_mono = core.to_mono(y)

    # Frame-wise mean-squared energy
    mse = feature.rms(y=y_mono,
                      frame_length=frame_length,
                      hop_length=hop_length)**2

    db = core.power_to_db(mse.squeeze(), ref=ref, top_db=None)
    return db > -top_db
def trim(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
    '''Trim leading and trailing silence from an audio signal.

    Parameters
    ----------
    y : np.ndarray, shape=(n,) or (2,n)
        Audio signal, can be mono or stereo
    top_db : number > 0
        The threshold (in decibels) below reference to consider as
        silence
    ref : number or callable
        The reference power.  By default, `np.max` compares against the
        peak power in the signal.
    frame_length : int > 0
        The number of samples per analysis frame
    hop_length : int > 0
        The number of samples between analysis frames

    Returns
    -------
    y_trimmed : np.ndarray, shape=(m,) or (2, m)
        The trimmed signal
    index : np.ndarray, shape=(2,)
        the interval of `y` corresponding to the non-silent region:
        `y_trimmed = y[index[0]:index[1]]` (for mono) or
        `y_trimmed = y[:, index[0]:index[1]]` (for stereo).
    '''
    non_silent = _signal_to_frame_nonsilent(y,
                                            frame_length=frame_length,
                                            hop_length=hop_length,
                                            ref=ref,
                                            top_db=top_db)

    frames = np.flatnonzero(non_silent)

    if frames.size:
        # Start at the first non-silent frame; end one frame past the last
        start = int(core.frames_to_samples(frames[0], hop_length))
        end = min(y.shape[-1],
                  int(core.frames_to_samples(frames[-1] + 1, hop_length)))
    else:
        # The signal is entirely silent
        start = end = 0

    # Slice the time axis, preserving any channel dimension
    index = [slice(None)] * y.ndim
    index[-1] = slice(start, end)

    return y[tuple(index)], np.asarray([start, end])
def split(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
    '''Split an audio signal into non-silent intervals.

    Parameters
    ----------
    y : np.ndarray, shape=(n,) or (2, n)
        An audio signal
    top_db : number > 0
        The threshold (in decibels) below reference to consider as
        silence
    ref : number or callable
        The reference power.  By default, it uses `np.max` and compares
        to the peak power in the signal.
    frame_length : int > 0
        The number of samples per analysis frame
    hop_length : int > 0
        The number of samples between analysis frames

    Returns
    -------
    intervals : np.ndarray, shape=(m, 2)
        `intervals[i] == (start_i, end_i)` are the start and end time
        (in samples) of non-silent interval `i`.
    '''
    non_silent = _signal_to_frame_nonsilent(y,
                                            frame_length=frame_length,
                                            hop_length=hop_length,
                                            ref=ref,
                                            top_db=top_db)

    # Interval slicing, adapted from
    # https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python
    # Find points where the sign flips
    edges = np.flatnonzero(np.diff(non_silent.astype(int)))

    # Pad back the sample lost in the diff
    edges = [edges + 1]

    # If the first frame had high energy, count it
    # (ensures the edge list alternates start, end, start, end, ...)
    if non_silent[0]:
        edges.insert(0, [0])

    # Likewise for the last frame
    if non_silent[-1]:
        edges.append([len(non_silent)])

    # Convert from frames to samples
    edges = core.frames_to_samples(np.concatenate(edges),
                                   hop_length=hop_length)

    # Clip to the signal duration
    edges = np.minimum(edges, y.shape[-1])

    # Stack the results back as an ndarray of (start, end) rows
    return edges.reshape((-1, 2))
def phase_vocoder(D, rate, hop_length=None):
    """Phase vocoder.  Given an STFT matrix D, speed up by a factor of `rate`.

    Based on the implementation provided by [1]_.

    .. [1] Ellis, D. P. W. "A phase vocoder in Matlab."
        Columbia University, 2002.
        http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/

    Parameters
    ----------
    D : np.ndarray [shape=(d, t), dtype=complex]
        STFT matrix

    rate : float > 0 [scalar]
        Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.

    hop_length : int > 0 [scalar] or None
        The number of samples between successive columns of `D`.
        If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`

    Returns
    -------
    D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]
        time-stretched STFT
    """
    n_fft = 2 * (D.shape[0] - 1)

    if hop_length is None:
        hop_length = int(n_fft // 4)

    # Fractional frame positions to sample from D.
    # NOTE: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` (== np.float64) is the correct replacement.
    time_steps = np.arange(0, D.shape[1], rate, dtype=float)

    # Create an empty output array
    d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')

    # Expected phase advance in each bin per hop
    phi_advance = np.linspace(0, np.pi * hop_length, D.shape[0])

    # Phase accumulator; initialize to the phase of the first frame
    phase_acc = np.angle(D[:, 0])

    # Pad 0 columns to simplify boundary logic
    D = np.pad(D, [(0, 0), (0, 2)], mode='constant')

    for (t, step) in enumerate(time_steps):

        columns = D[:, int(step):int(step + 2)]

        # Weighting for linear magnitude interpolation between frames
        alpha = np.mod(step, 1.0)
        mag = ((1.0 - alpha) * np.abs(columns[:, 0])
               + alpha * np.abs(columns[:, 1]))

        # Store to output array: interpolated magnitude, accumulated phase
        d_stretch[:, t] = mag * np.exp(1.j * phase_acc)

        # Compute phase advance beyond the expected per-hop advance
        dphase = (np.angle(columns[:, 1])
                  - np.angle(columns[:, 0])
                  - phi_advance)

        # Wrap to -pi:pi range
        dphase = dphase - 2.0 * np.pi * np.round(dphase / (2.0 * np.pi))

        # Accumulate phase
        phase_acc += phi_advance + dphase

    return d_stretch
def amplitude_to_db(S, ref=1.0, amin=1e-5, top_db=80.0):
    '''Convert an amplitude spectrogram to dB-scaled spectrogram.

    This is equivalent to ``power_to_db(S**2)``, but is provided for
    convenience.

    Parameters
    ----------
    S : np.ndarray
        input amplitude
    ref : scalar or callable
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `20 * log10(S / ref)`.  Zeros in the output correspond to
        positions where `S == ref`.
        If callable, the reference value is computed as `ref(S)`.
    amin : float > 0 [scalar]
        minimum threshold for `S` and `ref`
    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(20 * log10(S)) - top_db``

    Returns
    -------
    S_db : np.ndarray
        ``S`` measured in dB

    See Also
    --------
    power_to_db, db_to_amplitude

    Notes
    -----
    This function caches at level 30.
    '''
    S = np.asarray(S)

    if np.issubdtype(S.dtype, np.complexfloating):
        warnings.warn('amplitude_to_db was called on complex input so phase '
                      'information will be discarded. To suppress this warning, '
                      'call amplitude_to_db(np.abs(S)) instead.')

    magnitude = np.abs(S)

    # Resolve the reference level: callables are applied to the magnitudes
    ref_value = ref(magnitude) if six.callable(ref) else np.abs(ref)

    # Work in the power domain and reuse power_to_db with squared parameters
    power = np.square(magnitude, out=magnitude)

    return power_to_db(power, ref=ref_value**2, amin=amin**2,
                       top_db=top_db)
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
                 win_length=None, window='hann', center=True, pad_mode='reflect'):
    '''Helper function to retrieve a magnitude spectrogram.

    This is primarily used in feature extraction functions that can operate
    on either audio time-series or spectrogram input: if a spectrogram `S`
    is given it is passed through unchanged (and `n_fft` is inferred from
    its shape); otherwise a magnitude spectrogram is computed from `y`.

    Parameters
    ----------
    y : None or np.ndarray [ndim=1]
        If provided, an audio time series
    S : None or np.ndarray
        Spectrogram input, optional
    n_fft : int > 0
        STFT window size
    hop_length : int > 0
        STFT hop length
    power : float > 0
        Exponent for the magnitude spectrogram,
        e.g., 1 for energy, 2 for power, etc.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        A window specification; see `filters.get_window`.
    center : boolean
        If `True`, pad `y` so that frame `t` is centered at
        `y[t * hop_length]`; otherwise frame `t` begins there.
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    Returns
    -------
    S_out : np.ndarray [dtype=np.float32]
        - If `S` is provided as input, then `S_out == S`
        - Else, `S_out = |stft(y, ...)|**power`
    n_fft : int > 0
        - If `S` is provided, then `n_fft` is inferred from `S`
        - Else, copied from input
    '''
    if S is None:
        # No spectrogram given: compute one from the time series
        S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
                        win_length=win_length, center=center,
                        window=window, pad_mode=pad_mode))**power
    else:
        # Infer n_fft from the spectrogram's frequency dimension
        n_fft = 2 * (S.shape[0] - 1)

    return S, n_fft
def hpss_beats(input_file, output_csv):
    '''Beat tracking with harmonic-percussive source separation.

    :parameters:
      - input_file : str
          Path to input audio file (wav, mp3, m4a, flac, etc.)

      - output_csv : str
          Path to save beat event timestamps as a CSV file
    '''
    # Read the audio from disk
    print('Loading ', input_file)
    audio, sample_rate = librosa.load(input_file)

    # Keep only the percussive component: beat tracking is more
    # reliable without the sustained harmonic energy.
    print('Harmonic-percussive separation ... ')
    audio = librosa.effects.percussive(audio)

    # Median-aggregated onset strength of the percussive signal
    print('Tracking beats on percussive component')
    strength = librosa.onset.onset_strength(y=audio,
                                            sr=sample_rate,
                                            hop_length=HOP_LENGTH,
                                            n_fft=N_FFT,
                                            aggregate=np.median)

    # Estimated tempo is discarded; only the beat frames are needed.
    _, beat_frames = librosa.beat.beat_track(onset_envelope=strength,
                                             sr=sample_rate,
                                             hop_length=HOP_LENGTH)

    beat_times = librosa.frames_to_time(beat_frames,
                                        sr=sample_rate,
                                        hop_length=HOP_LENGTH)

    # Save the output
    print('Saving beats to ', output_csv)
    librosa.output.times_csv(output_csv, beat_times)
"resource": ""
} |
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
    """Decompose a feature matrix.

    Given a spectrogram `S`, produce a decomposition into `components`
    and `activations` such that `S ~= components.dot(activations)`.

    By default this uses non-negative matrix factorization (NMF), but
    any `sklearn.decomposition`-style object will work.

    Parameters
    ----------
    S : np.ndarray [shape=(n_features, n_samples), dtype=float]
        The input feature matrix (e.g., magnitude spectrogram)

    n_components : int > 0 [scalar] or None
        number of desired components;
        if None, then `n_features` components are used

    transformer : None or object
        If None, use `sklearn.decomposition.NMF`.

        Otherwise, any object following the scikit-learn convention
        (input data is `(n_samples, n_features)`):
        `transformer.fit_transform()` is run on `S.T`, its return value
        is stored (transposed) as `activations`, and the components are
        read from `transformer.components_.T`, so that

            `S ~= np.dot(transformer.components_.T, activations.T)`

    sort : bool
        If `True`, components are sorted by ascending peak frequency.

        .. note:: If used with `transformer`, sorting is applied to
            copies of the decomposition parameters, and not to
            `transformer`'s internal parameters.

    fit : bool
        If `True`, components are estimated from the input ``S``.
        If `False`, components are assumed to be pre-computed and stored
        in ``transformer``, and are not changed.

    kwargs : Additional keyword arguments to the default transformer
        `sklearn.decomposition.NMF`

    Returns
    -------
    components: np.ndarray [shape=(n_features, n_components)]
        matrix of components (basis elements).

    activations: np.ndarray [shape=(n_components, n_samples)]
        transformed matrix/activation matrix

    Raises
    ------
    ParameterError
        if `fit` is False and no `transformer` object is provided.

    See Also
    --------
    sklearn.decomposition : SciKit-Learn matrix decomposition modules
    """
    if transformer is None:
        # The default NMF model must be fitted before it can transform.
        if fit is False:
            raise ParameterError('fit must be True if transformer is None')

        transformer = sklearn.decomposition.NMF(n_components=n_components,
                                                **kwargs)

    if n_components is None:
        n_components = S.shape[0]

    # scikit-learn operates on (n_samples, n_features), hence the
    # transposes surrounding the transformer calls.
    if fit:
        acts = transformer.fit_transform(S.T).T
    else:
        acts = transformer.transform(S.T).T

    comps = transformer.components_.T

    if sort:
        # Reorder basis functions by ascending peak frequency, and
        # permute the activations to match.
        comps, order = util.axis_sort(comps, index=True)
        acts = acts[order]

    return comps, acts
"resource": ""
} |
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
    '''Filtering by nearest-neighbors.

    Each data point (e.g, spectrogram column) is replaced by aggregating
    its nearest neighbors in feature space.  This can be useful for
    de-noising a spectrogram or feature matrix.

    The non-local means method [1]_ can be recovered by providing a
    weighted recurrence matrix as input and specifying
    `aggregate=np.average`.  Similarly, setting `aggregate=np.median`
    produces sparse de-noising as in REPET-SIM [2]_.

    .. [1] Buades, A., Coll, B., & Morel, J. M.
        (2005, June). A non-local algorithm for image denoising.
        In Computer Vision and Pattern Recognition, 2005.
        CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.

    .. [2] Rafii, Z., & Pardo, B.
        (2012, October).  "Music/Voice Separation Using the Similarity Matrix."
        International Society for Music Information Retrieval Conference, 2012.

    Parameters
    ----------
    S : np.ndarray
        The input data (spectrogram) to filter

    rec : (optional) scipy.sparse.spmatrix or np.ndarray
        Optionally, a pre-computed nearest-neighbor matrix
        as provided by `librosa.segment.recurrence_matrix`

    aggregate : function
        aggregation function (default: `np.mean`)

        If `aggregate=np.average`, then a weighted average is
        computed according to the (per-row) weights in `rec`.
        All other aggregation functions treat neighbors equally.

    axis : int
        The axis along which to filter (by default, columns)

    kwargs
        Additional keyword arguments provided to
        `librosa.segment.recurrence_matrix` if `rec` is not provided

    Returns
    -------
    S_filtered : np.ndarray
        The filtered data

    Raises
    ------
    ParameterError
        if `rec` is provided and its shape is incompatible with `S`.

    See also
    --------
    decompose
    hpss
    librosa.segment.recurrence_matrix

    Notes
    -----
    This function caches at level 30.
    '''
    if aggregate is None:
        aggregate = np.mean

    if rec is None:
        # Build a sparse self-similarity matrix from S on the fly.
        rec_kwargs = dict(kwargs)
        rec_kwargs['sparse'] = True
        rec = segment.recurrence_matrix(S, axis=axis, **rec_kwargs)
    elif not scipy.sparse.issparse(rec):
        # CSR provides the data/indices/indptr arrays the helper needs.
        rec = scipy.sparse.csr_matrix(rec)

    n_obs = S.shape[axis]
    if rec.shape != (n_obs, n_obs):
        raise ParameterError('Invalid self-similarity matrix shape '
                             'rec.shape={} for S.shape={}'.format(rec.shape,
                                                                  S.shape))

    # The helper filters along the first axis, so rotate the target
    # axis to the front and back again afterwards.
    filtered = __nn_filter_helper(rec.data, rec.indices, rec.indptr,
                                  S.swapaxes(0, axis), aggregate)
    return filtered.swapaxes(0, axis)
"resource": ""
} |
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
    '''Nearest-neighbor filter helper function.

    This is an internal function, not for use outside of the decompose
    module.  It applies the nearest-neighbor filter to S, assuming that
    the first index corresponds to observations.

    Parameters
    ----------
    R_data, R_indices, R_ptr : np.ndarrays
        The `data`, `indices`, and `indptr` of a scipy.sparse
        (CSR-format) matrix

    S : np.ndarray
        The observation data to filter

    aggregate : callable
        The aggregation operator

    Returns
    -------
    S_out : np.ndarray like S
        The filtered data array
    '''
    output = np.empty_like(S)

    # np.average is the only aggregator that takes per-neighbor weights.
    weighted = aggregate is np.average

    for row, (start, stop) in enumerate(zip(R_ptr[:-1], R_ptr[1:])):
        # Column indices of this row's non-zero (neighbor) entries
        neighbor_idx = R_indices[start:stop]

        if len(neighbor_idx) == 0:
            # No neighbors: pass the observation through unchanged.
            output[row] = S[row]
            continue

        neighbors = np.take(S, neighbor_idx, axis=0)

        if weighted:
            # Weight each neighbor by its recurrence-matrix value.
            output[row] = aggregate(neighbors, axis=0,
                                    weights=R_data[start:stop])
        else:
            output[row] = aggregate(neighbors, axis=0)

    return output
"resource": ""
} |
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
        norm=1, dtype=np.float32):
    """Create a Filterbank matrix to combine FFT bins into Mel-frequency bins

    Parameters
    ----------
    sr : number > 0 [scalar]
        sampling rate of the incoming signal

    n_fft : int > 0 [scalar]
        number of FFT components

    n_mels : int > 0 [scalar]
        number of Mel bands to generate

    fmin : float >= 0 [scalar]
        lowest frequency (in Hz)

    fmax : float >= 0 [scalar]
        highest frequency (in Hz).
        If `None`, use `fmax = sr / 2.0`

    htk : bool [scalar]
        use HTK formula instead of Slaney

    norm : {None, 1, np.inf} [scalar]
        if 1, divide the triangular mel weights by the width of the mel band
        (area normalization).  Otherwise, leave all the triangles aiming for
        a peak value of 1.0

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
        Mel transform matrix

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    >>> melfb = librosa.filters.mel(22050, 2048)
    >>> melfb
    array([[ 0.   ,  0.016, ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           ...,
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ]])

    Clip the maximum frequency to 8KHz

    >>> librosa.filters.mel(22050, 2048, fmax=8000)
    array([[ 0.  ,  0.02, ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           ...,
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ]])

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(melfb, x_axis='linear')
    >>> plt.ylabel('Mel filter')
    >>> plt.title('Mel filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    if fmax is None:
        # Default to the Nyquist frequency
        fmax = float(sr) / 2

    if norm is not None and norm != 1 and norm != np.inf:
        raise ParameterError('Unsupported norm: {}'.format(repr(norm)))

    # Initialize the weights
    n_mels = int(n_mels)
    weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)

    # Center freqs of each FFT bin
    fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)

    # 'Center freqs' of mel bands - uniformly spaced between limits.
    # n_mels + 2 points: each triangular filter needs a left edge,
    # a peak, and a right edge.
    mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)

    # fdiff[i] is the spacing between adjacent band edges;
    # ramps[i, j] is the signed distance from edge i to FFT bin j (Hz).
    fdiff = np.diff(mel_f)
    ramps = np.subtract.outer(mel_f, fftfreqs)

    for i in range(n_mels):
        # lower and upper slopes for all bins
        lower = -ramps[i] / fdiff[i]
        upper = ramps[i+2] / fdiff[i+1]

        # .. then intersect them with each other and zero,
        # yielding the triangular response for band i
        weights[i] = np.maximum(0, np.minimum(lower, upper))

    if norm == 1:
        # Slaney-style mel is scaled to be approx constant energy per channel
        enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
        weights *= enorm[:, np.newaxis]

    # Only check weights if f_mel[0] is positive
    if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
        # This means we have an empty channel somewhere: a band whose
        # triangle fell entirely between two FFT bins.
        warnings.warn('Empty filters detected in mel frequency basis. '
                      'Some channels will produce empty responses. '
                      'Try increasing your sampling rate (and fmax) or '
                      'reducing n_mels.')

    return weights
"resource": ""
} |
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
           octwidth=2, norm=2, base_c=True, dtype=np.float32):
    """Create a Filterbank matrix to convert STFT to chroma

    Parameters
    ----------
    sr : number > 0 [scalar]
        audio sampling rate

    n_fft : int > 0 [scalar]
        number of FFT bins

    n_chroma : int > 0 [scalar]
        number of chroma bins

    A440 : float > 0 [scalar]
        Reference frequency for A440

    ctroct : float > 0 [scalar]

    octwidth : float > 0 or None [scalar]
        `ctroct` and `octwidth` specify a dominance window -
        a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
        and with a gaussian half-width of `octwidth`.

        Set `octwidth` to `None` to use a flat weighting.

    norm : float > 0 or np.inf
        Normalization factor for each filter

    base_c : bool
        If True, the filter bank will start at 'C'.
        If False, the filter bank will start at 'A'.

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
        Chroma filter matrix

    See Also
    --------
    util.normalize
    feature.chroma_stft

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    Build a simple chroma filter bank

    >>> chromafb = librosa.filters.chroma(22050, 4096)
    array([[  1.689e-05,   3.024e-04, ...,   4.639e-17,   5.327e-17],
           [  1.716e-05,   2.652e-04, ...,   2.674e-25,   3.176e-25],
           ...,
           [  1.578e-05,   3.619e-04, ...,   8.577e-06,   9.205e-06],
           [  1.643e-05,   3.355e-04, ...,   1.474e-10,   1.636e-10]])

    Use quarter-tones instead of semitones

    >>> librosa.filters.chroma(22050, 4096, n_chroma=24)
    array([[  1.194e-05,   2.138e-04, ...,   6.297e-64,   1.115e-63],
           [  1.206e-05,   2.009e-04, ...,   1.546e-79,   2.929e-79],
           ...,
           [  1.162e-05,   2.372e-04, ...,   6.417e-38,   9.923e-38],
           [  1.180e-05,   2.260e-04, ...,   4.697e-50,   7.772e-50]])

    Equally weight all octaves

    >>> librosa.filters.chroma(22050, 4096, octwidth=None)
    array([[  3.036e-01,   2.604e-01, ...,   2.445e-16,   2.809e-16],
           [  3.084e-01,   2.283e-01, ...,   1.409e-24,   1.675e-24],
           ...,
           [  2.836e-01,   3.116e-01, ...,   4.520e-05,   4.854e-05],
           [  2.953e-01,   2.888e-01, ...,   7.768e-10,   8.629e-10]])

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(chromafb, x_axis='linear')
    >>> plt.ylabel('Chroma filter')
    >>> plt.title('Chroma filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    # NOTE: this allocation is immediately overwritten by the np.exp()
    # assignment below; kept as-is to avoid changing code.
    wts = np.zeros((n_chroma, n_fft))

    # Get the FFT bins, not counting the DC component
    frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]

    # Fractional chroma-bin position of every FFT bin
    frqbins = n_chroma * hz_to_octs(frequencies, A440)

    # make up a value for the 0 Hz bin = 1.5 octaves below bin 1
    # (so chroma is 50% rotated from bin 1, and bin width is broad)
    frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))

    # Width of each bin in chroma units, floored at 1; the trailing [1]
    # gives the final bin a width as well.
    binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
                                              1.0), [1]))

    # D[c, j]: distance (in chroma bins) from chroma c to FFT bin j
    D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T

    n_chroma2 = np.round(float(n_chroma) / 2)

    # Project into range -n_chroma/2 .. n_chroma/2
    # add on fixed offset of 10*n_chroma to ensure all values passed to
    # rem are positive
    D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2

    # Gaussian bumps - 2*D to make them narrower
    wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)

    # normalize each column
    wts = util.normalize(wts, norm=norm, axis=0)

    # Maybe apply scaling for fft bins (the ctroct/octwidth dominance
    # window described in the docstring)
    if octwidth is not None:
        wts *= np.tile(
            np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
            (n_chroma, 1))

    if base_c:
        # Rotate so the first row is C rather than A.
        # NOTE(review): the fixed -3 shift looks correct only for 12
        # semitone bins per octave; confirm behavior for other n_chroma.
        wts = np.roll(wts, -3, axis=0)

    # remove aliasing columns, copy to ensure row-contiguity
    return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)], dtype=dtype)
"resource": ""
} |
def __float_window(window_spec):
    '''Decorator that makes a window function tolerate fractional lengths.

    For fractional `x`, the generated window has length `np.ceil(x)`,
    with every sample from index `np.floor(x)` onward forced to zero.
    Integer-valued `x` behaves exactly like the undecorated window.
    '''
    def _wrap(n, *args, **kwargs):
        '''The wrapped window'''
        floor_n = int(np.floor(n))
        ceil_n = int(np.ceil(n))

        window = get_window(window_spec, floor_n)

        # Fractional input: extend with zeros up to the ceiling length.
        if len(window) < ceil_n:
            window = np.pad(window, [(0, ceil_n - len(window))],
                            mode='constant')

        window[floor_n:] = 0.0

        return window

    return _wrap
"resource": ""
} |
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
               window='hann', filter_scale=1, pad_fft=True, norm=1,
               dtype=np.complex64, **kwargs):
    r'''Construct a constant-Q basis.

    This uses the filter bank described by [1]_.

    .. [1] McVicar, Matthew.
            "A machine learning approach to automatic chord extraction."
            Dissertation, University of Bristol. 2013.

    Parameters
    ----------
    sr : number > 0 [scalar]
        Audio sampling rate

    fmin : float > 0 [scalar]
        Minimum frequency bin. Defaults to `C1 ~= 32.70`

    n_bins : int > 0 [scalar]
        Number of frequencies.  Defaults to 7 octaves (84 bins).

    bins_per_octave : int > 0 [scalar]
        Number of bins per octave

    tuning : float in `[-0.5, +0.5)` [scalar]
        Tuning deviation from A440 in fractions of a bin

    window : string, tuple, number, or function
        Windowing function to apply to filters.

    filter_scale : float > 0 [scalar]
        Scale of filter windows.
        Small values (<1) use shorter windows for higher temporal resolution.

    pad_fft : boolean
        Center-pad all filters up to the nearest integral power of 2.

        By default, padding is done with zeros, but this can be overridden
        by setting the `mode=` field in *kwargs*.

    norm : {inf, -inf, 0, float > 0}
        Type of norm to use for basis function normalization.
        See librosa.util.normalize

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 64-bit (single precision) complex floating point.

    kwargs : additional keyword arguments
        Arguments to `np.pad()` when `pad==True`.

    Returns
    -------
    filters : np.ndarray, `len(filters) == n_bins`
        `filters[i]` is `i`\ th time-domain CQT basis filter

    lengths : np.ndarray, `len(lengths) == n_bins`
        The (fractional) length of each filter

    Notes
    -----
    This function caches at level 10.

    See Also
    --------
    constant_q_lengths
    librosa.core.cqt
    librosa.util.normalize
    '''
    if fmin is None:
        fmin = note_to_hz('C1')

    # Pass-through parameters to get the filter lengths
    lengths = constant_q_lengths(sr, fmin,
                                 n_bins=n_bins,
                                 bins_per_octave=bins_per_octave,
                                 tuning=tuning,
                                 window=window,
                                 filter_scale=filter_scale)

    # Apply tuning correction
    correction = 2.0**(float(tuning) / bins_per_octave)
    fmin = correction * fmin

    # Q should be capitalized here, so we suppress the name warning
    # pylint: disable=invalid-name
    Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)

    # Convert lengths back to frequencies
    freqs = Q * sr / lengths

    # Build the filters
    filters = []
    for ilen, freq in zip(lengths, freqs):
        # Build the filter: note, length will be ceil(ilen).
        # The complex exponential oscillates at the bin's center
        # frequency; floor-division on the negative bound gives
        # ceil(ilen) samples for fractional ilen.
        sig = np.exp(np.arange(-ilen//2, ilen//2, dtype=float) * 1j * 2 * np.pi * freq / sr)

        # Apply the windowing function (tolerant of fractional lengths)
        sig = sig * __float_window(window)(len(sig))

        # Normalize
        sig = util.normalize(sig, norm=norm)

        filters.append(sig)

    # Pad and stack all filters to a common length
    max_len = max(lengths)
    if pad_fft:
        # Round up to a power of two for efficient FFTs downstream
        max_len = int(2.0**(np.ceil(np.log2(max_len))))
    else:
        max_len = int(np.ceil(max_len))

    filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
                          for filt in filters], dtype=dtype)

    return filters, np.asarray(lengths)
"resource": ""
} |
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
                       tuning=0.0, window='hann', filter_scale=1):
    r'''Return length of each filter in a constant-Q basis.

    Parameters
    ----------
    sr : number > 0 [scalar]
        Audio sampling rate

    fmin : float > 0 [scalar]
        Minimum frequency bin.

    n_bins : int > 0 [scalar]
        Number of frequencies.  Defaults to 7 octaves (84 bins).

    bins_per_octave : int > 0 [scalar]
        Number of bins per octave

    tuning : float in `[-0.5, +0.5)` [scalar]
        Tuning deviation from A440 in fractions of a bin

    window : str or callable
        Window function to use on filters

    filter_scale : float > 0 [scalar]
        Resolution of filter windows. Larger values use longer windows.

    Returns
    -------
    lengths : np.ndarray
        The length of each filter.

    Notes
    -----
    This function caches at level 10.

    See Also
    --------
    constant_q
    librosa.core.cqt
    '''
    # Validate all arguments up front.
    if fmin <= 0:
        raise ParameterError('fmin must be positive')

    if bins_per_octave <= 0:
        raise ParameterError('bins_per_octave must be positive')

    if filter_scale <= 0:
        raise ParameterError('filter_scale must be positive')

    if n_bins <= 0 or not isinstance(n_bins, int):
        raise ParameterError('n_bins must be a positive integer')

    # Shift fmin by the tuning offset (in fractions of a bin).
    fmin = fmin * 2.0**(float(tuning) / bins_per_octave)

    # Q should be capitalized here, so we suppress the name warning
    # pylint: disable=invalid-name
    Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)

    # Geometric progression of center frequencies
    freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))

    # The top filter, including its bandwidth, must fit below Nyquist.
    if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
        raise ParameterError('Filter pass-band lies beyond Nyquist')

    # Fractional filter lengths, in samples
    return Q * sr / freq
"resource": ""
} |
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
                 fmin=None, window=None, base_c=True, dtype=np.float32):
    '''Convert a Constant-Q basis to Chroma.

    Parameters
    ----------
    n_input : int > 0 [scalar]
        Number of input components (CQT bins)

    bins_per_octave : int > 0 [scalar]
        How many bins per octave in the CQT

    n_chroma : int > 0 [scalar]
        Number of output bins (per octave) in the chroma

    fmin : None or float > 0
        Center frequency of the first constant-Q channel.
        Default: 'C1' ~= 32.7 Hz

    window : None or np.ndarray
        If provided, the cq_to_chroma filter bank will be
        convolved with `window`.

    base_c : bool
        If True, the first chroma bin will start at 'C'
        If False, the first chroma bin will start at 'A'

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
        Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`

    Raises
    ------
    ParameterError
        If `n_input` is not an integer multiple of `n_chroma`

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    Get a CQT, and wrap bins to chroma

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> CQT = np.abs(librosa.cqt(y, sr=sr))
    >>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
    >>> chromagram = chroma_map.dot(CQT)
    >>> # Max-normalize each time step
    >>> chromagram = librosa.util.normalize(chromagram, axis=0)
    '''
    # How many fractional bins are we merging?
    n_merge = float(bins_per_octave) / n_chroma

    if fmin is None:
        fmin = note_to_hz('C1')

    if np.mod(n_merge, 1) != 0:
        raise ParameterError('Incompatible CQ merge: '
                             'input bins must be an '
                             'integer multiple of output bins.')

    # Tile the identity to merge fractional bins.
    # n_merge is integral here (checked above); np.repeat requires an
    # integer repeat count on modern NumPy, so cast explicitly.
    cq_to_ch = np.repeat(np.eye(n_chroma), int(n_merge), axis=1)

    # Roll it left to center on the target bin
    cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)

    # How many octaves are we repeating?
    # (`np.float` was removed in NumPy 1.24; use the builtin instead)
    n_octaves = np.ceil(float(n_input) / bins_per_octave)

    # Repeat and trim to the requested number of input bins
    cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]

    # What's the note number of the first bin in the CQT?
    # midi uses 12 bins per octave here
    midi_0 = np.mod(hz_to_midi(fmin), 12)

    if base_c:
        # rotate to C
        roll = midi_0
    else:
        # rotate to A
        roll = midi_0 - 9

    # Adjust the roll in terms of how many chroma we want out
    # We need to be careful with rounding here
    roll = int(np.round(roll * (n_chroma / 12.)))

    # Apply the roll
    cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(dtype)

    if window is not None:
        # Optional smoothing across adjacent chroma bins
        cq_to_ch = scipy.signal.convolve(cq_to_ch,
                                         np.atleast_2d(window),
                                         mode='same')

    return cq_to_ch
"resource": ""
} |
def window_bandwidth(window, n=1000):
    '''Get the equivalent noise bandwidth of a window function.

    Parameters
    ----------
    window : callable or string
        A window function, or the name of a window function.

        Examples:
        - scipy.signal.hann
        - 'boxcar'

    n : int > 0
        The number of coefficients to use in estimating the
        window bandwidth

    Returns
    -------
    bandwidth : float
        The equivalent noise bandwidth (in FFT bins) of the
        given window function

    Notes
    -----
    This function caches at level 10.
    Results are memoized in the module-level `WINDOW_BANDWIDTHS` dict.

    See Also
    --------
    get_window
    '''
    # Callables are cached under their function name;
    # other specs (strings, etc.) are cached directly.
    key = getattr(window, '__name__', window)

    if key not in WINDOW_BANDWIDTHS:
        win = get_window(window, n)
        WINDOW_BANDWIDTHS[key] = n * np.sum(win**2) / np.sum(np.abs(win))**2

    return WINDOW_BANDWIDTHS[key]
"resource": ""
} |
def get_window(window, Nx, fftbins=True):
    '''Compute a window function.

    This is a wrapper for `scipy.signal.get_window` that additionally
    supports callable or pre-computed windows.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        The window specification:

        - If string, it's the name of the window function (e.g., `'hann'`)
        - If tuple, it's the name of the window function and any parameters
          (e.g., `('kaiser', 4.0)`)
        - If numeric, it is treated as the beta parameter of the `'kaiser'`
          window, as in `scipy.signal.get_window`.
        - If callable, it's a function that accepts one integer argument
          (the window length)
        - If list-like, it's a pre-computed window of the correct length `Nx`

    Nx : int > 0
        The length of the window

    fftbins : bool, optional
        If True (default), create a periodic window for use with FFT
        If False, create a symmetric window for filter design applications.

    Returns
    -------
    get_window : np.ndarray
        A window of length `Nx` and type `window`

    See Also
    --------
    scipy.signal.get_window

    Notes
    -----
    This function caches at level 10.

    Raises
    ------
    ParameterError
        If `window` is supplied as a vector of length != `n_fft`,
        or is otherwise mis-specified.
    '''
    if six.callable(window):
        # A callable builds its own window of the requested length.
        return window(Nx)

    if (isinstance(window, (six.string_types, tuple)) or
            np.isscalar(window)):
        # TODO: if we add custom window functions in librosa, call them here
        # Delegate string/tuple/scalar specs to scipy.
        return scipy.signal.get_window(window, Nx, fftbins=fftbins)

    if isinstance(window, (np.ndarray, list)):
        # Pre-computed window: only validate the length.
        if len(window) != Nx:
            raise ParameterError('Window size mismatch: '
                                 '{:d} != {:d}'.format(len(window), Nx))
        return np.asarray(window)

    raise ParameterError('Invalid window specification: {}'.format(window))
"resource": ""
} |
def _multirate_fb(center_freqs=None, sample_rates=None, Q=25.0,
                  passband_ripple=1, stopband_attenuation=50, ftype='ellip', flayout='ba'):
    r'''Helper function to construct a multirate filterbank.

    A filter bank consists of multiple band-pass filters which divide
    the input signal into subbands.  In a multirate filter bank, each
    band-pass filter operates on a resampled version of the input
    signal, e.g. so filter length stays constant while the center
    frequency shifts.  Filters are designed with
    `scipy.signal.iirdesign`.

    Parameters
    ----------
    center_freqs : np.ndarray [shape=(n,), dtype=float]
        Center frequencies of the filter kernels.
        Also defines the number of filters in the filterbank.

    sample_rates : np.ndarray [shape=(n,), dtype=float]
        Samplerate for each filter (used for multirate filterbank).

    Q : float
        Q factor (influences the filter bandwith).

    passband_ripple : float
        The maximum loss in the passband (dB)
        See `scipy.signal.iirdesign` for details.

    stopband_attenuation : float
        The minimum attenuation in the stopband (dB)
        See `scipy.signal.iirdesign` for details.

    ftype : str
        The type of IIR filter to design
        See `scipy.signal.iirdesign` for details.

    flayout : string
        Valid `output` argument for `scipy.signal.iirdesign`:
        `ba` (transfer-function coefficients, may be unstable for
        high orders), `sos` (second-order sections, numerically
        safer but slower), or `zpk` (zeros, poles, gain).

    Returns
    -------
    filterbank : list [shape=(n,), dtype=float]
        Each list entry comprises the filter coefficients for a single filter.

    sample_rates : np.ndarray [shape=(n,), dtype=float]
        Samplerate for each filter.

    Notes
    -----
    This function caches at level 10.

    See Also
    --------
    scipy.signal.iirdesign

    Raises
    ------
    ParameterError
        If `center_freqs` is `None`.
        If `sample_rates` is `None`.
        If `center_freqs.shape` does not match `sample_rates.shape`.
    '''
    if center_freqs is None:
        raise ParameterError('center_freqs must be provided.')

    if sample_rates is None:
        raise ParameterError('sample_rates must be provided.')

    if center_freqs.shape != sample_rates.shape:
        raise ParameterError('Number of provided center_freqs and sample_rates must be equal.')

    nyquists = 0.5 * sample_rates
    bandwidths = center_freqs / float(Q)

    filterbank = []
    for freq, nyq, bw in zip(center_freqs, nyquists, bandwidths):
        # Normalize the pass/stop band edges to this filter's Nyquist rate.
        wp = np.asarray([freq - 0.5 * bw, freq + 0.5 * bw]) / nyq
        ws = np.asarray([freq - bw, freq + bw]) / nyq

        filterbank.append(scipy.signal.iirdesign(wp, ws,
                                                 passband_ripple,
                                                 stopband_attenuation,
                                                 analog=False,
                                                 ftype=ftype,
                                                 output=flayout))

    return filterbank, sample_rates
"resource": ""
} |
def mr_frequencies(tuning):
    r'''Generate center frequency and sample rate pairs for a multirate
    pitch filterbank.

    The returned settings are similar to those described in [1]_,
    except that the lowest pitch is `C0` rather than MIDI pitch `A0`.

    .. [1] Müller, Meinard.
           "Information Retrieval for Music and Motion."
           Springer Verlag. 2007.

    Parameters
    ----------
    tuning : float in `[-0.5, +0.5)` [scalar]
        Tuning deviation from A440, measured as a fraction of the
        equally tempered semitone (1/12 of an octave).

    Returns
    -------
    center_freqs : np.ndarray [shape=(n,), dtype=float]
        Center frequency for each filter kernel; the length of this array
        also defines the number of filters in the filterbank.

    sample_rates : np.ndarray [shape=(n,), dtype=float]
        Sampling rate at which each filter operates (multirate filterbank).

    Notes
    -----
    This function caches at level 10.

    See Also
    --------
    librosa.filters.semitone_filterbank
    librosa.filters._multirate_fb
    '''

    # MIDI pitches 24 through 108, shifted by the tuning offset
    center_freqs = midi_to_hz(np.arange(24 + tuning, 109 + tuning))

    # Low pitches are processed at 882 Hz, mid at 4410 Hz, high at 22050 Hz
    sample_rates = np.asarray(36 * [882] + 34 * [4410] + 15 * [22050])

    return center_freqs, sample_rates
"resource": ""
} |
def __window_ss_fill(x, win_sq, n_frames, hop_length):  # pragma: no cover
    '''Accumulate the squared window into `x` at each frame offset (in-place).'''
    total = len(x)
    frame_len = len(win_sq)
    for frame in range(n_frames):
        offset = frame * hop_length
        end = min(total, offset + frame_len)
        # Clip the window at the end of the buffer
        x[offset:end] += win_sq[:max(0, end - offset)]
"resource": ""
} |
def window_sumsquare(window, n_frames, hop_length=512, win_length=None, n_fft=2048,
                     dtype=np.float32, norm=None):
    '''
    Compute the sum-square envelope of a window function at a given hop length.

    This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`

    n_frames : int > 0
        The number of analysis frames

    hop_length : int > 0
        The number of samples to advance between frames

    win_length : [optional]
        The length of the window function.  By default, this matches `n_fft`.

    n_fft : int > 0
        The length of each analysis frame.

    dtype : np.dtype
        The data type of the output

    norm : {np.inf, -np.inf, 0, float > 0, None}
        Normalization applied to the window prior to squaring;
        see `librosa.util.normalize`.

    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function

    Examples
    --------
    For a fixed frame length (2048), compare modulation effects for a Hann
    window at different hop lengths:

    >>> n_frames = 50
    >>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
    >>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
    >>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.subplot(3,1,1)
    >>> plt.plot(wss_256)
    >>> plt.title('hop_length=256')
    >>> plt.subplot(3,1,2)
    >>> plt.plot(wss_512)
    >>> plt.title('hop_length=512')
    >>> plt.subplot(3,1,3)
    >>> plt.plot(wss_1024)
    >>> plt.title('hop_length=1024')
    >>> plt.tight_layout()
    '''
    if win_length is None:
        win_length = n_fft

    # Total length of the output envelope
    total = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total, dtype=dtype)

    # Squared, normalized window, centered within an n_fft frame
    win_sq = get_window(window, win_length)
    win_sq = util.normalize(win_sq, norm=norm)**2
    win_sq = util.pad_center(win_sq, n_fft)

    # Overlap-add the squared window at every frame position
    __window_ss_fill(envelope, win_sq, n_frames, hop_length)

    return envelope
"resource": ""
} |
def diagonal_filter(window, n, slope=1.0, angle=None, zero_mean=False):
    '''Build a two-dimensional diagonal filter kernel.

    This is primarily used for smoothing recurrence or self-similarity
    matrices.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        The window function to use for the filter; see `get_window`.
        Note that the window used here should be non-negative.

    n : int > 0
        the length of the filter

    slope : float
        The slope of the diagonal filter to produce

    angle : float or None
        If given, `slope` is ignored and `angle` directly sets the
        orientation of the filter (in radians).
        Otherwise, the angle is inferred as `arctan(slope)`.

    zero_mean : bool
        If True, a zero-mean filter is used.
        Otherwise, a non-negative averaging filter is used.

        This should be enabled if you want to enhance paths and suppress
        blocks.

    Returns
    -------
    kernel : np.ndarray, shape=[(m, m)]
        The 2-dimensional filter kernel

    Notes
    -----
    This function caches at level 10.
    '''

    if angle is None:
        angle = np.arctan(slope)

    # Place the (non-periodic) window along the main diagonal
    kernel = np.diag(get_window(window, n, fftbins=False))

    # The plain diagonal corresponds to a 45-degree (pi/4) orientation;
    # rotate to match any other requested angle.
    if not np.isclose(angle, np.pi/4):
        kernel = scipy.ndimage.rotate(kernel, 45 - angle * 180 / np.pi,
                                      order=5, prefilter=False)

    # Interpolation can introduce small negative values; clip them and
    # renormalize so the kernel sums to one.
    np.clip(kernel, 0, None, out=kernel)
    kernel /= kernel.sum()

    if zero_mean:
        kernel -= kernel.mean()

    return kernel
"resource": ""
} |
def spectral_centroid(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                      freq=None, win_length=None, window='hann', center=True,
                      pad_mode='reflect'):
    '''Compute the spectral centroid.

    Each column of the magnitude spectrogram is normalized and treated as
    a distribution over frequency bins, from which the mean (centroid) is
    extracted per frame.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT.  See `librosa.core.stft` for details.

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.
        If `None`, then FFT bin center frequencies are used.
        Otherwise, it can be a single array of `d` center frequencies,
        or a matrix of center frequencies as constructed by
        `librosa.core.ifgram`

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    Returns
    -------
    centroid : np.ndarray [shape=(1, t)]
        centroid frequencies

    Raises
    ------
    ParameterError
        If the input spectrogram is complex-valued or contains
        negative energies.

    See Also
    --------
    librosa.core.stft
        Short-time Fourier Transform

    librosa.core.ifgram
        Instantaneous-frequency spectrogram

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> cent = librosa.feature.spectral_centroid(y=y, sr=sr)
    >>> cent
    array([[ 4382.894,   626.588, ...,  5037.07 ,  5413.398]])
    '''

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    # The centroid is only meaningful for real, non-negative magnitudes
    if not np.isrealobj(S):
        raise ParameterError('Spectral centroid is only defined '
                             'with real-valued input')
    elif np.any(S < 0):
        raise ParameterError('Spectral centroid is only defined '
                             'with non-negative energies')

    # Default to FFT bin center frequencies
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Reshape a 1-d frequency vector for broadcasting against (d, t)
    if freq.ndim == 1:
        freq = freq.reshape((-1, 1))

    # Treat each column as a probability distribution and take its mean
    column_weights = util.normalize(S, norm=1, axis=0)
    return np.sum(freq * column_weights, axis=0, keepdims=True)
"resource": ""
} |
def spectral_rolloff(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                     win_length=None, window='hann', center=True, pad_mode='reflect',
                     freq=None, roll_percent=0.85):
    '''Compute roll-off frequency.

    The roll-off frequency is defined for each frame as the center
    frequency for a spectrogram bin such that at least `roll_percent`
    (0.85 by default) of the energy of the spectrum in this frame is
    contained in this bin and the bins below.  This can be used to, e.g.,
    approximate the maximum (or minimum) frequency by setting
    `roll_percent` to a value close to 1 (or 0).

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT.  See `librosa.core.stft` for details.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.
        If `None`, then FFT bin center frequencies are used.
        Otherwise, it can be a single array of `d` center frequencies.

        .. note:: `freq` is assumed to be sorted in increasing order

    roll_percent : float [0 < roll_percent < 1]
        Roll-off percentage.

    Returns
    -------
    rolloff : np.ndarray [shape=(1, t)]
        roll-off frequency for each frame

    Raises
    ------
    ParameterError
        If `roll_percent` is outside the open interval (0, 1), or the
        input spectrogram is complex-valued or contains negative
        energies.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
    >>> rolloff
    array([[ 8376.416,   968.994, ...,  8925.513,  9108.545]])
    '''

    if not 0.0 < roll_percent < 1.0:
        raise ParameterError('roll_percent must lie in the range (0, 1)')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    # Roll-off is only meaningful for real, non-negative magnitudes
    if not np.isrealobj(S):
        raise ParameterError('Spectral rolloff is only defined '
                             'with real-valued input')
    elif np.any(S < 0):
        raise ParameterError('Spectral rolloff is only defined '
                             'with non-negative energies')

    # Default to FFT bin center frequencies
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Reshape a 1-d frequency vector for broadcasting against (d, t)
    if freq.ndim == 1:
        freq = freq.reshape((-1, 1))

    # Cumulative energy from the lowest bin upwards, per frame
    cumulative = np.cumsum(S, axis=0)
    threshold = roll_percent * cumulative[-1]

    # Mask out bins below the threshold, then the roll-off is the lowest
    # surviving bin frequency in each frame
    valid = np.where(cumulative < threshold, np.nan, 1)
    return np.nanmin(valid * freq, axis=0, keepdims=True)
"resource": ""
} |
def spectral_flatness(y=None, S=None, n_fft=2048, hop_length=512,
                      win_length=None, window='hann', center=True, pad_mode='reflect',
                      amin=1e-10, power=2.0):
    '''Compute spectral flatness

    Spectral flatness (or tonality coefficient) quantifies how
    noise-like a sound is, as opposed to being tone-like [1]_.  A high
    spectral flatness (closer to 1.0) indicates the spectrum is similar
    to white noise.  It is often converted to decibel.

    .. [1] Dubnov, Shlomo  "Generalization of spectral flatness
           measure for non-gaussian linear processes"
           IEEE Signal Processing Letters, 2004, Vol. 11.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    S : np.ndarray [shape=(d, t)] or None
        (optional) pre-computed spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT.  See `librosa.core.stft` for details.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    amin : float > 0 [scalar]
        minimum threshold for `S` (=added noise floor for numerical
        stability)

    power : float > 0 [scalar]
        Exponent for the magnitude spectrogram.
        e.g., 1 for energy, 2 for power, etc.
        Power spectrogram is usually used for computing spectral
        flatness.

    Returns
    -------
    flatness : np.ndarray [shape=(1, t)]
        spectral flatness for each frame.
        The returned value is in [0, 1] and often converted to dB scale.

    Raises
    ------
    ParameterError
        If `amin` is not strictly positive, or the input spectrogram is
        complex-valued or contains negative energies.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> flatness = librosa.feature.spectral_flatness(y=y)
    >>> flatness
    array([[  1.00000e+00,   5.82299e-03,   5.64624e-04, ...,   9.99063e-01,
              1.00000e+00,   1.00000e+00]], dtype=float32)
    '''

    if amin <= 0:
        raise ParameterError('amin must be strictly positive')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            power=1., win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    # Flatness is only meaningful for real, non-negative magnitudes
    if not np.isrealobj(S):
        raise ParameterError('Spectral flatness is only defined '
                             'with real-valued input')
    elif np.any(S < 0):
        raise ParameterError('Spectral flatness is only defined '
                             'with non-negative energies')

    # Raise to the requested power and apply the noise floor
    S_power = np.maximum(amin, S ** power)

    # Flatness is the ratio of geometric mean to arithmetic mean;
    # compute the geometric mean in log space for stability
    geometric_mean = np.exp(np.mean(np.log(S_power), axis=0, keepdims=True))
    arithmetic_mean = np.mean(S_power, axis=0, keepdims=True)

    return geometric_mean / arithmetic_mean
"resource": ""
} |
def poly_features(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                  win_length=None, window='hann', center=True, pad_mode='reflect',
                  order=1, freq=None):
    '''Fit an nth-order polynomial to each column of a spectrogram and
    return the coefficients.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT.  See `librosa.core.stft` for details.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    order : int > 0
        order of the polynomial to fit

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.
        If `None`, then FFT bin center frequencies are used.
        Otherwise, it can be a single array of `d` center frequencies,
        or a matrix of center frequencies as constructed by
        `librosa.core.ifgram`

    Returns
    -------
    coefficients : np.ndarray [shape=(order+1, t)]
        polynomial coefficients for each frame.

        `coefficients[0]` corresponds to the highest degree (`order`),
        `coefficients[1]` corresponds to the next highest degree
        (`order-1`), down to the constant term `coefficients[order]`.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> S = np.abs(librosa.stft(y))

    Fit a degree-0 polynomial (constant) to each frame

    >>> p0 = librosa.feature.poly_features(S=S, order=0)

    Fit a linear polynomial to each frame

    >>> p1 = librosa.feature.poly_features(S=S, order=1)

    Fit a quadratic to each frame

    >>> p2 = librosa.feature.poly_features(S=S, order=2)
    '''

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    # Default to FFT bin center frequencies
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    if freq.ndim == 1:
        # Frequencies are shared across frames: one joint fit suffices
        coefficients = np.polyfit(freq, S, order)
    else:
        # Time-varying frequencies: fit each frame on its own, then stack
        fits = [np.polyfit(freq[:, frame], S[:, frame], order)
                for frame in range(S.shape[1])]
        coefficients = np.array(fits).T

    return coefficients
"resource": ""
} |
def zero_crossing_rate(y, frame_length=2048, hop_length=512, center=True,
                       **kwargs):
    '''Compute the zero-crossing rate of an audio time series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Audio time series

    frame_length : int > 0
        Length of the frame over which to compute zero crossing rates

    hop_length : int > 0
        Number of samples to advance for each frame

    center : bool
        If `True`, frames are centered by padding the edges of `y`.
        This is similar to the padding in `librosa.core.stft`,
        but uses edge-value copies instead of reflection.

    kwargs : additional keyword arguments
        See `librosa.core.zero_crossings`

        .. note:: By default, the `pad` parameter is set to `False`,
            which differs from the default specified by
            `librosa.core.zero_crossings`.

    Returns
    -------
    zcr : np.ndarray [shape=(1, t)]
        `zcr[0, i]` is the fraction of zero crossings in the
        `i` th frame

    See Also
    --------
    librosa.core.zero_crossings
        Compute zero-crossings in a time-series

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.feature.zero_crossing_rate(y)
    array([[ 0.134,  0.139, ...,  0.387,  0.322]])
    '''

    util.valid_audio(y)

    if center:
        # Edge-value padding keeps frames centered without reflecting
        y = np.pad(y, int(frame_length // 2), mode='edge')

    frames = util.frame(y, frame_length, hop_length)

    # Count crossings along the sample axis of each frame;
    # individual frames are not padded by default.
    kwargs['axis'] = 0
    kwargs.setdefault('pad', False)

    return np.mean(zero_crossings(frames, **kwargs), axis=0, keepdims=True)
"resource": ""
} |
def chroma_stft(y=None, sr=22050, S=None, norm=np.inf, n_fft=2048,
                hop_length=512, win_length=None, window='hann', center=True,
                pad_mode='reflect', tuning=None, **kwargs):
    """Compute a chromagram from a waveform or power spectrogram.

    This implementation is derived from `chromagram_E` [1]_

    .. [1] Ellis, Daniel P.W.  "Chroma feature analysis and synthesis"
           2007/04/21
           http://labrosa.ee.columbia.edu/matlab/chroma-ansyn/

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None
        power spectrogram

    norm : float or None
        Column-wise normalization; see `librosa.util.normalize`.
        If `None`, no normalization is performed.

    n_fft : int > 0 [scalar]
        FFT window size if provided `y, sr` instead of `S`

    hop_length : int > 0 [scalar]
        hop length if provided `y, sr` instead of `S`

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    tuning : float in `[-0.5, 0.5)` [scalar] or None.
        Deviation from A440 tuning in fractional bins (cents).
        If `None`, it is automatically estimated.

    kwargs : additional keyword arguments
        Arguments to parameterize chroma filters.
        See `librosa.filters.chroma` for details.

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        Normalized energy for each chroma bin at each frame.

    See Also
    --------
    librosa.filters.chroma
        Chroma filter bank construction

    librosa.util.normalize
        Vector normalization

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.feature.chroma_stft(y=y, sr=sr)
    array([[ 0.974,  0.881, ...,  0.925,  1.   ],
           [ 1.   ,  0.841, ...,  0.882,  0.878],
           ...,
           [ 0.658,  0.985, ...,  0.878,  0.764],
           [ 0.969,  0.92 , ...,  0.974,  0.915]])
    """

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length, power=2,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    n_chroma = kwargs.get('n_chroma', 12)

    if tuning is None:
        tuning = estimate_tuning(S=S, sr=sr, bins_per_octave=n_chroma)

    # Reference frequency, adjusted by the (estimated) tuning deviation
    kwargs.setdefault('A440', 440.0 * 2.0**(float(tuning) / n_chroma))

    # Project the power spectrogram onto the chroma filter bank
    chromafb = filters.chroma(sr, n_fft, **kwargs)
    raw_chroma = chromafb.dot(S)

    # Normalize each frame independently
    return util.normalize(raw_chroma, norm=norm, axis=0)
"resource": ""
} |
def chroma_cqt(y=None, sr=22050, C=None, hop_length=512, fmin=None,
               norm=np.inf, threshold=0.0, tuning=None, n_chroma=12,
               n_octaves=7, window=None, bins_per_octave=None, cqt_mode='full'):
    r'''Constant-Q chromagram

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series

    sr : number > 0
        sampling rate of `y`

    C : np.ndarray [shape=(d, t)] [Optional]
        a pre-computed constant-Q spectrogram

    hop_length : int > 0
        number of samples between successive chroma frames

    fmin : float > 0
        minimum frequency to analyze in the CQT.
        Default: 'C1' ~= 32.7 Hz

    norm : int > 0, +-np.inf, or None
        Column-wise normalization of the chromagram.

    threshold : float
        Pre-normalization energy threshold.  Values below the
        threshold are discarded, resulting in a sparse chromagram.

    tuning : float
        Deviation (in cents) from A440 tuning

    n_chroma : int > 0
        Number of chroma bins to produce

    n_octaves : int > 0
        Number of octaves to analyze above `fmin`

    window : None or np.ndarray
        Optional window parameter to `filters.cq_to_chroma`

    bins_per_octave : int > 0
        Number of bins per octave in the CQT.
        Default: matches `n_chroma`

    cqt_mode : ['full', 'hybrid']
        Constant-Q transform mode

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        The output chromagram

    See Also
    --------
    librosa.util.normalize
    librosa.core.cqt
    librosa.core.hybrid_cqt
    chroma_stft

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      offset=10, duration=15)
    >>> chroma_cq = librosa.feature.chroma_cqt(y=y, sr=sr)
    '''

    if bins_per_octave is None:
        bins_per_octave = n_chroma

    # Compute the CQT magnitude if one was not supplied
    if C is None:
        transforms = {'full': cqt, 'hybrid': hybrid_cqt}
        C = np.abs(transforms[cqt_mode](y, sr=sr,
                                        hop_length=hop_length,
                                        fmin=fmin,
                                        n_bins=n_octaves * bins_per_octave,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning))

    # Project CQT bins onto chroma classes
    projection = filters.cq_to_chroma(C.shape[0],
                                      bins_per_octave=bins_per_octave,
                                      n_chroma=n_chroma,
                                      fmin=fmin,
                                      window=window)
    chroma = projection.dot(C)

    # Discard energy below the threshold to sparsify the output
    if threshold is not None:
        chroma[chroma < threshold] = 0.0

    if norm is not None:
        chroma = util.normalize(chroma, norm=norm, axis=0)

    return chroma
"resource": ""
} |
def melspectrogram(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                   win_length=None, window='hann', center=True, pad_mode='reflect',
                   power=2.0, **kwargs):
    """Compute a mel-scaled spectrogram.

    If a spectrogram input `S` is provided, then it is mapped directly
    onto the mel basis `mel_f` by `mel_f.dot(S)`.

    If a time-series input `y, sr` is provided, then its magnitude
    spectrogram `S` is first computed, and then mapped onto the mel
    scale by `mel_f.dot(S**power)`.  By default, `power=2` operates on
    a power spectrum.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time-series

    sr : number > 0 [scalar]
        sampling rate of `y`

    S : np.ndarray [shape=(d, t)]
        spectrogram

    n_fft : int > 0 [scalar]
        length of the FFT window

    hop_length : int > 0 [scalar]
        number of samples between successive frames.
        See `librosa.core.stft`

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`; the window is
        padded with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.

    power : float > 0 [scalar]
        Exponent for the magnitude melspectrogram.
        e.g., 1 for energy, 2 for power, etc.

    kwargs : additional keyword arguments
        Mel filter bank parameters.
        See `librosa.filters.mel` for details.

    Returns
    -------
    S : np.ndarray [shape=(n_mels, t)]
        Mel spectrogram

    See Also
    --------
    librosa.filters.mel
        Mel filter bank construction

    librosa.core.stft
        Short-time Fourier Transform

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.feature.melspectrogram(y=y, sr=sr)
    array([[  2.891e-07,   2.548e-03, ...,   8.116e-09,   5.633e-09],
           [  1.986e-07,   1.162e-02, ...,   9.332e-08,   6.716e-09],
           ...,
           [  3.668e-09,   2.029e-08, ...,   3.208e-09,   2.864e-09],
           [  2.561e-10,   2.096e-09, ...,   7.543e-10,   6.101e-10]])
    """

    spec, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length, power=power,
                               win_length=win_length, window=window, center=center,
                               pad_mode=pad_mode)

    # Map the linear-frequency spectrogram onto the mel basis
    mel_basis = filters.mel(sr, n_fft, **kwargs)

    return mel_basis.dot(spec)
"resource": ""
} |
q267975 | __jaccard | test | def __jaccard(int_a, int_b): # pragma: no cover
'''Jaccard similarity between two intervals
Parameters
----------
int_a, int_b : np.ndarrays, shape=(2,)
Returns
-------
Jaccard similarity between intervals
'''
ends = [int_a[1], int_b[1]]
if ends[1] < ends[0]:
ends.reverse()
starts = [int_a[0], int_b[0]]
if starts[1] < starts[0]:
starts.reverse()
intersection = ends[0] - starts[1]
if intersection < 0:
intersection = 0.
union = ends[1] - starts[0]
if union > 0:
return intersection / union
return 0.0 | python | {
"resource": ""
} |
def __match_interval_overlaps(query, intervals_to, candidates):  # pragma: no cover
    '''Return the index among `candidates` whose interval in
    `intervals_to` has the highest Jaccard similarity with `query`.'''
    winner = -1
    winner_score = -1

    for cand_idx in candidates:
        cand_score = __jaccard(query, intervals_to[cand_idx])
        if cand_score > winner_score:
            winner = cand_idx
            winner_score = cand_score

    return winner
"resource": ""
} |
def __match_intervals(intervals_from, intervals_to, strict=True):  # pragma: no cover
    '''Numba-accelerated interval matching algorithm.

    For each interval in `intervals_from`, finds the index of the best
    matching interval in `intervals_to`: maximal Jaccard overlap, or —
    when `strict=False` and no target overlaps — the target whose
    boundary is nearest to the query.

    Raises `ParameterError` (without a message; numba restriction) when
    `strict=True` and a query overlaps no target interval.
    '''
    # sort index of the interval starts
    start_index = np.argsort(intervals_to[:, 0])

    # sort index of the interval ends
    end_index = np.argsort(intervals_to[:, 1])

    # and sorted values of starts
    start_sorted = intervals_to[start_index, 0]
    # and ends
    end_sorted = intervals_to[end_index, 1]

    # Binary searches: for each query, count how many target starts fall
    # before the query's end, and how many target ends fall before the
    # query's start
    search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
    search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')

    output = np.empty(len(intervals_from), dtype=numba.uint32)
    for i in range(len(intervals_from)):
        query = intervals_from[i]

        # Find the intervals that start after our query ends
        after_query = search_ends[i]

        # And the intervals that end after our query begins
        before_query = search_starts[i]

        # Candidates for overlapping have to (end after we start) and (begin before we end)
        candidates = set(start_index[:after_query]) & set(end_index[before_query:])

        # Proceed as before
        if len(candidates) > 0:
            output[i] = __match_interval_overlaps(query, intervals_to, candidates)
        elif strict:
            # Numba only lets us use compile-time constants in exception messages
            raise ParameterError
        else:
            # Find the closest interval
            # (start_index[after_query] - query[1]) is the distance to the next interval
            # (query[0] - end_index[before_query])
            dist_before = np.inf
            dist_after = np.inf
            if search_starts[i] > 0:
                # Gap between query start and the nearest earlier target end
                dist_before = query[0] - end_sorted[search_starts[i]-1]
            if search_ends[i] + 1 < len(intervals_to):
                # Gap between the nearest later target start and query end
                dist_after = start_sorted[search_ends[i]+1] - query[1]
            if dist_before < dist_after:
                output[i] = end_index[search_starts[i]-1]
            else:
                output[i] = start_index[search_ends[i]+1]
    return output
"resource": ""
} |
def match_intervals(intervals_from, intervals_to, strict=True):
    '''Match one set of time intervals to another.

    This can be useful for tasks such as mapping beat timings to segments.

    Each source interval `[a, b]` is matched to the target interval
    `[c, d]` maximizing the Jaccard similarity:

        `max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`

    With `strict=True`, a source interval that has no positive
    intersection with any target raises an exception.  With
    `strict=False`, such an interval is instead matched to the disjoint
    target `[c, d]` minimizing `min(|b - c|, |a - d|)`, i.e. the target
    with the closest boundary.

    .. note:: An element of `intervals_to` may be matched to multiple
        entries of `intervals_from`.

    Parameters
    ----------
    intervals_from : np.ndarray [shape=(n, 2)]
        The time range for source intervals.
        The `i` th interval spans time `intervals_from[i, 0]`
        to `intervals_from[i, 1]`.
    intervals_to : np.ndarray [shape=(m, 2)]
        Analogous to `intervals_from`.
    strict : bool
        If `True`, intervals can only match if they intersect.
        If `False`, disjoint intervals can match.

    Returns
    -------
    interval_mapping : np.ndarray [shape=(n,)]
        For each interval in `intervals_from`, the index of the
        corresponding interval in `intervals_to`.

    Raises
    ------
    ParameterError
        If either interval array is empty or mis-shaped, or if
        `strict=True` and some element of `intervals_from` is disjoint
        from every element of `intervals_to`.

    See Also
    --------
    match_events

    Examples
    --------
    >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
    >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
    >>> librosa.util.match_intervals(ints_from, ints_to)
    array([2, 1, 2], dtype=uint32)
    '''
    if not len(intervals_from) or not len(intervals_to):
        raise ParameterError('Attempting to match empty interval list')

    # Check shape and size of both interval arrays
    valid_intervals(intervals_from)
    valid_intervals(intervals_to)

    try:
        return __match_intervals(intervals_from, intervals_to, strict=strict)
    except ParameterError:
        # The numba kernel cannot format exception messages; attach a
        # useful one here while preserving the original traceback
        six.reraise(ParameterError,
                    ParameterError('Unable to match intervals with strict={}'.format(strict)),
                    sys.exc_info()[2])
"resource": ""
} |
def match_events(events_from, events_to, left=True, right=True):
    '''Match one set of events to another.

    This is useful for tasks such as matching beats to the nearest
    detected onsets, or frame-aligned events to the nearest zero-crossing.

    .. note:: A target event may be matched to multiple source events.

    Examples
    --------
    >>> # Sources are multiples of 7
    >>> s_from = np.arange(0, 100, 7)
    >>> # Targets are multiples of 10
    >>> s_to = np.arange(0, 100, 10)
    >>> # Find the matching
    >>> idx = librosa.util.match_events(s_from, s_to)
    >>> idx
    array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])

    Parameters
    ----------
    events_from : ndarray [shape=(n,)]
        Array of events (eg, times, sample or frame indices) to match from.
    events_to : ndarray [shape=(m,)]
        Array of events (eg, times, sample or frame indices) to
        match against.
    left : bool
    right : bool
        If `False`, then matched events cannot be to the left (or right)
        of source events.

    Returns
    -------
    event_mapping : np.ndarray [shape=(n,)]
        For each event in `events_from`, the corresponding event
        index in `events_to`:
        `event_mapping[i] == arg min |events_from[i] - events_to[:]|`

    See Also
    --------
    match_intervals

    Raises
    ------
    ParameterError
        If either array of input events is empty, or the matching
        constraints (`left`/`right`) cannot be satisfied.
    '''
    if len(events_from) == 0 or len(events_to) == 0:
        raise ParameterError('Attempting to match empty event list')

    # If we can't match left or right, then only strict equivalence
    # counts as a match.
    if not (left or right) and not np.all(np.in1d(events_from, events_to)):
        raise ParameterError('Cannot match events with left=right=False '
                             'and events_from is not contained '
                             'in events_to')

    # If we can't match to the left, then there should be at least one
    # target event greater-equal to every source event
    if (not left) and max(events_to) < max(events_from):
        raise ParameterError('Cannot match events with left=False '
                             'and max(events_to) < max(events_from)')

    # If we can't match to the right, then there should be at least one
    # target event less-equal to every source event
    if (not right) and min(events_to) > min(events_from):
        raise ParameterError('Cannot match events with right=False '
                             'and min(events_to) > min(events_from)')

    # Array of matched items.
    # FIX: `np.int` was a deprecated alias of the builtin `int`
    # (deprecated in NumPy 1.20, removed in 1.24); use `int` directly.
    output = np.empty_like(events_from, dtype=int)

    return __match_events_helper(output, events_from, events_to, left, right)
"resource": ""
} |
def salience(S, freqs, h_range, weights=None, aggregate=None,
             filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
    """Harmonic salience function.

    Aggregates the harmonic energy of `S` (computed via
    `interp_harmonics`) across the requested harmonics, optionally
    restricted to spectral peaks.

    Parameters
    ----------
    S : np.ndarray [shape=(d, n)]
        input time-frequency magnitude representation (stft, ifgram, etc).
        Must be real-valued and non-negative.
    freqs : np.ndarray, shape=(S.shape[axis])
        frequency values corresponding to S's elements along `axis`
    h_range : list-like, non-negative
        harmonics to include; 1 corresponds to `S` itself, values < 1
        (e.g., 1/2) to sub-harmonics
    weights : list-like or None
        per-harmonic weights for the summation (default: uniform);
        must match the length of `h_range`
    aggregate : function or None
        aggregation function (default: `np.average`).
        Only `np.average` honors `weights`; any other aggregator
        treats all harmonics equally.
    filter_peaks : bool
        if True, keep the harmonic summation only at frequencies that
        are local magnitude peaks; elsewhere use `fill_value`
    fill_value : float
        value written at non-peak positions when `filter_peaks` is True
    kind : str
        interpolation type for harmonic estimation
        (see `scipy.interpolate.interp1d`)
    axis : int
        the axis along which to compute harmonics

    Returns
    -------
    S_sal : np.ndarray
        same shape as `S`; the aggregate harmonic energy at each
        frequency

    See Also
    --------
    interp_harmonics
    """
    agg_fn = np.average if aggregate is None else aggregate

    if weights is None:
        harmonic_weights = np.ones((len(h_range), ))
    else:
        harmonic_weights = np.array(weights, dtype=float)

    stacked = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)

    # Only np.average supports per-harmonic weighting
    if agg_fn is np.average:
        sal_map = agg_fn(stacked, axis=0, weights=harmonic_weights)
    else:
        sal_map = agg_fn(stacked, axis=0)

    if filter_peaks:
        rows, cols = scipy.signal.argrelmax(S, axis=0)
        # Non-peak positions take fill_value; float64 to match np.empty
        masked = np.full(S.shape, fill_value, dtype=np.float64)
        masked[rows, cols] = sal_map[rows, cols]
        sal_map = masked

    return sal_map
"resource": ""
} |
def interp_harmonics(x, freqs, h_range, kind='linear', fill_value=0, axis=0):
    '''Compute the energy at harmonics of a time-frequency representation.

    Given a frequency-based energy representation such as a spectrogram
    or tempogram, this computes the energy at the chosen harmonics of the
    frequency axis.  The result can be used as input to a salience
    computation.

    Parameters
    ----------
    x : np.ndarray
        The input energy
    freqs : np.ndarray, shape=(X.shape[axis]) or shape=x.shape
        The frequency values corresponding to x's elements along the
        chosen axis (1-d), or per-element frequencies (2-d).
    h_range : list-like, non-negative
        Harmonics to compute; 1 corresponds to `x` itself, values < 1
        (e.g., 1/2) to sub-harmonics.
    kind : str
        Interpolation type. See `scipy.interpolate.interp1d`.
    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.
    axis : int
        The axis along which to compute harmonics

    Returns
    -------
    x_harm : np.ndarray, shape=(len(h_range), [x.shape])
        `x_harm[i]` has the same shape as `x` and measures the energy
        at the `h_range[i]` harmonic of each frequency.

    Raises
    ------
    ParameterError
        If `freqs` matches neither the 1-d nor the 2-d layout above.

    See Also
    --------
    scipy.interpolate.interp1d
    '''
    # Output gains a leading harmonic axis of length len(h_range)
    x_out = np.zeros([len(h_range)] + list(x.shape), dtype=x.dtype)

    if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
        # One fixed frequency grid shared by the whole array
        harmonics_1d(x_out, x, freqs, h_range,
                     kind=kind, fill_value=fill_value, axis=axis)
    elif freqs.ndim == 2 and freqs.shape == x.shape:
        # Time-varying frequencies: one grid per slice
        harmonics_2d(x_out, x, freqs, h_range,
                     kind=kind, fill_value=fill_value, axis=axis)
    else:
        raise ParameterError('freqs.shape={} does not match '
                             'input shape={}'.format(freqs.shape, x.shape))

    return x_out
"resource": ""
} |
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear',
                 fill_value=0, axis=0):
    '''Populate a harmonic tensor from a time-frequency representation.

    For each harmonic `h` in `h_range` and each frequency `f` in
    `freqs`, writes the energy of `x` interpolated at `h * f` into the
    corresponding slot of `harmonic_out` (in place).

    Parameters
    ----------
    harmonic_out : np.ndarray, shape=(len(h_range),) + x.shape
        The output array to store harmonics; modified in place.
    x : np.ndarray
        The input energy
    freqs : np.ndarray, shape=(x.shape[axis])
        The frequency values corresponding to x's elements along the
        chosen axis.
    h_range : list-like, non-negative
        Harmonics to compute; 1 corresponds to `x` itself, values < 1
        (e.g., 1/2) to sub-harmonics.
    kind : str
        Interpolation type. See `scipy.interpolate.interp1d`.
    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.
    axis : int
        The axis along which to compute harmonics

    See Also
    --------
    interp_harmonics
    scipy.interpolate.interp1d
    '''
    # Single interpolator for all harmonics: the frequency grid is fixed,
    # only the evaluation points change (1-d interpolation only)
    interpolator = scipy.interpolate.interp1d(freqs, x,
                                              kind=kind,
                                              axis=axis,
                                              copy=False,
                                              bounds_error=False,
                                              fill_value=fill_value)

    # The frequency axis of the output is shifted by one to make room
    # for the leading harmonic axis
    freq_axis = 1 + (axis % x.ndim)

    selector = [slice(None)] * harmonic_out.ndim

    for h_pos, h in enumerate(h_range):
        selector[0] = h_pos
        for f_pos, base_freq in enumerate(freqs):
            selector[freq_axis] = f_pos
            # Energy at the h'th harmonic of this frequency, across time
            harmonic_out[tuple(selector)] = interpolator(h * base_freq)
"resource": ""
} |
def harmonics_2d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0,
                 axis=0):
    '''Populate a harmonic tensor from a time-frequency representation
    with time-varying frequencies.

    Slices the problem along the non-interpolated axis and delegates
    each slice to `harmonics_1d` with that slice's own frequency grid.

    Parameters
    ----------
    harmonic_out : np.ndarray
        The output array to store harmonics; modified in place.
    x : np.ndarray
        The input energy
    freqs : np.ndarray, shape=x.shape
        The frequency values corresponding to each element of `x`
    h_range : list-like, non-negative
        Harmonics to compute; 1 corresponds to `x` itself, values < 1
        (e.g., 1/2) to sub-harmonics.
    kind : str
        Interpolation type. See `scipy.interpolate.interp1d`.
    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.
    axis : int
        The axis along which to compute harmonics

    See Also
    --------
    harmonics_1d
    '''
    in_sel = [slice(None)] * x.ndim
    freq_sel = [slice(None)] * x.ndim
    out_sel = [slice(None)] * harmonic_out.ndim

    # The axis we do NOT interpolate over
    fixed_axis = (1 + axis) % x.ndim

    # Compute harmonics independently for each position on the fixed axis
    for pos in range(x.shape[fixed_axis]):
        # Keep a singleton dimension so shapes line up for harmonics_1d
        in_sel[fixed_axis] = slice(pos, pos + 1)
        freq_sel[fixed_axis] = pos
        out_sel[1 + fixed_axis] = in_sel[fixed_axis]

        harmonics_1d(harmonic_out[tuple(out_sel)], x[tuple(in_sel)],
                     freqs[tuple(freq_sel)], h_range,
                     kind=kind, fill_value=fill_value, axis=axis)
"resource": ""
} |
def load(path, sr=22050, mono=True, offset=0.0, duration=None,
         dtype=np.float32, res_type='kaiser_best'):
    """Load an audio file as a floating point time series.

    Audio is automatically resampled to `sr` (default 22050 Hz); pass
    `sr=None` to keep the file's native rate.  Decoding is attempted
    with `soundfile` first, falling back to `audioread` for codecs
    soundfile cannot handle (e.g., MP3).

    Parameters
    ----------
    path : string, int, or file-like object
        path to the input file.  Any codec supported by `soundfile` or
        `audioread` will work.  Open file descriptors and file-like
        objects are only supported for soundfile-decodable codecs.
    sr : number > 0 [scalar] or None
        target sampling rate; `None` uses the native rate
    mono : bool
        convert signal to mono
    offset : float
        start reading after this time (in seconds)
    duration : float or None
        only load up to this much audio (in seconds)
    dtype : numeric type
        data type of `y`
    res_type : str
        resample type (see `resample`); 'kaiser_best' by default

    .. note::
        `audioread` may truncate the precision of the audio data to
        16 bits.

    Returns
    -------
    y : np.ndarray [shape=(n,) or (2, n)]
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`

    Examples
    --------
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename)
    >>> sr
    22050
    """
    try:
        with sf.SoundFile(path) as sf_file:
            sr_native = sf_file.samplerate

            if offset:
                # Skip ahead to the requested start position
                sf_file.seek(int(offset * sr_native))

            # -1 tells soundfile to read to the end of the file
            n_frames = -1 if duration is None else int(duration * sr_native)

            # Transpose into librosa's (channels, samples) layout
            y = sf_file.read(frames=n_frames, dtype=dtype, always_2d=False).T
    except RuntimeError:
        # soundfile could not decode this file; fall back to audioread
        y, sr_native = __audioread_load(path, offset, duration, dtype)

    if mono:
        y = to_mono(y)

    if sr is not None:
        y = resample(y, sr_native, sr, res_type=res_type)
    else:
        sr = sr_native

    return y, sr
"resource": ""
} |
def __audioread_load(path, offset, duration, dtype):
    '''Load an audio buffer using audioread.

    This loads one block at a time, and then concatenates the results.

    Parameters
    ----------
    path : string
        Path to the input file (any codec audioread supports).
    offset : float
        Start reading after this time (in seconds).
    duration : float or None
        Only load up to this much audio (in seconds); `None` reads to
        the end of the file.
    dtype : numeric type
        Data type of the returned buffer.

    Returns
    -------
    y : np.ndarray
        Audio buffer: shape (n,) for mono, (channels, n) otherwise.
    sr_native : int
        Native sampling rate reported by audioread.
    '''
    y = []
    with audioread.audio_open(path) as input_file:
        sr_native = input_file.samplerate
        n_channels = input_file.channels

        # Start/end positions measured in *interleaved* samples
        # (frames * channels), since audioread yields interleaved buffers
        s_start = int(np.round(sr_native * offset)) * n_channels

        if duration is None:
            s_end = np.inf
        else:
            s_end = s_start + (int(np.round(sr_native * duration))
                               * n_channels)

        # Running count of interleaved samples consumed so far
        n = 0

        for frame in input_file:
            frame = util.buf_to_float(frame, dtype=dtype)
            n_prev = n
            n = n + len(frame)

            if n < s_start:
                # offset is after the current frame
                # keep reading
                continue

            if s_end < n_prev:
                # we're off the end.  stop reading
                break

            if s_end < n:
                # the end is in this frame.  crop.
                frame = frame[:s_end - n_prev]

            if n_prev <= s_start <= n:
                # beginning is in this frame
                frame = frame[(s_start - n_prev):]

            # tack on the current frame
            y.append(frame)

    if y:
        y = np.concatenate(y)
        if n_channels > 1:
            # De-interleave into (channels, samples)
            y = y.reshape((-1, n_channels)).T
    else:
        # Nothing read (e.g., offset past end of file)
        y = np.empty(0, dtype=dtype)

    return y, sr_native
"resource": ""
} |
def to_mono(y):
    '''Force an audio signal down to mono by averaging across channels.

    Parameters
    ----------
    y : np.ndarray [shape=(2,n) or shape=(n,)]
        audio time series, either stereo or mono

    Returns
    -------
    y_mono : np.ndarray [shape=(n,)]
        `y` as a monophonic time-series

    Notes
    -----
    This function caches at level 20.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
    >>> y_mono = librosa.to_mono(y)
    >>> y_mono.shape
    (1355168,)
    '''
    # Sanity-check the buffer; stereo input is acceptable here
    util.valid_audio(y, mono=False)

    # Average across channels when more than one is present
    return np.mean(y, axis=0) if y.ndim > 1 else y
"resource": ""
} |
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs):
    """Resample a time series from orig_sr to target_sr.

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or shape=(2, n)]
        audio time series; mono or stereo
    orig_sr : number > 0 [scalar]
        original sampling rate of `y`
    target_sr : number > 0 [scalar]
        target sampling rate
    res_type : str
        resample type:
        - 'kaiser_best' (default) / 'kaiser_fast': `resampy` modes
        - 'fft' or 'scipy': `scipy.signal.resample`
        - 'polyphase': `scipy.signal.resample_poly`
          (integer sampling rates only)
    fix : bool
        adjust the resampled signal to be of size exactly
        `ceil(target_sr * len(y) / orig_sr)`
    scale : bool
        scale the output so `y` and `y_hat` have approximately equal
        total energy
    kwargs : additional keyword arguments
        passed to `librosa.util.fix_length` when `fix=True`

    Returns
    -------
    y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
        `y` resampled from `orig_sr` to `target_sr`

    Raises
    ------
    ParameterError
        If `res_type='polyphase'` and `orig_sr` or `target_sr` are not
        both integer-valued.

    See Also
    --------
    librosa.util.fix_length
    scipy.signal.resample
    resampy.resample

    Notes
    -----
    This function caches at level 20.
    """
    # Validate first; stereo is allowed
    util.valid_audio(y, mono=False)

    # No-op when the rates already agree
    if orig_sr == target_sr:
        return y

    ratio = float(target_sr) / orig_sr
    n_out = int(np.ceil(y.shape[-1] * ratio))

    if res_type in ('scipy', 'fft'):
        y_hat = scipy.signal.resample(y, n_out, axis=-1)
    elif res_type == 'polyphase':
        if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
            raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')

        # Up/down factors come from the reduced rational form of the
        # sampling-rate ratio
        sr_in = int(orig_sr)
        sr_out = int(target_sr)
        common = np.gcd(sr_in, sr_out)

        y_hat = scipy.signal.resample_poly(y, sr_out // common, sr_in // common, axis=-1)
    else:
        # Delegate to resampy's polyphase filters
        y_hat = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)

    if fix:
        y_hat = util.fix_length(y_hat, n_out, **kwargs)

    if scale:
        # Preserve total energy rather than amplitude
        y_hat /= np.sqrt(ratio)

    return np.ascontiguousarray(y_hat, dtype=y.dtype)
"resource": ""
} |
def autocorrelate(y, max_size=None, axis=-1):
    """Bounded auto-correlation.

    Computes the autocorrelation of `y` along `axis` via the Wiener-
    Khinchin theorem (FFT of the zero-padded power spectrum), truncated
    to at most `max_size` lags.

    Parameters
    ----------
    y : np.ndarray
        array to autocorrelate
    max_size : int > 0 or None
        maximum correlation lag; defaults to `y.shape[axis]` (unbounded)
    axis : int
        the axis along which to autocorrelate (default: last axis)

    Returns
    -------
    z : np.ndarray
        truncated autocorrelation `y*y` along the specified axis;
        `z.shape[axis]` is bounded to `max_size` when specified

    Notes
    -----
    This function caches at level 20.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=20, duration=10)
    >>> librosa.autocorrelate(y)
    """
    if max_size is None:
        max_size = y.shape[axis]

    max_size = int(min(max_size, y.shape[axis]))

    # Power spectrum of the zero-padded signal supports full-length
    # (non-circular) autocorrelation
    fft = get_fftlib()
    power = np.abs(fft.fft(y, n=2 * y.shape[axis] + 1, axis=axis))**2

    # Inverse transform back to the lag domain
    ac = fft.ifft(power, axis=axis)

    # Keep only the first max_size lags
    take = [slice(None)] * ac.ndim
    take[axis] = slice(max_size)
    ac = ac[tuple(take)]

    # Real input yields a real autocorrelation
    if not np.iscomplexobj(y):
        ac = ac.real

    return ac
"resource": ""
} |
def lpc(y, order):
    """Linear Prediction Coefficients via Burg's method.

    Estimates the coefficients of an `order`-th order linear filter on
    `y` using Burg's method — an extension of the Yule-Walker
    (autocorrelation) approach to LPC parameter estimation.  The
    approach follows the introduction of [1]_.

    .. [1] Larry Marple
           A New Autoregressive Spectrum Analysis Algorithm
           IEEE Transactions on Accoustics, Speech, and Signal Processing
           vol 28, no. 4, 1980

    Parameters
    ----------
    y : np.ndarray
        Time series to fit
    order : int > 0
        Order of the linear filter

    Returns
    -------
    a : np.ndarray of length order + 1
        LP prediction error coefficients, i.e. filter denominator
        polynomial

    Raises
    ------
    ParameterError
        If `y` is not valid mono audio, or `order` is not a positive
        integer.
    FloatingPointError
        If `y` is ill-conditioned.

    See also
    --------
    scipy.signal.lfilter

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
    ...                      duration=10)
    >>> librosa.lpc(y, 16)
    """
    # isinstance is checked first so non-numeric orders raise
    # ParameterError rather than TypeError from the comparison
    if not isinstance(order, int) or order < 1:
        raise ParameterError("order must be an integer > 0")

    # Only mono buffers are supported
    util.valid_audio(y, mono=True)

    # The numerical work happens in the numba-compiled helper
    return __lpc(y, order)
"resource": ""
} |
def clicks(times=None, frames=None, sr=22050, hop_length=512,
           click_freq=1000.0, click_duration=0.1, click=None, length=None):
    """Synthesize a signal containing the `click` waveform at each event time.

    Parameters
    ----------
    times : np.ndarray or None
        click positions, in seconds
    frames : np.ndarray or None
        click positions, as frame indices (ignored when `times` is given)
    sr : number > 0
        sampling rate of the output signal
    hop_length : int > 0
        samples between successive frames (used only with `frames`)
    click_freq : float > 0
        frequency (in Hz) of the default click tone. Default is 1KHz.
    click_duration : float > 0
        duration (in seconds) of the default click tone. Default is 100ms.
    click : np.ndarray or None
        optional click waveform to use instead of the default blip
    length : int > 0
        desired number of samples in the output signal

    Returns
    -------
    click_signal : np.ndarray
        the synthesized click track

    Raises
    ------
    ParameterError
        - If neither `times` nor `frames` are provided.
        - If any of `click_freq`, `click_duration`, or `length` are out of range.
    """
    # Resolve event positions into sample indices, preferring `times`
    if times is not None:
        positions = time_to_samples(times, sr=sr)
    elif frames is not None:
        positions = frames_to_samples(frames, hop_length=hop_length)
    else:
        raise ParameterError('either "times" or "frames" must be provided')

    if click is not None:
        # Caller supplied a click waveform: verify it is valid mono audio
        util.valid_audio(click, mono=True)
    else:
        # Build the default click: an exponentially decaying sinusoid
        if click_duration <= 0:
            raise ParameterError('click_duration must be strictly positive')

        if click_freq <= 0:
            raise ParameterError('click_freq must be strictly positive')

        n_click = int(np.round(sr * click_duration))
        angular_freq = 2 * np.pi * click_freq / float(sr)

        # Amplitude decays by 10 octaves over the click duration
        click = np.logspace(0, -10, num=n_click, base=2.0)
        click = click * np.sin(angular_freq * np.arange(n_click))

    if length is None:
        # Default length lets the final click run to completion
        length = positions.max() + click.shape[0]
    else:
        if length < 1:
            raise ParameterError('length must be a positive integer')

        # Drop any events that start at or beyond the signal boundary
        positions = positions[positions < length]

    # Accumulate clicks into a pre-allocated buffer
    click_signal = np.zeros(length, dtype=np.float32)

    for start in positions:
        end = start + click.shape[0]

        if end >= length:
            # Truncate a click that would run past the end of the buffer
            click_signal[start:] += click[:length - start]
        else:
            click_signal[start:end] += click

    return click_signal
"resource": ""
} |
def tone(frequency, sr=22050, length=None, duration=None, phi=None):
    """Returns a pure tone signal. The signal generated is a cosine wave.

    Parameters
    ----------
    frequency : float > 0
        frequency of the tone

    sr : number > 0
        desired sampling rate of the output signal

    length : int > 0
        desired number of samples in the output signal.
        When both `duration` and `length` are defined, `length` takes priority.

    duration : float > 0
        desired duration in seconds.
        When both `duration` and `length` are defined, `length` takes priority.

    phi : float or None
        phase offset, in radians. If unspecified, defaults to `-np.pi * 0.5`.

    Returns
    -------
    tone_signal : np.ndarray [shape=(length,), dtype=float64]
        Synthesized pure sine tone signal

    Raises
    ------
    ParameterError
        - If `frequency` is not provided.
        - If neither `length` nor `duration` are provided.
    """
    if frequency is None:
        raise ParameterError('"frequency" must be provided')

    # Compute signal length
    if length is None:
        if duration is None:
            raise ParameterError('either "length" or "duration" must be provided')
        # Round up so the signal covers the full requested duration;
        # this matches np.arange's ceil(stop/step) element count while
        # guaranteeing an integer length.
        length = int(np.ceil(duration * sr))

    if phi is None:
        phi = -np.pi * 0.5

    # Index-based time vector: n / sr for n = 0..length-1.  This avoids the
    # floating-point step accumulation of np.arange(stop, step=1/sr), which
    # could produce length +/- 1 samples and a slowly drifting phase.
    return np.cos(2 * np.pi * frequency * (np.arange(length) / sr) + phi)
"resource": ""
} |
def chirp(fmin, fmax, sr=22050, length=None, duration=None, linear=False, phi=None):
    """Returns a chirp signal that goes from frequency `fmin` to frequency `fmax`

    Parameters
    ----------
    fmin : float > 0
        initial frequency

    fmax : float > 0
        final frequency

    sr : number > 0
        desired sampling rate of the output signal

    length : int > 0
        desired number of samples in the output signal.
        When both `duration` and `length` are defined, `length` takes priority.

    duration : float > 0
        desired duration in seconds.
        When both `duration` and `length` are defined, `length` takes priority.

    linear : boolean
        - If `True`, use a linear sweep, i.e., frequency changes linearly with time
        - If `False`, use an exponential sweep.
        Default is `False`.

    phi : float or None
        phase offset, in radians.
        If unspecified, defaults to `-np.pi * 0.5`.

    Returns
    -------
    chirp_signal : np.ndarray [shape=(length,), dtype=float64]
        Synthesized chirp signal

    Raises
    ------
    ParameterError
        - If either `fmin` or `fmax` are not provided.
        - If neither `length` nor `duration` are provided.

    See Also
    --------
    scipy.signal.chirp
    """
    if fmin is None or fmax is None:
        raise ParameterError('both "fmin" and "fmax" must be provided')

    period = 1.0 / sr

    # Reconcile `length` and `duration`; `length` wins when both are given
    if length is None:
        if duration is None:
            raise ParameterError('either "length" or "duration" must be provided')
        # ceil matches np.arange's element count for a float stop value,
        # but guarantees an integer sample count.
        length = int(np.ceil(duration * sr))
    else:
        duration = period * length

    if phi is None:
        phi = -np.pi * 0.5

    method = 'linear' if linear else 'logarithmic'

    # Integer-indexed time vector: avoids floating-point step accumulation
    # in np.arange(duration, step=period), which could yield a signal whose
    # sample count differs from the requested `length` by one.
    return scipy.signal.chirp(
        np.arange(length) * period,
        fmin,
        duration,
        fmax,
        method=method,
        phi=phi / np.pi * 180,  # scipy.signal.chirp uses degrees for phase offset
    )
"resource": ""
} |
def __get_files(dir_name, extensions):
    '''Collect the set of files in one directory matching any given extension.'''
    # Resolve ~ and relative components before globbing
    target_dir = os.path.abspath(os.path.expanduser(dir_name))

    matched = set()
    for ext in extensions:
        pattern = os.path.join(target_dir, '*' + os.path.extsep + ext)
        matched.update(glob.glob(pattern))

    return matched
"resource": ""
} |
def stretch_demo(input_file, output_file, speed):
    '''Phase-vocoder time stretch demo function.

    :parameters:
      - input_file : str
          path to input audio
      - output_file : str
          path to save output (wav)
      - speed : float > 0
          speed up by this factor
    '''
    # Load the input audio at librosa's default sampling rate
    print('Loading ', input_file)
    audio, rate = librosa.load(input_file)

    # Stretch via the effects module (phase vocoder under the hood)
    print('Playing back at {:3.0f}% speed'.format(speed * 100))
    stretched = librosa.effects.time_stretch(audio, speed)

    # Write the stretched result out as a wav file
    print('Saving stretched audio to: ', output_file)
    librosa.output.write_wav(output_file, stretched, rate)
"resource": ""
} |
def process_arguments(args):
    '''Argparse function to get the program parameters'''
    parser = argparse.ArgumentParser(description='Time stretching example')

    # Positional arguments: input and output paths
    parser.add_argument('input_file', action='store',
                        help='path to the input file (wav, mp3, etc)')
    parser.add_argument('output_file', action='store',
                        help='path to the stretched output (wav)')

    # Optional speed-up factor, defaulting to double speed
    parser.add_argument('-s', '--speed', action='store', type=float,
                        default=2.0, required=False, help='speed')

    return vars(parser.parse_args(args))
"resource": ""
} |
def hpss_demo(input_file, output_harmonic, output_percussive):
    '''HPSS demo function.

    :parameters:
      - input_file : str
          path to input audio
      - output_harmonic : str
          path to save output harmonic (wav)
      - output_percussive : str
          path to save output percussive (wav)
    '''
    # Load the input audio at librosa's default sampling rate
    print('Loading ', input_file)
    audio, rate = librosa.load(input_file)

    # Harmonic/percussive source separation via the effects module
    print('Separating harmonics and percussives... ')
    harmonic, percussive = librosa.effects.hpss(audio)

    # Write each separated component to its own wav file
    print('Saving harmonic audio to: ', output_harmonic)
    librosa.output.write_wav(output_harmonic, harmonic, rate)

    print('Saving percussive audio to: ', output_percussive)
    librosa.output.write_wav(output_percussive, percussive, rate)
"resource": ""
} |
def beat_track(y=None, sr=22050, onset_envelope=None, hop_length=512,
               start_bpm=120.0, tightness=100, trim=True, bpm=None,
               units='frames'):
    r'''Dynamic programming beat tracker.

    Beats are detected in three stages, following the method of [1]_:
      1. Measure onset strength
      2. Estimate tempo from onset correlation
      3. Pick peaks in onset strength approximately consistent with the
         estimated tempo

    .. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
           Journal of New Music Research 36.1 (2007): 51-60.
           http://labrosa.ee.columbia.edu/projects/beattrack/

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        sampling rate of `y`

    onset_envelope : np.ndarray [shape=(n,)] or None
        (optional) pre-computed onset strength envelope

    hop_length : int > 0 [scalar]
        number of audio samples between successive `onset_envelope` values

    start_bpm : float > 0 [scalar]
        initial guess for the tempo estimator (in beats per minute)

    tightness : float [scalar]
        tightness of beat distribution around tempo

    trim : bool [scalar]
        trim leading/trailing beats with weak onsets

    bpm : float [scalar]
        (optional) If provided, use `bpm` as the tempo instead of
        estimating it from the onsets.

    units : {'frames', 'samples', 'time'}
        The units to encode detected beat events in.
        By default, 'frames' are used.

    Returns
    -------
    tempo : float [scalar, non-negative]
        estimated global tempo (in beats per minute)

    beats : np.ndarray [shape=(m,)]
        estimated beat event locations in the specified units
        (default is frame indices)

    .. note::
        If no onset strength could be detected, the tracker estimates
        0 BPM and returns an empty beat array.

    Raises
    ------
    ParameterError
        if neither `y` nor `onset_envelope` are provided,
        or if `units` is not one of 'frames', 'samples', or 'time'

    See Also
    --------
    librosa.onset.onset_strength
    '''
    # Derive the onset strength envelope from audio when not supplied
    if onset_envelope is None:
        if y is None:
            raise ParameterError('y or onset_envelope must be provided')

        onset_envelope = onset.onset_strength(y=y,
                                              sr=sr,
                                              hop_length=hop_length,
                                              aggregate=np.median)

    # A silent envelope has no beats: report 0 BPM and an empty array
    if not onset_envelope.any():
        return (0, np.array([], dtype=int))

    # Estimate the tempo unless the caller pinned one
    if bpm is None:
        bpm = tempo(onset_envelope=onset_envelope,
                    sr=sr,
                    hop_length=hop_length,
                    start_bpm=start_bpm)[0]

    # Run the dynamic-programming tracker at frame resolution
    beats = __beat_tracker(onset_envelope,
                           bpm,
                           float(sr) / hop_length,
                           tightness,
                           trim)

    # Convert frame indices into the requested output units
    if units == 'samples':
        beats = core.frames_to_samples(beats, hop_length=hop_length)
    elif units == 'time':
        beats = core.frames_to_time(beats, hop_length=hop_length, sr=sr)
    elif units != 'frames':
        raise ParameterError('Invalid unit type: {}'.format(units))

    return (bpm, beats)
"resource": ""
} |
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
    """Internal function that tracks beats in an onset strength envelope.

    Parameters
    ----------
    onset_envelope : np.ndarray [shape=(n,)]
        onset strength envelope

    bpm : float [scalar]
        tempo estimate (beats per minute)

    fft_res : float [scalar]
        frame rate of the envelope (sr / hop_length)

    tightness : float [scalar]
        how closely the beat sequence adheres to the tempo

    trim : bool [scalar]
        whether to trim leading/trailing beats with weak onsets

    Returns
    -------
    beats : np.ndarray [shape=(n,)]
        frame numbers of detected beat events
    """
    if bpm <= 0:
        raise ParameterError('bpm must be strictly positive')

    # Expected inter-beat interval, in frames
    period = round(60.0 * fft_res / bpm)

    # Smoothed, gain-normalized version of the onset envelope
    localscore = __beat_local_score(onset_envelope, period)

    # Dynamic program: best predecessor and cumulative score per frame
    backlink, cumscore = __beat_track_dp(localscore, period, tightness)

    # Backtrace from the final beat through the backlinks
    trace = [__last_beat(cumscore)]
    while backlink[trace[-1]] >= 0:
        trace.append(backlink[trace[-1]])

    # Restore chronological order as an integer frame-index array
    beats = np.array(trace[::-1], dtype=int)

    # Discard spurious weak beats at either end
    return __trim_beats(localscore, beats, trim)
"resource": ""
} |
def __beat_local_score(onset_envelope, period):
    '''Smooth the normalized onset envelope with a tempo-scaled Gaussian window.'''
    # Gaussian spanning one period on either side of center,
    # with standard deviation period / 32
    offsets = np.arange(-period, period + 1)
    window = np.exp(-0.5 * (offsets * 32.0 / period) ** 2)

    return scipy.signal.convolve(__normalize_onsets(onset_envelope),
                                 window,
                                 'same')
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.