_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q255900 | get_languages | validation | def get_languages() -> set:
"""Get supported languages."""
try:
languages = cache['languages']
| python | {
"resource": ""
} |
q255901 | get_directory | validation | def get_directory():
"""Get LanguageTool directory."""
try:
language_check_dir = cache['language_check_dir']
except KeyError:
def version_key(string):
return [int(e) if e.isdigit() else e
for e in re.split(r"(\d+)", string)]
def get_lt_dir(base_dir):
paths = [
path for path in
glob.glob(os.path.join(base_dir, 'LanguageTool*'))
if os.path.isdir(path)
]
return max(paths, key=version_key) if paths else None
base_dir = os.path.dirname(sys.argv[0])
language_check_dir = get_lt_dir(base_dir)
if not language_check_dir:
| python | {
"resource": ""
} |
q255902 | set_directory | validation | def set_directory(path=None):
"""Set LanguageTool directory."""
old_path = get_directory()
terminate_server()
cache.clear()
if path:
cache['language_check_dir'] = path
try:
| python | {
"resource": ""
} |
q255903 | LanguageTool.check | validation | def check(self, text: str, srctext=None) -> [Match]:
"""Match text against enabled rules."""
root | python | {
"resource": ""
} |
q255904 | get_newest_possible_languagetool_version | validation | def get_newest_possible_languagetool_version():
"""Return newest compatible version.
>>> version = get_newest_possible_languagetool_version()
>>> version in [JAVA_6_COMPATIBLE_VERSION,
... JAVA_7_COMPATIBLE_VERSION,
... LATEST_VERSION]
True
"""
java_path = find_executable('java')
if not java_path:
# Just ignore this and assume an old version of Java. It might not be
# found because of a PATHEXT-related issue
# (https://bugs.python.org/issue2200).
| python | {
"resource": ""
} |
q255905 | get_common_prefix | validation | def get_common_prefix(z):
"""Get common directory in a zip file if any."""
name_list = z.namelist()
if name_list and | python | {
"resource": ""
} |
q255906 | _ProactorEventLoop._process_events | validation | def _process_events(self, events):
"""Process events from proactor."""
for f, callback, transferred, key, ov in events:
try:
self._logger.debug('Invoking event callback {}'.format(callback))
value = callback(transferred, key, ov)
| python | {
"resource": ""
} |
q255907 | asyncClose | validation | def asyncClose(fn):
"""Allow to run async code before application is closed."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
f = asyncio.ensure_future(fn(*args, **kwargs))
| python | {
"resource": ""
} |
q255908 | asyncSlot | validation | def asyncSlot(*args):
"""Make a Qt async slot run on asyncio loop."""
def outer_decorator(fn):
@Slot(*args)
| python | {
"resource": ""
} |
q255909 | with_logger | validation | def with_logger(cls):
"""Class decorator to add a logger to a class."""
attr_name = '_logger'
cls_name = cls.__qualname__
module | python | {
"resource": ""
} |
q255910 | _SelectorEventLoop._process_event | validation | def _process_event(self, key, mask):
"""Selector has delivered us an event."""
self._logger.debug('Processing event with key {} and mask {}'.format(key, mask))
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self.remove_reader(fileobj)
else:
self._logger.debug('Invoking reader callback: {}'.format(reader))
reader._run()
| python | {
"resource": ""
} |
q255911 | MibCompiler.addSources | validation | def addSources(self, *sources):
"""Add more ASN.1 MIB source repositories.
MibCompiler.compile will invoke each of configured source objects
in order of their addition asking each to fetch MIB module specified
by name.
Args:
sources: reader object(s)
Returns:
reference to itself (can be used for call chaining)
"""
| python | {
"resource": ""
} |
q255912 | MibCompiler.addSearchers | validation | def addSearchers(self, *searchers):
"""Add more transformed MIBs repositories.
MibCompiler.compile will invoke each of configured searcher objects
in order of their addition asking each if already transformed MIB
module already exists and is more recent than specified.
Args:
searchers: searcher object(s)
Returns:
reference to itself (can be used for call chaining)
"""
| python | {
"resource": ""
} |
q255913 | MibCompiler.addBorrowers | validation | def addBorrowers(self, *borrowers):
"""Add more transformed MIBs repositories to borrow MIBs from.
Whenever MibCompiler.compile encounters MIB module which neither of
the *searchers* can find or fetched ASN.1 MIB module can not be
parsed (due to syntax errors), these *borrowers* objects will be
invoked in order of their addition asking each if already transformed
MIB can be fetched (borrowed).
Args:
borrowers: borrower object(s)
Returns:
| python | {
"resource": ""
} |
q255914 | AIC | validation | def AIC(N, rho, k):
r"""Akaike Information Criterion
:param rho: rho at order k
:param N: sample size
:param k: AR order.
If k is the AR order and N the size of the sample, then Akaike criterion is
.. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}
::
AIC(64, [0.5,0.3,0.2], [1,2,3])
| python | {
"resource": ""
} |
q255915 | AICc | validation | def AICc(N, rho, k, norm=True):
r"""corrected Akaike information criterion
.. math:: AICc(k) = log(\rho_k) + 2 \frac{k+1}{N-k-2}
| python | {
"resource": ""
} |
q255916 | KIC | validation | def KIC(N, rho, k):
r"""Kullback information criterion
.. math:: KIC(k) = log(\rho_k) + 3 \frac{k+1}{N}
:validation: double checked versus octave.
"""
| python | {
"resource": ""
} |
q255917 | AKICc | validation | def AKICc(N, rho, k):
r"""approximate corrected Kullback information
.. math:: AKICc(k) = log(rho_k) + \frac{p}{N*(N-k)} + (3-\frac{k+2}{N})*\frac{k+1}{N-k-2}
"""
from numpy import log, array
| python | {
"resource": ""
} |
q255918 | FPE | validation | def FPE(N,rho, k=None):
r"""Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave.
"""
| python | {
"resource": ""
} |
q255919 | MDL | validation | def MDL(N, rho, k):
r"""Minimum Description Length
.. math:: MDL(k) = N log \rho_k + p \log N
:validation: results
"""
from | python | {
"resource": ""
} |
q255920 | aic_eigen | validation | def aic_eigen(s, N):
r"""AIC order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
Given :math:`n` sorted eigen values :math:`\lambda_i` with
:math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)
is:
.. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)
where the arithmetic sum :math:`a(k)` is:
.. math:: a(k) = \sum_{i=k+1}^{n}\lambda_i
and the geometric sum :math:`g(k)` is:
.. math:: g(k) = \prod_{i=k+1}^{n} \lambda_i^{-(n-k)}
The number of relevant sinusoids in the signal subspace is determined by
selecting the minimum of `AIC`.
.. seealso:: :func:`~spectrum.eigenfreq.eigen`
.. todo:: define precisely the input parameter N. Should be the input
| python | {
"resource": ""
} |
q255921 | mdl_eigen | validation | def mdl_eigen(s, N):
r"""MDL order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
.. math:: MDL(k) = (n-k)N \ln \frac{g(k)}{a(k)} + 0.5k(2n-k) log(N) | python | {
"resource": ""
} |
q255922 | generate_gallery_rst | validation | def generate_gallery_rst(app):
"""Generate the Main examples gallery reStructuredText
Start the sphinx-gallery configuration and recursively scan the examples
directories in order to populate the examples gallery
"""
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
gallery_conf.update(app.config.sphinx_gallery_conf)
gallery_conf.update(plot_gallery=plot_gallery)
gallery_conf.update(abort_on_example_error=app.builder.config.abort_on_example_error)
# this assures I can call the config in other places
app.config.sphinx_gallery_conf = gallery_conf
app.config.html_static_path.append(glr_path_static())
clean_gallery_out(app.builder.outdir)
examples_dirs = gallery_conf['examples_dirs']
gallery_dirs = gallery_conf['gallery_dirs']
if not isinstance(examples_dirs, list):
examples_dirs = [examples_dirs]
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
mod_examples_dir = os.path.relpath(gallery_conf['mod_example_dir'],
app.builder.srcdir)
seen_backrefs = set()
for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):
examples_dir = os.path.relpath(examples_dir,
app.builder.srcdir)
gallery_dir = os.path.relpath(gallery_dir,
| python | {
"resource": ""
} |
q255923 | setup | validation | def setup(app):
"""Setup sphinx-gallery sphinx extension"""
app.add_config_value('plot_gallery', True, 'html')
app.add_config_value('abort_on_example_error', False, 'html')
app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html') | python | {
"resource": ""
} |
q255924 | CORRELATION | validation | def CORRELATION(x, y=None, maxlags=None, norm='unbiased'):
r"""Correlation function
This function should give the same results as :func:`xcorr` but it
returns the positive lags only. Moreover the algorithm does not use
FFT as compared to other algorithms.
:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
autocorrelation.
:param int maxlags: compute cross correlation between [0:maxlags]
when maxlags is not specified, the range of lags is [0:maxlags].
:param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
* *biased* correlation=raw/N,
* *unbiased* correlation=raw/(N-`|lag|`)
* *coeff* correlation=raw/(rms(x).rms(y))/N
* None correlation=raw
:return:
* a numpy.array correlation sequence, r[1,N]
* a float for the zero-lag correlation, r[0]
The *unbiased* correlation has the form:
.. math::
\hat{r}_{xx} = \frac{1}{N-m}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T
The *biased* correlation differs by the front factor only:
.. math::
\check{r}_{xx} = \frac{1}{N}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T
with :math:`0\leq m\leq N-1`.
.. doctest::
>>> from spectrum import CORRELATION
>>> x = [1,2,3,4,5]
>>> res = CORRELATION(x,x, maxlags=0, norm='biased')
>>> res[0]
11.0
.. note:: this function should be replaced by :func:`xcorr`.
.. seealso:: :func:`xcorr`
"""
assert norm in ['unbiased','biased', 'coeff', None]
#transform lag into list if it is an integer
x = np.array(x)
if y is None:
y = x
else:
y = np.array(y)
# N is the max of x and y
N = max(len(x), len(y))
if len(x) < N:
x = y.copy()
x.resize(N)
if len(y) < N:
y = y.copy()
y.resize(N)
#default lag is N-1
if maxlags is None:
maxlags = N - 1
assert maxlags < N, 'lag must be less than len(x)'
realdata = np.isrealobj(x) and np.isrealobj(y)
#create an autocorrelation array with same length as lag
if realdata | python | {
"resource": ""
} |
q255925 | xcorr | validation | def xcorr(x, y=None, maxlags=None, norm='biased'):
"""Cross-correlation using numpy.correlate
Estimates the cross-correlation (and autocorrelation) sequence of a random
process of length N. By default, there is no normalisation and the output
sequence of the cross-correlation has a length 2*N+1.
:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
autocorrelation.
:param int maxlags: compute cross correlation between [-maxlags:maxlags]
when maxlags is not specified, the range of lags is [-N+1:N-1].
:param str option: normalisation in ['biased', 'unbiased', None, 'coeff']
The true cross-correlation sequence is
.. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])
However, in practice, only a finite segment of one realization of the
infinite-length random process is available.
The correlation is estimated using numpy.correlate(x,y,'full').
Normalisation is handled by this function using the following cases:
* 'biased': Biased estimate of the cross-correlation function
* 'unbiased': Unbiased estimate of the cross-correlation function
* 'coeff': Normalizes the sequence so the autocorrelations at zero
lag is 1.0.
:return:
* a numpy.array containing the cross-correlation sequence (length 2*N-1)
* lags vector
.. note:: If x and y are not the same length, the shorter vector is
zero-padded to the length of the longer vector.
.. rubric:: Examples
| python | {
"resource": ""
} |
q255926 | MINEIGVAL | validation | def MINEIGVAL(T0, T, TOL):
"""Finds the minimum eigenvalue of a Hermitian Toeplitz matrix
The classical power method is used together with a fast Toeplitz
equation solution routine. The eigenvector is normalized to unit length.
:param T0: Scalar corresponding to real matrix element t(0)
:param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix
:param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.
:return:
* EVAL - Real scalar denoting the minimum eigenvalue of matrix
* EVEC - Array of M complex eigenvector elements associated
.. note::
* External array T must be dimensioned >= M
* array EVEC must be >= M+1
* Internal array E must be dimensioned >= M+1 .
* **dependencies**
* :meth:`spectrum.toeplitz.HERMTOEP`
"""
M = len(T)
eigval = 10
eigvalold = 1
eigvec = numpy.zeros(M+1, dtype=complex)
for k in range(0,M+1):
eigvec[k] = 1+0j
it=0
#print 'initialisation',T0, T, eigval, eigvec
maxit = 15 | python | {
"resource": ""
} |
q255927 | morlet | validation | def morlet(lb, ub, n):
r"""Generate the Morlet waveform
The Morlet waveform is defined as follows:
.. math:: w[x] = \cos{5x} \exp^{-x^2/2}
:param lb: lower bound
:param ub: upper bound
:param int n: waveform data samples
| python | {
"resource": ""
} |
q255928 | chirp | validation | def chirp(t, f0=0., t1=1., f1=100., form='linear', phase=0):
r"""Evaluate a chirp signal at time t.
A chirp signal is a frequency swept cosine wave.
.. math:: a = \pi (f_1 - f_0) / t_1
.. math:: b = 2 \pi f_0
.. math:: y = \cos\left( \pi\frac{f_1-f_0}{t_1} t^2 + 2\pi f_0 t + \rm{phase} \right)
:param array t: times at which to evaluate the chirp signal
:param float f0: frequency at time t=0 (Hz)
:param float t1: time t1
:param float f1: frequency at time t=t1 (Hz)
:param str form: shape of frequency sweep in ['linear', 'quadratic', 'logarithmic']
:param float phase: phase shift at t=0
The parameter **form** can be:
* 'linear' :math:`f(t) = (f_1-f_0)(t/t_1) + f_0`
* 'quadratic' :math:`f(t) = (f_1-f_0)(t/t_1)^2 + f_0`
* 'logarithmic' :math:`f(t) = (f_1-f_0)^{(t/t_1)} + f_0`
Example:
.. plot::
:include-source:
:width: 80%
from spectrum import chirp
from pylab import linspace, plot
t = linspace(0, 1, 1000)
y = chirp(t, form='linear')
plot(y)
y = chirp(t, form='quadratic')
plot(y, 'r')
| python | {
"resource": ""
} |
q255929 | mexican | validation | def mexican(lb, ub, n):
r"""Generate the mexican hat wavelet
The Mexican wavelet is:
.. math:: w[x] = \cos{5x} \exp^{-x^2/2}
:param lb: lower bound
:param ub: upper bound
:param int n: waveform data samples
:return: the waveform
.. plot::
:include-source:
:width: 80%
from spectrum import mexican
from pylab import plot
plot(mexican(0, 10, 100))
"""
| python | {
"resource": ""
} |
q255930 | ac2poly | validation | def ac2poly(data):
"""Convert autocorrelation sequence to prediction polynomial
:param array data: input data (list or numpy.array)
:return:
* AR parameters
* noise variance
This is an alias to::
a, e, c = LEVINSON(data)
| python | {
"resource": ""
} |
q255931 | rc2poly | validation | def rc2poly(kr, r0=None):
"""convert reflection coefficients to prediction filter polynomial
:param k: reflection coefficients
"""
# Initialize the recursion
from .levinson import levup
p = len(kr) #% p is the order of the prediction polynomial.
a = numpy.array([1, kr[0]]) #% a is a true polynomial.
e = numpy.zeros(len(kr))
if r0 is None:
e0 = 0
else:
e0 = r0
e[0] = e0 * | python | {
"resource": ""
} |
q255932 | rc2ac | validation | def rc2ac(k, R0):
"""Convert reflection coefficients to autocorrelation sequence.
:param k: reflection coefficients
:param R0: zero-lag autocorrelation
:returns: the autocorrelation sequence
.. seealso:: :func:`ac2rc`, :func:`poly2rc`, :func:`ac2poly`, | python | {
"resource": ""
} |
q255933 | rc2is | validation | def rc2is(k):
"""Convert reflection coefficients to inverse sine parameters.
:param k: reflection coefficients
:return: inverse sine parameters
.. seealso:: :func:`is2rc`, :func:`rc2poly`, :func:`rc2acC`, :func:`rc2lar`.
Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, "Discrete-Time
Processing of Speech Signals", Prentice Hall, Section 7.4.5.
"""
assert numpy.isrealobj(k), 'Inverse sine parameters not | python | {
"resource": ""
} |
q255934 | rc2lar | validation | def rc2lar(k):
"""Convert reflection coefficients to log area ratios.
:param k: reflection coefficients
:return: inverse sine parameters
The log area ratio is defined by G = log((1+k)/(1-k)) , where the K
parameter is the reflection coefficient.
.. seealso:: :func:`lar2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2ic`.
:References:
[1] J. Makhoul, "Linear Prediction: A Tutorial Review," Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.
"""
| python | {
"resource": ""
} |
q255935 | lar2rc | validation | def lar2rc(g):
"""Convert log area ratios to reflection coefficients.
:param g: log area ratios
:returns: the reflection coefficients
.. seealso: :func:`rc2lar`, :func:`poly2rc`, :func:`ac2rc`, :func:`is2rc`.
:References:
[1] J. Makhoul, "Linear Prediction: A Tutorial Review," Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.
"""
| python | {
"resource": ""
} |
q255936 | lsf2poly | validation | def lsf2poly(lsf):
"""Convert line spectral frequencies to prediction filter coefficients
returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.
.. doctest::
>>> from spectrum import lsf2poly
>>> lsf = [0.7842 , 1.5605 , 1.8776 , 1.8984, 2.3593]
>>> a = lsf2poly(lsf)
# array([ 1.00000000e+00, 6.14837835e-01, 9.89884967e-01,
# 9.31594056e-05, 3.13713832e-03, -8.12002261e-03 ])
.. seealso:: poly2lsf, rc2poly, ac2poly, rc2is
"""
# Reference: A.M. Kondoz, "Digital Speech: Coding for Low Bit Rate Communications
# Systems" John Wiley & Sons 1994 ,Chapter 4
# Line spectral frequencies must be real.
lsf = numpy.array(lsf)
if max(lsf) > numpy.pi or min(lsf) < 0:
raise ValueError('Line spectral frequencies must be between 0 and pi.')
p = len(lsf) # model order
# Form zeros using the LSFs and unit amplitudes
z = numpy.exp(1.j * lsf)
# Separate the zeros to those belonging to P and Q
rQ = z[0::2]
rP = z[1::2]
# Include the conjugates as well
rQ = numpy.concatenate((rQ, rQ.conjugate()))
rP = numpy.concatenate((rP, rP.conjugate()))
# Form the polynomials P and Q, note that these should be real
Q | python | {
"resource": ""
} |
q255937 | poly2lsf | validation | def poly2lsf(a):
"""Prediction polynomial to line spectral frequencies.
converts the prediction polynomial specified by A,
into the corresponding line spectral frequencies, LSF.
normalizes the prediction polynomial by A(1).
.. doctest::
>>> from spectrum import poly2lsf
>>> a = [1.0000, 0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]
>>> lsf = poly2lsf(a)
>>> lsf = array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])
.. seealso:: lsf2poly, poly2rc, poly2qc, rc2is
"""
#Line spectral frequencies are not defined for complex polynomials.
# Normalize the polynomial
a = numpy.array(a)
if a[0] != 1:
a/=a[0]
if max(numpy.abs(numpy.roots(a))) >= 1.0:
error('The polynomial must have all roots inside of the unit circle.');
# Form the sum and differnce filters
p = len(a)-1 # The leading one in the polynomial is not used
a1 = numpy.concatenate((a, numpy.array([0])))
a2 = a1[-1::-1]
P1 = a1 - a2 # Difference filter
Q1 = a1 + a2 # Sum Filter
# | python | {
"resource": ""
} |
q255938 | _swapsides | validation | def _swapsides(data):
"""todo is it really useful ?
Swap sides
.. doctest::
>>> from spectrum import swapsides
>>> x = [-2, -1, 1, 2]
>>> swapsides(x)
array([ 2, | python | {
"resource": ""
} |
q255939 | twosided_2_onesided | validation | def twosided_2_onesided(data):
"""Convert a one-sided PSD to a twosided PSD
In order to keep the power in the onesided PSD the same
as in the twosided version, the onesided values are twice
as much as in the input data (except for the zero-lag value).
::
>>> twosided_2_onesided([10, 2,3,3,2,8])
| python | {
"resource": ""
} |
q255940 | onesided_2_twosided | validation | def onesided_2_twosided(data):
"""Convert a two-sided PSD to a one-sided PSD
In order to keep the power in the twosided PSD the same
as in the onesided version, the twosided values are 2 times
lower than the input data (except for the zero-lag and N-lag
values).
::
>>> twosided_2_onesided([10, | python | {
"resource": ""
} |
q255941 | twosided_2_centerdc | validation | def twosided_2_centerdc(data):
"""Convert a two-sided PSD to a center-dc PSD"""
N = len(data)
# could us int() or // in python 3
newpsd | python | {
"resource": ""
} |
q255942 | centerdc_2_twosided | validation | def centerdc_2_twosided(data):
"""Convert a center-dc PSD to a twosided PSD"""
N = len(data)
| python | {
"resource": ""
} |
q255943 | _twosided_zerolag | validation | def _twosided_zerolag(data, zerolag):
"""Build a symmetric vector out of stricly positive lag vector and zero-lag
.. doctest::
>>> data = [3,2,1]
>>> zerolag = 4
>>> twosided_zerolag(data, zerolag)
array([1, 2, 3, 4, 3, | python | {
"resource": ""
} |
q255944 | data_cosine | validation | def data_cosine(N=1024, A=0.1, sampling=1024., freq=200):
r"""Return a noisy cosine at a given frequency.
:param N: the final data size
:param A: the strength of the noise
:param float sampling: sampling frequency of the input :attr:`data`.
:param float freq: the frequency :math:`f_0` of the cosine.
.. math:: x[t] = cos(2\pi t * f_0) + A w[t]
where | python | {
"resource": ""
} |
q255945 | data_two_freqs | validation | def data_two_freqs(N=200):
"""A simple test example with two close frequencies
"""
nn = arange(N)
xx | python | {
"resource": ""
} |
q255946 | spectrum_data | validation | def spectrum_data(filename):
"""Simple utilities to retrieve data sets from """
import os
import pkg_resources
info = pkg_resources.get_distribution('spectrum')
location = info.location
# first try develop mode
share = | python | {
"resource": ""
} |
q255947 | TimeSeries.plot | validation | def plot(self, **kargs):
"""Plot the data set, using the sampling information to set the x-axis
correctly."""
from pylab import plot, linspace, xlabel, ylabel, grid
time = linspace(1*self.dt, self.N*self.dt, | python | {
"resource": ""
} |
q255948 | readwav | validation | def readwav(filename):
"""Read a WAV file and returns the data and sample rate
::
from spectrum.io import readwav
readwav()
"""
| python | {
"resource": ""
} |
q255949 | _autocov | validation | def _autocov(s, **kwargs):
"""Returns the autocovariance of signal s at all lags.
Adheres to the definition
sxx[k] = E{S[n]S[n+k]} = cov{S[n],S[n+k]}
where E{} is the expectation operator, and S is a zero mean process
"""
# only remove the mean once, if needed
debias = kwargs.pop('debias', True)
| python | {
"resource": ""
} |
q255950 | _remove_bias | validation | def _remove_bias(x, axis):
"Subtracts an estimate of the mean from signal x at axis"
padded_slice = [slice(d) for d in x.shape]
| python | {
"resource": ""
} |
q255951 | get_docstring_and_rest | validation | def get_docstring_and_rest(filename):
"""Separate `filename` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Returns
-------
docstring: str
docstring of `filename`
rest: str
`filename` content without the docstring
"""
with open(filename) as f:
content = f.read()
node = ast.parse(content)
if not isinstance(node, ast.Module):
raise TypeError("This function only supports modules. "
"You provided {0}".format(node.__class__.__name__))
if node.body and isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Str):
docstring_node = node.body[0]
docstring = docstring_node.value.s
| python | {
"resource": ""
} |
q255952 | split_code_and_text_blocks | validation | def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, content)
List where each element is a tuple with the label ('text' or 'code'),
and content string of block.
"""
docstring, rest_of_content = get_docstring_and_rest(source_file)
blocks = [('text', docstring)]
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
match_start_pos, match_end_pos = match.span()
code_block_content = rest_of_content[pos_so_far:match_start_pos]
text_content = match.group('text_content')
sub_pat = re.compile('^#', flags=re.M)
| python | {
"resource": ""
} |
q255953 | codestr2rst | validation | def codestr2rst(codestr, lang='python'):
"""Return reStructuredText code block from code string"""
code_directive = "\n.. code-block:: {0}\n\n".format(lang) | python | {
"resource": ""
} |
q255954 | get_md5sum | validation | def get_md5sum(src_file):
"""Returns md5sum of file"""
with open(src_file, 'r') as src_data:
src_content = src_data.read()
# data needs to be encoded in python3 before hashing
if sys.version_info[0] == 3:
| python | {
"resource": ""
} |
q255955 | check_md5sum_change | validation | def check_md5sum_change(src_file):
"""Returns True if src_file has a different md5sum"""
src_md5 = get_md5sum(src_file)
src_md5_file = src_file + '.md5'
src_file_changed = True
if os.path.exists(src_md5_file):
with open(src_md5_file, 'r') as file_checksum:
ref_md5 = file_checksum.read()
| python | {
"resource": ""
} |
q255956 | _plots_are_current | validation | def _plots_are_current(src_file, image_file):
"""Test existence of image file and no change in md5sum of
example"""
| python | {
"resource": ""
} |
q255957 | save_figures | validation | def save_figures(image_path, fig_count, gallery_conf):
"""Save all open matplotlib figures of the example code-block
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number)
fig_count : int
Previous figure number count. Figure number add from this number
Returns
-------
list of strings containing the full path to each figure
"""
figure_list = []
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
current_fig = image_path.format(fig_count + fig_mngr.num)
fig.savefig(current_fig, **kwargs)
figure_list.append(current_fig)
if gallery_conf.get('find_mayavi_figures', False):
from mayavi import mlab
e = | python | {
"resource": ""
} |
q255958 | scale_image | validation | def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, | python | {
"resource": ""
} |
q255959 | save_thumbnail | validation | def save_thumbnail(image_path, base_image_name, gallery_conf):
"""Save the thumbnail image"""
first_image_file = image_path.format(1)
thumb_dir = os.path.join(os.path.dirname(first_image_file), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.png' % base_image_name)
if os.path.exists(first_image_file):
scale_image(first_image_file, thumb_file, 400, 280)
elif | python | {
"resource": ""
} |
q255960 | execute_script | validation | def execute_script(code_block, example_globals, image_path, fig_count,
src_file, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
# We need to execute the code
print('plotting code blocks in %s' % src_file)
plt.close('all')
cwd = os.getcwd()
# Redirect output to stdout and
orig_stdout = sys.stdout
try:
# First cd in the original example dir, so that any file
# created by the example get created in this directory
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4))
os.chdir(cwd)
figure_list = save_figures(image_path, fig_count, gallery_conf)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
image_list = ""
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
elif len(figure_list) > 1:
image_list = HLIST_HEADER
for figure_name in figure_list:
| python | {
"resource": ""
} |
q255961 | _arburg2 | validation | def _arburg2(X, order):
"""This version is 10 times faster than arburg, but the output rho is not correct.
returns [1 a0,a1, an-1]
"""
x = np.array(X)
N = len(x)
if order <= 0.:
raise ValueError("order must be > 0")
# Initialisation
# ------ rho, den
rho = sum(abs(x)**2.) / N # Eq 8.21 [Marple]_
den = rho * 2. * N
# ------ backward and forward errors
ef = np.zeros(N, dtype=complex)
eb = np.zeros(N, dtype=complex)
for j in range(0, N): #eq 8.11
ef[j] = x[j]
eb[j] = x[j]
# AR order to be stored
a = np.zeros(1, dtype=complex)
a[0] = 1
# ---- rflection coeff to be stored
ref = np.zeros(order, dtype=complex)
temp = 1.
E = np.zeros(order+1)
E[0] = rho
for m in range(0, order):
#print m
# Calculate the next order reflection (parcor) coefficient
efp = ef[1:]
ebp = eb[0:-1]
#print efp, ebp
num = -2.* np.dot(ebp.conj().transpose(), efp)
den = np.dot(efp.conj().transpose(), efp)
| python | {
"resource": ""
} |
q255962 | _numpy_cholesky | validation | def _numpy_cholesky(A, B):
"""Solve Ax=B using numpy cholesky solver
A = LU
in the case where A is square and Hermitian, A = L.L* where L* is
transpoed and conjugate matrix
Ly = b
where
Ux=y
so x = U^{-1} y
where U = L*
| python | {
"resource": ""
} |
q255963 | _numpy_solver | validation | def _numpy_solver(A, B):
"""This function solve Ax=B directly without taking care of the input
| python | {
"resource": ""
} |
q255964 | CHOLESKY | validation | def CHOLESKY(A, B, method='scipy'):
"""Solve linear system `AX=B` using CHOLESKY method.
:param A: an input Hermitian matrix
:param B: an array
:param str method: a choice of method in [numpy, scipy, numpy_solver]
* `numpy_solver` relies entirely on numpy.solver (no cholesky decomposition)
* `numpy` relies on the numpy.linalg.cholesky for the decomposition and
numpy.linalg.solve for the inversion.
* `scipy` uses scipy.linalg.cholesky for the decomposition and
scipy.linalg.cho_solve for the inversion.
.. rubric:: Description
When a matrix is square and Hermitian (symmetric with lower part being
the complex conjugate of the upper one), then the usual triangular
factorization takes on the special form:
.. math:: A = R R^H
where :math:`R` is a lower triangular matrix with nonzero real principal
diagonal element. The input matrix can be made of complex data. Then, the
inversion to find :math:`x` is made as follows:
.. math:: Ry = B
and
.. math:: Rx = y
.. doctest::
>>> import numpy
>>> from spectrum import CHOLESKY
>>> A = numpy.array([[ 2.0+0.j , 0.5-0.5j, -0.2+0.1j],
... | python | {
"resource": ""
} |
q255965 | speriodogram | validation | def speriodogram(x, NFFT=None, detrend=True, sampling=1.,
scale_by_freq=True, window='hamming', axis=0):
"""Simple periodogram, but matrices accepted.
:param x: an array or matrix of data samples.
:param NFFT: length of the data before FFT is computed (zero padding)
:param bool detrend: detrend the data before co,puteing the FFT
:param float sampling: sampling frequency of the input :attr:`data`.
:param scale_by_freq:
:param str window:
:return: 2-sided PSD if complex data, 1-sided if real.
if a matrix is provided (using numpy.matrix), then a periodogram
is computed for each row. The returned matrix has the same shape as the input
matrix.
The mean of the input data is also removed from the data before computing
the psd.
.. plot::
:width: 80%
:include-source:
from pylab import grid, semilogy
from spectrum import data_cosine, speriodogram
data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
semilogy(speriodogram(data, detrend=False, sampling=1024), marker='o')
grid(True)
.. plot::
:width: 80%
:include-source:
| python | {
"resource": ""
} |
q255966 | WelchPeriodogram | validation | def WelchPeriodogram(data, NFFT=None, sampling=1., **kargs):
r"""Simple periodogram wrapper of numpy.psd function.
:param A: the input data
:param int NFFT: total length of the final data sets (padded
with zero if needed; default is 4096)
:param str window:
:Technical documentation:
When we calculate the periodogram of a set of data we get an estimation
of the spectral density. In fact as we use a Fourier transform and a
truncated segments the spectrum is the convolution of the data with a
rectangular window which Fourier transform is
.. math::
W(s)= \frac{1}{N^2} \left[ \frac{\sin(\pi s)}{\sin(\pi s/N)} \right]^2
Thus oscillations and sidelobes appear around the main frequency. One aim of the tapering is to reduce these effects. We multiply the data by a window whose sidelobes are much smaller than the main lobe. A classical window is the Hanning window, but other windows are available. However, we must take into account this energy and divide the spectrum by the energy of the taper used. Thus the periodogram becomes:
.. math::
D_k \equiv \sum_{j=0}^{N-1}c_jw_j \; e^{2\pi ijk/N} \qquad k=0,...,N-1 | python | {
"resource": ""
} |
q255967 | DaniellPeriodogram | validation | def DaniellPeriodogram(data, P, NFFT=None, detrend='mean', sampling=1.,
scale_by_freq=True, window='hamming'):
r"""Return Daniell's periodogram.
To reduce fast fluctuations of the spectrum, one idea proposed by Daniell
is to average each value with the points in its neighborhood. It acts like
a low-pass filter.
.. math:: \hat{P}_D[f_i]= \frac{1}{2P+1} \sum_{n=i-P}^{i+P} \tilde{P}_{xx}[f_n]
where P is the number of points to average.
Daniell's periodogram is the convolution of the spectrum with a low-pass filter:
.. math:: \hat{P}_D(f)= \hat{P}_{xx}(f)*H(f)
Example::
>>> DaniellPeriodogram(data, 8)
if N/P is not an integer, the final values of the original PSD are not used.
using DaniellPeriodogram(data, 0) should give the original PSD.
"""
psd = speriodogram(data, NFFT=NFFT, detrend=detrend, sampling=sampling,
scale_by_freq=scale_by_freq, window=window)
if len(psd) % 2 == 1:
datatype = 'real'
else:
datatype = 'complex'
N = len(psd)
_slice = 2 * P + 1
if datatype == 'real': #must get odd value
newN = np.ceil(psd.size/float(_slice))
if newN % 2 == 0:
newN = psd.size/_slice
else:
| python | {
"resource": ""
} |
q255968 | Range.centerdc_gen | validation | def centerdc_gen(self):
"""Return the centered frequency range as a generator.
::
>>> print(list(Range(8).centerdc_gen()))
[-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, | python | {
"resource": ""
} |
q255969 | Range.onesided_gen | validation | def onesided_gen(self):
"""Return the one-sided frequency range as a generator.
If :attr:`N` is even, the length is N/2 + 1.
If :attr:`N` is odd, the length is (N+1)/2.
::
>>> print(list(Range(8).onesided()))
| python | {
"resource": ""
} |
q255970 | Spectrum.plot | validation | def plot(self, filename=None, norm=False, ylim=None,
sides=None, **kargs):
"""a simple plotting routine to plot the PSD versus frequency.
:param str filename: save the figure into a file
:param norm: False by default. If True, the PSD is normalised.
:param ylim: readjust the y range.
:param sides: if not provided, :attr:`sides` is used. See :attr:`sides`
for details.
:param kargs: any optional argument accepted by :func:`pylab.plot`.
.. plot::
:width: 80%
:include-source:
from spectrum import *
p = Periodogram(marple_data)
p.plot(norm=True, marker='o')
"""
import pylab
from pylab import ylim as plt_ylim
#First, check that psd attribute is up-to-date
# just to get the PSD to be recomputed if needed
_ = self.psd
# check that the input sides parameter is correct if provided
if sides is not None:
if sides not in self._sides_choices:
raise errors.SpectrumChoiceError(sides, self._sides_choices)
# if sides is provided but identical to the current psd, nothing to do.
# if sides not provided, let us use self.sides
if sides is None or sides == self.sides:
frequencies = self.frequencies()
psd = self.psd
sides = self.sides
elif sides is not None:
# if sides argument is different from the attribute, we need to
# create a new PSD/Freq ; indeed we do not want to change the
# attribute itself
# if data is complex, one-sided is wrong in any case.
if self.datatype == 'complex':
if sides == 'onesided':
raise ValueError("sides cannot be one-sided with complex data")
logging.debug("sides is different from the one provided. Converting PSD")
frequencies = self.frequencies(sides=sides)
psd = self.get_converted_psd(sides)
| python | {
"resource": ""
} |
q255971 | Spectrum.power | validation | def power(self):
r"""Return the power contained in the PSD
if scale_by_freq is False, the power is:
.. math:: P = N \sum_{k=1}^{N} P_{xx}(k)
else, it is
.. math:: P = \sum_{k=1}^{N} P_{xx}(k) \frac{df}{2\pi}
.. todo:: check these equations
| python | {
"resource": ""
} |
q255972 | ipy_notebook_skeleton | validation | def ipy_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
| python | {
"resource": ""
} |
q255973 | rst2md | validation | def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the IPython notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
| python | {
"resource": ""
} |
q255974 | Notebook.add_markdown_cell | validation | def add_markdown_cell(self, text):
"""Add a markdown cell to the notebook
Parameters
----------
code : str
Cell content
"""
markdown_cell = {
| python | {
"resource": ""
} |
q255975 | Notebook.save_file | validation | def save_file(self):
"""Saves the notebook to a file"""
with open(self.write_file, 'w') as out_nb:
| python | {
"resource": ""
} |
q255976 | arma2psd | validation | def arma2psd(A=None, B=None, rho=1., T=1., NFFT=4096, sides='default',
norm=False):
r"""Computes power spectral density given ARMA values.
This function computes the power spectral density values
given the ARMA parameters of an ARMA model. It assumes that
the driving sequence is a white noise process of zero mean and
variance :math:`\rho_w`. The sampling frequency and noise variance are
used to scale the PSD output, which length is set by the user with the
`NFFT` parameter.
:param array A: Array of AR parameters (complex or real)
:param array B: Array of MA parameters (complex or real)
:param float rho: White noise variance to scale the returned PSD
:param float T: Sample interval in seconds to scale the returned PSD
:param int NFFT: Final size of the PSD
:param str sides: Default PSD is two-sided, but sides can be set to centerdc.
.. warning:: By convention, the AR or MA arrays does not contain the
A0=1 value.
If :attr:`B` is None, the model is a pure AR model. If :attr:`A` is None,
the model is a pure MA model.
:return: two-sided PSD
.. rubric:: Details:
AR case: the power spectral density is:
.. math:: P_{ARMA}(f) = T \rho_w \left|\frac{B(f)}{A(f)}\right|^2
where:
.. math:: A(f) = 1 + \sum_{k=1}^q b(k) e^{-j2\pi fkT}
.. math:: B(f) = 1 + \sum_{k=1}^p a(k) e^{-j2\pi fkT}
.. rubric:: **Example:**
.. plot::
:width: 80%
:include-source:
import spectrum.arma
from pylab import plot, log10, legend
plot(10*log10(spectrum.arma.arma2psd([1,0.5],[0.5,0.5])), label='ARMA(2,2)')
plot(10*log10(spectrum.arma.arma2psd([1,0.5],None)), label='AR(2)')
plot(10*log10(spectrum.arma.arma2psd(None,[0.5,0.5])), label='MA(2)')
legend()
:References: [Marple]_ | python | {
"resource": ""
} |
q255977 | arma_estimate | validation | def arma_estimate(X, P, Q, lag):
"""Autoregressive and moving average estimators.
This function provides an estimate of the autoregressive
parameters, the moving average parameters, and the driving
white noise variance of an ARMA(P,Q) for a complex or real data sequence.
The parameters are estimated using three steps:
* Estimate the AR parameters from the original data based on a least
squares modified Yule-Walker technique,
* Produce a residual time sequence by filtering the original data
with a filter based on the AR parameters,
* Estimate the MA parameters from the residual time sequence.
:param array X: Array of data samples (length N)
:param int P: Desired number of AR parameters
:param int Q: Desired number of MA parameters
:param int lag: Maximum lag to use for autocorrelation estimates
:return:
* A - Array of complex P AR parameter estimates
* B - Array of complex Q MA parameter estimates
* RHO - White noise variance estimate
.. note::
* lag must be >= Q (MA order)
**dependencies**:
* :meth:`spectrum.correlation.CORRELATION`
* :meth:`spectrum.covar.arcovar`
* :meth:`spectrum.arma.ma`
.. plot::
:width: 80%
:include-source:
from spectrum import arma_estimate, arma2psd, marple_data
import pylab
a,b, rho = arma_estimate(marple_data, 15, 15, 30)
psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)
pylab.plot(10 * pylab.log10(psd))
pylab.ylim([-50,0])
:reference: [Marple]_
"""
R = CORRELATION(X, maxlags=lag, norm='unbiased')
R0 = R[0]
#C Estimate the AR parameters (no error weighting is used).
#C Number of equation errors is M-Q .
MPQ = lag - Q + P
N = len(X)
Y = np.zeros(N-P, dtype=complex)
for K in range(0, MPQ):
KPQ = K + Q - P+1
if KPQ < 0:
Y[K] = R[-KPQ].conjugate()
if KPQ == 0:
Y[K] = R0
| python | {
"resource": ""
} |
q255978 | ma | validation | def ma(X, Q, M):
"""Moving average estimator.
This program provides an estimate of the moving average parameters
and driving noise variance for a data sequence based on a
long AR model and a least squares fit.
:param array X: The input data array
:param int Q: Desired MA model order (must be >0 and <M)
:param int M: Order of "long" AR model (suggest at least 2*Q )
:return:
* MA - Array of Q complex MA parameter estimates
* RHO - Real scalar of white noise variance estimate
.. plot::
:width: 80%
:include-source:
from | python | {
"resource": ""
} |
q255979 | CORRELOGRAMPSD | validation | def CORRELOGRAMPSD(X, Y=None, lag=-1, window='hamming',
norm='unbiased', NFFT=4096, window_params={},
correlation_method='xcorr'):
"""PSD estimate using correlogram method.
:param array X: complex or real data samples X(1) to X(N)
:param array Y: complex data samples Y(1) to Y(N). If provided, computes
the cross PSD, otherwise the PSD is returned
:param int lag: highest lag index to compute. Must be less than N
:param str window_name: see :mod:`window` for list of valid names
:param str norm: one of the valid normalisation of :func:`xcorr` (biased,
unbiased, coeff, None)
:param int NFFT: total length of the final data sets (padded with zero
if needed; default is 4096)
:param str correlation_method: either `xcorr` or `CORRELATION`.
CORRELATION should be removed in the future.
:return:
* Array of real (cross) power spectral density estimate values. This is
a two sided array with negative values following the positive ones
whatever is the input data (real or complex).
.. rubric:: Description:
The exact power spectral density is the Fourier transform of the
autocorrelation sequence:
.. math:: P_{xx}(f) = T \sum_{m=-\infty}^{\infty} r_{xx}[m] exp^{-j2\pi fmT}
The correlogram method of PSD estimation substitutes a finite sequence of
autocorrelation estimates :math:`\hat{r}_{xx}` in place of :math:`r_{xx}`.
This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by
choosing a proper lag `L`. The estimated PSD is then
.. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}
The lag index must be less than the number of data samples `N`. Ideally, it
should be around `L/10` [Marple]_ so as to avoid greater statistical
variance associated with higher lags.
To reduce the leakage of the implicit rectangular window and therefore to
reduce the bias in the estimate, a tapering window is normally used and lead
to the so-called Blackman and Tukey correlogram:
.. math:: \hat{P}_{BT}(f) = T \sum_{m=-L}^{L} w[m] \hat{r}_{xx}[m] exp^{-j2\pi fmT}
The correlogram for the cross power spectral estimate is
.. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}
which is computed if :attr:`Y` is not provide. In such case,
:math:`r_{yx} = r_{xy}` so we compute the correlation only once.
.. plot::
:width: 80%
:include-source:
from spectrum import CORRELOGRAMPSD, marple_data
from spectrum.tools import cshift
from pylab import log10, axis, grid, plot,linspace
psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
f = linspace(-0.5, 0.5, len(psd))
psd = cshift(psd, len(psd)/2)
plot(f, 10*log10(psd/max(psd)))
axis([-0.5,0.5,-50,0])
grid(True)
.. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,
:class:`pcorrelogram`.
"""
N = len(X)
assert | python | {
"resource": ""
} |
q255980 | _get_data | validation | def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
| python | {
"resource": ""
} |
q255981 | _select_block | validation | def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
| python | {
"resource": ""
} |
q255982 | _parse_dict_recursive | validation | def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
| python | {
"resource": ""
} |
q255983 | parse_sphinx_searchindex | validation | def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = | python | {
"resource": ""
} |
q255984 | embed_code_links | validation | def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
# No need to waste time embedding hyperlinks when not running the examples
# XXX: also at the time of writing this fixes make html-noplot
# for some reason I don't fully understand
if not app.builder.config.plot_gallery:
return
# XXX: Whitelist of builders for which it makes sense to embed
# hyperlinks inside the example html. Note that the link embedding
# require searchindex.js to exist for the links to the local doc
# and there does not seem to be a good way of knowing which
# builders creates a searchindex.js.
if app.builder.name not in ['html', 'readthedocs']:
| python | {
"resource": ""
} |
q255985 | SphinxDocLinkResolver._get_link | validation | def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
| python | {
"resource": ""
} |
q255986 | tf2zp | validation | def tf2zp(b,a):
"""Convert transfer function filter parameters to zero-pole-gain form
Find the zeros, poles, and gains of this continuous-time system:
.. warning:: b and a must have the same length.
::
from spectrum import tf2zp
b = [2,3,0]
a = [1, 0.4, 1]
[z,p,k] = tf2zp(b,a) % Obtain zero-pole-gain form
z =
1.5
0
p =
-0.2000 + 0.9798i
-0.2000 - 0.9798i
k =
2
:param b: numerator
:param a: denominator
:param fill: If True, check that the length of a and b are the same. If not, create a copy of the shortest element and append zeros to it.
:return: z (zeros), p (poles), g (gain)
Convert transfer function f(x)=sum(b*x^n)/sum(a*x^n) to
zero-pole-gain form f(x)=g*prod(1-z*x)/prod(1-p*x)
.. todo:: See if tf2ss followed by ss2zp gives better results. | python | {
"resource": ""
} |
q255987 | eqtflength | validation | def eqtflength(b,a):
"""Given two list or arrays, pad with zeros the shortest array
:param b: list or array
:param a: list or array
.. doctest::
>>> from spectrum.transfer import eqtflength
>>> a = [1,2]
>>> b = [1,2,3,4]
>>> a, b, = eqtflength(a,b)
"""
d = abs(len(b)-len(a))
if d != 0:
if len(a) > len(b):
try:
b.extend([0.]*d)
| python | {
"resource": ""
} |
q255988 | ss2zpk | validation | def ss2zpk(a,b,c,d, input=0):
"""State-space representation to zero-pole-gain representation.
:param A: ndarray State-space representation of linear system.
:param B: ndarray State-space representation of linear system.
:param C: ndarray State-space representation of linear system.
:param D: ndarray State-space representation of linear system.
:param int input: optional For multiple-input systems, the input to use.
:return: | python | {
"resource": ""
} |
q255989 | zpk2tf | validation | def zpk2tf(z, p, k):
r"""Return polynomial transfer function representation from zeros and poles
:param ndarray z: Zeros of the transfer function.
:param ndarray p: Poles of the transfer function.
:param float k: System gain.
:return:
b : ndarray Numerator polynomial.
a : ndarray Denominator polynomial.
:func:`zpk2tf` forms transfer function polynomials from the zeros, poles, and gains
of a system in factored form.
zpk2tf(z,p,k) finds a rational transfer function
.. math:: \frac{B(s)}{A(s)} = \frac{b_1 s^{n-1}+\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\dots a_{m-1}s+a_m}
given a system in factored transfer function form
.. math:: H(s) = \frac{Z(s)}{P(s)} = k \frac{(s-z_1)(s-z_2)\dots(s-z_m)}{(s-p_1)(s-p_2)\dots(s-p_n)}
with p being the pole locations, and z the zero locations, with as many.
The gains for each numerator transfer function are in vector k.
The zeros and poles must | python | {
"resource": ""
} |
q255990 | zpk2ss | validation | def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
:param sequence z,p: Zeros and poles.
:param float k: | python | {
"resource": ""
} |
q255991 | enbw | validation | def enbw(data):
r"""Computes the equivalent noise bandwidth
.. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}
.. doctest::
>>> from spectrum import create_window, enbw
>>> w = create_window(64, 'rectangular')
>>> enbw(w)
1.0
The following table contains the ENBW values for some of the
implemented windows in this module (with N=16384). They have been
double checked against the literature (Source: [Harris]_, [Marple]_).
If not present, it means that it has not been checked.
=================== ============ =============
name ENBW literature
=================== ============ =============
rectangular 1. 1.
triangle 1.3334 1.33
Hann 1.5001 1.5
Hamming 1.3629 1.36
blackman | python | {
"resource": ""
} |
q255992 | _kaiser | validation | def _kaiser(n, beta):
"""Independant Kaiser window
For the definition of the Kaiser window, see A. V. Oppenheim & R. W. Schafer, "Discrete-Time Signal Processing".
The continuous version of | python | {
"resource": ""
} |
q255993 | window_visu | validation | def window_visu(N=51, name='hamming', **kargs):
"""A Window visualisation tool
:param N: length of the window
:param name: name of the window
:param NFFT: padding used by the FFT
:param mindB: the minimum frequency power in dB
:param maxdB: the maximum frequency power in dB
:param kargs: optional arguments passed to :func:`create_window`
This function plot the window shape and its equivalent in the Fourier domain.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'kaiser', beta=8.)
| python | {
"resource": ""
} |
q255994 | window_kaiser | validation | def window_kaiser(N, beta=8.6, method='numpy'):
r"""Kaiser window
:param N: window length
:param beta: kaiser parameter (default is 8.6)
To obtain a Kaiser window that designs an FIR filter with
sidelobe attenuation of :math:`\alpha` dB, use the following :math:`\beta` where
:math:`\beta = \pi \alpha`.
.. math::
w_n = \frac{I_0\left(\pi\alpha\sqrt{1-\left(\frac{2n}{M}-1\right)^2}\right)} {I_0(\pi \alpha)}
where
* :math:`I_0` is the zeroth order Modified Bessel function of the first kind.
* :math:`\alpha` is a real number that determines the shape of the
window. It determines the trade-off between main-lobe width and side
lobe level.
* the length of the sequence is N=M+1.
The Kaiser window can approximate many other windows by varying
the :math:`\beta` parameter:
===== ========================
beta Window shape
===== ========================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
===== ========================
.. plot::
:width: 80%
:include-source:
from pylab import plot, | python | {
"resource": ""
} |
q255995 | window_blackman | validation | def window_blackman(N, alpha=0.16):
r"""Blackman window
:param N: window length
.. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})
with
.. math::
a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16
When :math:`\alpha=0.16`, this is the unqualified Blackman window with
:math:`a_0=0.48` and :math:`a_2=0.08`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'blackman')
.. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
this implementation is valid | python | {
"resource": ""
} |
q255996 | window_gaussian | validation | def window_gaussian(N, alpha=2.5):
r"""Gaussian window
:param N: window length
.. math:: \exp^{-0.5 \left( \sigma\frac{n}{N/2} \right)^2}
with :math:`\frac{N-1}{2}\leq n \leq \frac{N-1}{2}`.
.. note:: N-1 is used to be in agreement with octave convention. The ENBW of
| python | {
"resource": ""
} |
q255997 | window_cosine | validation | def window_cosine(N):
r"""Cosine tapering window also known as sine window.
:param N: window length
.. math:: w(n) = \cos\left(\frac{\pi n}{N-1} - \frac{\pi}{2}\right) = \sin \left(\frac{\pi n}{N-1}\right)
.. plot::
:width: 80%
:include-source:
from | python | {
"resource": ""
} |
q255998 | window_lanczos | validation | def window_lanczos(N):
r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
| python | {
"resource": ""
} |
q255999 | window_bartlett_hann | validation | def window_bartlett_hann(N):
r"""Bartlett-Hann window
:param N: window length
.. math:: w(n) = a_0 + a_1 \left| \frac{n}{N-1} -\frac{1}{2}\right| - a_2 \cos \left( \frac{2\pi n}{N-1} \right)
with :math:`a_0 = 0.62`, :math:`a_1 = 0.48` and :math:`a_2=0.38`
.. plot::
:width: 80%
:include-source:
from | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.