code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
GPULoad = namedtuple('GPULoad', ['processor', 'memory', 'weighted'])
gpus = GPUtil.getGPUs()
load = []
for g in gpus:
wload = (wproc * g.load + wmem * g.memoryUtil) / (wproc + wmem)
load.append(GPULoad(g.load, g.memoryUtil, wload))
return load | def gpu_load(wproc=0.5, wmem=0.5) | Return a list of namedtuples representing the current load for
each GPU device. The processor and memory loads are fractions
between 0 and 1. The weighted load represents a weighted average
of processor and memory loads using the parameters `wproc` and
`wmem` respectively. | 2.72842 | 2.268685 | 1.202644 |
gl = gpu_load(wproc=wproc, wmem=wmem)
# return np.argsort(np.asarray(gl)[:, -1]).tolist()
return [idx for idx, load in sorted(enumerate(
[g.weighted for g in gl]), key=(lambda x: x[1]))] | def device_by_load(wproc=0.5, wmem=0.5) | Get a list of GPU device ids ordered by increasing weighted
average of processor and memory load. | 5.861517 | 5.087439 | 1.152155 |
ids = device_by_load(wproc=wproc, wmem=wmem)
cp.cuda.Device(ids[0]).use()
return ids[0] | def select_device_by_load(wproc=0.5, wmem=0.5) | Set the current device for cupy as the device with the lowest
weighted average of processor and memory load. | 4.169538 | 3.842701 | 1.085054 |
# See goo.gl/BVJ7MN
path = name.split('.')
setattr(reduce(getattr, path[:-1], obj), path[-1], value) | def rsetattr(obj, name, value) | Recursive version of :func:`setattr`. | 10.594243 | 8.682003 | 1.220253 |
spec = importlib.util.find_spec(name)
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
spec.loader.exec_module(mod)
return mod | def load_module(name) | Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module | 1.951022 | 2.180264 | 0.894856 |
if attrib is None:
attrib = {}
spec = importlib.util.find_spec(name)
spec.name = pname
if pfile is not None:
spec.origin = pfile
spec.loader.name = pname
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
sys.modules[pna... | def patch_module(name, pname, pfile=None, attrib=None) | Create a patched copy of the named module and register it in
``sys.modules``.
Parameters
----------
name : string
Name of source module
pname : string
Name of patched copy of module
pfile : string or None, optional (default None)
Value to assign as source file name of patched ... | 1.845254 | 1.899432 | 0.971477 |
# Patched module name is constructed from source module name
# by replacing 'sporco.' with 'sporco.cupy.'
pname = re.sub('^sporco.', 'sporco.cupy.', name)
# Attribute dict always maps cupy module to 'np' attribute in
# patched module
if attrib is None:
attrib = {}
attrib.update... | def sporco_cupy_patch_module(name, attrib=None) | Create a copy of the named sporco module, patch it to replace
numpy with cupy, and register it in ``sys.modules``.
Parameters
----------
name : string
Name of source module
attrib : dict or None, optional (default None)
Dict of attribute names and values to assign to patched module
... | 5.381721 | 5.521996 | 0.974597 |
if lst and isinstance(lst[0], cp.ndarray):
return cp.hstack(lst)
else:
return cp.asarray(lst) | def _list2array(lst) | Convert a list to a cupy array. | 4.326571 | 3.67269 | 1.178039 |
dt = cp.dtype(dtype)
if dt == cp.dtype('float128'):
return cp.dtype('complex256')
elif dt == cp.dtype('float64'):
return cp.dtype('complex128')
else:
return cp.dtype('complex64') | def _complex_dtype(dtype) | Patched version of :func:`sporco.linalg.complex_dtype`. | 2.42124 | 2.342216 | 1.033739 |
return cp.empty(shape, dtype, order) | def _pyfftw_empty_aligned(shape, dtype, order='C', n=None) | Patched version of :func:`sporco.linalg.pyfftw_empty_aligned`. | 8.941377 | 11.291378 | 0.791876 |
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order) | def _pyfftw_rfftn_empty_aligned(shape, axes, dtype, order='C', n=None) | Patched version of :func:`sporco.linalg.pyfftw_rfftn_empty_aligned`. | 5.098057 | 4.973481 | 1.025048 |
if cp.isrealobj(a) and cp.isrealobj(b):
fft = cp.fft.rfftn
ifft = cp.fft.irfftn
else:
fft = cp.fft.fftn
ifft = cp.fft.ifftn
dims = cp.maximum(cp.asarray([a.shape[i] for i in axes]),
cp.asarray([b.shape[i] for i in axes]))
dims = [int(d) for d i... | def _fftconv(a, b, axes=(0, 1)) | Patched version of :func:`sporco.linalg.fftconv`. | 2.083495 | 2.064334 | 1.009282 |
return cp.sum(x * y, axis=axis, keepdims=True) | def _inner(x, y, axis=-1) | Patched version of :func:`sporco.linalg.inner`. | 5.444998 | 6.157441 | 0.884296 |
return cp.linalg.cholesky(A), True | def _cho_factor(A, lower=True, check_finite=True) | Implementation of :func:`scipy.linalg.cho_factor` using
a function supported in cupy. | 11.568629 | 10.37046 | 1.115537 |
L = c_and_lower[0]
y = cpxl.solve_triangular(L, b, trans=0, lower=True,
check_finite=check_finite)
return cpxl.solve_triangular(L, y, trans=1, lower=True,
check_finite=check_finite) | def _cho_solve(c_and_lower, b, check_finite=True) | Implementation of :func:`scipy.linalg.cho_solve` using
a function supported in cupy. | 3.010496 | 3.055739 | 0.985194 |
N, M = A.shape
if N >= M:
c, lwr = _cho_factor(
A.T.dot(A) + rho * cp.identity(M, dtype=A.dtype), lower=lower,
check_finite=check_finite)
else:
c, lwr = _cho_factor(
A.dot(A.T) + rho * cp.identity(N, dtype=A.dtype), lower=lower,
check_fin... | def _linalg_cho_factor(A, rho, lower=False, check_finite=True) | Patched version of :func:`sporco.linalg.cho_factor`. | 2.229822 | 2.246597 | 0.992534 |
N, M = A.shape
if N >= M:
x = (b - _cho_solve((c, lwr), b.dot(A).T,
check_finite=check_finite).T.dot(A.T)) / rho
else:
x = _cho_solve((c, lwr), b.T, check_finite=check_finite).T
return x | def _cho_solve_AATI(A, rho, b, c, lwr, check_finite=True) | Patched version of :func:`sporco.linalg.cho_solve_AATI`. | 3.37619 | 3.424026 | 0.986029 |
def keyfunc(entry):
if slemap is not None:
rle = slemap(entry)
if rle in reflist:
# Ordering index taken from reflist
return reflist.index(rle)
else:
# Ordering index taken from sortlist, offset
# by the length of reflist so t... | def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False,
slemap=None) | Sort a list according to the order of entries in a reference list.
Parameters
----------
sortlist : list
List to be sorted
reflist : list
Reference list defining sorting order
reverse : bool, optional (default False)
Flag indicating whether to sort in reverse order
fltr : bool... | 2.938124 | 2.799891 | 1.049371 |
if modpath is None:
modpath = os.path.split(filepath)[-1]
modlst = []
for ff, name, ispkg in pkgutil.iter_modules([filepath]):
if not skipunder or name[0] != '_':
if ispkg:
sublst = get_module_names(os.path.join(filepath, name),
... | def get_module_names(filepath, modpath=None, skipunder=True) | Get a list of modules in a package/subpackage.
Parameters
----------
filepath : string
Filesystem path to the root directory of the package/subpackage
modpath : string or None, optional (default None)
Name of package or module path (in the form 'a.b.c') to a
subpackage
skipunder :... | 2.054189 | 2.032021 | 1.010909 |
# if module argument is a string, try to load the module with the
# specified name
if isinstance(module, str):
module = importlib.import_module(module)
# Get members in the module
members = map(lambda x: x[1], inspect.getmembers(module, type))
# Filter out members that are not de... | def get_module_members(module, type=None) | Get a list of module member objects, excluding members that are
imported from other modules that are not submodules of the specified
module. An attempt is made to sort the list in order of definition
in the source file. If the module has an `__all__` attribute, the
list is sorted in the same order, and... | 3.824002 | 3.524034 | 1.08512 |
clslst = get_module_members(module, type=inspect.isclass)
return list(filter(lambda cls: not issubclass(cls, Exception),
clslst)) | def get_module_classes(module) | Get a list of module member classes.
Parameters
----------
module : string or module object
Module for which member list is to be generated
Returns
-------
mbrlst : list
List of module functions | 7.397073 | 8.164795 | 0.905972 |
dw = DocWriter(outpath, tmpltpath)
modlst = get_module_names(modpath, pkgname)
print('Making api docs:', end='')
for modname in modlst:
# Don't generate docs for cupy or cuda subpackages
if 'cupy' in modname or 'cuda' in modname:
continue
try:
mod... | def write_module_docs(pkgname, modpath, tmpltpath, outpath) | Write the autosummary style docs for the specified package.
Parameters
----------
pkgname : string
Name of package to document
modpath : string
Path to package source root directory
tmpltpath : string
Directory path for autosummary template files
outpath : string
Directo... | 4.388559 | 4.605159 | 0.952966 |
modname = module.__name__
# Based on code in generate_autosummary_docs in https://git.io/fxpJS
ns = {}
ns['members'] = dir(module)
ns['functions'] = list(map(lambda x: x.__name__,
get_module_functions(module)))
ns['classes'] =... | def write(self, module) | Write the RST source document for generating the docs for
a specified module.
Parameters
----------
module : module object
Module for which member list is to be generated | 3.102756 | 3.007672 | 1.031614 |
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Yss = n... | def uinit(self, ushape) | Return initialiser for working variable U. | 5.902278 | 5.599053 | 1.054157 |
r
ngsit = 0
gsrrs = np.inf
YU = self.Y - self.U
SYU = self.S + YU[..., -1]
YU[..., -1] = 0.0
ATYU = self.cnst_AT(YU)
while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']:
self.X = self.GaussSeidelStep(
SYU, self.X,... | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.5452 | 6.105378 | 1.072038 |
r
self.Y[..., 0:-1] = sp.prox_l2(
self.AX[..., 0:-1] + self.U[..., 0:-1],
(self.lmbda/self.rho)*self.Wtvna, axis=self.saxes)
self.Y[..., -1] = sp.prox_l1(
self.AX[..., -1] + self.U[..., -1] - self.S,
(1.0/self.rho)*self.Wdf) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 6.328407 | 5.827257 | 1.086001 |
r
return np.concatenate(
[sl.Gax(X, ax)[..., np.newaxis] for ax in self.axes] +
[X[..., np.newaxis],], axis=X.ndim) | def cnst_A(self, X) | r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A \mathbf{x} = (G_r^T \;\; G_c^T
\;\; I)^T \mathbf{x}`. | 12.39864 | 12.664168 | 0.979033 |
if self.opt['gEvalY']:
return self.Y
else:
return self.cnst_A(None, self.Xf) - self.cnst_c() | def obfn_gvar(self) | Variable to be evaluated in computing regularisation term,
depending on 'gEvalY' option value. | 20.26001 | 9.403053 | 2.15462 |
r
gvr = self.obfn_gvar()
dfd = np.sum(self.Wdf * np.abs(gvr[..., -1]))
reg = np.sum(self.Wtv * np.sqrt(np.sum(gvr[..., 0:-1]**2,
axis=self.saxes)))
obj = dfd + self.lmbda*reg
return (obj, dfd, reg) | def eval_objfn(self) | r"""Compute components of objective function as well as total
contribution to objective function. Data fidelity term is
:math:`\| W_{\mathrm{df}} (H \mathbf{x} - \mathbf{s}) \|_1` and
regularisation term is :math:`\| W_{\mathrm{tv}}
\sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}\|_1`. | 8.170078 | 5.258115 | 1.553804 |
r
if Xf is None:
Xf = sl.rfftn(X, axes=self.axes)
return sl.irfftn(self.GAf*Xf[..., np.newaxis], self.axsz,
axes=self.axes) | def cnst_A(self, X, Xf=None) | r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A \mathbf{x} = (G_r^T \;\;
G_c^T \;\; H)^T \mathbf{x}`. | 7.960934 | 7.507594 | 1.060384 |
r
Xf = sl.rfftn(X, axes=self.axes)
return np.sum(sl.irfftn(np.conj(self.GAf)*Xf, self.axsz,
axes=self.axes), axis=self.Y.ndim-1) | def cnst_AT(self, X) | r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
a component of ADMM problem constraint. In this case
:math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T \;\; H^T) \mathbf{x}`. | 8.62545 | 10.074998 | 0.856124 |
r
nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl0.size == 1:
nl0 = nl0.ravel()[0]
return nl0 | def norm_l0(x, axis=None, eps=0.0) | r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
--... | 3.860999 | 4.373949 | 0.882726 |
r
return (np.abs(v) >= np.sqrt(2.0 * alpha)) * v | def prox_l0(v, alpha) | r"""Compute the proximal operator of the :math:`\ell_0` "norm" (hard
thresholding)
.. math::
\mathrm{prox}_{\alpha f}(v) = \mathcal{S}_{0,\alpha}(\mathbf{v})
= \left\{ \begin{array}{ccc} 0 & \text{if} &
| v | < \sqrt{2 \alpha} \\ v &\text{if} &
| v | \geq \sqrt{2 \alpha} \end{array} \... | 12.162974 | 12.967484 | 0.937959 |
r
nl1 = np.sum(np.abs(x), axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl1.size == 1:
nl1 = nl1.ravel()[0]
return nl1 | def norm_l1(x, axis=None) | r"""Compute the :math:`\ell_1` norm
.. math::
\| \mathbf{x} \|_1 = \sum_i | x_i |
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)... | 3.703688 | 4.196648 | 0.882535 |
r
if have_numexpr:
return ne.evaluate(
'where(abs(v)-alpha > 0, where(v >= 0, 1, -1) * (abs(v)-alpha), 0)'
)
else:
return np.sign(v) * (np.clip(np.abs(v) - alpha, 0, float('Inf'))) | def prox_l1(v, alpha) | r"""Compute the proximal operator of the :math:`\ell_1` norm (scalar
shrinkage/soft thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) =
\mathcal{S}_{1,\alpha}(\mathbf{v}) = \mathrm{sign}(\mathbf{v})
\odot \max(0, |\mathbf{v}| - \alpha)
where :math:`f(\mathbf{x}) = \|\mathbf{x... | 4.142653 | 3.985151 | 1.039522 |
r
nl2 = np.sum(x**2, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl2.size == 1:
nl2 = nl2.ravel()[0]
return nl2 | def norm_2l2(x, axis=None) | r"""Compute the squared :math:`\ell_2` norm
.. math::
\| \mathbf{x} \|_2^2 = \sum_i x_i^2
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (defau... | 4.080359 | 4.660476 | 0.875524 |
r
a = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
b = np.maximum(0, a - alpha)
b = sl.zdivide(b, a)
return np.asarray(b * v, dtype=v.dtype) | def prox_l2(v, alpha, axis=None) | r"""Compute the proximal operator of the :math:`\ell_2` norm (vector
shrinkage/soft thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) = \mathcal{S}_{2,\alpha}
(\mathbf{v}) = \frac{\mathbf{v}} {\|\mathbf{v}\|_2} \max(0,
\|\mathbf{v}\|_2 - \alpha) \;,
where :math:`f(\mathbf{x}) = \... | 4.94183 | 4.985424 | 0.991256 |
r
d = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
return np.asarray((d <= gamma) * v +
(d > gamma) * (gamma * sl.zdivide(v, d)),
dtype=v.dtype) | def proj_l2(v, gamma, axis=None) | r"""Compute the projection operator of the :math:`\ell_2` norm.
The projection operator of the uncentered :math:`\ell_2` norm,
.. math::
\mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \;
\text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma
can be computed as :mat... | 5.90472 | 6.387062 | 0.924481 |
r
if method is None:
if axis is None:
method = 'scalarroot'
else:
method = 'sortcumsum'
if method == 'scalarroot':
if axis is not None:
raise ValueError('Method scalarroot only supports axis=None')
return _proj_l1_scalar_root(v, gamma)
... | def proj_l1(v, gamma, axis=None, method=None) | r"""Projection operator of the :math:`\ell_1` norm.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
axis : None or int or tuple of ints, optional (default None)
Axes of `v` over which to compute the :math:`\ell_1` norm. I... | 3.738196 | 3.011578 | 1.241275 |
r
if norm_l1(v) <= gamma:
return v
else:
av = np.abs(v)
fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma
t = optim.brentq(fn, 0, av.max())
return prox_l1(v, t) | def _proj_l1_scalar_root(v, gamma) | r"""Projection operator of the :math:`\ell_1` norm. The solution is
computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`.
There is no `axis` parameter since the algorithm for computing the
solution treats the input `v` as a single vector.
Parameters
----------
v : array_like
... | 5.226776 | 5.627725 | 0.928755 |
r
if axis is None and norm_l1(v) <= gamma:
return v
if axis is not None and axis < 0:
axis = v.ndim + axis
av = np.abs(v)
vs = np.sort(av, axis=axis)
if axis is None:
N = v.size
c = 1.0 / np.arange(1, N + 1, dtype=v.dtype).reshape(v.shape)
vs = vs[::-1].r... | def _proj_l1_sortsum(v, gamma, axis=None) | r"""Projection operator of the :math:`\ell_1` norm. The solution is
computed via the method of :cite:`duchi-2008-efficient`.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
axis : None or int, optional (default None)
... | 2.52052 | 2.604955 | 0.967587 |
if xmethod == 'admm':
isx = {'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl',
'XRho': 'Rho'}
else:
isx = {'X_F_Btrack': 'F_Btrack', 'X_Q_Btrack': 'Q_Btrack',
'X_ItBt': 'IterBTrack', 'X_L': 'L', 'X_Rsdl': 'Rsdl'}
if not opt['AccurateDFid']:
isx.updat... | def isxmap(xmethod, opt) | Return ``isxmap`` argument for ``.IterStatsConfig`` initialiser. | 6.619286 | 6.083604 | 1.088053 |
fld = ['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr']
if xmethod == 'admm':
fld.extend(['XPrRsdl', 'XDlRsdl', 'XRho'])
else:
if opt['CBPDN', 'BackTrack', 'Enabled']:
fld.extend(['X_F_Btrack', 'X_Q_Btrack', 'X_ItBt', 'X_L',
'X_Rsdl'])
else:
... | def isfld(xmethod, dmethod, opt) | Return ``isfld`` argument for ``.IterStatsConfig`` initialiser. | 3.745851 | 3.621971 | 1.034202 |
txt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr']
if xmethod == 'admm':
txt.extend(['r_X', 's_X', u('ρ_X')])
else:
if opt['CBPDN', 'BackTrack', 'Enabled']:
txt.extend(['F_X', 'Q_X', 'It_X', 'L_X'])
else:
txt.append('L_X')
if dmethod != 'fista':
... | def hdrtxt(xmethod, dmethod, opt) | Return ``hdrtxt`` argument for ``.IterStatsConfig`` initialiser. | 4.31033 | 4.040159 | 1.066872 |
hdr = {'Itn': 'Iter', 'Fnc': 'ObjFun', 'DFid': 'DFid',
u('ℓ1'): 'RegL1', 'Cnstr': 'Cnstr'}
if xmethod == 'admm':
hdr.update({'r_X': 'XPrRsdl', 's_X': 'XDlRsdl', u('ρ_X'): 'XRho'})
else:
if opt['CBPDN', 'BackTrack', 'Enabled']:
hdr.update({'F_X': 'X_F_Btrack', 'Q_... | def hdrmap(xmethod, dmethod, opt) | Return ``hdrmap`` argument for ``.IterStatsConfig`` initialiser. | 3.574571 | 3.450693 | 1.035899 |
vlst = []
# Iterate over the fields of the IterationStats namedtuple
# to be populated with values. If a field name occurs as a
# key in the isxmap dictionary, use the corresponding key
# value as a field name in the isx namedtuple for the X
# step object and ap... | def iterstats(self, j, t, isx, isd, evl) | Construct IterationStats namedtuple from X step and D step
IterationStats namedtuples.
Parameters
----------
j : int
Iteration number
t : float
Iteration time
isx : namedtuple
IterationStats namedtuple from X step object
isd : namedt... | 4.35595 | 3.674941 | 1.185312 |
itdsp = tuple([getattr(itst, self.hdrmap[col]) for col in self.hdrtxt])
print(self.fmtstr % itdsp) | def printiterstats(self, itst) | Print iteration statistics.
Parameters
----------
itst : namedtuple
IterationStats namedtuple as returned by :meth:`iterstats` | 13.566798 | 15.297673 | 0.886854 |
# Print header and separator strings
if self.opt['Verbose'] and self.opt['StatusHeader']:
self.isc.printheader()
# Reset timer
self.timer.start(['solve', 'solve_wo_eval'])
# Main optimisation iterations
for self.j in range(self.j, self.j + self.opt... | def solve(self) | Start (or re-start) optimisation. This method implements the
framework for the alternation between `X` and `D` updates in a
dictionary learning algorithm. There is sufficient flexibility
in specifying the two updates that it calls that it is
usually not necessary to override this method ... | 3.6361 | 3.177059 | 1.144486 |
r
return np.sum(np.linalg.svd(sl.promote16(X), compute_uv=False)) | def norm_nuclear(X) | r"""Compute the nuclear norm
.. math::
\| X \|_* = \sum_i \sigma_i
where :math:`\sigma_i` are the singular values of matrix :math:`X`.
Parameters
----------
X : array_like
Input array :math:`X`
Returns
-------
nncl : float
Nuclear norm of `X` | 22.265709 | 24.017197 | 0.927074 |
r
Usvd, s, Vsvd = sl.promote16(V, fn=np.linalg.svd, full_matrices=False)
ss = np.maximum(0, s - alpha)
return np.dot(Usvd, np.dot(np.diag(ss), Vsvd)), ss | def prox_nuclear(V, alpha) | r"""Proximal operator of the nuclear norm :cite:`cai-2010-singular`
with parameter :math:`\alpha`.
Parameters
----------
v : array_like
Input array :math:`V`
alpha : float
Parameter :math:`\alpha`
Returns
-------
X : ndarray
Output array
s : ndarray
Singula... | 8.22264 | 7.606225 | 1.081041 |
@wraps(func)
def wrapper(*args, **kwargs):
warn("Deprecated, this will be removed in the future", DeprecationWarning)
return func(*args, **kwargs)
wrapper.__doc__ = "Deprecated.\n" + (wrapper.__doc__ or "")
return wrapper | def deprecate(func) | A deprecation warning emitter as a decorator. | 3.238297 | 3.095883 | 1.046001 |
package_contents = glob(os.path.join(package_path[0], pattern))
relative_path_names = (os.path.split(name)[1] for name in package_contents)
no_ext_names = (os.path.splitext(name)[0] for name in relative_path_names)
return sorted(set(no_ext_names)) | def get_module_names(package_path, pattern="lazy_*.py*") | All names in the package directory that matches the given glob, without
their extension. Repeated names should appear only once. | 2.793641 | 2.587332 | 1.079738 |
def get_module(name):
return __import__(".".join([package_name, name]), fromlist=[package_name])
return [get_module(name) for name in module_names] | def get_modules(package_name, module_names) | List of module objects from the package, keeping the name order. | 3.278722 | 3.077725 | 1.065307 |
from .lazy_text import rst_table, small_doc
max_width = width - max(len(k) for k, v in pairs)
table = [(k, small_doc(v, max_width=max_width)) for k, v in pairs]
return rst_table(table, (key_header, descr_header)) | def summary_table(pairs, key_header, descr_header="Description", width=78) | List of one-liner strings containing a reStructuredText summary table
for the given pairs ``(name, object)``. | 4.627011 | 4.257323 | 1.086836 |
return "\n".join(
[docstring, "Summary of {}:".format(summary_type), ""] +
summary_table(pairs, key_header) + [""]
) | def docstring_with_summary(docstring, pairs, key_header, summary_type) | Return a string joining the docstring with the pairs summary table. | 4.940751 | 4.091342 | 1.207611 |
pairs = [(name, getattr(module, name)) for name in module.__all__]
kws = dict(key_header="Name", summary_type="module contents")
module.__doc__ = docstring_with_summary(module.__doc__, pairs, **kws) | def append_summary_to_module_docstring(module) | Change the ``module.__doc__`` docstring to include a summary table based
on its contents as declared on ``module.__all__``. | 7.943245 | 5.775457 | 1.375345 |
module_names = get_module_names(package_path)
modules = get_modules(package_name, module_names)
dunder_all = dunder_all_concat(modules)
for module in modules:
append_summary_to_module_docstring(module)
pairs = list(zip(module_names, modules))
kws = dict(key_header="Module", summary_type="package modu... | def init_package(package_path, package_name, docstring) | Package initialization, to be called only by ``__init__.py``.
- Find all module names;
- Import all modules (so they're already cached on sys.modules), in
the sorting order (this might make difference on cyclic imports);
- Update all module docstrings (with the summary of its contents);
- Build a module su... | 4.93599 | 4.930942 | 1.001024 |
class Memoizer(dict):
def __missing__(self, args):
val = func(*args)
self[args] = val
return val
memory = Memoizer()
@wraps(func)
def wrapper(*args):
return memory[args]
return wrapper | def memoize(func) | Decorator for unerasable memoization based on function arguments, for
functions without keyword arguments. | 2.794914 | 2.824507 | 0.989523 |
with closing(wave.open(fname, "wb")) as wave_file:
wave_file.setnchannels(1)
wave_file.setsampwidth(2)
wave_file.setframerate(rate)
for chunk in chunks((clip(sig) * 2 ** 15).map(int), dfmt="h", padval=0):
wave_file.writeframes(chunk) | def save_to_16bit_wave_file(fname, sig, rate) | Save a given signal ``sig`` to file ``fname`` as a 16-bit one-channel wave
with the given ``rate`` sample rate. | 4.13928 | 4.098064 | 1.010057 |
list_env = list(env)
return chain.from_iterable(synth(freq) * list_env for freq in freq_gen()) | def new_note_track(env, synth) | Audio track with the frequencies.
Parameters
----------
env:
Envelope Stream (which imposes the duration).
synth:
One-argument function that receives a frequency (in rad/sample) and
returns a Stream instance (a synthesized note).
Returns
-------
Endless Stream instance that joins synthesized... | 12.843017 | 11.792911 | 1.089046 |
first_dur, a, d, r, gain = [
(30 * ms, 10 * ms, 8 * ms, 10 * ms, .4),
(60 * ms, 20 * ms, 8 * ms, 20 * ms, .5)
][idx]
env = chain(adsr(first_dur, a=a, d=d, s=.2, r=r),
adsr(dur - first_dur,
a=10 * ms, d=30 * ms, s=.2, r=dur - 50 * ms))
result = gauss_noise(dur) * env *... | def unpitched_high(dur, idx) | Non-harmonic treble/higher frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note. | 4.701402 | 5.256299 | 0.894432 |
env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2
freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi)) # Hz
result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5
return list(result) | def unpitched_low(dur, idx) | Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note. | 13.835667 | 12.623241 | 1.096047 |
out = Streamix()
sig = thub(sig, copies + 1)
out.add(0, sig * pamp) # Original
remain = 1 - pamp
for unused in xrange(copies):
gain = remain * pamp
out.add(dur / copies, sig * gain)
remain -= gain
return out | def geometric_delay(sig, dur, copies, pamp=.5) | Delay effect by copying data (with Streamix).
Parameters
----------
sig:
Input signal (an iterable).
dur:
Duration, in samples.
copies:
Number of times the signal will be replayed in the given duration. The
signal is played copies + 1 times.
pamp:
The relative remaining amplitude fracti... | 10.191913 | 8.790524 | 1.15942 |
@wraps(func)
def new_func(*args, **kwargs):
return Stream(func(*args, **kwargs))
if module_name is not None:
new_func.__module__ = module_name
return new_func | def tostream(func, module_name=None) | Decorator to convert the function output into a Stream. Useful for
generator functions.
Note
----
Always use the ``module_name`` input when "decorating" a function that was
defined in other module. | 2.112066 | 2.097078 | 1.007147 |
return StreamTeeHub(data, n) if isinstance(data, Iterable) else data | def thub(data, n) | Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
----... | 26.601858 | 9.29944 | 2.860587 |
return Stream(blocks(iter(self), *args, **kwargs)) | def blocks(self, *args, **kwargs) | Interface to apply audiolazy.blocks directly in a stream, returning
another stream. Use keyword args. | 14.93889 | 8.47455 | 1.762794 |
if n is None:
return next(self._data)
if isinf(n) and n > 0:
return constructor(self._data)
if isinstance(n, float):
n = rint(n) if n > 0 else 0 # So this works with -inf and nan
return constructor(next(self._data) for _ in xrange(n)) | def take(self, n=None, constructor=list) | Returns a container with the n first elements from the Stream, or less if
there aren't enough. Use this without args if you need only one element
outside a list.
Parameters
----------
n :
Number of elements to be taken. Defaults to None.
Rounded when it's a float, and this can be ``inf`... | 5.33353 | 5.395486 | 0.988517 |
a, b = it.tee(self._data) # 2 generators, not thread-safe
self._data = a
return Stream(b) | def copy(self) | Returns a "T" (tee) copy of the given stream, allowing the calling
stream to continue being used. | 14.038761 | 15.220596 | 0.922353 |
return self.copy().take(n=n, constructor=constructor) | def peek(self, n=None, constructor=list) | Sees/peeks the next few items in the Stream, without removing them.
Besides that this functions keeps the Stream items, it's the same to the
``Stream.take()`` method.
See Also
--------
Stream.take :
Returns the n first elements from the Stream, removing them.
Note
----
When appl... | 10.441288 | 10.842007 | 0.96304 |
def skipper(data):
for _ in xrange(int(round(n))):
next(data)
for el in data:
yield el
self._data = skipper(self._data)
return self | def skip(self, n) | Throws away the first ``n`` values from the Stream.
Note
----
Performs the evaluation lazily, i.e., the values are thrown away only
after requesting the next value. | 6.125956 | 7.667025 | 0.799 |
data = self._data
self._data = (next(data) for _ in xrange(int(round(n))))
return self | def limit(self, n) | Enforces the Stream to finish after ``n`` items. | 8.478392 | 7.154133 | 1.185104 |
self._data = it.chain(self._data, Stream(*other)._data)
return self | def append(self, *other) | Append self with other stream(s). Chaining this way has the behaviour:
``self = Stream(self, *others)`` | 9.857842 | 8.085769 | 1.21916 |
self._data = xmap(func, self._data)
return self | def map(self, func) | A lazy way to apply the given function to each element in the stream.
Useful for type casting, like:
>>> from audiolazy import count
>>> count().take(5)
[0, 1, 2, 3, 4]
>>> my_stream = count().map(float)
>>> my_stream.take(5) # A float counter
[0.0, 1.0, 2.0, 3.0, 4.0] | 8.439907 | 13.031556 | 0.647652 |
self._data = xfilter(func, self._data)
return self | def filter(self, func) | A lazy way to skip elements in the stream that gives False for the given
function. | 9.349102 | 9.69997 | 0.963828 |
if self._iters:
a, b = it.tee(self._iters[0])
self._iters[0] = a
return Stream(b)
iter(self) | def copy(self) | Returns a new "T" (tee) copy of this StreamTeeHub without consuming
any of the copies done with the constructor. | 8.601243 | 7.856328 | 1.094817 |
if delta < 0:
raise ValueError("Delta time should be always positive")
self._not_playing.append((delta, iter(data))) | def add(self, delta, data) | Adds (enqueues) an iterable event to the mixer.
Parameters
----------
delta :
Time in samples since last added event. This can be zero and can be
float. Use "s" object from sHz for time conversion.
data :
Iterable (e.g. a list, a tuple, a Stream) to be "played" by the mixer at
t... | 14.810489 | 14.272264 | 1.037711 |
" Return series of accumulated sums. "
iterator = iter(iterable)
sum_data = next(iterator)
yield sum_data
for el in iterator:
sum_data += el
yield sum_data | def accumulate(iterable) | Return series of accumulated sums. | 5.95764 | 4.556781 | 1.307423 |
if isinstance(data, (Stream, Iterator)):
return tuple(Stream(cp) for cp in it.tee(data, n))
else:
return tuple(data for unused in xrange(n)) | def tee(data, n=2) | Tee or "T" copy to help working with Stream instances as well as with
numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Size of returned tuple. Defaults to 2.
Returns
-------
Tuple of n independent Stream instances, if the input is a Stream or an
iterator, otherw... | 6.524023 | 5.072466 | 1.286164 |
if isinstance(n, float):
if n.is_integer():
n = int(n)
if not isinstance(n, INT_TYPES):
raise TypeError("Non-integer input (perhaps you need Euler Gamma "
"function or Gauss Pi function)")
if n < 0:
raise ValueError("Input shouldn't be negative")
return reduce(operator.m... | def factorial(n) | Factorial function that works with really big numbers. | 5.747155 | 5.582108 | 1.029567 |
ctrl_is_down = wx.GetKeyState(wx.WXK_CONTROL)
ms = wx.GetMouseState()
# New initialization when keys pressed change
if self._key_state != ctrl_is_down:
self._key_state = ctrl_is_down
# Keep state at click
self._click_ms_x, self._click_ms_y = ms.x, ms.y
self._click_frame_x,... | def on_timer(self, evt) | Keep watching the mouse displacement via timer
Needed since EVT_MOVE doesn't happen once the mouse gets outside the
frame | 3.809452 | 3.756391 | 1.014126 |
odd_numbers = thub(count(start=1, step=2), 2)
return Stream(1, -1) * x ** odd_numbers / odd_numbers | def mgl_seq(x) | Sequence whose sum is the Madhava-Gregory-Leibniz series.
[x, -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...]
Returns
-------
An endless sequence that has the property
``atan(x) = sum(mgl_seq(x))``.
Usually you would use the ``atan()`` function, not this one. | 21.978134 | 27.078547 | 0.811644 |
acc = 1 / (1 - z ** -1) # Accumulator filter
return acc(mgl_seq(x)).skip(n-1).take() | def atan_mgl(x, n=10) | Finds the arctan using the Madhava-Gregory-Leibniz series. | 38.852192 | 30.520763 | 1.272976 |
prod = lambda args: reduce(operator.mul, args)
xv, yv = xzip(*pairs)
return lambda k: sum( yv[j] * prod( (k - rk) / (rj - rk)
for rk in xv if rj != rk )
for j, rj in enumerate(xv) ) | def lagrange(pairs) | Waring-Lagrange interpolator function.
Parameters
----------
pairs :
Iterable with pairs (tuples with two values), corresponding to points
``(x, y)`` of the function.
Returns
-------
A function that returns the interpolator result for a given ``x``. | 6.12748 | 6.491543 | 0.943917 |
sig = Stream(sig)
threshold = .5 * (order + 1)
step = old / new
data = deque([zero] * (order + 1), maxlen=order + 1)
data.extend(sig.take(rint(threshold)))
idx = int(threshold)
isig = iter(sig)
if isinstance(step, Iterable):
step = iter(step)
while True:
yield lagrange(enumerate(data))(... | def resample(sig, old=1, new=1, order=3, zero=0.) | Generic resampler based on Waring-Lagrange interpolators.
Parameters
----------
sig :
Input signal (any iterable).
old :
Time duration reference (defaults to 1, allowing percentages to the ``new``
keyword argument). This can be float number, or perhaps a Stream instance.
new :
Time duration t... | 4.062773 | 4.000345 | 1.015606 |
if self._data:
for key in xrange(self.order + 1):
yield self[key] | def values(self) | Array values generator for powers from zero to upper power. Useful to cast
as list/tuple and for numpy/scipy integration (be careful: numpy use the
reversed from the output of this function used as input to a list or a
tuple constructor). | 11.441985 | 10.603851 | 1.079041 |
if sort == "auto":
sort = self.is_laurent()
if sort:
keys = sorted(self._data, reverse=reverse)
elif reverse:
keys = reversed(list(self._data))
else:
keys = self._data
return ((k, self._data[k]) for k in keys) | def terms(self, sort="auto", reverse=False) | Pairs (2-tuple) generator where each tuple has a (power, value) term,
perhaps sorted by power. Useful for casting as dict.
Parameters
----------
sort :
A boolean value or ``"auto"`` (default) which chooses whether the terms
should be sorted. The ``"auto"`` value means ``True`` for Laurent
... | 3.451045 | 3.212451 | 1.074272 |
return all(isinstance(k, INT_TYPES) and k >= 0 for k in self._data) | def is_polynomial(self) | Tells whether it is a linear combination of natural powers of ``x``. | 10.089851 | 7.589397 | 1.329467 |
if not self.is_polynomial():
raise AttributeError("Power needs to be positive integers")
return max(key for key in self._data) if self._data else 0 | def order(self) | Finds the polynomial order.
Examples
--------
>>> (x + 4).order
1
>>> (x + 4 - x ** 18).order
18
>>> (x - x).order
0
>>> (x ** -3 + 4).order
Traceback (most recent call last):
...
AttributeError: Power needs to be positive integers | 16.027031 | 6.668391 | 2.403433 |
return Poly(OrderedDict((k, v.copy() if isinstance(v, Stream) else v)
for k, v in iteritems(self._data)),
zero=self.zero if zero is None else zero) | def copy(self, zero=None) | Returns a Poly instance with the same terms, but as a "T" (tee) copy
when they're Stream instances, allowing maths using a polynomial more
than once. | 5.562087 | 3.454023 | 1.610321 |
d = self._data
for unused in xrange(n):
d = OrderedDict((k - 1, k * v) for k, v in iteritems(d) if k != 0)
return Poly(d, zero=self.zero) | def diff(self, n=1) | Differentiate (n-th derivative, where the default n is 1). | 6.895467 | 7.120469 | 0.968401 |
if -1 in self._data:
raise ValueError("Unable to integrate term that powers to -1")
return Poly(OrderedDict((k + 1, v / (k + 1))
for k, v in iteritems(self._data)),
zero=self.zero) | def integrate(self) | Integrate without adding an integration constant. | 9.45982 | 9.286891 | 1.018621 |
import numpy as np
return np.roots(list(self.values())[::-1]).tolist() | def roots(self) | Returns a list with all roots. Needs Numpy. | 9.256514 | 5.490882 | 1.685797 |
ns = {}
exec(data, ns)
return eval(expr, ns) | def _exec_eval(data, expr) | Internal function to isolate an exec. Executes ``data`` and returns the
``expr`` evaluation afterwards. | 6.244967 | 5.791548 | 1.07829 |
alpha = e ** (-delay / tau)
return 1 / (1 - alpha * z ** -delay) | def comb(delay, tau=inf) | Feedback comb filter for a given time constant (and delay).
``y[n] = x[n] + alpha * y[n - delay]``
Parameters
----------
delay :
Feedback delay (lag), in number of samples.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples, which
allows finding ``alpha = e ** (-delay / tau)`... | 13.820597 | 14.612479 | 0.945808 |
bandwidth = thub(bandwidth, 1)
R = exp(-bandwidth * .5)
R = thub(R, 5)
cost = cos(freq) * (2 * R) / (1 + R ** 2)
cost = thub(cost, 2)
gain = (1 - R ** 2) * sqrt(1 - cost ** 2)
denominator = 1 - 2 * R * cost * z ** -1 + R ** 2 * z ** -2
return gain / denominator | def resonator(freq, bandwidth) | Resonator filter with 2-poles (conjugated pair) and no zeros (constant
numerator), with exponential approximation for bandwidth calculation.
Parameters
----------
freq :
Resonant frequency in rad/sample (max gain).
bandwidth :
Bandwidth frequency range in rad/sample following the equation:
``R... | 6.892045 | 6.694896 | 1.029448 |
R = thub(exp(cutoff - pi), 2)
return (1 - R) / (1 + R * z ** -1) | def highpass(cutoff) | This strategy uses an exponential approximation for cut-off frequency
calculation, found by matching the one-pole Laplace lowpass filter
and mirroring the resulting filter to get a highpass. | 25.145327 | 28.369822 | 0.886341 |
R = thub(exp(cutoff - pi), 2)
G = (R + 1) / 2
return G * (1 + z ** -1) / (1 + R * z ** -1) | def lowpass(cutoff) | This strategy uses an exponential approximation for cut-off frequency
calculation, found by matching the single pole and single zero Laplace
highpass filter and mirroring the resulting filter to get a lowpass. | 12.233552 | 13.371251 | 0.914915 |
R = thub(exp(-cutoff), 2)
G = (R + 1) / 2
return G * (1 - z ** -1) / (1 - R * z ** -1) | def highpass(cutoff) | This strategy uses an exponential approximation for cut-off frequency
calculation, found by matching the single pole and single zero Laplace
highpass filter. | 12.112935 | 13.987846 | 0.865961 |
return all(isinstance(filt, LinearFilter) or
(hasattr(filt, "is_linear") and filt.is_linear())
for filt in self.callables) | def is_linear(self) | Tests whether all filters in the list are linear. CascadeFilter and
ParallelFilter instances are also linear if all filters they group are
linear. | 6.330026 | 4.953768 | 1.27782 |
return self.is_linear() and all(filt.is_lti() for filt in self.callables) | def is_lti(self) | Tests whether all filters in the list are linear time invariant (LTI).
CascadeFilter and ParallelFilter instances are also LTI if all filters
they group are LTI. | 13.763544 | 6.392511 | 2.153073 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.