code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
# Extract AMS part of ystep argument so that it is not
# affected by the main part of the ystep
amidx = self.index_addmsk()
Yi = self.cbpdn.AX[amidx] + self.cbpdn.U[amidx]
# Perform main part of ystep from inner cbpdn object
self.inner_ystep()
# Apply ma... | def ystep(self) | This method is inserted into the inner cbpdn object,
replacing its own ystep method, thereby providing a hook for
applying the additional steps necessary for the AMS method. | 12.227668 | 7.712631 | 1.585408 |
# Get inner cbpdn object gvar
gv = self.inner_obfn_gvar().copy()
# Set slice corresponding to the coefficient map of the final
# filter (the impulse inserted for the AMS method) to zero so
# that it does not affect the results (e.g. l1 norm) computed
# from this... | def obfn_gvar(self) | This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method. | 21.560003 | 12.817202 | 1.682115 |
Di = np.concatenate((D, sl.atleast_nd(D.ndim, self.imp)),
axis=D.ndim-1)
self.cbpdn.setdict(Di) | def setdict(self, D=None) | Set dictionary array. | 14.124273 | 13.999721 | 1.008897 |
if D is not None:
self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbd_sm_c(
self.Df, np.conj(self.Df),
(self.mu / ... | def setdict(self, D=None) | Set dictionary array. | 6.535255 | 6.388721 | 1.022936 |
g0v = self.obfn_g0(self.obfn_g0var())
g1v = self.obfn_g1(self.obfn_g1var())
rgr = sl.rfl2norm2(np.sqrt(self.GHGf * np.conj(self.Xf) * self.Xf),
self.cri.Nv, self.cri.axisN)/2.0
obj = g0v + self.lmbda*g1v + self.mu*rgr
return (obj, g0v, g1v, rg... | def eval_objfn(self) | Compute components of regularisation function as well as total
contribution to objective function. | 6.949391 | 6.396599 | 1.08642 |
r
return np.sum(np.abs(self.W * self.obfn_g0var())) | def obfn_g0(self, Y0) | r"""Compute :math:`g_0(\mathbf{y}_0)` component of ADMM objective
function. | 21.162226 | 17.850582 | 1.18552 |
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbd_sm_c(
self.Df, np.conj(self.Df),
(self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM) | def rhochange(self) | Updated cached c array when rho changes. | 24.35239 | 21.617069 | 1.126535 |
# Call solve method of inner cbpdn object
Xi = self.cbpdn.solve()
# Copy attributes from inner cbpdn object
self.timer = self.cbpdn.timer
self.itstat = self.cbpdn.itstat
# Return result of inner cbpdn object
return Xi | def solve(self) | Call the solve method of the inner cbpdn object and return the
result. | 6.022942 | 3.424233 | 1.758917 |
if X is None:
X = self.getcoef()
Xf = sl.rfftn(X, None, self.cbpdn.cri.axisN)
slc = (slice(None),)*self.dimN + \
(slice(self.chncs[b], self.chncs[b+1]),)
Sf = np.sum(self.cbpdn.Df[slc] * Xf, axis=self.cbpdn.cri.axisM)
return sl.irfftn(Sf, self.... | def reconstruct(self, b, X=None) | Reconstruct representation of signal b in signal set. | 4.84065 | 4.610575 | 1.049901 |
# Check that nstnm is an attribute of cls
if nstnm in cls.__dict__:
# Get the attribute of cls by its name
nst = cls.__dict__[nstnm]
# Check that the attribute is a class
if isinstance(nst, type):
# Get the module in which the outer class is defined
... | def _fix_nested_class_lookup(cls, nstnm) | Fix name lookup problem that prevents pickling of classes with
nested class definitions. The approach is loosely based on that
implemented at https://git.io/viGqU , simplified and modified to
work in both Python 2.7 and Python 3.x.
Parameters
----------
cls : class
Outer class to which fi... | 3.728541 | 3.550979 | 1.050004 |
# Extended name for the class that will be added to the module namespace
extnm = '_' + cls.__name__ + '_' + pstfx
# Get the module in which the dynamic class is defined
mdl = sys.modules[cls.__module__]
# Allow lookup of the dynamically generated class within the module via
# its extended ... | def _fix_dynamic_class_lookup(cls, pstfx) | Fix name lookup problem that prevents pickling of dynamically
defined classes.
Parameters
----------
cls : class
Dynamically generated class to which fix is to be applied
pstfx : string
Postfix that can be used to identify dynamically generated classes
that are equivalent by const... | 4.364373 | 4.472595 | 0.975803 |
if fmtmap is None:
fmtmap = {}
fwdthn = fprec + fwdthdlt
# Construct a list specifying the format string for each field.
# Use format string from fmtmap if specified, otherwise use
# a %d specifier with field width fwdth0 for the first field,
# or a %e specifier with field width f... | def solve_status_str(hdrlbl, fmtmap=None, fwdth0=4, fwdthdlt=6,
fprec=2) | Construct header and format details for status display of an
iterative solver.
Parameters
----------
hdrlbl : tuple of strings
Tuple of field header strings
fmtmap : dict or None, optional (default None)
A dict providing a mapping from field header strings to print
format strings,... | 3.659066 | 3.41891 | 1.070244 |
# Take no action of self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype
else:
self.dt... | def set_dtype(self, opt, dtype) | Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.Constrai... | 6.041076 | 4.819912 | 1.253358 |
# If `val` is None and `dval` is not None, replace it with dval
if dval is not None and val is None:
val = dval
# If dtype is not None, assume val is numeric and convert it to
# type dtype
if dtype is not None and val is not None:
if isinstance(... | def set_attr(self, name, val, dval=None, dtype=None, reset=False) | Set an object attribute by its name. The attribute value
can be specified as a primary value `val`, and as default
value 'dval` that will be used if the primary value is None.
This arrangement allows an attribute to be set from an entry
in an options object, passed as `val`, while specif... | 2.847087 | 2.656147 | 1.071886 |
rank = comm.Get_rank() # Id of this process
size = comm.Get_size() # Total number of processes in communicator
end = 0
# The scan should be done with ints, not floats
ranklen = int(arrlen / size)
if rank < arrlen % size:
ranklen += 1
# Compute upper limit based on the sizes c... | def _get_rank_limits(comm, arrlen) | Determine the chunk of the grid that has to be computed per
process. The grid has been 'flattened' and has arrlen length. The
chunk assigned to each process depends on its rank in the MPI
communicator.
Parameters
----------
comm : MPI communicator object
Describes topology of network: num... | 6.298768 | 7.143213 | 0.881784 |
# Open status display
fmtstr, nsep = self.display_start()
# Start solve timer
self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Main optimisation iterations
for self.k in range(self.k, self.k + self.op... | def solve(self) | Start (or re-start) optimisation. This method implements the
framework for the iterations of a FISTA algorithm. There is
sufficient flexibility in overriding the component methods that
it calls that it is usually not necessary to override this method
in derived clases.
If option... | 4.200945 | 3.408813 | 1.232378 |
if grad is None:
grad = self.eval_grad()
V = self.Y - (1. / self.L) * grad
self.X = self.eval_proxop(V)
return grad | def proximal_step(self, grad=None) | Compute proximal update (gradient descent + regularization). | 6.553319 | 6.237304 | 1.050665 |
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yprv = self.Y.copy()
self.Y = self.X + ((tprv - 1.) / self.t) * (self.X - self.Xprv) | def combination_step(self) | Build next update by a smart combination of previous updates.
(standard FISTA :cite:`beck-2009-fast`). | 5.54596 | 5.047621 | 1.098727 |
gradY = self.eval_grad() # Given Y(f), this updates computes gradY(f)
maxiter = self.L_maxiter
iterBTrack = 0
linesearch = 1
while linesearch and iterBTrack < maxiter:
self.proximal_step(gradY) # Given gradY(f), L, this updates X(f)
f = sel... | def standard_backtrack(self) | Estimate step size L by computing a linesearch that
guarantees that F <= Q according to the standard FISTA
backtracking strategy in :cite:`beck-2009-fast`.
This also updates variable Y. | 6.459764 | 5.784864 | 1.116666 |
self.L *= self.L_gamma_d
maxiter = self.L_maxiter
iterBTrack = 0
linesearch = 1
self.store_Yprev()
while linesearch and iterBTrack < maxiter:
t = float(1. + np.sqrt(1. + 4. * self.L * self.Tk)) / (2. * self.L)
T = self.Tk + t
... | def robust_backtrack(self) | Estimate step size L by computing a linesearch that
guarantees that F <= Q according to the robust FISTA
backtracking strategy in :cite:`florea-2017-robust`.
This also updates all the supporting variables. | 5.558023 | 5.233961 | 1.061915 |
r = self.rsdl()
adapt_tol = self.opt['RelStopTol']
if self.opt['AutoStop', 'Enabled']:
adapt_tol = self.tau0 / (1. + self.k)
return r, adapt_tol | def compute_residuals(self) | Compute residuals and stopping thresholds. | 11.950301 | 9.632377 | 1.240639 |
dict = {'Itn': 'Iter'}
dict.update(cls.hdrval_objfun)
dict.update({'Rsdl': 'Rsdl', 'F': 'F_Btrack', 'Q': 'Q_Btrack',
'It_Bt': 'IterBTrack', 'L': 'L'})
return dict | def hdrval(cls) | Construct dictionary mapping display column title to
IterationStats entries. | 11.914172 | 11.775415 | 1.011784 |
tk = self.timer.elapsed(self.opt['IterTimer'])
tpl = (k,) + self.eval_objfn() + \
(frcxd, self.F, self.Q, self.iterBTrack, self.L) + \
self.itstat_extra() + (tk,)
return type(self).IterationStats(*tpl) | def iteration_stats(self, k, frcxd) | Construct iteration stats record tuple. | 12.796123 | 10.833296 | 1.181185 |
fval = self.obfn_f(self.X)
gval = self.obfn_g(self.X)
obj = fval + gval
return (obj, fval, gval) | def eval_objfn(self) | Compute components of objective function as well as total
contribution to objective function. | 3.906413 | 3.385605 | 1.15383 |
if gradf is None:
gradf = self.eval_grad()
self.Vf[:] = self.Yf - (1. / self.L) * gradf
V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)
self.X[:] = self.eval_proxop(V)
self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
return gradf | def proximal_step(self, gradf=None) | Compute proximal update (gradient descent + constraint).
Variables are mapped back and forth between input and
frequency domains. | 5.157978 | 4.792406 | 1.076282 |
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yfprv = self.Yf.copy()
self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv) | def combination_step(self) | Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`). | 6.152603 | 5.671999 | 1.084733 |
r
return np.sum(np.real(np.conj(Dxy) * gradY)) | def eval_linear_approx(self, Dxy, gradY) | r"""Compute term :math:`\langle \nabla f(\mathbf{y}),
\mathbf{x} - \mathbf{y} \rangle` (in frequency domain) that is
part of the quadratic function :math:`Q_L` used for
backtracking. Since this class computes the backtracking in
the DFT, it is important to preserve the DFT scaling. | 9.453895 | 7.436784 | 1.271234 |
r = np.asarray(vref, dtype=np.float64).ravel()
c = np.asarray(vcmp, dtype=np.float64).ravel()
return np.mean(np.abs(r - c)**2) | def mse(vref, vcmp) | Compute Mean Squared Error (MSE) between two images.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
Returns
-------
x : float
MSE between `vref` and `vcmp` | 2.358032 | 2.491932 | 0.946267 |
dv = np.var(vref)
with np.errstate(divide='ignore'):
rt = dv / mse(vref, vcmp)
return 10.0 * np.log10(rt) | def snr(vref, vcmp) | Compute Signal to Noise Ratio (SNR) of two images.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
Returns
-------
x : float
SNR of `vcmp` with respect to `vref` | 4.513562 | 5.886715 | 0.766737 |
if rng is None:
rng = vref.max() - vref.min()
dv = (rng + 0.0)**2
with np.errstate(divide='ignore'):
rt = dv / mse(vref, vcmp)
return 10.0 * np.log10(rt) | def psnr(vref, vcmp, rng=None) | Compute Peak Signal to Noise Ratio (PSNR) of two images. The PSNR
calculation defaults to using the less common definition in terms
of the actual range (i.e. max minus min) of the reference signal
instead of the maximum possible range for the data type
(i.e. :math:`2^b-1` for a :math:`b` bit representat... | 3.906975 | 4.410536 | 0.885828 |
msedeg = mse(vref, vdeg)
mserst = mse(vref, vrst)
with np.errstate(divide='ignore'):
rt = msedeg / mserst
return 10.0 * np.log10(rt) | def isnr(vref, vdeg, vrst) | Compute Improvement Signal to Noise Ratio (ISNR) for reference,
degraded, and restored images.
Parameters
----------
vref : array_like
Reference image
vdeg : array_like
Degraded image
vrst : array_like
Restored image
Returns
-------
x : float
ISNR of `vrst` ... | 3.357614 | 3.987182 | 0.842102 |
blrvar = np.var(vblr)
nsevar = np.var(vnsy - vblr)
with np.errstate(divide='ignore'):
rt = blrvar / nsevar
return 10.0 * np.log10(rt) | def bsnr(vblr, vnsy) | Compute Blurred Signal to Noise Ratio (BSNR) for a blurred and noisy
image.
Parameters
----------
vblr : array_like
Blurred noise free image
vnsy : array_like
Blurred image with additive noise
Returns
-------
x : float
BSNR of `vnsy` with respect to `vblr` and `vdeg` | 4.302083 | 4.584685 | 0.93836 |
# Calculate difference, promoting to float if vref and vcmp have integer
# dtype
emap = np.asarray(vref, dtype=np.float64) - \
np.asarray(vcmp, dtype=np.float64)
# Input images in reference code on which this implementation is
# based are assumed to be on range [0,...,255].
if resc... | def pamse(vref, vcmp, rescale=True) | Compute Perceptual-fidelity Aware Mean Squared Error (PAMSE) IQA metric
:cite:`xue-2013-perceptual`. This implementation is a translation of the
reference Matlab implementation provided by the authors of
:cite:`xue-2013-perceptual`.
Parameters
----------
vref : array_like
Reference image
... | 5.663913 | 5.489201 | 1.031828 |
# Input images in reference code on which this implementation is
# based are assumed to be on range [0,...,255].
if rescale:
scl = (255.0 / vref.max())
else:
scl = np.float32(1.0)
T = 170.0
dwn = 2
dx = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) / 3.0
dy = dx.T... | def gmsd(vref, vcmp, rescale=True, returnMap=False) | Compute Gradient Magnitude Similarity Deviation (GMSD) IQA metric
:cite:`xue-2014-gradient`. This implementation is a translation of the
reference Matlab implementation provided by the authors of
:cite:`xue-2014-gradient`.
Parameters
----------
vref : array_like
Reference image
vcmp :... | 2.369661 | 2.285211 | 1.036955 |
# Extract method selection argument or set default
if 'method' in kwargs:
method = kwargs['method']
del kwargs['method']
else:
method = 'cns'
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM
e... | def ConvCnstrMODMaskDcpl(*args, **kwargs) | A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
with Mask Decoupling problems, and returns an object instantiated
with the provided. parameters. The wrapper is designed to allow the
appropriate object to be created by calli... | 3.524389 | 2.565576 | 1.373722 |
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM.Options
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG.Options
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus.Options
else:
raise ... | def ConvCnstrMODMaskDcplOptions(opt=None, method='cns') | A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional Constrained MOD with Mask Decoupling problem,
and returns an object instantiated with the provided parameters.
The wrapper is designed to allow the appropri... | 3.765089 | 3.562943 | 1.056736 |
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Ub0 = (s... | def uinit(self, ushape) | Return initialiser for working variable U | 7.614602 | 7.113163 | 1.070494 |
r
if self.opt['LinSolveCheck']:
Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
axis=self.cri.axisK)
ax = ZHop(Zop(self.Xf)) + self.Xf
self.xrrs = sl.rrs(ax, b)... | def xstep_check(self, b) | r"""Check the minimisation of the Augmented Lagrangian with
respect to :math:`\mathbf{x}` by method `xstep` defined in
derived classes. This method should be called at the end of any
`xstep` method. | 8.155298 | 8.05528 | 1.012417 |
r
AXU = self.AX + self.U
Y0 = (self.rho*(self.block_sep0(AXU) - self.S)) / (self.W**2 +
self.rho)
Y1 = self.Pcn(self.block_sep1(AXU))
self.Y = self.block_cat(Y0, Y1) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 11.955232 | 10.629848 | 1.124685 |
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
self.AX = self.AXnr
else:
alpha = self.rlx
self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
self.var_y0() + self.S, self.var_y1()) | def relax_AX(self) | Implement relaxation if option ``RelaxParam`` != 1.0. | 7.152822 | 6.593637 | 1.084807 |
r
return np.swapaxes(
Y[(slice(None),)*self.blkaxis + (slice(0, self.blkidx),)],
self.cri.axisK, self.cri.axisM) | def block_sep0(self, Y) | r"""Separate variable into component corresponding to
:math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from
parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to
allow swapping of K (multi-image) and M (filter) axes in block 0
so that it can be concatenated on axis M... | 12.225887 | 11.334359 | 1.078657 |
r
return np.concatenate((np.swapaxes(Y0, self.cri.axisK,
self.cri.axisM), Y1),
axis=self.blkaxis) | def block_cat(self, Y0, Y1) | r"""Concatenate components corresponding to :math:`\mathbf{y}_0`
and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`. The
method from parent class :class:`.ADMMTwoBlockCnstrnt` is
overridden here to allow swapping of K (multi-image) and M
(filter) axes in block 0 so that it can be co... | 12.573731 | 9.642084 | 1.304047 |
r
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = sl.rfftn(Y0, None, self.cri.axisN)
return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f,
... | def cnst_A0T(self, Y0) | r"""Compute :math:`A_0^T \mathbf{y}_0` component of
:math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`). | 9.822392 | 9.259719 | 1.060766 |
dfd = self.obfn_g0(self.obfn_g0var())
cns = self.obfn_g1(self.obfn_g1var())
return (dfd, cns) | def eval_objfn(self) | Compute components of regularisation function as well as total
contribution to objective function. | 7.549992 | 6.788647 | 1.11215 |
r
return np.linalg.norm((self.Pcn(Y1) - Y1)) | def obfn_g1(self, Y1) | r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective
function. | 20.126724 | 18.154442 | 1.108639 |
return self.rho*np.linalg.norm(self.cnst_AT(self.U)) | def rsdl_s(self, Yprev, Y) | Compute dual residual vector. | 21.36132 | 18.776247 | 1.137678 |
r
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = sl.rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:] = sl.solvemdbi_ism(self.Zf... | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.254999 | 5.856908 | 1.067969 |
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
if self.rlx == 1.0:
self.AX1 = s... | def relax_AX(self) | The parent class method that this method overrides only
implements the relaxation step for the variables of the baseline
consensus algorithm. This method calls the overridden method and
then implements the relaxation step for the additional variables
required for the mask decoupling modi... | 8.452846 | 7.280375 | 1.161045 |
self.YU1[:] = self.Y1 - self.U1
self.ZSf = np.conj(self.Zf) * (self.Sf + sl.rfftn(
self.YU1, None, self.cri.axisN))
rho = self.rho
self.rho = 1.0
super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
self.rho = rho | def xstep(self) | The xstep of the baseline consensus class from which this
class is derived is re-used to implement the xstep of the
modified algorithm by replacing ``self.ZSf``, which is constant
in the baseline algorithm, with a quantity derived from the
additional variables ``self.Y1`` and ``self.U1``... | 14.0013 | 7.814492 | 1.791709 |
super(ConvCnstrMODMaskDcpl_Consensus, self).ystep()
AXU1 = self.AX1 + self.U1
self.Y1 = self.rho*(AXU1 - self.S) / (self.W**2 + self.rho) | def ystep(self) | The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm. | 19.719498 | 16.462637 | 1.197833 |
super(ConvCnstrMODMaskDcpl_Consensus, self).ustep()
self.U1 += self.AX1 - self.Y1 - self.S | def ustep(self) | The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm. | 35.940769 | 34.290634 | 1.048122 |
r
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0 | def obfn_dfd(self) | r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`. | 12.523978 | 12.950996 | 0.967028 |
# The full primary residual is straightforward to compute from
# the primary residuals for the baseline algorithm and for the
# additional variables
r0 = self.rsdl_r(self.AXnr, self.Y)
r1 = self.AX1nr - self.Y1 - self.S
r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))... | def compute_residuals(self) | Compute residuals and stopping thresholds. The parent class
method is overridden to ensure that the residual calculations
include the additional variables introduced in the modification
to the baseline algorithm. | 3.894473 | 3.692116 | 1.054808 |
super(RobustPCA, self).solve()
return self.X, self.Y | def solve(self) | Start (or re-start) optimisation. | 15.100923 | 12.232013 | 1.234541 |
r
self.X, self.ss = sp.prox_nuclear(self.S - self.Y - self.U,
1/self.rho) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 26.283745 | 18.246349 | 1.440493 |
r
self.Y = np.asarray(sp.prox_l1(self.S - self.AX - self.U,
self.lmbda/self.rho), dtype=self.dtype) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 16.272482 | 11.890792 | 1.368494 |
if self.opt['fEvalX']:
return self.X
else:
return self.cnst_c() - self.cnst_B(self.Y) | def obfn_fvar(self) | Variable to be evaluated in computing regularisation term,
depending on 'fEvalX' option value. | 14.122071 | 7.300998 | 1.934266 |
if self.opt['fEvalX']:
rnn = np.sum(self.ss)
else:
rnn = sp.norm_nuclear(self.obfn_fvar())
rl1 = np.sum(np.abs(self.obfn_gvar()))
cns = np.linalg.norm(self.X + self.Y - self.S)
obj = rnn + self.lmbda*rl1
return (obj, rnn, rl1, cns) | def eval_objfn(self) | Compute components of objective function as well as total
contribution to objective function. | 8.285515 | 7.579284 | 1.093179 |
vn = np.sqrt(np.sum(v**2, 0))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype) | def normalise(v) | Normalise columns of matrix.
Parameters
----------
v : array_like
Array with columns to be normalised
Returns
-------
vnrm : ndarray
Normalised array | 3.470863 | 4.154955 | 0.835355 |
self.Z = np.asarray(Z, dtype=self.dtype)
self.SZT = self.S.dot(Z.T)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.lu_factor(Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def setcoef(self, Z) | Set coefficient array. | 6.670438 | 6.222125 | 1.072051 |
r
self.X = np.asarray(sl.lu_solve_AATI(self.Z, self.rho, self.SZT +
self.rho*(self.Y - self.U), self.lu, self.piv,),
dtype=self.dtype) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 20.295996 | 15.869741 | 1.278912 |
self.lu, self.piv = sl.lu_factor(self.Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def rhochange(self) | Re-factorise matrix when rho changes | 6.706736 | 4.853309 | 1.381889 |
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp... | def cupy_wrapper(func) | A wrapper function that converts numpy ndarray arguments to cupy
arrays, and convert any cupy arrays returned by the wrapped
function into numpy ndarrays. | 1.500666 | 1.450309 | 1.034722 |
# Convert int axis into a tuple
if isinstance(axis, int):
axis = (axis,)
# Handle negative axis indices
axis = tuple([k if k >= 0 else x.ndim + k for k in axis])
# Complement of axis set on full set of axes of input v
caxis = tuple(set(range(x.ndim)) - set(axis))
# Permute axes... | def ndto2d(x, axis=-1) | Convert a multi-dimensional array into a 2d array, with the axes
specified by the `axis` parameter flattened into an index along
rows, and the remaining axes flattened into an index along the
columns. This operation can not be properly achieved by a simple
reshape operation since a reshape would shuffle... | 6.075932 | 4.691793 | 1.295013 |
# Extract components of conversion information tuple
xts = rsi[0]
prm = rsi[1]
# Reshape x to the shape obtained after permuting axes in ndto2d
xt = xtr.reshape(xts)
# Undo axis permutation performed in ndto2d
x = np.transpose(xt, np.argsort(prm))
# Return array with shape correspo... | def ndfrom2d(xtr, rsi) | Undo the array shape conversion applied by :func:`ndto2d`,
returning the input 2D array to its original shape.
Parameters
----------
xtr : array_like
Two-dimensional input array
rsi : tuple
A tuple containing the shape of the axis-permuted array and the
permutation order applied ... | 8.286704 | 6.52033 | 1.270903 |
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    axu = self.AX + self.U
    # l2 (TV) proximal step on all but the final block of Y
    self.Y[..., :-1] = sp.prox_l2(axu[..., :-1], self.mu / self.rho)
    # Weighted l1 proximal step on the final block
    self.Y[..., -1] = sp.prox_l1(axu[..., -1],
                                 (self.lmbda / self.rho) * self.Wl1)
def obfn_reg(self):
    """Compute regularisation term and contribution to objective
    function.
    """
    # Weighted l1 norm term
    rl1 = np.abs(self.Wl1 * self.obfn_g1var()).sum()
    # Isotropic TV term: l2 norm over the final (gradient) axis
    rtv = np.sqrt((self.obfn_g0var() ** 2).sum(axis=-1)).sum()
    return (self.lmbda * rl1 + self.mu * rtv, rl1, rtv)
def cnst_A0(self, X, Xf=None):
    r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
    """
    # Transform X if a precomputed frequency-domain version was not given
    if Xf is None:
        Xf = sl.rfftn(X, axes=self.cri.axisN)
    # Apply gradient operators in the frequency domain, then weight
    grad = sl.irfftn(self.Gf * Xf[..., np.newaxis], self.cri.Nv,
                     axes=self.cri.axisN)
    return self.Wtv[..., np.newaxis] * grad
def cnst_A(self, X, Xf=None):
    r"""Compute :math:`A \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots \;\; I)^T \mathbf{x}`.
    """
    # Stack the A_0 and A_1 components along the final axis
    parts = (self.cnst_A0(X, Xf), self.cnst_A1(X))
    return np.concatenate(parts, axis=-1)
def cnst_AT(self, X):
    r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
    a component of ADMM problem constraint. In this case
    :math:`A^T \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots
    \;\; I) \mathbf{x}`.
    """
    # Sum the adjoint gradient contributions and add the identity part
    return self.cnst_A1T(X) + np.sum(self.cnst_A0T(X), axis=-1)
# We need to keep the non-relaxed version of AX since it is
# required for computation of primal residual r
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
# If RelaxParam option is 1.0 there is no relaxation
self.AX = self.AXnr
else... | def relax_AX(self) | Implement relaxation if option ``RelaxParam`` != 1.0. | 5.636659 | 5.013283 | 1.124345 |
if D is not None:
self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
self.GDf = self.Gf * (self.Wtv * self.Df)[..., np.newaxis]
# Compute D^H S
self.DSf = np.conj(self.Df) * self.Sf
if self.cri.Cd > 1:
... | def setdict(self, D=None) | Set dictionary array. | 4.428289 | 4.414048 | 1.003226 |
Y1 = Y[..., self.cri.M:]
# If cri.Cd > 1 (multi-channel dictionary), we need to undo the
# reshape performed in block_cat
if self.cri.Cd > 1:
shp = list(Y1.shape)
shp[self.cri.axisM] = self.cri.dimN
shp[self.cri.axisC] = self.cri.Cd
... | def block_sep1(self, Y) | Separate variable into component corresponding to Y1 in Y. | 7.185776 | 6.920424 | 1.038343 |
# Axes are swapped here for similar reasons to those
# motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_cat
Y1sa = np.swapaxes(Y1, self.cri.axisM, -1)[..., 0]
# If cri.Cd > 1 (multi-channel dictionary) Y0 has a singleton
# channel axis but Y1 has a non-singleton ... | def block_cat(self, Y0, Y1) | Concatenate components corresponding to Y0 and Y1 blocks
into Y. | 6.200484 | 6.289706 | 0.985815 |
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    axu = self.AX + self.U
    # Weighted l1 proximal step on the Y0 block (in place)
    self.block_sep0(self.Y)[:] = sp.prox_l1(
        self.block_sep0(axu), (self.lmbda / self.rho) * self.Wl1)
    # Group l2 proximal step over channel and final axes on Y1 (in place)
    self.block_sep1(self.Y)[:] = sp.prox_l2(
        self.block_sep1(axu), self.mu / self.rho,
        axis=(self.cri.axisC, -1))
def obfn_g0var(self):
    """Variable to be evaluated in computing the TV regularisation
    term, depending on the ``gEvalY`` option value.
    """
    if self.opt['gEvalY']:
        return self.var_y0()
    # Using the cached self.AXnr avoids an extra call to cnst_A0
    return self.block_sep0(self.AXnr)
def obfn_g1var(self):
    r"""Variable to be evaluated in computing the :math:`\ell_1`
    regularisation term, depending on the ``gEvalY`` option value.
    """
    if self.opt['gEvalY']:
        return self.var_y1()
    # Using the cached self.AXnr avoids an extra call to cnst_A1
    return self.block_sep1(self.AXnr)
def cnst_A1(self, X, Xf=None):
    r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
    """
    # Transform X if a precomputed frequency-domain version was not given
    if Xf is None:
        Xf = sl.rfftn(X, axes=self.cri.axisN)
    # Inner product with the gradient-filtered dictionary, back to
    # the spatial domain
    prod = sl.inner(self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM)
    return sl.irfftn(prod, self.cri.Nv, self.cri.axisN)
def cnst_A1T(self, Y1):
    r"""Compute :math:`A_1^T \mathbf{y}_1` component of
    :math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
    (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
    """
    # Adjoint is applied via the conjugate in the frequency domain
    yf = sl.rfftn(Y1, None, axes=self.cri.axisN)
    return sl.irfftn(np.conj(self.GDf) * yf, self.cri.Nv,
                     self.cri.axisN)
def cnst_AT(self, Y):
    r"""Compute :math:`A^T \mathbf{y}`. In this case
    :math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
    \ldots) \mathbf{y}`.
    """
    y0term = self.cnst_A0T(self.block_sep0(Y))
    y1term = np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1)
    return y0term + y1term
def setdict(self, D):
    """Set dictionary array."""
    self.D = np.asarray(D, dtype=self.dtype)
    # Cholesky-factorise for efficient x-step solves
    lu, piv = sl.cho_factor(self.D, self.rho)
    self.lu = np.asarray(lu, dtype=self.dtype)
    self.piv = piv
    # Cache D^T S, used in the x-step right-hand side
    self.DTS = self.D.T.dot(self.S)
r
self.X = np.asarray(sl.cho_solve_ATAI(
self.D, self.rho, self.DTS + self.rho * (self.Y - self.U),
self.lu, self.piv), dtype=self.dtype)
if self.opt['LinSolveCheck']:
b = self.DTS + self.rho * (self.Y - self.U)
ax = self.D.T.dot(self.D.dot(self.... | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.5609 | 5.928817 | 1.106612 |
def obfn_dfd(self):
    r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
    \mathbf{s} \|_2^2`.
    """
    resid = self.D.dot(self.obfn_fvar()) - self.S
    return 0.5 * np.linalg.norm(resid) ** 2
def rhochange(self):
    """Re-factorise matrix when rho changes."""
    lu, piv = sl.cho_factor(self.D, self.rho)
    self.lu = np.asarray(lu, dtype=self.dtype)
    self.piv = piv
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Weighted soft thresholding of AX + U
    thresh = (self.lmbda / self.rho) * self.wl1
    self.Y = np.asarray(sp.prox_l1(self.AX + self.U, thresh),
                        dtype=self.dtype)
    super(BPDN, self).ystep()
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Joint l1/l2 (elastic net style) proximal step
    prox = sp.prox_l1l2(self.AX + self.U,
                        (self.lmbda / self.rho) * self.wl1,
                        self.mu / self.rho, axis=-1)
    self.Y = np.asarray(prox, dtype=self.dtype)
    GenericBPDN.ystep(self)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Project AX + U onto the l1 ball of radius gamma
    prj = sp.proj_l1(self.AX + self.U, self.gamma, axis=0)
    self.Y = np.asarray(prj, dtype=self.dtype)
    super(BPDNProjL1, self).ystep()
def eval_objfn(self):
    """Compute components of regularisation function as well as total
    contribution to objective function.
    """
    dfd = self.obfn_dfd()
    gvr = self.obfn_gvar()
    # Constraint violation: distance of gvr from its l1-ball projection
    cns = np.linalg.norm(sp.proj_l1(gvr, self.gamma, axis=0) - gvr)
    return (dfd, cns)
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
U0 = np.... | def uinit(self, ushape) | Return initialiser for working variable U. | 7.910183 | 7.284658 | 1.085869 |
def setdict(self, D):
    """Set dictionary array."""
    self.D = np.asarray(D, dtype=self.dtype)
    # Cholesky-factorise for efficient x-step solves (unit rho term)
    lu, piv = sl.cho_factor(self.D, 1.0)
    self.lu = np.asarray(lu, dtype=self.dtype)
    self.piv = piv
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}`.
    """
    YU = self.Y - self.U
    # Right-hand side combines the Y0 block and D^T applied to Y1
    rhs = self.block_sep0(YU) + self.D.T.dot(self.block_sep1(YU))
    self.X = np.asarray(
        sl.cho_solve_ATAI(self.D, 1.0, rhs, self.lu, self.piv),
        dtype=self.dtype)
r
AXU = self.AX + self.U
Y0 = np.asarray(sp.prox_l1(self.block_sep0(AXU), self.wl1 / self.rho),
dtype=self.dtype)
if self.opt['NonNegCoef']:
Y0[Y0 < 0.0] = 0.0
Y1 = sl.proj_l2ball(self.block_sep1(AXU), self.S, self.epsilon, axes=0)
sel... | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 8.203942 | 7.548796 | 1.086788 |
def eval_objfn(self):
    r"""Compute components of objective function as well as total
    contribution to objective function. The objective function is
    :math:`\| \mathbf{x} \|_1` and the constraint violation
    measure is :math:`P(\mathbf{x}) - \mathbf{x}` where
    :math:`P(\mathbf{x})` is the projection into the constraint set.
    """
    # Weighted l1 objective
    obj = np.abs(self.wl1 * self.obfn_g0var()).sum()
    # Constraint violation: distance from the l2-ball projection
    g1 = self.obfn_g1var()
    cns = np.linalg.norm(
        sl.proj_l2ball(g1, self.S, self.epsilon, axes=0) - g1)
    return (obj, cns)
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return (... | def uinit(self, ushape) | Return initialiser for working variable U. | 9.645727 | 8.659322 | 1.113913 |
r
self.X = sl.idctii(self.Gamma*sl.dctii(self.Y + self.S - self.U,
axes=self.axes), axes=self.axes)
if self.opt['LinSolveCheck']:
self.xrrs = sl.rrs(
self.X + (self.lmbda/self.rho) *
sl.idctii((self.Alpha... | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.709798 | 6.105496 | 1.098977 |
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Weighted soft thresholding of the S-shifted variable
    shifted = self.AX - self.S + self.U
    self.Y = sp.prox_l1(shifted, self.Wdf / self.rho)
def rhochange(self):
    """Action to be taken when rho parameter is changed."""
    # Diagonal factor used in the DCT-domain x-step solve
    denom = 1.0 + (self.lmbda / self.rho) * self.Alpha ** 2
    self.Gamma = 1.0 / denom
def obfn_gvar(self):
    """Variable to be evaluated in computing regularisation term,
    depending on 'gEvalY' option value.
    """
    return self.Y if self.opt['gEvalY'] else \
        self.cnst_A(self.X) - self.cnst_c()
def eval_objfn(self):
    r"""Compute components of objective function as well as total
    contribution to objective function. Data fidelity term is
    :math:`(1/2) \| \mathbf{x} - \mathbf{s} \|_2^2` and
    regularisation term is :math:`\| D \mathbf{x} \|_2^2`.
    """
    gvr = self.obfn_gvar()
    # Weighted l1 data fidelity term
    dfd = np.abs(self.Wdf * gvr).sum()
    # Smoothness regularisation computed via the DCT-domain operator
    dx = sl.idctii(self.Alpha * sl.dctii(self.X, axes=self.axes),
                   axes=self.axes)
    reg = 0.5 * np.linalg.norm(dx) ** 2
    return (dfd + self.lmbda * reg, dfd, reg)
def gpu_info():
    """Return a list of namedtuples representing attributes of each GPU
    device.
    """
    GPUInfo = namedtuple('GPUInfo',
                         ['name', 'driver', 'totalmem', 'freemem'])
    # One record per device reported by GPUtil
    return [GPUInfo(g.name, g.driver, g.memoryTotal, g.memoryFree)
            for g in GPUtil.getGPUs()]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.