text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def getPcn(dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
    """Construct the constraint set projection function for the
    convolutional dictionary update problem.

    Parameters
    ----------
    dsz : tuple
        Filter support size(s), specified using the same format as the
        `dsz` parameter of :func:`bcrop`
    Nv : tuple
        Sizes of problem spatial indices
    dimN : int, optional (default 2)
        Number of problem spatial indices
    dimC : int, optional (default 1)
        Number of problem channel indices
    crp : bool, optional (default False)
        Flag indicating whether the result should be cropped to the
        support of the largest filter in the dictionary
    zm : bool, optional (default False)
        Flag indicating whether the projection function should include
        filter mean subtraction

    Returns
    -------
    fn : function
        Constraint set projection function
    """

    # Select the projection implementation corresponding to the
    # (crp, zm) flag combination
    if crp:
        fnc = _Pcn_zm_crp if zm else _Pcn_crp
    else:
        fnc = _Pcn_zm if zm else _Pcn
    # Bind the problem geometry into the selected projection function
    return functools.partial(fnc, dsz=dsz, Nv=Nv, dimN=dimN, dimC=dimC)
def tiledict(D, sz=None):
    """Construct an image allowing visualization of dictionary content.

    Parameters
    ----------
    D : array_like
        Dictionary matrix/array.
    sz : tuple
        Size of each block in dictionary.

    Returns
    -------
    im : ndarray
        Image tiled with dictionary entries.
    """

    # Handle standard 2D (non-convolutional) dictionary
    if D.ndim == 2:
        D = D.reshape((sz + (D.shape[1],)))
        sz = None
    dsz = D.shape

    # axisM is the axis indexing dictionary atoms; szni is the index
    # within each `sz` entry holding the atom count for that size
    if D.ndim == 4:
        axisM = 3
        szni = 3
    else:
        axisM = 2
        szni = 2

    # Construct dictionary atom size vector if not provided
    if sz is None:
        sz = np.tile(np.array(dsz[0:2]).reshape([2, 1]), (1, D.shape[axisM]))
    else:
        sz = np.array(sum(tuple((x[0:2],) * x[szni] for x in sz), ())).T

    # Compute the maximum atom dimensions
    mxsz = np.amax(sz, 1)

    # Shift and scale values to [0, 1]
    D = D - D.min()
    D = D / D.max()

    # Construct tiled image: Vr x Vc grid of atoms with one-pixel gaps
    N = dsz[axisM]
    Vr = int(np.floor(np.sqrt(N)))
    Vc = int(np.ceil(N / float(Vr)))
    if D.ndim == 4:
        im = np.ones((Vr*mxsz[0] + Vr - 1, Vc*mxsz[1] + Vc - 1, dsz[2]))
    else:
        im = np.ones((Vr*mxsz[0] + Vr - 1, Vc*mxsz[1] + Vc - 1))
    k = 0
    for l in range(0, Vr):
        for m in range(0, Vc):
            # Top-left corner of the current tile (grid cell + gap offset)
            r = mxsz[0]*l + l
            c = mxsz[1]*m + m
            if D.ndim == 4:
                im[r:(r+sz[0, k]), c:(c+sz[1, k]), :] = \
                    D[0:sz[0, k], 0:sz[1, k], :, k]
            else:
                im[r:(r+sz[0, k]), c:(c+sz[1, k])] = \
                    D[0:sz[0, k], 0:sz[1, k], k]
            k = k + 1
            if k >= N:
                break
        if k >= N:
            break

    return im
def extractblocks(img, blksz, stpsz=None):
    """Extract blocks from an ndarray signal into an ndarray.

    Parameters
    ----------
    img : ndarray or tuple of ndarrays
        nd array of images, or tuple of images (stacked on a new final
        axis)
    blksz : tuple
        tuple of block sizes, blocks are taken starting from the first
        index of img
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        tuple of step sizes between neighboring blocks

    Returns
    -------
    blks : ndarray
        image blocks, indexed by a final block-index axis
    """

    # See http://stackoverflow.com/questions/16774148 and
    # sklearn.feature_extraction.image.extract_patches_2d
    if isinstance(img, tuple):
        img = np.stack(img, axis=-1)
    if stpsz is None:
        stpsz = (1,) * len(blksz)

    # Number of whole blocks along each axis; trailing axes beyond
    # len(blksz) default to block size and step of 1
    nblk = tuple(
        int(np.floor((dim - blk) / stp) + 1)
        for dim, blk, stp in zip_longest(img.shape, blksz, stpsz,
                                         fillvalue=1))

    # Byte strides stepping from one block origin to the next
    blkstrd = tuple(s * t for s, t in
                    zip_longest(img.strides, stpsz, fillvalue=1))

    # Zero-copy view with block-interior axes followed by block-index axes
    strided = np.lib.stride_tricks.as_strided(
        img, blksz + nblk, img.strides[:len(blksz)] + blkstrd)

    # Collapse all block-index axes into a single final axis
    return np.reshape(strided, blksz + (-1,))
def averageblocks(blks, imgsz, stpsz=None):
    """Average blocks together from an ndarray to reconstruct ndarray
    signal.

    Parameters
    ----------
    blks : ndarray
        nd array of blocks of a signal, indexed by a final axis
    imgsz : tuple
        tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        tuple of step sizes between neighboring blocks

    Returns
    -------
    imgs : ndarray
        reconstructed signal, unknown pixels are returned as np.nan
    """

    bsz = blks.shape[:-1]
    if stpsz is None:
        stpsz = (1,) * len(bsz)

    # Number of whole blocks along each axis; trailing axes beyond the
    # block rank default to block size and step of 1
    nblk = tuple(int(np.floor((dim - blk) / stp) + 1)
                 for dim, blk, stp in zip_longest(imgsz, bsz, stpsz,
                                                  fillvalue=1))

    # Unflatten the final block-index axis into per-dimension indices
    blks = blks.reshape(bsz + nblk)

    # Accumulate block values and per-pixel contribution counts
    accum = np.zeros(imgsz, dtype=blks.dtype)
    count = np.zeros(imgsz, dtype=blks.dtype)
    one = blks.dtype.type(1)
    for pos in np.ndindex(*nblk):
        # Destination region for this block; extra trailing block
        # indices address the trailing image axes directly
        sel = tuple(slice(p * s, p * s + b)
                    for p, b, s in zip(pos, bsz, stpsz)) + pos[len(bsz):]
        accum[sel] += blks[(Ellipsis,) + pos]
        count[sel] += one

    # Average covered pixels; pixels covered by no block become NaN
    return np.where(count > 0, (accum / count).astype(blks.dtype), np.nan)
def combineblocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
        nd array of blocks of a signal
    imgsz : tuple
        tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
        the function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
        reconstructed signal, unknown pixels are returned as np.nan
    """

    # Construct a vectorized append function
    def listapp(x, y):
        x.append(y)

    veclistapp = np.vectorize(listapp, otypes=[np.object_])

    blksz = blks.shape[:-1]
    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)

    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)

    # Construct an imgs matrix of empty lists
    imgs = np.empty(imgsz, dtype=np.object_)
    imgs.fill([])
    # fill() stores the *same* list object everywhere; replace each cell
    # with an independent copy so per-pixel appends don't alias
    imgs = np.frompyfunc(list, 1, 1)(imgs)

    # Iterate over each block and append the values to the corresponding
    # imgs cell
    for pos in np.ndindex(numblocks):
        slices = tuple(slice(a*c, a*c + b) for a, b, c in
                       zip_longest(pos, blksz, stpsz, fillvalue=1))
        veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze())

    # Resolve each (possibly multivalued) cell list with `fn`
    return np.vectorize(fn, otypes=[blks.dtype])(imgs)
def complex_randn(*args):
    """Return a complex array of samples drawn from a standard normal
    distribution.

    Parameters
    ----------
    *args
        Dimensions of the random array

    Returns
    -------
    a : ndarray
        Random array of shape `args` with complex dtype
    """

    # Draw real then imaginary parts so the RNG stream is consumed in
    # the same fixed order as a seeded caller expects
    re = np.random.randn(*args)
    im = np.random.randn(*args)
    return re + 1j * im
def spnoise(s, frc, smn=0.0, smx=1.0):
    """Return image with salt & pepper noise imposed on it.

    Parameters
    ----------
    s : ndarray
        Input image
    frc : float
        Desired fraction of pixels corrupted by noise
    smn : float, optional (default 0.0)
        Lower value for noise (pepper)
    smx : float, optional (default 1.0)
        Upper value for noise (salt)

    Returns
    -------
    sn : ndarray
        Noisy image
    """

    sn = s.copy()
    # One uniform draw per pixel on [-1, 1]; the two tails of width frc
    # select the corrupted pixels
    noisemap = np.random.uniform(-1.0, 1.0, s.shape)
    pepper = noisemap < frc - 1.0
    salt = noisemap > 1.0 - frc
    # Apply pepper first, then salt (salt wins on any overlap)
    sn[pepper] = smn
    sn[salt] = smx
    return sn
def pca(U, centre=False):
    """Compute the PCA basis for columns of input array `U`.

    Parameters
    ----------
    U : array_like
        2D data array with rows corresponding to different variables and
        columns corresponding to different observations
    centre : bool, optional (default False)
        Flag indicating whether to centre data

    Returns
    -------
    B : ndarray
        A 2D array representing the PCA basis; each column is a PCA
        component. B.T is the analysis transform into the PCA
        representation, and B the corresponding synthesis transform
    S : ndarray
        The eigenvalues of the PCA components
    C : ndarray or None
        None if centering is disabled, otherwise the mean of the data
        matrix subtracted in performing the centering
    """

    if centre:
        # Remove the per-variable (row) mean
        C = U.mean(axis=1, keepdims=True)
        U = U - C
    else:
        C = None

    # Left singular vectors of U are the PCA basis; squared singular
    # values are the eigenvalues of the covariance (up to scale)
    B, sv, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)
    return B, sv ** 2, C
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Lowpass filter image(s) and return low and high frequency
    components, consisting of the lowpass filtered image and its
    difference with the input image. The lowpass filter is equivalent
    to Tikhonov regularization with `lmbda` as the regularization
    parameter and a discrete gradient as the operator in the
    regularization term, i.e. the lowpass component is the solution to

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    where :math:`\mathbf{s}` is the input image, :math:`\lambda` is the
    regularization parameter, and :math:`G_i` is an operator that
    computes the discrete gradient along image axis :math:`i`. The
    highpass component is just :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
        Input image or array of images.
    lmbda : float
        Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
        Number of samples to pad at image boundaries.

    Returns
    -------
    sl : array_like
        Lowpass image or array of images.
    sh : array_like
        Highpass image or array of images.
    """

    # Discrete gradient kernels along rows and columns
    grv = np.array([-1.0, 1.0]).reshape([2, 1])
    gcv = np.array([-1.0, 1.0]).reshape([1, 2])
    # Frequency responses of the gradient operators on the padded grid
    Gr = sla.fftn(grv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
    Gc = sla.fftn(gcv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
    # Frequency response of I + lmbda*(Gr^H Gr + Gc^H Gc)
    A = 1.0 + lmbda*np.conj(Gr)*Gr + lmbda*np.conj(Gc)*Gc
    if s.ndim > 2:
        # Broadcast the 2D filter over trailing (e.g. channel) axes
        A = A[(slice(None),)*2 + (np.newaxis,)*(s.ndim-2)]
    # Symmetric padding reduces boundary artifacts of the periodic FFT
    sp = np.pad(s, ((npd, npd),)*2 + ((0, 0),)*(s.ndim-2), 'symmetric')
    # Solve the Tikhonov system pointwise in the frequency domain
    slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
    # Crop padding; the highpass component is the residual
    sl = slp[npd:(slp.shape[0] - npd), npd:(slp.shape[1] - npd)]
    sh = s - sl
    return sl.astype(s.dtype), sh.astype(s.dtype)
def gaussian(shape, sd=1.0):
    """Sample a multivariate Gaussian pdf, normalised to have unit sum.

    Parameters
    ----------
    shape : tuple or int
        Shape of output array.
    sd : float, optional (default 1.0)
        Standard deviation of Gaussian pdf.

    Returns
    -------
    gc : ndarray
        Sampled Gaussian pdf.
    """

    if isinstance(shape, int):
        shape = (shape,)

    def pdf1d(x):
        # Univariate normal pdf with standard deviation `sd`
        return np.exp(-(x ** 2) / (2.0 * sd ** 2)) / \
            (np.sqrt(2.0 * np.pi) * sd)

    # Build the separable product of 1D pdfs sampled on [-3, 3], each
    # factor broadcast along its own axis
    gc = 1.0
    ndim = len(shape)
    for axis, npts in enumerate(shape):
        ax_shape = (1,) * axis + (npts,) + (1,) * (ndim - axis - 1)
        gc = gc * pdf1d(np.linspace(-3.0, 3.0, npts).reshape(ax_shape))

    # Normalise to unit sum
    return gc / np.sum(gc)
def convdicts():
    """Access a set of example learned convolutional dictionaries.

    Returns
    -------
    cdd : dict
        A dict associating description strings with dictionaries
        represented as ndarrays
    """

    # Example dictionaries ship as an npz archive alongside this module
    pth = os.path.join(os.path.dirname(__file__), 'data', 'convdict.npz')
    npz = np.load(pth)
    # Materialise every archive entry into a plain dict of ndarrays
    return {key: npz[key] for key in npz.keys()}
def netgetdata(url, maxtry=3, timeout=10):
    """Get content of a file via a URL.

    Parameters
    ----------
    url : string
        URL of the file to be downloaded
    maxtry : int, optional (default 3)
        Maximum number of download retries
    timeout : int, optional (default 10)
        Timeout in seconds for blocking operations

    Returns
    -------
    str : io.BytesIO
        Buffered I/O stream

    Raises
    ------
    urlerror.URLError
        If the file cannot be downloaded
    """

    # Fallback error for the degenerate maxtry <= 0 case, in which the
    # retry loop body never runs
    err = ValueError('maxtry parameter should be greater than zero')
    for attempt in range(maxtry):
        try:
            rspns = urlrequest.urlopen(url, timeout=timeout)
            cntnt = rspns.read()
        except urlerror.URLError as e:
            err = e
            # Only timeouts are retried; other URL errors are fatal
            if not isinstance(e.reason, socket.timeout):
                raise
        else:
            break
    else:
        # Every attempt timed out: re-raise the last error
        raise err

    return io.BytesIO(cntnt)
def image(self, fname, group=None, scaled=None, dtype=None, idxexp=None,
          zoom=None, gray=None):
    """Get named image.

    Parameters
    ----------
    fname : string
        Filename of image
    group : string or None, optional (default None)
        Name of image group
    scaled : bool or None, optional (default None)
        Flag indicating whether images should be scaled to [0, 1] with
        np.float32 dtype (True). If None, behaviour is determined by the
        `scaled` parameter passed to the object initializer.
    dtype : data-type or None, optional (default None)
        Desired data type of images. If `scaled` is True and `dtype` is
        an integer type, the output data type is np.float32. If None,
        the data type is determined by the `dtype` parameter passed to
        the object initializer.
    idxexp : index expression or None, optional (default None)
        An index expression selecting, for example, a cropped region of
        the requested image. This selection is applied *before* any
        `zoom` rescaling so the expression does not need to be modified
        when the zoom factor is changed.
    zoom : float or None, optional (default None)
        Optional rescaling factor to apply to the images. If None,
        behaviour is determined by the `zoom` parameter passed to the
        object initializer.
    gray : bool or None, optional (default None)
        Flag indicating whether RGB images should be converted to
        grayscale. If None, behaviour is determined by the `gray`
        parameter passed to the object initializer.

    Returns
    -------
    img : ndarray
        Image array

    Raises
    ------
    IOError
        If the image is not accessible
    """

    # Resolve per-call overrides against object-level defaults
    if scaled is None:
        scaled = self.scaled
    if dtype is None:
        if self.dtype is None:
            dtype = np.uint8
        else:
            dtype = self.dtype
    # Scaling to [0, 1] needs a floating point type
    if scaled and np.issubdtype(dtype, np.integer):
        dtype = np.float32
    if zoom is None:
        zoom = self.zoom
    if gray is None:
        gray = self.gray
    if group is None:
        pth = os.path.join(self.bpth, fname)
    else:
        pth = os.path.join(self.bpth, group, fname)

    try:
        img = np.asarray(imageio.imread(pth), dtype=dtype)
    except IOError:
        raise IOError('Could not access image %s in group %s' %
                      (fname, group))

    if scaled:
        # In-place divide: dtype is guaranteed float at this point
        img /= 255.0
    if idxexp is not None:
        img = img[idxexp]
    if zoom is not None:
        if img.ndim == 2:
            img = sni.zoom(img, zoom)
        else:
            # Rescale only the two spatial axes of multi-channel images
            img = sni.zoom(img, (zoom,)*2 + (1,)*(img.ndim-2))
    if gray:
        img = rgb2gray(img)

    return img
def elapsed(self, label=None, total=True):
    """Get elapsed time since timer start.

    Parameters
    ----------
    label : string, optional (default None)
        Specify the label of the timer for which the elapsed time is
        required. If it is ``None``, the default timer with label
        specified by the ``dfltlbl`` parameter of :meth:`__init__` is
        selected.
    total : bool, optional (default True)
        If ``True`` return the total elapsed time since the first call
        of :meth:`start` for the selected timer, otherwise return the
        elapsed time since the most recent call of :meth:`start` for
        which there has not been a corresponding call to :meth:`stop`.

    Returns
    -------
    dlt : float
        Elapsed time

    Raises
    ------
    KeyError
        If a timer with the specified (non-default) label does not exist
    """

    # Get current time
    t = timer()
    # Substitute the default label if none specified
    dfltsel = label is None
    if dfltsel:
        label = self.dfltlbl
    if label not in self.t0:
        # An uninitialised *default* timer reports zero elapsed time;
        # an unknown explicit label is an error. (The previous version
        # tested `label not in self.t0` twice in sequence, making the
        # KeyError branch unreachable.)
        if dfltsel:
            return 0.0
        raise KeyError('Unrecognized timer key %s' % label)
    # If total flag is True return sum of accumulated time from
    # previous start/stop calls and current start call, otherwise
    # return just the time since the current start call
    te = 0.0
    if self.t0[label] is not None:
        te = t - self.t0[label]
    if total:
        te += self.td[label]

    return te
def elapsed(self, total=True):
    """Return the elapsed time for the timer.

    Parameters
    ----------
    total : bool, optional (default True)
        If ``True`` return the total elapsed time since the first call
        of :meth:`start` for the selected timer, otherwise return the
        elapsed time since the most recent call of :meth:`start` for
        which there has not been a corresponding call to :meth:`stop`.

    Returns
    -------
    dlt : float
        Elapsed time
    """

    # Delegate to the owning timer object, selecting this timer's label
    tmr = self.timer
    return tmr.elapsed(self.label, total=total)
def attach_keypress(fig, scaling=1.1):
    """Attach a key press event handler that configures keys for closing
    a figure and changing the figure size. Keys 'e' and 'c' respectively
    expand and contract the figure, and key 'q' closes it.

    **Note:** Resizing may not function correctly with all matplotlib
    backends (a `bug
    <https://github.com/matplotlib/matplotlib/issues/10083>`__ has been
    reported).

    Parameters
    ----------
    fig : :class:`matplotlib.figure.Figure` object
        Figure to which event handling is to be attached
    scaling : float, optional (default 1.1)
        Scaling factor for figure size changes

    Returns
    -------
    press : function
        Key press event handler function
    """

    def press(event):
        key = event.key
        if key == 'q':
            # Close the figure
            plt.close(fig)
        elif key == 'e':
            # Expand the figure by the scaling factor
            fig.set_size_inches(scaling * fig.get_size_inches(),
                                forward=True)
        elif key == 'c':
            # Contract the figure by the scaling factor
            fig.set_size_inches(fig.get_size_inches() / scaling,
                                forward=True)

    # Attach the handler at most once per figure
    if not hasattr(fig, '_sporco_keypress_cid'):
        fig._sporco_keypress_cid = fig.canvas.mpl_connect(
            'key_press_event', press)

    return press
def attach_zoom(ax, scaling=2.0):
    """Attach an event handler that supports zooming within a plot using
    the mouse scroll wheel.

    Parameters
    ----------
    ax : :class:`matplotlib.axes.Axes` object
        Axes to which event handling is to be attached
    scaling : float, optional (default 2.0)
        Scaling factor for zooming in and out

    Returns
    -------
    zoom : function
        Mouse scroll wheel event handler function
    """

    # See https://stackoverflow.com/questions/11551049
    def zoom(event):
        # Get the current x and y limits
        cur_xlim = ax.get_xlim()
        cur_ylim = ax.get_ylim()
        # Get event location
        xdata = event.xdata
        ydata = event.ydata
        # Return if cursor is not over valid region of plot
        if xdata is None or ydata is None:
            return
        if event.button == 'up':
            # Deal with zoom in
            scale_factor = 1.0 / scaling
        elif event.button == 'down':
            # Deal with zoom out
            scale_factor = scaling
        # Get distance from the cursor to the edge of the figure frame
        x_left = xdata - cur_xlim[0]
        x_right = cur_xlim[1] - xdata
        y_top = ydata - cur_ylim[0]
        y_bottom = cur_ylim[1] - ydata
        # Calculate new x and y limits, keeping the cursor position fixed
        new_xlim = (xdata - x_left * scale_factor,
                    xdata + x_right * scale_factor)
        new_ylim = (ydata - y_top * scale_factor,
                    ydata + y_bottom * scale_factor)
        # Ensure that x limit range is no larger than that of the reference
        # (NB: tuple * ndarray yields an ndarray, so the augmented
        # assignments below rebind new_xlim/new_ylim to arrays)
        if np.diff(new_xlim) > np.diff(zoom.xlim_ref):
            new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim)
        # Ensure that lower x limit is not less than that of the reference
        if new_xlim[0] < zoom.xlim_ref[0]:
            new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0])
        # Ensure that upper x limit is not greater than that of the
        # reference
        if new_xlim[1] > zoom.xlim_ref[1]:
            new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1])
        # Ensure that ylim tuple has the smallest value first (inverted
        # y axes store limits in descending order)
        if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
            ylim_ref = zoom.ylim_ref[::-1]
            new_ylim = new_ylim[::-1]
        else:
            ylim_ref = zoom.ylim_ref
        # Ensure that y limit range is no larger than that of the reference
        if np.diff(new_ylim) > np.diff(ylim_ref):
            new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim)
        # Ensure that lower y limit is not less than that of the reference
        if new_ylim[0] < ylim_ref[0]:
            new_ylim += np.array(ylim_ref[0] - new_ylim[0])
        # Ensure that upper y limit is not greater than that of the
        # reference
        if new_ylim[1] > ylim_ref[1]:
            new_ylim -= np.array(new_ylim[1] - ylim_ref[1])
        # Return the ylim tuple to its original order
        if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
            new_ylim = new_ylim[::-1]
        # Set new x and y limits
        ax.set_xlim(new_xlim)
        ax.set_ylim(new_ylim)
        # Force redraw
        ax.figure.canvas.draw()

    # Record reference x and y limits prior to any zooming
    zoom.xlim_ref = ax.get_xlim()
    zoom.ylim_ref = ax.get_ylim()
    # Get figure for specified axes and attach the event handler
    fig = ax.get_figure()
    fig.canvas.mpl_connect('scroll_event', zoom)
    return zoom
def config_notebook_plotting():
    """Configure plotting functions for inline plotting within a Jupyter
    Notebook shell. This function has no effect when not within a
    notebook shell, and may therefore be used within a normal python
    script.
    """

    # Check whether running within a notebook shell and have
    # not already monkey patched the plot function
    from sporco.util import in_notebook
    module = sys.modules[__name__]
    # The __name__ check detects whether plot has already been wrapped
    # (the wrapper's __name__ is 'plot_wrap')
    if in_notebook() and module.plot.__name__ == 'plot':

        # Set inline backend (i.e. %matplotlib inline) if in a notebook shell
        set_notebook_plot_backend()

        # Replace plot function with a wrapper function that discards
        # its return value (within a notebook with inline plotting, plots
        # are duplicated if the return value from the original function is
        # not assigned to a variable)
        plot_original = module.plot

        def plot_wrap(*args, **kwargs):
            plot_original(*args, **kwargs)

        module.plot = plot_wrap

        # Replace surf function with a wrapper function that discards
        # its return value (see comment for plot function)
        surf_original = module.surf

        def surf_wrap(*args, **kwargs):
            surf_original(*args, **kwargs)

        module.surf = surf_wrap

        # Replace contour function with a wrapper function that discards
        # its return value (see comment for plot function)
        contour_original = module.contour

        def contour_wrap(*args, **kwargs):
            contour_original(*args, **kwargs)

        module.contour = contour_wrap

        # Replace imview function with a wrapper function that discards
        # its return value (see comment for plot function)
        imview_original = module.imview

        def imview_wrap(*args, **kwargs):
            imview_original(*args, **kwargs)

        module.imview = imview_wrap

        # Disable figure show method (results in a warning if used within
        # a notebook with inline plotting)
        import matplotlib.figure

        def show_disable(self):
            pass

        matplotlib.figure.Figure.show = show_disable
def init_vars(self, S, dimK):
    """Initalise variables required for sparse coding and dictionary
    update for training data `S`."""

    # Spatial sizes of the training data
    Nv = S.shape[0:self.dimN]
    # Rebuild the representation indexing and working arrays only when
    # the spatial geometry has changed (or on the first call)
    if self.cri is None or Nv != self.cri.Nv:
        self.cri = cr.CDU_ConvRepIndexing(self.dsz, S, dimK, self.dimN)
        if self.opt['CUDA_CBPDN']:
            # The CUDA solver supports neither multi-channel data nor
            # mini-batch training
            if self.cri.Cd > 1 or self.cri.Cx > 1:
                raise ValueError('CUDA CBPDN solver can only be used for '
                                 'single channel problems')
            if self.cri.K > 1:
                raise ValueError('CUDA CBPDN solver can not be used with '
                                 'mini-batches')
        # Allocate FFT-aligned frequency-domain dictionary and
        # coefficient arrays
        self.Df = sl.pyfftw_byte_aligned(sl.rfftn(self.D, self.cri.Nv,
                                                  self.cri.axisN))
        self.Gf = sl.pyfftw_empty_aligned(self.Df.shape, self.Df.dtype)
        self.Z = sl.pyfftw_empty_aligned(self.cri.shpX, self.dtype)
    else:
        # Geometry unchanged: refresh the dictionary transform in place
        self.Df[:] = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
def manage_itstat(self):
    """Compute, record, and display iteration statistics."""

    # Compute statistics for the current iteration
    stats = self.iteration_stats()
    # Record them in the iteration history
    self.itstat.append(stats)
    # Display them via the status output
    self.display_status(self.fmtstr, stats)
def display_start(self):
    """Start status display if option selected."""

    # Emit the column header and a separator rule only when both
    # verbose output and the status header are enabled
    opt = self.opt
    if opt['Verbose'] and opt['StatusHeader']:
        print(self.hdrstr)
        print("-" * self.nsep)
def xstep(self, S, W, lmbda, dimK):
    """Solve CSC problem for training data `S`."""

    if self.opt['CUDA_CBPDN']:
        # GPU path: solve the masked CBPDN problem with the CUDA solver
        Z = cuda.cbpdnmsk(self.D.squeeze(), S[..., 0], W.squeeze(), lmbda,
                          self.opt['CBPDN'])
        # Restore the internal (spatial, channel, sample, filter) layout
        Z = Z.reshape(self.cri.Nv + (1, 1, self.cri.M,))
        self.Z[:] = np.asarray(Z, dtype=self.dtype)
        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
        self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                           self.cri.axisN)
        # No per-iteration statistics are available from the CUDA solver
        self.xstep_itstat = None
    else:
        # Create X update object (external representation is expected!)
        xstep = cbpdn.ConvBPDNMaskDcpl(self.D.squeeze(), S, lmbda, W,
                                       self.opt['CBPDN'], dimK=dimK,
                                       dimN=self.cri.dimN)
        xstep.solve()
        self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                           self.cri.axisN)
        self.setcoef(xstep.getcoef())
        # Record the final iteration statistics, if any iterations ran
        self.xstep_itstat = xstep.itstat[-1] if xstep.itstat else None
def keycmp(a, b, pth=()):
    """Recurse down the tree of nested dicts `b`, at each level checking
    that it does not have any keys that are not also at the same level
    in `a`. The key path is recorded in `pth`. If an unknown key is
    encountered in `b`, an `UnknownKeyError` exception is raised. If a
    non-dict value is encountered in `b` for which the corresponding
    value in `a` is a dict, an `InvalidValueError` exception is
    raised."""

    valid = set(a.keys())
    for key, bval in b.items():
        # A key in b with no counterpart in a at this level is an error
        if key not in valid:
            raise UnknownKeyError(pth + (key,))
        aval = a[key]
        if isinstance(aval, dict):
            if isinstance(bval, dict):
                # Both are subtrees: recurse with an extended key path
                keycmp(aval, bval, pth + (key,))
            else:
                # a expects a subtree here but b supplies a leaf value
                raise InvalidValueError(pth + (key,))
def update(self, d):
    """Update the dict with the dict tree in parameter d.

    Parameters
    ----------
    d : dict
        New dict content
    """

    # Route every entry through __setitem__ so that any per-key
    # validation applied there also applies to bulk updates
    for key, val in list(d.items()):
        self.__setitem__(key, val)
def check(self, key, value):
    """Check whether key,value pair is allowed. The key is allowed if
    there is a corresponding key in the defaults class attribute dict.
    The value is not allowed if it is a dict in the defaults dict and
    not a dict in value.

    Parameters
    ----------
    key : str or tuple of str
        Dict key
    value : any
        Dict value corresponding to key
    """

    # This test necessary to avoid unpickling errors in Python 3
    if hasattr(self, 'dflt'):
        # Get corresponding node to self, as determined by pth
        # attribute, of the defaults dict tree
        a = self.__class__.getnode(self.dflt, self.pth)
        # Raise UnknownKeyError exception if key not in corresponding
        # node of defaults tree
        if key not in a:
            raise UnknownKeyError(self.pth + (key,))
        # Raise InvalidValueError if the key value in the defaults
        # tree is a dict and the value parameter is not a dict
        elif isinstance(a[key], dict) and not isinstance(value, dict):
            raise InvalidValueError(self.pth + (key,))
def getparent(d, pth):
    """Get the parent node of a subdict as specified by the key path in
    `pth`.

    Parameters
    ----------
    d : dict
        Dict tree in which access is required
    pth : str or tuple of str
        Dict key path

    Returns
    -------
    node : dict
        Parent node of the subdict addressed by `pth`
    """

    node = d
    # Walk every path component except the last; the final component
    # addresses a child *within* the returned parent node
    for key in pth[:-1]:
        if not isinstance(node, dict):
            # Attempted to descend through a non-dict leaf
            raise InvalidValueError(node)
        if key not in node:
            raise UnknownKeyError(pth)
        node = node[key]
    return node
def par_relax_AX(i):
    """Parallel implementation of relaxation if option
    ``RelaxParam`` != 1.0.

    Parameters
    ----------
    i : int
        Index of group to relax
    """

    # Working arrays are module-level shared-memory globals written in place
    global mp_X
    global mp_Xnr
    global mp_DX
    global mp_DXnr
    # Save the non-relaxed values; these are needed for the residual
    # computation
    mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]]
    mp_DXnr[i] = mp_DX[i]
    if mp_rlx != 1.0:
        # Over-relaxation: convex combination of the current primal
        # variables and the corresponding auxiliary variables
        grpind = slice(mp_grp[i], mp_grp[i+1])
        mp_X[grpind] = mp_rlx * mp_X[grpind] + (1-mp_rlx)*mp_Y1[grpind]
        mp_DX[i] = mp_rlx*mp_DX[i] + (1-mp_rlx)*mp_Y0[i]
def par_final_stepgrp(i):
    """The parallel step grouping of the final iteration in solve. A
    cyclic permutation of the steps is done to require only one merge
    per iteration, requiring unique initial and final step groups.

    Parameters
    ----------
    i : int
        Index of grouping to update
    """

    # Complete the step cycle deferred by the cyclic permutation: the
    # remaining y updates followed by the dual (u) updates
    par_y0bstep(i)
    par_y1step(i)
    par_u0step(i)
    par_u1step(i)
def par_compute_residuals(i):
    """Compute components of the residual and stopping thresholds that
    can be done in parallel.

    Parameters
    ----------
    i : int
        Index of group to compute
    """

    # Compute the residuals in parallel, need to check if the residuals
    # depend on alpha
    global mp_ry0
    global mp_ry1
    global mp_sy0
    global mp_sy1
    global mp_nrmAx
    global mp_nrmBy
    global mp_nrmu
    # Primal residual components for the two constraint blocks
    mp_ry0[i] = np.sum((mp_DXnr[i] - mp_Y0[i])**2)
    mp_ry1[i] = mp_alpha**2*np.sum((mp_Xnr[mp_grp[i]:mp_grp[i+1]]-
                                    mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
    # Dual residual components (change in the auxiliary variables)
    mp_sy0[i] = np.sum((mp_Y0old[i] - mp_Y0[i])**2)
    mp_sy1[i] = mp_alpha**2*np.sum((mp_Y1old[mp_grp[i]:mp_grp[i+1]]-
                                    mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
    # Norm components used in the relative stopping thresholds
    mp_nrmAx[i] = np.sum(mp_DXnr[i]**2) + mp_alpha**2 * np.sum(
        mp_Xnr[mp_grp[i]:mp_grp[i+1]]**2)
    mp_nrmBy[i] = np.sum(mp_Y0[i]**2) + mp_alpha**2 * np.sum(
        mp_Y1[mp_grp[i]:mp_grp[i+1]]**2)
    mp_nrmu[i] = np.sum(mp_U0[i]**2) + np.sum(mp_U1[mp_grp[i]:mp_grp[i+1]]**2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_pool(self): """Initialize multiprocessing pool if necessary."""
# initialize the pool if needed if self.pool is None: if self.nproc > 1: self.pool = mp.Pool(processes=self.nproc) else: self.pool = None else: print('pool already initialized?')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distribute(self, f, n): """Distribute the computations amongst the multiprocessing pools Parameters f : function Function to be distributed to the processors n : int The values in range(0,n) will be passed as arguments to the function f. """
if self.pool is None: return [f(i) for i in range(n)] else: return self.pool.map(f, range(n))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate_pool(self): """Terminate and close the multiprocessing pool if necessary."""
if self.pool is not None: self.pool.terminate() self.pool.join() del(self.pool) self.pool = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eval_objfn(self): """Compute components of regularisation function as well as total objective function. """
dfd = self.obfn_dfd() prj = sp.proj_l1(self.obfn_gvar(), self.gamma, axis=self.cri.axisN + (self.cri.axisC, self.cri.axisM)) cns = np.linalg.norm(prj - self.obfn_gvar()) return (dfd, cns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ystep(self): """This method is inserted into the inner cbpdn object, replacing its own ystep method, thereby providing a hook for applying the additional steps necessary for the AMS method. """
# Extract AMS part of ystep argument so that it is not # affected by the main part of the ystep amidx = self.index_addmsk() Yi = self.cbpdn.AX[amidx] + self.cbpdn.U[amidx] # Perform main part of ystep from inner cbpdn object self.inner_ystep() # Apply mask to AMS component and insert into Y from inner # cbpdn object Yi[np.where(self.W.astype(np.bool))] = 0.0 self.cbpdn.Y[amidx] = Yi
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obfn_gvar(self): """This method is inserted into the inner cbpdn object, replacing its own obfn_gvar method, thereby providing a hook for applying the additional steps necessary for the AMS method. """
# Get inner cbpdn object gvar gv = self.inner_obfn_gvar().copy() # Set slice corresponding to the coefficient map of the final # filter (the impulse inserted for the AMS method) to zero so # that it does not affect the results (e.g. l1 norm) computed # from this variable by the inner cbpdn object gv[..., -self.cri.Cd:] = 0 return gv
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self): """Call the solve method of the inner cbpdn object and return the result. """
# Call solve method of inner cbpdn object Xi = self.cbpdn.solve() # Copy attributes from inner cbpdn object self.timer = self.cbpdn.timer self.itstat = self.cbpdn.itstat # Return result of inner cbpdn object return Xi
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reconstruct(self, b, X=None): """Reconstruct representation of signal b in signal set."""
if X is None: X = self.getcoef() Xf = sl.rfftn(X, None, self.cbpdn.cri.axisN) slc = (slice(None),)*self.dimN + \ (slice(self.chncs[b], self.chncs[b+1]),) Sf = np.sum(self.cbpdn.Df[slc] * Xf, axis=self.cbpdn.cri.axisM) return sl.irfftn(Sf, self.cbpdn.cri.Nv, self.cbpdn.cri.axisN)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fix_dynamic_class_lookup(cls, pstfx): """Fix name lookup problem that prevents pickling of dynamically defined classes. Parameters cls : class Dynamically generated class to which fix is to be applied pstfx : string Postfix that can be used to identify dynamically generated classes that are equivalent by construction """
# Extended name for the class that will be added to the module namespace extnm = '_' + cls.__name__ + '_' + pstfx # Get the module in which the dynamic class is defined mdl = sys.modules[cls.__module__] # Allow lookup of the dynamically generated class within the module via # its extended name setattr(mdl, extnm, cls) # Change the dynamically generated class name to the extended name if hasattr(cls, '__qualname__'): cls.__qualname__ = extnm else: cls.__name__ = extnm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_status_str(hdrlbl, fmtmap=None, fwdth0=4, fwdthdlt=6, fprec=2): """Construct header and format details for status display of an iterative solver. Parameters hdrlbl : tuple of strings Tuple of field header strings fmtmap : dict or None, optional (default None) A dict providing a mapping from field header strings to print format strings, providing a mechanism for fields with print formats that depart from the standard format fwdth0 : int, optional (default 4) Number of characters in first field formatted for integers fwdthdlt : int, optional (default 6) The width of fields formatted for floats is the sum of the value of this parameter and the field precision fprec : int, optional (default 2) Precision of fields formatted for floats Returns ------- hdrstr : string Complete header string fmtstr : string Complete print formatting string for numeric values nsep : integer Number of characters in separator string """
if fmtmap is None: fmtmap = {} fwdthn = fprec + fwdthdlt # Construct a list specifying the format string for each field. # Use format string from fmtmap if specified, otherwise use # a %d specifier with field width fwdth0 for the first field, # or a %e specifier with field width fwdthn and precision # fprec fldfmt = [fmtmap[lbl] if lbl in fmtmap else (('%%%dd' % (fwdth0)) if idx == 0 else (('%%%d.%de' % (fwdthn, fprec)))) for idx, lbl in enumerate(hdrlbl)] fmtstr = (' ').join(fldfmt) # Construct a list of field widths for each field by extracting # field widths from field format strings cre = re.compile(r'%-?(\d+)') fldwid = [] for fmt in fldfmt: mtch = cre.match(fmt) if mtch is None: raise ValueError("Format string '%s' does not contain field " "width" % fmt) else: fldwid.append(int(mtch.group(1))) # Construct list of field header strings formatted to the # appropriate field width, and join to construct a combined field # header string hdrlst = [('%-*s' % (w, t)) for t, w in zip(hdrlbl, fldwid)] hdrstr = (' ').join(hdrlst) return hdrstr, fmtstr, len(hdrstr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_attr(self, name, val, dval=None, dtype=None, reset=False): """Set an object attribute by its name. The attribute value can be specified as a primary value `val`, and as default value 'dval` that will be used if the primary value is None. This arrangement allows an attribute to be set from an entry in an options object, passed as `val`, while specifying a default value to use, passed as `dval` in the event that the options entry is None. Unless `reset` is True, the attribute is only set if it doesn't exist, or if it exists with value None. This arrangement allows for attributes to be set in both base and derived class initialisers, with the derived class value taking preference. Parameters name : string Attribute name val : any Primary attribute value dval : any Default attribute value in case `val` is None dtype : data-type, optional (default None) If the `dtype` parameter is not None, the attribute `name` is set to `val` (which is assumed to be of numeric type) after conversion to the specified type. reset : bool, optional (default False) Flag indicating whether attribute assignment should be conditional on the attribute not existing or having value None. If False, an attribute value other than None will not be overwritten. """
# If `val` is None and `dval` is not None, replace it with dval if dval is not None and val is None: val = dval # If dtype is not None, assume val is numeric and convert it to # type dtype if dtype is not None and val is not None: if isinstance(dtype, type): val = dtype(val) else: val = dtype.type(val) # Set attribute value depending on reset flag and whether the # attribute exists and is None if reset or not hasattr(self, name) or \ (hasattr(self, name) and getattr(self, name) is None): setattr(self, name, val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_rank_limits(comm, arrlen): """Determine the chunk of the grid that has to be computed per process. The grid has been 'flattened' and has arrlen length. The chunk assigned to each process depends on its rank in the MPI communicator. Parameters comm : MPI communicator object Describes topology of network: number of processes, rank arrlen : int Number of points in grid search. Returns ------- begin : int Index, with respect to 'flattened' grid, where the chunk for this process starts. end : int Index, with respect to 'flattened' grid, where the chunk for this process ends. """
rank = comm.Get_rank() # Id of this process size = comm.Get_size() # Total number of processes in communicator end = 0 # The scan should be done with ints, not floats ranklen = int(arrlen / size) if rank < arrlen % size: ranklen += 1 # Compute upper limit based on the sizes covered by the processes # with less rank end = comm.scan(sendobj=ranklen, op=MPI.SUM) begin = end - ranklen return (begin, end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def relax_AX(self): """The parent class method that this method overrides only implements the relaxation step for the variables of the baseline consensus algorithm. This method calls the overridden method and then implements the relaxation step for the additional variables required for the mask decoupling modification to the baseline algorithm. """
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX() self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf), axis=self.cri.axisM), self.cri.Nv, self.cri.axisN) if self.rlx == 1.0: self.AX1 = self.AX1nr else: alpha = self.rlx self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xstep(self): """The xstep of the baseline consensus class from which this class is derived is re-used to implement the xstep of the modified algorithm by replacing ``self.ZSf``, which is constant in the baseline algorithm, with a quantity derived from the additional variables ``self.Y1`` and ``self.U1``. It is also necessary to set the penalty parameter to unity for the duration of the x step. """
self.YU1[:] = self.Y1 - self.U1 self.ZSf = np.conj(self.Zf) * (self.Sf + sl.rfftn( self.YU1, None, self.cri.axisN)) rho = self.rho self.rho = 1.0 super(ConvCnstrMODMaskDcpl_Consensus, self).xstep() self.rho = rho
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_residuals(self): """Compute residuals and stopping thresholds. The parent class method is overridden to ensure that the residual calculations include the additional variables introduced in the modification to the baseline algorithm. """
# The full primary residual is straightforward to compute from # the primary residuals for the baseline algorithm and for the # additional variables r0 = self.rsdl_r(self.AXnr, self.Y) r1 = self.AX1nr - self.Y1 - self.S r = np.sqrt(np.sum(r0**2) + np.sum(r1**2)) # The full dual residual is more complicated to compute than the # full primary residual ATU = self.swapaxes(self.U) + sl.irfftn( np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN), self.cri.Nv, self.cri.axisN) s = self.rho * np.linalg.norm(ATU) # The normalisation factor for the full primal residual is also not # straightforward nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 + np.linalg.norm(self.AX1nr)**2) nY = np.sqrt(np.linalg.norm(self.Y)**2 + np.linalg.norm(self.Y1)**2) rn = max(nAX, nY, np.linalg.norm(self.S)) # The normalisation factor for the full dual residual is # straightforward to compute sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 + np.linalg.norm(self.U1)**2) # Final residual values and stopping tolerances depend on # whether standard or normalised residuals are specified via the # options object if self.opt['AutoRho', 'StdResiduals']: epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \ rn*self.opt['RelStopTol'] edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \ sn*self.opt['RelStopTol'] else: if rn == 0.0: rn = 1.0 if sn == 0.0: sn = 1.0 r /= rn s /= sn epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \ self.opt['RelStopTol'] edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \ self.opt['RelStopTol'] return r, s, epri, edua
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obfn_fvar(self): """Variable to be evaluated in computing regularisation term, depending on 'fEvalX' option value. """
if self.opt['fEvalX']: return self.X else: return self.cnst_c() - self.cnst_B(self.Y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalise(v): """Normalise columns of matrix. Parameters v : array_like Array with columns to be normalised Returns ------- vnrm : ndarray Normalised array """
vn = np.sqrt(np.sum(v**2, 0)) vn[vn == 0] = 1.0 return np.asarray(v / vn, dtype=v.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rhochange(self): """Re-factorise matrix when rho changes"""
self.lu, self.piv = sl.lu_factor(self.Z, self.rho) self.lu = np.asarray(self.lu, dtype=self.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cupy_wrapper(func): """A wrapper function that converts numpy ndarray arguments to cupy arrays, and convert any cupy arrays returned by the wrapped function into numpy ndarrays. """
@functools.wraps(func) def wrapped(*args, **kwargs): args = list(args) for n, a in enumerate(args): if isinstance(a, np.ndarray): args[n] = cp.asarray(a) for k, v in kwargs.items(): if isinstance(v, np.ndarray): kwargs[k] = cp.asarray(v) rtn = func(*args, **kwargs) if isinstance(rtn, (list, tuple)): for n, a in enumerate(rtn): if isinstance(a, cp.core.core.ndarray): rtn[n] = cp.asnumpy(a) else: if isinstance(rtn, cp.core.core.ndarray): rtn = cp.asnumpy(rtn) return rtn return wrapped
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def block_sep1(self, Y): """Separate variable into component corresponding to Y1 in Y."""
Y1 = Y[..., self.cri.M:] # If cri.Cd > 1 (multi-channel dictionary), we need to undo the # reshape performed in block_cat if self.cri.Cd > 1: shp = list(Y1.shape) shp[self.cri.axisM] = self.cri.dimN shp[self.cri.axisC] = self.cri.Cd Y1 = Y1.reshape(shp) # Axes are swapped here for similar reasons to those # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_sep0 Y1 = np.swapaxes(Y1[..., np.newaxis], self.cri.axisM, -1) return Y1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def block_cat(self, Y0, Y1): """Concatenate components corresponding to Y0 and Y1 blocks into Y. """
# Axes are swapped here for similar reasons to those # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_cat Y1sa = np.swapaxes(Y1, self.cri.axisM, -1)[..., 0] # If cri.Cd > 1 (multi-channel dictionary) Y0 has a singleton # channel axis but Y1 has a non-singleton channel axis. To make # it possible to concatenate Y0 and Y1, we reshape Y1 by a # partial ravel of axisM and axisC onto axisM. if self.cri.Cd > 1: shp = list(Y1sa.shape) shp[self.cri.axisM] *= shp[self.cri.axisC] shp[self.cri.axisC] = 1 Y1sa = Y1sa.reshape(shp) return np.concatenate((Y0, Y1sa), axis=self.cri.axisM)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obfn_g0var(self): """Variable to be evaluated in computing the TV regularisation term, depending on the ``gEvalY`` option value. """
# Use of self.block_sep0(self.AXnr) instead of self.cnst_A0(self.X) # reduces number of calls to self.cnst_A0 return self.var_y0() if self.opt['gEvalY'] else \ self.block_sep0(self.AXnr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rhochange(self): """Re-factorise matrix when rho changes."""
self.lu, self.piv = sl.cho_factor(self.D, self.rho) self.lu = np.asarray(self.lu, dtype=self.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rhochange(self): """Action to be taken when rho parameter is changed."""
self.Gamma = 1.0 / (1.0 + (self.lmbda/self.rho)*(self.Alpha**2))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpu_info(): """Return a list of namedtuples representing attributes of each GPU device. """
GPUInfo = namedtuple('GPUInfo', ['name', 'driver', 'totalmem', 'freemem']) gpus = GPUtil.getGPUs() info = [] for g in gpus: info.append(GPUInfo(g.name, g.driver, g.memoryTotal, g.memoryFree)) return info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpu_load(wproc=0.5, wmem=0.5): """Return a list of namedtuples representing the current load for each GPU device. The processor and memory loads are fractions between 0 and 1. The weighted load represents a weighted average of processor and memory loads using the parameters `wproc` and `wmem` respectively. """
GPULoad = namedtuple('GPULoad', ['processor', 'memory', 'weighted']) gpus = GPUtil.getGPUs() load = [] for g in gpus: wload = (wproc * g.load + wmem * g.memoryUtil) / (wproc + wmem) load.append(GPULoad(g.load, g.memoryUtil, wload)) return load
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def device_by_load(wproc=0.5, wmem=0.5): """Get a list of GPU device ids ordered by increasing weighted average of processor and memory load. """
gl = gpu_load(wproc=wproc, wmem=wmem) # return np.argsort(np.asarray(gl)[:, -1]).tolist() return [idx for idx, load in sorted(enumerate( [g.weighted for g in gl]), key=(lambda x: x[1]))]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_device_by_load(wproc=0.5, wmem=0.5): """Set the current device for cupy as the device with the lowest weighted average of processor and memory load. """
ids = device_by_load(wproc=wproc, wmem=wmem) cp.cuda.Device(ids[0]).use() return ids[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_module(name): """Load the named module without registering it in ``sys.modules``. Parameters name : string Module name Returns ------- mod : module Loaded module """
spec = importlib.util.find_spec(name) mod = importlib.util.module_from_spec(spec) mod.__spec__ = spec mod.__loader__ = spec.loader spec.loader.exec_module(mod) return mod
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patch_module(name, pname, pfile=None, attrib=None): """Create a patched copy of the named module and register it in ``sys.modules``. Parameters name : string Name of source module pname : string Name of patched copy of module pfile : string or None, optional (default None) Value to assign as source file name of patched module attrib : dict or None, optional (default None) Dict of attribute names and values to assign to patched module Returns ------- mod : module Patched module """
if attrib is None: attrib = {} spec = importlib.util.find_spec(name) spec.name = pname if pfile is not None: spec.origin = pfile spec.loader.name = pname mod = importlib.util.module_from_spec(spec) mod.__spec__ = spec mod.__loader__ = spec.loader sys.modules[pname] = mod spec.loader.exec_module(mod) for k, v in attrib.items(): setattr(mod, k, v) return mod
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sporco_cupy_patch_module(name, attrib=None): """Create a copy of the named sporco module, patch it to replace numpy with cupy, and register it in ``sys.modules``. Parameters name : string Name of source module attrib : dict or None, optional (default None) Dict of attribute names and values to assign to patched module Returns ------- mod : module Patched module """
# Patched module name is constructed from source module name # by replacing 'sporco.' with 'sporco.cupy.' pname = re.sub('^sporco.', 'sporco.cupy.', name) # Attribute dict always maps cupy module to 'np' attribute in # patched module if attrib is None: attrib = {} attrib.update({'np': cp}) # Create patched module mod = patch_module(name, pname, pfile='patched', attrib=attrib) mod.__spec__.has_location = False return mod
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _list2array(lst): """Convert a list to a numpy array."""
if lst and isinstance(lst[0], cp.ndarray): return cp.hstack(lst) else: return cp.asarray(lst)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False, slemap=None): """ Sort a list according to the order of entries in a reference list. Parameters sortlist : list List to be sorted reflist : list Reference list defining sorting order reverse : bool, optional (default False) Flag indicating whether to sort in reverse order fltr : bool, optional (default False) Flag indicating whether to filter `sortlist` to remove any entries that are not in `reflist` slemap : function or None, optional (default None) Function mapping a sortlist entry to the form of an entry in `reflist` Returns ------- sortedlist : list Sorted (and possibly filtered) version of sortlist """
def keyfunc(entry): if slemap is not None: rle = slemap(entry) if rle in reflist: # Ordering index taken from reflist return reflist.index(rle) else: # Ordering index taken from sortlist, offset # by the length of reflist so that entries # that are not in reflist retain their order # in sortlist return sortlist.index(entry) + len(reflist) if fltr: if slemap: sortlist = filter(lambda x: slemap(x) in reflist, sortlist) else: sortlist = filter(lambda x: x in reflist, sortlist) return sorted(sortlist, key=keyfunc, reverse=reverse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_module_classes(module): """ Get a list of module member classes. Parameters module : string or module object Module for which member list is to be generated Returns ------- mbrlst : list List of module functions """
clslst = get_module_members(module, type=inspect.isclass) return list(filter(lambda cls: not issubclass(cls, Exception), clslst))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_module_docs(pkgname, modpath, tmpltpath, outpath): """ Write the autosummary style docs for the specified package. Parameters pkgname : string Name of package to document modpath : string Path to package source root directory tmpltpath : string Directory path for autosummary template files outpath : string Directory path for RST output files """
dw = DocWriter(outpath, tmpltpath) modlst = get_module_names(modpath, pkgname) print('Making api docs:', end='') for modname in modlst: # Don't generate docs for cupy or cuda subpackages if 'cupy' in modname or 'cuda' in modname: continue try: mod = importlib.import_module(modname) except ModuleNotFoundError: print('Error importing module %s' % modname) continue # Skip any virtual modules created by the copy-and-patch # approach in sporco.cupy. These should already have been # skipped due to the test for cupy above. if mod.__file__ == 'patched': continue # Construct api docs for the current module if the docs file # does not exist, or if its source file has been updated more # recently than an existing docs file if hasattr(mod, '__path__'): srcpath = mod.__path__[0] else: srcpath = mod.__file__ dstpath = os.path.join(outpath, modname + '.rst') if is_newer_than(srcpath, dstpath): print(' %s' % modname, end='') dw.write(mod) print('')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, module): """ Write the RST source document for generating the docs for a specified module. Parameters module : module object Module for which member list is to be generated """
modname = module.__name__ # Based on code in generate_autosummary_docs in https://git.io/fxpJS ns = {} ns['members'] = dir(module) ns['functions'] = list(map(lambda x: x.__name__, get_module_functions(module))) ns['classes'] = list(map(lambda x: x.__name__, get_module_classes(module))) ns['exceptions'] = list(map(lambda x: x.__name__, get_module_exceptions(module))) ns['fullname'] = modname ns['module'] = modname ns['objname'] = modname ns['name'] = modname.split('.')[-1] ns['objtype'] = 'module' ns['underline'] = len(modname) * '=' rndr = self.template.render(**ns) rstfile = os.path.join(self.outpath, modname + '.rst') with open(rstfile, 'w') as f: f.write(rndr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isxmap(xmethod, opt): """Return ``isxmap`` argument for ``.IterStatsConfig`` initialiser. """
if xmethod == 'admm': isx = {'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl', 'XRho': 'Rho'} else: isx = {'X_F_Btrack': 'F_Btrack', 'X_Q_Btrack': 'Q_Btrack', 'X_ItBt': 'IterBTrack', 'X_L': 'L', 'X_Rsdl': 'Rsdl'} if not opt['AccurateDFid']: isx.update(evlmap(True)) return isx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isfld(xmethod, dmethod, opt): """Return ``isfld`` argument for ``.IterStatsConfig`` initialiser. """
fld = ['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr'] if xmethod == 'admm': fld.extend(['XPrRsdl', 'XDlRsdl', 'XRho']) else: if opt['CBPDN', 'BackTrack', 'Enabled']: fld.extend(['X_F_Btrack', 'X_Q_Btrack', 'X_ItBt', 'X_L', 'X_Rsdl']) else: fld.extend(['X_L', 'X_Rsdl']) if dmethod != 'fista': fld.extend(['DPrRsdl', 'DDlRsdl', 'DRho']) else: if opt['CCMOD', 'BackTrack', 'Enabled']: fld.extend(['D_F_Btrack', 'D_Q_Btrack', 'D_ItBt', 'D_L', 'D_Rsdl']) else: fld.extend(['D_L', 'D_Rsdl']) fld.append('Time') return fld
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterstats(self, j, t, isx, isd, evl): """Construct IterationStats namedtuple from X step and D step IterationStats namedtuples. Parameters j : int Iteration number t : float Iteration time isx : namedtuple IterationStats namedtuple from X step object isd : namedtuple IterationStats namedtuple from D step object evl : dict Dict associating result labels with values computed by :meth:`DictLearn.evaluate` """
vlst = [] # Iterate over the fields of the IterationStats namedtuple # to be populated with values. If a field name occurs as a # key in the isxmap dictionary, use the corresponding key # value as a field name in the isx namedtuple for the X # step object and append the value of that field as the # next value in the IterationStats namedtuple under # construction. The isdmap dictionary is handled # correspondingly with respect to the isd namedtuple for # the D step object. There are also two reserved field # names, 'Iter' and 'Time', referring respectively to the # iteration number and run time of the dictionary learning # algorithm. for fnm in self.IterationStats._fields: if fnm in self.isxmap: vlst.append(getattr(isx, self.isxmap[fnm])) elif fnm in self.isdmap: vlst.append(getattr(isd, self.isdmap[fnm])) elif fnm in self.evlmap: vlst.append(evl[fnm]) elif fnm == 'Iter': vlst.append(j) elif fnm == 'Time': vlst.append(t) else: vlst.append(None) return self.IterationStats._make(vlst)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printiterstats(self, itst): """Print iteration statistics. Parameters itst : namedtuple IterationStats namedtuple as returned by :meth:`iterstats` """
itdsp = tuple([getattr(itst, self.hdrmap[col]) for col in self.hdrtxt]) print(self.fmtstr % itdsp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def norm_nuclear(X): r"""Compute the nuclear norm .. math:: \| X \|_* = \sum_i \sigma_i where :math:`\sigma_i` are the singular values of matrix :math:`X`. Parameters X : array_like Input array :math:`X` Returns ------- nncl : float Nuclear norm of `X` """
return np.sum(np.linalg.svd(sl.promote16(X), compute_uv=False))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deprecate(func): """ A deprecation warning emitter as a decorator. """
@wraps(func) def wrapper(*args, **kwargs): warn("Deprecated, this will be removed in the future", DeprecationWarning) return func(*args, **kwargs) wrapper.__doc__ = "Deprecated.\n" + (wrapper.__doc__ or "") return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_module_names(package_path, pattern="lazy_*.py*"): """ All names in the package directory that matches the given glob, without their extension. Repeated names should appear only once. """
package_contents = glob(os.path.join(package_path[0], pattern)) relative_path_names = (os.path.split(name)[1] for name in package_contents) no_ext_names = (os.path.splitext(name)[0] for name in relative_path_names) return sorted(set(no_ext_names))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_modules(package_name, module_names): """ List of module objects from the package, keeping the name order. """
def get_module(name): return __import__(".".join([package_name, name]), fromlist=[package_name]) return [get_module(name) for name in module_names]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def docstring_with_summary(docstring, pairs, key_header, summary_type): """ Return a string joining the docstring with the pairs summary table. """
return "\n".join( [docstring, "Summary of {}:".format(summary_type), ""] + summary_table(pairs, key_header) + [""] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def memoize(func): """ Decorator for unerasable memoization based on function arguments, for functions without keyword arguments. """
class Memoizer(dict): def __missing__(self, args): val = func(*args) self[args] = val return val memory = Memoizer() @wraps(func) def wrapper(*args): return memory[args] return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_to_16bit_wave_file(fname, sig, rate): """ Save a given signal ``sig`` to file ``fname`` as a 16-bit one-channel wave with the given ``rate`` sample rate. """
with closing(wave.open(fname, "wb")) as wave_file:
    wave_file.setnchannels(1)  # Mono output
    wave_file.setsampwidth(2)  # 2 bytes per sample = 16-bit
    wave_file.setframerate(rate)
    # Scale [-1, 1) floats to signed 16-bit ints (clip guards against
    # overflow) and write them chunk-by-chunk as "h" (short) frames.
    for chunk in chunks((clip(sig) * 2 ** 15).map(int), dfmt="h", padval=0):
        wave_file.writeframes(chunk)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new_note_track(env, synth): """ Audio track with the frequencies. Parameters env: Envelope Stream (which imposes the duration). synth: One-argument function that receives a frequency (in rad/sample) and returns a Stream instance (a synthesized note). Returns ------- Endless Stream instance that joins synthesized notes. """
# Materialize the envelope once so the same amplitude shape can be
# reused for every note (also fixes each note's duration).
list_env = list(env)
# One synthesized note per frequency from freq_gen (defined elsewhere
# -- presumably an endless frequency generator; verify at call site);
# notes are amplitude-shaped by the envelope and lazily concatenated.
return chain.from_iterable(synth(freq) * list_env for freq in freq_gen())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tostream(func, module_name=None): """ Decorator to convert the function output into a Stream. Useful for generator functions. Note ---- Always use the ``module_name`` input when "decorating" a function that was defined in other module. """
@wraps(func) def new_func(*args, **kwargs): return Stream(func(*args, **kwargs)) if module_name is not None: new_func.__module__ = module_name return new_func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def thub(data, n): """ Tee or "T" hub auto-copier to help working with Stream instances as well as with numbers. Parameters data : Input to be copied. Can be anything. n : Number of copies. Returns ------- A StreamTeeHub instance, if input data is iterable. The data itself, otherwise. Examples -------- With numbers: 0.0 Combining number with iterable: [0.5, 0.2, 0.0] [0.6, 0.2, -0.2] [-0.2, 0.5, -0.5, 0.0] This function can also be used as a an alternative to the Stream constructor when your function has only one parameter, to avoid casting when that's not needed: 250 [500, 500, 500, 500, 500] """
return StreamTeeHub(data, n) if isinstance(data, Iterable) else data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def blocks(self, *args, **kwargs): """ Interface to apply audiolazy.blocks directly in a stream, returning another stream. Use keyword args. """
return Stream(blocks(iter(self), *args, **kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def take(self, n=None, constructor=list): """ Returns a container with the n first elements from the Stream, or less if there aren't enough. Use this without args if you need only one element outside a list. Parameters n : Number of elements to be taken. Defaults to None. Rounded when it's a float, and this can be ``inf`` for taking all. constructor : Container constructor function that can receie a generator as input. Defaults to ``list``. Returns ------- The first ``n`` elements of the Stream sequence, created by the given constructor unless ``n == None``, which means returns the next element from the sequence outside any container. If ``n`` is None, this can raise StopIteration due to lack of data in the Stream. When ``n`` is a number, there's no such exception. Examples -------- [5, 5, 5] 1.2 [1.2] (1.2,) [1, 2] Traceback (most recent call last): StopIteration Taking rounded float quantities and "up to infinity" elements (don't try using ``inf`` with endless Stream instances): [4, 3, 2] [4, 3, 2, 3] [4, 3, 2, 3, 2] See Also -------- Stream.peek : Returns the n first elements from the Stream, without removing them. Note ---- You should avoid using take() as if this would be an iterator. Streams are iterables that can be easily part of a "for" loop, and their iterators (the ones automatically used in for loops) are slightly faster. Use iter() builtin if you need that, instead, or perhaps the blocks method. """
# No count given: pop a single element outside any container
# (may raise StopIteration when the stream is exhausted).
if n is None:
    return next(self._data)
# +inf means "take everything" -- only safe on finite streams.
if isinf(n) and n > 0:
    return constructor(self._data)
# Round float counts; negative floats (including -inf and nan compare
# paths) collapse to zero elements.
if isinstance(n, float):
    n = rint(n) if n > 0 else 0 # So this works with -inf and nan
# Take up to n elements; stops early (without raising) on exhaustion
# because the generator simply ends.
return constructor(next(self._data) for _ in xrange(n))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def skip(self, n): """ Throws away the first ``n`` values from the Stream. Note ---- Performs the evaluation lazily, i.e., the values are thrown away only after requesting the next value. """
def skipper(data): for _ in xrange(int(round(n))): next(data) for el in data: yield el self._data = skipper(self._data) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def limit(self, n): """ Enforces the Stream to finish after ``n`` items. """
data = self._data self._data = (next(data) for _ in xrange(int(round(n)))) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, func): """ A lazy way to skip elements in the stream that gives False for the given function. """
self._data = xfilter(func, self._data) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def accumulate(iterable):
  """ Return series of accumulated sums. """
  # First element seeds the running total (raises StopIteration on
  # an empty input, like the manual next() would).
  source = iter(iterable)
  total = next(source)
  yield total
  for item in source:
    total += item
    yield total
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tee(data, n=2): """ Tee or "T" copy to help working with Stream instances as well as with numbers. Parameters data : Input to be copied. Can be anything. n : Size of returned tuple. Defaults to 2. Returns ------- Tuple of n independent Stream instances, if the input is a Stream or an iterator, otherwise a tuple with n times the same object. See Also -------- thub : use Stream instances *almost* like constants in your equations. """
if isinstance(data, (Stream, Iterator)): return tuple(Stream(cp) for cp in it.tee(data, n)) else: return tuple(data for unused in xrange(n))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def factorial(n): """ Factorial function that works with really big numbers. """
if isinstance(n, float): if n.is_integer(): n = int(n) if not isinstance(n, INT_TYPES): raise TypeError("Non-integer input (perhaps you need Euler Gamma " "function or Gauss Pi function)") if n < 0: raise ValueError("Input shouldn't be negative") return reduce(operator.mul, it.takewhile(lambda m: m <= n, it.count(2)), 1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mgl_seq(x): """ Sequence whose sum is the Madhava-Gregory-Leibniz series. Returns ------- An endless sequence that has the property ``atan(x) = sum(mgl_seq(x))``. Usually you would use the ``atan()`` function, not this one. """
# Odd denominators 1, 3, 5, ...; thub(..., 2) lets this one lazy
# stream be consumed by the two uses on the next line.
odd_numbers = thub(count(start=1, step=2), 2)
# Alternating-sign terms x**k / k for odd k:
#   x - x**3/3 + x**5/5 - ...  (sums to atan(x) for |x| <= 1)
return Stream(1, -1) * x ** odd_numbers / odd_numbers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def atan_mgl(x, n=10): """ Finds the arctan using the Madhava-Gregory-Leibniz series. """
acc = 1 / (1 - z ** -1) # Accumulator filter return acc(mgl_seq(x)).skip(n-1).take()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lagrange(pairs): """ Waring-Lagrange interpolator function. Parameters pairs : Iterable with pairs (tuples with two values), corresponding to points ``(x, y)`` of the function. Returns ------- A function that returns the interpolator result for a given ``x``. """
prod = lambda args: reduce(operator.mul, args) xv, yv = xzip(*pairs) return lambda k: sum( yv[j] * prod( (k - rk) / (rj - rk) for rk in xv if rj != rk ) for j, rj in enumerate(xv) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resample(sig, old=1, new=1, order=3, zero=0.): """ Generic resampler based on Waring-Lagrange interpolators. Parameters sig : Input signal (any iterable). old : Time duration reference (defaults to 1, allowing percentages to the ``new`` keyword argument). This can be float number, or perhaps a Stream instance. new : Time duration that the reference will have after resampling. For example, if ``old = 1, new = 2``, then there will be 2 samples yielded for each sample from input. This can be a float number, or perhaps a Stream instance. order : Lagrange interpolator order. The amount of neighboring samples to be used by the interpolator is ``order + 1``. zero : The input should be thought as zero-padded from the left with this value. Returns ------- The first value will be the first sample from ``sig``, and then the interpolator will find the next samples towards the end of the ``sig``. The actual sampling interval (or time step) for this interpolator obeys to the ``old / new`` relationship. Hint ---- The time step can also be time-varying, although that's certainly difficult to synchonize (one sample is needed for each output sample). Perhaps the best approach for this case would be a ControlStream keeping the desired value at any time. Note ---- The input isn't zero-padded at right. It means that the last output will be one with interpolated with known data. For endless inputs that's ok, this makes no difference, but for finite inputs that may be undesirable. """
sig = Stream(sig)
# Center of the interpolation window, in samples (order+1 neighbors).
threshold = .5 * (order + 1)
# Input samples consumed per output sample (may itself be a stream).
step = old / new
# Sliding window over the last order+1 input samples, zero-padded
# at the left so output starts at the first input sample.
data = deque([zero] * (order + 1), maxlen=order + 1)
data.extend(sig.take(rint(threshold)))
idx = int(threshold)  # Fractional read position inside the window
isig = iter(sig)
if isinstance(step, Iterable):
    # Time-varying step: one step value is consumed per output sample.
    step = iter(step)
    while True:
        yield lagrange(enumerate(data))(idx)
        idx += next(step)
        # Slide the window forward until idx is back inside it.
        while idx > threshold:
            data.append(next(isig))
            idx -= 1
else:
    # Constant step: same loop without per-sample step consumption.
    while True:
        yield lagrange(enumerate(data))(idx)
        idx += step
        while idx > threshold:
            data.append(next(isig))
            idx -= 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_polynomial(self): """ Tells whether it is a linear combination of natural powers of ``x``. """
return all(isinstance(k, INT_TYPES) and k >= 0 for k in self._data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def order(self): """ Finds the polynomial order. Examples -------- 1 18 0 Traceback (most recent call last): AttributeError: Power needs to be positive integers """
if not self.is_polynomial(): raise AttributeError("Power needs to be positive integers") return max(key for key in self._data) if self._data else 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def integrate(self): """ Integrate without adding an integration constant. """
if -1 in self._data: raise ValueError("Unable to integrate term that powers to -1") return Poly(OrderedDict((k + 1, v / (k + 1)) for k, v in iteritems(self._data)), zero=self.zero)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def roots(self): """ Returns a list with all roots. Needs Numpy. """
import numpy as np return np.roots(list(self.values())[::-1]).tolist()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _exec_eval(data, expr): """ Internal function to isolate an exec. Executes ``data`` and returns the ``expr`` evaluation afterwards. """
# SECURITY NOTE(review): exec/eval of arbitrary strings -- callers
# must never pass untrusted input to this helper.
ns = {}  # Fresh namespace isolating the executed code from this module
exec(data, ns)
return eval(expr, ns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def highpass(cutoff): """ This strategy uses an exponential approximation for cut-off frequency calculation, found by matching the one-pole Laplace lowpass filter and mirroring the resulting filter to get a highpass. """
# Pole radius from the exponential cut-off match; thub(..., 2) lets R
# (possibly a stream, for time-varying cutoff) be used twice below.
R = thub(exp(cutoff - pi), 2)
# Single-pole highpass: pole at z = -R, gain (1 - R)
# (mirrored one-pole lowpass, per the strategy description above).
return (1 - R) / (1 + R * z ** -1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lowpass(cutoff): """ This strategy uses an exponential approximation for cut-off frequency calculation, found by matching the single pole and single zero Laplace highpass filter and mirroring the resulting filter to get a lowpass. """
# Pole radius from the exponential cut-off match; thub(..., 2) lets R
# (possibly a stream) feed both the gain and the denominator.
R = thub(exp(cutoff - pi), 2)
# Gain normalization factor for the mirrored pole-zero design.
G = (R + 1) / 2
# Pole at z = -R, zero at z = -1 (Nyquist), so high frequencies
# are attenuated -- a one-pole/one-zero lowpass.
return G * (1 + z ** -1) / (1 + R * z ** -1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def highpass(cutoff): """ This strategy uses an exponential approximation for cut-off frequency calculation, found by matching the single pole and single zero Laplace highpass filter. """
# Pole radius from the exponential cut-off match; thub(..., 2) lets R
# (possibly a stream) feed both the gain and the denominator.
R = thub(exp(-cutoff), 2)
# Gain normalization factor for the pole-zero design.
G = (R + 1) / 2
# Zero at z = 1 (DC is fully rejected), pole at z = R -- a
# one-pole/one-zero highpass.
return G * (1 - z ** -1) / (1 - R * z ** -1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_linear(self): """ Tests whether all filters in the list are linear. CascadeFilter and ParallelFilter instances are also linear if all filters they group are linear. """
return all(isinstance(filt, LinearFilter) or (hasattr(filt, "is_linear") and filt.is_linear()) for filt in self.callables)