text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_samples(self,prop,fig=None,label=True, histtype='step',bins=50,lw=3, **kwargs): """Plots histogram of samples of desired property. :param prop: Desired property (must be legit column of samples) :param fig: Argument for :func:`plotutils.setfig` (``None`` or int). :param histtype, bins, lw: Passed to :func:`plt.hist`. :param **kwargs: Additional keyword arguments passed to `plt.hist` :return: Figure object. """
def plot_samples(self, prop, fig=None, label=True,
                 histtype='step', bins=50, lw=3,
                 **kwargs):
    """Plots histogram of samples of desired property.

    :param prop: Desired property (must be legit column of samples).
    :param fig: Argument for :func:`plotutils.setfig` (``None`` or int).
    :param label: If True, annotate the plot with the median and the
        +hi/-lo bounds from ``prop_samples``.
    :param histtype, bins, lw: Passed to :func:`plt.hist`.
    :param **kwargs: Additional keyword arguments passed to `plt.hist`.
    :return: The value returned by :func:`plt.hist`.
    """
    setfig(fig)
    samples, stats = self.prop_samples(prop)
    # BUG FIX: the 'normed' keyword was deprecated and then removed in
    # matplotlib >= 3.1; 'density' is the supported equivalent. Also
    # avoid shadowing the 'fig' parameter with the hist return value.
    hist_out = plt.hist(samples, bins=bins, density=True,
                        histtype=histtype, lw=lw, **kwargs)
    plt.xlabel(prop)
    plt.ylabel('Normalized count')
    if label:
        med, lo, hi = stats
        plt.annotate('$%.2f^{+%.2f}_{-%.2f}$' % (med, hi, lo),
                     xy=(0.7, 0.8), xycoords='axes fraction', fontsize=20)
    return hist_out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_band(cls, b, **kwargs): """Defines what a "shortcut" band name refers to. """
def get_band(cls, b, **kwargs):
    """Defines what a "shortcut" band name refers to.

    Resolves a short band name to a ``(photometric_system, band)`` pair.
    Raises ``ValueError`` when the name cannot be resolved.
    """
    sdss_bands = {'u', 'g', 'r', 'i', 'z'}
    johnson_bands = {'U', 'B', 'V', 'R', 'I', 'J', 'H', 'Ks'}
    kepler_aliases = {'kep', 'Kepler', 'Kp'}
    wise_bands = {'W1', 'W2', 'W3', 'W4'}

    phot = None
    band = None

    if b in sdss_bands:
        # Default to SDSS for these
        phot, band = 'SDSSugriz', 'sdss_{}'.format(b)
    elif b in johnson_bands:
        phot, band = 'UBVRIJHKsKp', b
    elif b == 'K':
        phot, band = 'UBVRIJHKsKp', 'Ks'
    elif b in kepler_aliases:
        phot, band = 'UBVRIJHKsKp', 'Kp'
    elif b in wise_bands:
        phot, band = 'WISE', b
    elif re.match('uvf', b) or re.match('irf', b):
        # HST WFC3 UV/IR filter names keep their full designation.
        phot, band = 'HST_WFC3', b
    else:
        # Generic "<system>_<band>" pattern.
        m = re.match('([a-zA-Z]+)_([a-zA-Z_]+)', b)
        if m:
            prefix, suffix = m.group(1), m.group(2)
            if prefix in cls.phot_systems:
                phot = prefix
                # LSST tables are keyed by the full name.
                band = b if phot == 'LSST' else suffix
            elif prefix in ['UK', 'UKIRT']:
                phot, band = 'UKIDSS', suffix

    if phot is None:
        raise ValueError('Dartmouth Models cannot resolve band {}!'.format(b))
    return phot, band
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def searchsorted(arr, N, x): """N is length of arr """
def searchsorted(arr, N, x):
    """Return the leftmost insertion index of ``x`` in sorted ``arr``.

    Binary search equivalent to :func:`bisect.bisect_left`: the returned
    index ``i`` satisfies ``arr[j] < x`` for all ``j < i`` and
    ``arr[j] >= x`` for all ``j >= i``.

    :param arr: Ascending-sorted sequence to search.
    :param N: Length of ``arr``.
    :param x: Value to locate.
    :return: Insertion index in ``[0, N]``.
    """
    L = 0
    R = N - 1
    while L <= R:
        m = (L + R) // 2
        if arr[m] < x:
            L = m + 1
        else:
            # arr[m] >= x: keep narrowing to the left. BUG FIX: the
            # original stopped as soon as arr[m] == x and returned the
            # *current* L, which could be well before the match (e.g.
            # arr=[1,2,3], x=2 returned 0 instead of 1).
            R = m - 1
    return L
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wrap_flask_restful_resource( fun: Callable, flask_restful_api: FlaskRestfulApi, injector: Injector ) -> Callable: """ This is needed because of how flask_restful views are registered originally. :type flask_restful_api: :class:`flask_restful.Api` """
# The following fragment of code is copied from flask_restful project """ Copyright (c) 2013, Twilio, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Twilio, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ @functools.wraps(fun) def wrapper(*args: Any, **kwargs: Any) -> Any: resp = fun(*args, **kwargs) if isinstance(resp, Response): # There may be a better way to test return resp data, code, headers = flask_response_unpack(resp) return flask_restful_api.make_response(data, code, headers=headers) # end of flask_restful code return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def complete_url(self, url): """ Completes a given URL with this instance's URL base. """
def complete_url(self, url):
    """
    Completes a given URL with this instance's URL base.

    Relative URLs are joined onto ``base_url`` when one is set; otherwise
    the URL is returned unchanged.
    """
    if not self.base_url:
        return url
    return urlparse.urljoin(self.base_url, url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interact(self, **local): """ Drops the user into an interactive Python session with the ``sess`` variable set to the current session instance. If keyword arguments are supplied, these names will also be available within the session. """
def interact(self, **local):
    """
    Drops the user into an interactive Python session with the ``sess``
    variable set to the current session instance. If keyword arguments are
    supplied, these names will also be available within the session.
    """
    # Imported lazily so normal (non-debugging) use never pays for it.
    import code
    # ``sess`` always names this instance; any extra names come from
    # **local and may shadow nothing but each other.
    code.interact(local=dict(sess=self, **local))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for(self, condition, interval = DEFAULT_WAIT_INTERVAL, timeout = DEFAULT_WAIT_TIMEOUT): """ Wait until a condition holds by checking it in regular intervals. Raises ``WaitTimeoutError`` on timeout. """
def wait_for(self, condition, interval = DEFAULT_WAIT_INTERVAL, timeout = DEFAULT_WAIT_TIMEOUT):
    """
    Wait until a condition holds by checking it in regular intervals.
    Raises ``WaitTimeoutError`` on timeout.

    The condition is always evaluated at least once; its (truthy) return
    value is passed back to the caller.
    """
    deadline = time.time() + timeout
    while True:
        # at least execute the check once!
        result = condition()
        if result:
            return result
        # timeout occured!
        if time.time() > deadline:
            raise WaitTimeoutError("wait_for timed out")
        # wait a bit before polling again
        time.sleep(interval)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_while(self, condition, *args, **kw): """ Wait while a condition holds. """
def wait_while(self, condition, *args, **kw):
    """
    Wait while a condition holds.

    Implemented by waiting for the condition's negation; any extra
    arguments are forwarded to :meth:`wait_for`.
    """
    negated = lambda: not condition()
    return self.wait_for(negated, *args, **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw): """ Returns the first node matching the given CSSv3 expression or ``None`` if a timeout occurs. """
def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw):
    """
    Returns the first node matching the given CSSv3 expression or ``None``
    if a timeout occurs.
    """
    # Poll the parent class's raw lookup via wait_for_safe, which returns
    # None instead of raising when the timeout elapses.
    return self.wait_for_safe(lambda: super(WaitMixin, self).at_css(css),
                              timeout = timeout,
                              **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def at_xpath(self, xpath, timeout = DEFAULT_AT_TIMEOUT, **kw): """ Returns the first node matching the given XPath 2.0 expression or ``None`` if a timeout occurs. """
def at_xpath(self, xpath, timeout = DEFAULT_AT_TIMEOUT, **kw):
    """
    Returns the first node matching the given XPath 2.0 expression or
    ``None`` if a timeout occurs.
    """
    # Poll the parent class's raw lookup via wait_for_safe, which returns
    # None instead of raising when the timeout elapses.
    return self.wait_for_safe(lambda: super(WaitMixin, self).at_xpath(xpath),
                              timeout = timeout,
                              **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def switch_axis_limits(ax, which_axis):
    '''
    Invert the limits of the x and/or y axis of ``ax``.

    ``which_axis`` is an iterable containing 'x', 'y', or both.
    '''
    for axis_name in which_axis:
        assert axis_name in ('x', 'y')
        left, right, bottom, top = ax.axis()
        if axis_name == 'x':
            ax.set_xlim(right, left)
        else:
            ax.set_ylim(top, bottom)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def remove_chartjunk(ax, spines, grid=None, ticklabels=None, show_ticks=False,
                     xkcd=False):
    '''
    Removes "chartjunk", such as extra lines of axes and tick marks.

    If grid="y" or "x", will add a white grid at the "y" or "x" axes,
    respectively

    If ticklabels="y" or "x", or ['x', 'y'] will remove ticklabels from that
    axis
    '''
    all_spines = ['top', 'bottom', 'right', 'left', 'polar']
    for spine in spines:
        # The try/except is for polar coordinates, which only have a 'polar'
        # spine and none of the others
        try:
            ax.spines[spine].set_visible(False)
        except KeyError:
            pass

    # For the remaining spines, make their line thinner and a slightly
    # off-black dark grey
    if not xkcd:
        for spine in set(all_spines).difference(set(spines)):
            # The try/except is for polar coordinates, which only have a
            # 'polar' spine and none of the others
            try:
                ax.spines[spine].set_linewidth(0.5)
            except KeyError:
                pass

    # Check that the axes are not log-scale. If they are, leave the ticks
    # because otherwise people assume a linear scale.
    x_pos = set(['top', 'bottom'])
    y_pos = set(['left', 'right'])
    xy_pos = [x_pos, y_pos]
    xy_ax_names = ['xaxis', 'yaxis']

    for ax_name, pos in zip(xy_ax_names, xy_pos):
        axis = ax.__dict__[ax_name]
        if show_ticks or axis.get_scale() == 'log':
            # if this spine is not in the list of spines to remove
            for p in pos.difference(spines):
                axis.set_tick_params(direction='out')
                axis.set_ticks_position(p)
        else:
            axis.set_ticks_position('none')

    if grid is not None:
        for g in grid:
            assert g in ('x', 'y')
            # BUG FIX: pass the individual axis name ``g``, not the whole
            # ``grid`` argument, so grid='xy' or a list works.
            ax.grid(axis=g, color='white', linestyle='-', linewidth=0.5)

    if ticklabels is not None:
        if type(ticklabels) is str:
            assert ticklabels in set(('x', 'y'))
            if ticklabels == 'x':
                ax.set_xticklabels([])
            if ticklabels == 'y':
                ax.set_yticklabels([])
        else:
            # BUG FIX: the original asserted ``set(ticklabels) | {'x','y'}
            # > 0`` (a set-vs-int comparison) and used elif, so passing
            # ['x', 'y'] only removed the x labels. Validate membership and
            # allow removing both.
            assert set(ticklabels) <= set(('x', 'y'))
            if 'x' in ticklabels:
                ax.set_xticklabels([])
            if 'y' in ticklabels:
                ax.set_yticklabels([])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maybe_get_ax(*args, **kwargs): """ It used to be that the first argument of prettyplotlib had to be the 'ax' object, but that's not the case anymore. @param args: @type args: @param kwargs: @type kwargs: @return: @rtype: """
def maybe_get_ax(*args, **kwargs):
    """
    It used to be that the first argument of prettyplotlib had to be the
    'ax' object, but that's not the case anymore.

    Accepts an ``ax=`` keyword, a leading Axes positional argument, or
    nothing (in which case the current Axes is used).

    @return: (ax, remaining_args, remaining_kwargs)
    """
    if 'ax' in kwargs:
        return kwargs.pop('ax'), args, dict(kwargs)
    if not args:
        # Touch the current figure first so one is created if needed.
        fig = plt.gcf()
        return plt.gca(), args, dict(kwargs)
    if isinstance(args[0], mpl.axes.Axes):
        return args[0], args[1:], dict(kwargs)
    return plt.gca(), args, dict(kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maybe_get_fig_ax(*args, **kwargs): """ It used to be that the first argument of prettyplotlib had to be the 'ax' object, but that's not the case anymore. This is specially made for pcolormesh. @param args: @type args: @param kwargs: @type kwargs: @return: @rtype: """
def maybe_get_fig_ax(*args, **kwargs):
    """
    It used to be that the first argument of prettyplotlib had to be the
    'ax' object, but that's not the case anymore. This is specially made
    for pcolormesh, which needs the figure as well as the axes.

    Accepts ``fig=``/``ax=`` keywords, leading (Figure, Axes) positional
    arguments, or nothing (a new figure/axes pair is created).

    @return: (fig, ax, remaining_args, remaining_kwargs)
    """
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
        fig = kwargs.pop('fig') if 'fig' in kwargs else plt.gcf()
        return fig, ax, args, dict(kwargs)
    if not args:
        return plt.gcf(), plt.gca(), args, dict(kwargs)
    if isinstance(args[0], mpl.figure.Figure) and \
            isinstance(args[1], mpl.axes.Axes):
        return args[0], args[1], args[2:], dict(kwargs)
    fig, ax = plt.subplots(1)
    return fig, ax, args, dict(kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scatter(*args, **kwargs): """ This will plot a scatterplot of x and y, iterating over the ColorBrewer "Set2" color cycle unless a color is specified. The symbols produced are empty circles, with the outline in the color specified by either 'color' or 'edgecolor'. If you want to fill the circle, specify 'facecolor'. Besides the matplotlib scatter(), will also take the parameter @param show_ticks: Whether or not to show the x and y axis ticks """
# Force 'color' to indicate the edge color, so the middle of the # scatter patches are empty. Can specify ax, args, kwargs = utils.maybe_get_ax(*args, **kwargs) if 'color' not in kwargs: # Assume that color means the edge color. You can assign the color_cycle = ax._get_lines.color_cycle kwargs['color'] = next(color_cycle) kwargs.setdefault('edgecolor', almost_black) kwargs.setdefault('alpha', 0.5) lw = utils.maybe_get_linewidth(**kwargs) kwargs['lw'] = lw show_ticks = kwargs.pop('show_ticks', False) scatterpoints = ax.scatter(*args, **kwargs) utils.remove_chartjunk(ax, ['top', 'right'], show_ticks=show_ticks) return scatterpoints
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def boxplot(*args, **kwargs): """ Create a box-and-whisker plot showing the mean, 25th percentile, and 75th percentile. The difference from matplotlib is only the left axis line is shown, and ticklabels labeling each category of data can be added. @param ax: @param x: @param kwargs: Besides xticklabels, which is a prettyplotlib-specific argument which will label each individual boxplot, any argument for matplotlib.pyplot.boxplot will be accepted: http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot @return: """
def boxplot(*args, **kwargs):
    """
    Create a box-and-whisker plot showing the mean, 25th percentile, and
    75th percentile. The difference from matplotlib is only the left axis
    line is shown, and ticklabels labeling each category of data can be
    added.

    @param ax:
    @param x:
    @param kwargs: Besides xticklabels, which is a prettyplotlib-specific
        argument which will label each individual boxplot, any argument for
        matplotlib.pyplot.boxplot will be accepted:
        http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
    @return:
    """
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    # If no ticklabels are specified, don't draw any
    xticklabels = kwargs.pop('xticklabels', None)
    fontsize = kwargs.pop('fontsize', 10)
    # BUG FIX: pop the prettyplotlib-only options *before* calling
    # ax.boxplot; the original popped them afterwards, so matplotlib
    # received unknown 'show_caps'/'show_ticks' keywords and raised.
    show_caps = kwargs.pop('show_caps', True)
    show_ticks = kwargs.pop('show_ticks', False)

    kwargs.setdefault('widths', 0.15)
    bp = ax.boxplot(*args, **kwargs)
    if xticklabels:
        ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)

    remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
    linewidth = 0.75

    blue = colors.set1[1]
    red = colors.set1[0]
    plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
    plt.setp(bp['medians'], color=red)
    plt.setp(bp['whiskers'], color=blue, linestyle='solid',
             linewidth=linewidth)
    plt.setp(bp['fliers'], color=blue)
    if show_caps:
        plt.setp(bp['caps'], color=blue, linewidth=linewidth)
    else:
        plt.setp(bp['caps'], color='none')
    ax.spines['left']._linewidth = 0.5
    return bp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hist(*args, **kwargs): """ Plots a histogram of the provided data. Can provide optional argument "grid='x'" or "grid='y'" to draw a white grid over the histogram. Almost like "erasing" some of the plot, but it adds more information! """
def hist(*args, **kwargs):
    """
    Plots a histogram of the provided data. Can provide optional argument
    "grid='x'" or "grid='y'" to draw a white grid over the histogram.
    Almost like "erasing" some of the plot, but it adds more information!
    """
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    # NOTE(review): _get_lines.color_cycle is a private matplotlib
    # attribute removed in newer releases -- confirm supported versions.
    color_cycle = ax._get_lines.color_cycle
    # Reassign the default colors to Set2 by Colorbrewer
    if iterable(args[0]):
        if isinstance(args[0], list):
            # List of datasets: one color per dataset.
            ncolors = len(args[0])
        else:
            if len(args[0].shape) == 2:
                # 2-D array: one color per column.
                ncolors = args[0].shape[1]
            else:
                ncolors = 1
        kwargs.setdefault('color', [next(color_cycle)
                                    for _ in range(ncolors)])
    else:
        kwargs.setdefault('color', next(color_cycle))
    kwargs.setdefault('edgecolor', 'white')
    # prettyplotlib-only options, popped before the ax.hist call
    show_ticks = kwargs.pop('show_ticks', False)

    # If no grid specified, don't draw one.
    grid = kwargs.pop('grid', None)

    patches = ax.hist(*args, **kwargs)
    remove_chartjunk(ax, ['top', 'right'], grid=grid, show_ticks=show_ticks)
    return patches
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def beeswarm(*args, **kwargs): """ Create a R-like beeswarm plot showing the mean and datapoints. The difference from matplotlib is only the left axis line is shown, and ticklabels labeling each category of data can be added. @param ax: @param x: @param kwargs: Besides xticklabels, which is a prettyplotlib-specific argument which will label each individual beeswarm, many arguments for matplotlib.pyplot.boxplot will be accepted: http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot Additional arguments include: *median_color* : (default gray) The color of median lines *median_width* : (default 2) Median line width *colors* : (default None) Colors to use when painting a dataseries, for example list1 = [1,2,3] list2 = [5,6,7] ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"]) @return: """
def beeswarm(*args, **kwargs):
    """
    Create a R-like beeswarm plot showing the mean and datapoints.
    The difference from matplotlib is only the left axis line is shown,
    and ticklabels labeling each category of data can be added.

    @param ax:
    @param x:
    @param kwargs: Besides xticklabels, which is a prettyplotlib-specific
        argument which will label each individual beeswarm, many arguments
        for matplotlib.pyplot.boxplot will be accepted:
        http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot

        Additional arguments include:

        *median_color* : (default gray)
            The color of median lines

        *median_width* : (default 2)
            Median line width

        *colors* : (default None)
            Colors to use when painting a dataseries, for example
            list1 = [1,2,3]
            list2 = [5,6,7]
            ppl.beeswarm([list1, list2], colors=["red", "blue"],
                         xticklabels=["data1", "data2"])

    @return:
    """
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    # If no ticklabels are specified, don't draw any
    xticklabels = kwargs.pop('xticklabels', None)
    colors = kwargs.pop('colors', None)
    fontsize = kwargs.pop('fontsize', 10)

    gray = _colors.set1[8]
    blue = kwargs.pop('color', _colors.set1[1])

    # BUG FIX: pop the prettyplotlib-only options *before* calling
    # _beeswarm; the original setdefault'ed them into kwargs after the
    # call, so user-supplied values leaked into _beeswarm and raised.
    median_color = kwargs.pop('median_color', gray)
    median_linewidth = kwargs.pop('median_linewidth', 2)
    show_ticks = kwargs.pop('show_ticks', False)

    kwargs.setdefault('widths', 0.25)
    kwargs.setdefault('sym', "o")

    bp = _beeswarm(ax, *args, **kwargs)

    if xticklabels:
        ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)

    remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
    linewidth = 0.75

    plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
    plt.setp(bp['medians'], color=median_color, linewidth=median_linewidth)
    # BUG FIX: the original crashed with TypeError when ``colors`` was left
    # at its default of None; fall back to the default color for all
    # fliers in that case.
    if colors is None:
        plt.setp(bp['fliers'], color=blue)
    else:
        for color, flier in zip(colors, bp['fliers']):
            plt.setp(flier, color=color)
    ax.spines['left']._linewidth = 0.5
    return bp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tags(self): """ Convert the ugly Tags JSON into a real dictionary and memorize the result. """
def tags(self):
    """
    Convert the ugly Tags JSON into a real dictionary and memorize the
    result.
    """
    if self._tags is None:
        LOG.debug('need to build tags')
        self._tags = {}
        # Some services require an extra API call to fetch tags; that call
        # is described declaratively by Meta.tags_spec as
        # (method, path, param_name, param_value[, extra_kwargs]).
        if hasattr(self.Meta, 'tags_spec') and (self.Meta.tags_spec is not None):
            LOG.debug('have a tags_spec')
            method, path, param_name, param_value = self.Meta.tags_spec[:4]
            kwargs = {}
            filter_type = getattr(self.Meta, 'filter_type', None)
            # NOTE(review): the 'arn' and 'list' branches build identical
            # kwargs (value wrapped in a list) -- presumably intentional,
            # but worth confirming against the services using each type.
            if filter_type == 'arn':
                kwargs = {param_name: [getattr(self, param_value)]}
            elif filter_type == 'list':
                kwargs = {param_name: [getattr(self, param_value)]}
            else:
                kwargs = {param_name: getattr(self, param_value)}
            # A fifth element of tags_spec supplies literal extra kwargs.
            if len(self.Meta.tags_spec) > 4:
                kwargs.update(self.Meta.tags_spec[4])
            LOG.debug('fetching tags')
            self.data['Tags'] = self._client.call(
                method, query=path, **kwargs)
            LOG.debug(self.data['Tags'])
        if 'Tags' in self.data:
            _tags = self.data['Tags']
            if isinstance(_tags, list):
                # List form: [{'Key': ..., 'Value': ...}, ...]. A key seen
                # more than once collects its values into a list.
                for kvpair in _tags:
                    if kvpair['Key'] in self._tags:
                        if not isinstance(self._tags[kvpair['Key']], list):
                            self._tags[kvpair['Key']] = [self._tags[kvpair['Key']]]
                        self._tags[kvpair['Key']].append(kvpair['Value'])
                    else:
                        self._tags[kvpair['Key']] = kvpair['Value']
            elif isinstance(_tags, dict):
                # Already a plain mapping; use it as-is.
                self._tags = _tags
    return self._tags
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_metric_data(self, metric_name=None, metric=None, days=None, hours=1, minutes=None, statistics=None, period=None): """ Get metric data for this resource. You can specify the time frame for the data as either the number of days or number of hours. The maximum window is 14 days. Based on the time frame this method will calculate the correct ``period`` to return the maximum number of data points up to the CloudWatch max of 1440. :type metric_name: str :param metric_name: The name of the metric this data will pertain to. :type days: int :param days: The number of days worth of data to return. You can specify either ``days`` or ``hours``. The default is one hour. The maximum value is 14 days. :type hours: int :param hours: The number of hours worth of data to return. You can specify either ``days`` or ``hours``. The default is one hour. The maximum value is 14 days. :type statistics: list of str :param statistics: The metric statistics to return. The default value is **Average**. Possible values are: * Average * Sum * SampleCount * Maximum * Minimum :returns: A ``MetricData`` object that contains both the CloudWatch data as well as the ``period`` used since this value may have been calculated by skew. """
def get_metric_data(self, metric_name=None, metric=None, days=None,
                    hours=1, minutes=None, statistics=None, period=None):
    """
    Get CloudWatch metric data for this resource.

    The time window may be given in days, hours, or minutes (precedence in
    that order; the default is one hour, maximum 14 days). Based on the
    window this method calculates a ``period`` that returns at most the
    CloudWatch maximum of 1440 data points, unless ``period`` is supplied.

    :type metric_name: str
    :param metric_name: The name of the metric this data will pertain to.
    :type statistics: list of str
    :param statistics: The metric statistics to return; defaults to
        ``['Average']``.
    :returns: A ``MetricData`` object with both the CloudWatch data and
        the ``period`` used (which may have been calculated by skew).
    :raises ValueError: If the metric is not available.
    """
    if not statistics:
        statistics = ['Average']
    if days:
        delta = datetime.timedelta(days=days)
    elif hours:
        delta = datetime.timedelta(hours=hours)
    else:
        # NOTE(review): if ``hours`` is explicitly falsy and ``minutes``
        # is None this raises TypeError -- confirm callers always supply
        # one of days/hours/minutes.
        delta = datetime.timedelta(minutes=minutes)
    if not period:
        # Smallest period (>= 60s) keeping the datapoint count <= 1440.
        period = max(60, self._total_seconds(delta) // 1440)
    if not metric:
        metric = self.find_metric(metric_name)
    if metric and self._cloudwatch:
        end = datetime.datetime.utcnow()
        start = end - delta
        data = self._cloudwatch.call(
            'get_metric_statistics',
            Dimensions=metric['Dimensions'],
            Namespace=metric['Namespace'],
            MetricName=metric['MetricName'],
            StartTime=start.isoformat(),
            EndTime=end.isoformat(),
            Statistics=statistics,
            Period=period)
        return MetricData(jmespath.search('Datapoints', data), period)
    else:
        raise ValueError('Metric (%s) not available' % metric_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self): r"""Loop over distributions and find best parameter to fit the data for each When a distribution is fitted onto the data, we populate a set of dataframes: - :attr:`df_errors` :sum of the square errors between the data and the fitted distribution i.e., :math:`\sum_i \left( Y_i - pdf(X_i) \right)^2` - :attr:`fitted_param` : the parameters that best fit the data - :attr:`fitted_pdf` : the PDF generated with the parameters that best fit the data Indices of the dataframes contains the name of the distribution. """
def fit(self):
    r"""Loop over distributions and find best parameter to fit the data
    for each.

    When a distribution is fitted onto the data, we populate a set of
    dataframes:

        - :attr:`df_errors`: sum of the square errors between the data and
          the fitted distribution i.e.,
          :math:`\sum_i \left( Y_i - pdf(X_i) \right)^2`
        - :attr:`fitted_param`: the parameters that best fit the data
        - :attr:`fitted_pdf`: the PDF generated with the parameters that
          best fit the data

    Indices of the dataframes contains the name of the distribution.
    """
    for distribution in self.distributions:
        try:
            # need a subprocess to check time it takes. If too long, skip it
            dist = eval("scipy.stats." + distribution)

            # TODO here, dist.fit may take a while or just hang forever
            # with some distributions. So, I thought to use signal module
            # to catch the error when signal takes too long. It did not work
            # presumably because another try/exception is inside the
            # fit function, so I used threading with a recipe from
            # stackoverflow (see the _timed_run helper).
            param = self._timed_run(dist.fit, distribution, args=self._data)

            # Evaluate the fitted PDF on the histogram grid, hoping the
            # order returned by fit is the same as expected by pdf.
            pdf_fitted = dist.pdf(self.x, *param)

            self.fitted_param[distribution] = param[:]
            self.fitted_pdf[distribution] = pdf_fitted

            sq_error = pylab.sum((self.fitted_pdf[distribution] - self.y)**2)
            if self.verbose:
                print("Fitted {} distribution with error={})".format(distribution, sq_error))

            # compute some errors now
            self._fitted_errors[distribution] = sq_error
        except Exception as err:
            # NOTE(review): *any* failure lands here (not only a timeout),
            # but the message always blames the timeout.
            if self.verbose:
                print("SKIPPED {} distribution (taking more than {} seconds)".format(distribution, self.timeout))
            # if we cannot compute the error, set it to large values
            # FIXME use inf
            self._fitted_errors[distribution] = 1e6

    self.df_errors = pd.DataFrame({'sumsquare_error': self._fitted_errors})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_pdf(self, names=None, Nbest=5, lw=2): """Plots Probability density functions of the distributions :param str,list names: names can be a single distribution name, or a list of distribution names, or kept as None, in which case, the first Nbest distribution will be taken (default to best 5) """
def plot_pdf(self, names=None, Nbest=5, lw=2):
    """Plots Probability density functions of the distributions.

    :param str,list names: names can be a single distribution name, or a
        list of distribution names, or kept as None, in which case, the
        first Nbest distribution will be taken (default to best 5)
    """
    assert Nbest > 0
    if Nbest > len(self.distributions):
        Nbest = len(self.distributions)

    if isinstance(names, list):
        for name in names:
            pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
    elif names:
        # Single distribution name given as a string.
        pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names)
    else:
        # No explicit names: take the Nbest best-fitting distributions.
        try:
            names = self.df_errors.sort_values(
                by="sumsquare_error").index[0:Nbest]
        except:
            # Fallback for very old pandas where sort_values did not exist.
            names = self.df_errors.sort("sumsquare_error").index[0:Nbest]

        for name in names:
            if name in self.fitted_pdf.keys():
                pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
            else:
                print("%s was not fitted. no parameters available" % name)
    pylab.grid(True)
    pylab.legend()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_best(self): """Return best fitted distribution and its parameters a dictionary with one key (the distribution name) and its parameters """
def get_best(self):
    """Return best fitted distribution and its parameters.

    :return: a dictionary with one key (the distribution name) mapped to
        its fitted parameters.
    """
    # df_errors holds the sum-of-squares error per distribution; the row
    # with the smallest error is the winner.
    best_row = self.df_errors.sort_values('sumsquare_error').iloc[0]
    best_name = best_row.name
    return {best_name: self.fitted_param[best_name]}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summary(self, Nbest=5, lw=2, plot=True): """Plots the distribution of the data and Nbest distribution """
def summary(self, Nbest=5, lw=2, plot=True):
    """Plots the distribution of the data and Nbest distribution.

    Returns the error table (df_errors rows) for the Nbest distributions,
    best first.
    """
    if plot:
        pylab.clf()
        self.hist()
        self.plot_pdf(Nbest=Nbest, lw=lw)
        pylab.grid(True)

    Nbest = min(Nbest, len(self.distributions))
    try:
        names = self.df_errors.sort_values(
            by="sumsquare_error").index[0:Nbest]
    except:
        # Fallback for very old pandas where sort_values did not exist.
        names = self.df_errors.sort("sumsquare_error").index[0:Nbest]
    return self.df_errors.loc[names]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _timed_run(self, func, distribution, args=(), kwargs={}, default=None): """This function will spawn a thread and run the given function using the args, kwargs and return the given default value if the timeout is exceeded. http://stackoverflow.com/questions/492519/timeout-on-a-python-function-call """
def _timed_run(self, func, distribution, args=(), kwargs={}, default=None):
    """This function will spawn a thread and run the given function
    using the args, kwargs and return the given default value if the
    timeout is exceeded.

    http://stackoverflow.com/questions/492519/timeout-on-a-python-function-call

    Note that ``args`` is passed to ``func`` as a *single* positional
    argument (``func(args, **kwargs)``) -- :meth:`fit` relies on this by
    passing ``args=self._data``.
    """
    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default
            self.exc_info = (None, None, None)

        def run(self):
            try:
                # ``args`` is deliberately NOT unpacked; see docstring.
                self.result = func(args, **kwargs)
            except Exception:
                self.exc_info = sys.exc_info()

        def suicide(self):
            raise RuntimeError('Stop has been called')

    it = InterruptableThread()
    it.start()
    it.join(self.timeout)

    if it.exc_info[0] is not None:
        # The worker raised: re-raise the original exception with its
        # traceback so the caller sees the real failure. BUG FIX: the
        # original raised Exception(type, value, tb), losing the type.
        raise it.exc_info[1].with_traceback(it.exc_info[2])
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if it.is_alive():
        # NOTE: suicide() raises in *this* thread, so the RuntimeError
        # below is effectively unreachable; the worker thread itself
        # cannot be killed and is left to finish in the background.
        it.suicide()
        raise RuntimeError
    return it.result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_avg_adj_deg(G): r""" Compute the average adjacency degree for each node. The average adjacency degree is the average of the degrees of a node and its neighbors. Parameters G: Graph Graph on which the statistic is extracted """
def compute_avg_adj_deg(G):
    r"""
    Compute the average adjacency degree for each node.

    The average adjacency degree is the average of the degrees of a node
    and its neighbors.

    Parameters
    ----------
    G: Graph
        Graph on which the statistic is extracted
    """
    adjacency = G.A
    # Row sums of A @ A count walks of length two from each node.
    two_hop_counts = np.sum(np.dot(adjacency, adjacency), axis=1)
    degrees = np.sum(adjacency, axis=1)
    # +1 accounts for the node itself in the averaging denominator.
    return two_hop_counts / (degrees + 1.)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_spectrogram(G, atom=None, M=100, **kwargs): r""" Compute the norm of the Tig for all nodes with a kernel shifted along the spectral axis. Parameters G : Graph Graph on which to compute the spectrogram. atom : func Kernel to use in the spectrogram (default = exp(-M*(x/lmax)²)). M : int (optional) Number of samples on the spectral scale. (default = 100) kwargs: dict Additional parameters to be passed to the :func:`pygsp.filters.Filter.filter` method. """
def compute_spectrogram(G, atom=None, M=100, **kwargs):
    r"""
    Compute the norm of the Tig for all nodes with a kernel shifted along
    the spectral axis.

    Parameters
    ----------
    G : Graph
        Graph on which to compute the spectrogram.
    atom : func
        Kernel to use in the spectrogram (default = exp(-M*(x/lmax)²)).
    M : int (optional)
        Number of samples on the spectral scale. (default = 100)
    kwargs: dict
        Additional parameters to be passed to the
        :func:`pygsp.filters.Filter.filter` method.
    """
    if not atom:
        # Default kernel: a narrow Gaussian on the spectral axis whose
        # width shrinks as M grows.
        def atom(x):
            return np.exp(-M * (x / G.lmax)**2)

    scale = np.linspace(0, G.lmax, M)
    spectr = np.empty((G.N, M))

    for shift_idx in range(M):
        # Shift the kernel to each sample point; the lambda is evaluated
        # within this iteration, so capturing shift_idx is safe here.
        shift_filter = filters.Filter(G, lambda x: atom(x - scale[shift_idx]))
        tig = compute_norm_tig(shift_filter, **kwargs).squeeze()**2
        spectr[:, shift_idx] = tig

    # Cache the result on the graph object for later reuse.
    G.spectr = spectr
    return spectr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate(self, x): r"""Evaluate the kernels at given frequencies. Parameters x : array_like Graph frequencies at which to evaluate the filter. Returns ------- y : ndarray Frequency response of the filters. Shape ``(g.Nf, len(x))``. Examples -------- Frequency response of a low-pass filter: """
def evaluate(self, x):
    r"""Evaluate the kernels at given frequencies.

    Parameters
    ----------
    x : array_like
        Graph frequencies at which to evaluate the filter.

    Returns
    -------
    y : ndarray
        Frequency response of the filters. Shape ``(g.Nf, len(x))``.
    """
    # asanyarray avoids copying the data, unlike building
    # np.array([g(x) for g in self._kernels]) would.
    x = np.asanyarray(x)
    out = np.empty([self.Nf] + list(x.shape))
    for idx, kern in enumerate(self._kernels):
        out[idx] = kern(x)
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate_frame_bounds(self, x=None): r"""Estimate lower and upper frame bounds. A filter bank forms a frame if there are positive real numbers :math:`A` and :math:`B`, :math:`0 < A \leq B < \infty`, that satisfy the *frame condition* .. math:: A \|x\|^2 \leq \| g(L) x \|^2 \leq B \|x\|^2 for all signals :math:`x \in \mathbb{R}^N`, where :math:`g(L)` is the analysis operator of the filter bank. As :math:`g(L) = U g(\Lambda) U^\top` is diagonalized by the Fourier basis :math:`U` with eigenvalues :math:`\Lambda`, :math:`\| g(L) x \|^2 = \| g(\Lambda) U^\top x \|^2`, and :math:`A = \min g^2(\Lambda)`, :math:`B = \max g^2(\Lambda)`. Parameters x : array_like Graph frequencies at which to evaluate the filter bank `g(x)`. The default is ``x = np.linspace(0, G.lmax, 1000)``. The exact bounds are given by evaluating the filter bank at the eigenvalues of the graph Laplacian, i.e., ``x = G.e``. Returns ------- A : float Lower frame bound of the filter bank. B : float Upper frame bound of the filter bank. See Also -------- compute_frame: compute the frame complement: complement a filter bank to become a tight frame Examples -------- Estimation quality (loose, precise, exact): A=1.883, B=2.288 A=1.708, B=2.359 A=1.723, B=2.359 The frame bounds can be seen in the plot of the filter bank as the minimum and maximum of their squared sum (the black curve): The heat kernel has a null-space and doesn't define a frame (the lower bound should be greater than 0 to have a frame): A=0.000, B=1.000 Without a null-space, the heat kernel forms a frame: A=0.135, B=2.000 A kernel and its dual form a tight frame (A=B): A=1.000, B=1.000 The Itersine filter bank forms a tight frame (A=B): A=1.000, B=1.000 """
def estimate_frame_bounds(self, x=None):
    r"""Estimate lower and upper frame bounds.

    Parameters
    ----------
    x : array_like
        Graph frequencies at which to evaluate the filter bank ``g(x)``.
        The default is ``x = np.linspace(0, G.lmax, 1000)``. The exact
        bounds are given by evaluating at the Laplacian eigenvalues
        (``x = G.e``).

    Returns
    -------
    A : float
        Lower frame bound of the filter bank.
    B : float
        Upper frame bound of the filter bank.
    """
    if x is None:
        x = np.linspace(0, self.G.lmax, 1000)
    else:
        x = np.asanyarray(x)
    # The frame bounds are the extrema of the squared sum of all kernel
    # responses over the evaluation points.
    power = np.sum(self.evaluate(x)**2, axis=0)
    return power.min(), power.max()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_frame(self, **kwargs): r"""Compute the associated frame. A filter bank defines a frame, which is a generalization of a basis to sets of vectors that may be linearly dependent. See `Wikipedia <https://en.wikipedia.org/wiki/Frame_(linear_algebra)>`_. The frame of a filter bank is the union of the frames of its constituent filters. The vectors forming the frame are the rows of the *analysis operator* .. math:: g(L) = \begin{pmatrix} g_1(L) \\ \vdots \\ g_F(L) \end{pmatrix} \in \mathbb{R}^{NF \times N}, \quad g_i(L) = U g_i(\Lambda) U^\top, where :math:`g_i` are the filter kernels, :math:`N` is the number of nodes, :math:`F` is the number of filters, :math:`L` is the graph Laplacian, :math:`\Lambda` is a diagonal matrix of the Laplacian's eigenvalues, and :math:`U` is the Fourier basis, i.e., its columns are the eigenvectors of the Laplacian. The matrix :math:`g(L)` represents the *analysis operator* of the frame. Its adjoint :math:`g(L)^\top` represents the *synthesis operator*. A signal :math:`x` is thus analyzed with the frame by :math:`y = g(L) x`, and synthesized from its frame coefficients by :math:`z = g(L)^\top y`. Computing this matrix is however a rather inefficient way of doing those operations. If :math:`F > 1`, the frame is said to be over-complete and the representation :math:`g(L) x` of the signal :math:`x` is said to be redundant. If the frame is tight, the *frame operator* :math:`g(L)^\top g(L)` is diagonal, with entries equal to the frame bound :math:`A = B`. Parameters kwargs: dict Parameters to be passed to the :meth:`analyze` method. Returns ------- frame : ndarray Array of size (#nodes x #filters) x #nodes. 
See Also -------- estimate_frame_bounds: estimate the frame bounds filter: more efficient way to filter signals Examples -------- Filtering as a multiplication with the matrix representation of the frame analysis operator: (600, 100) True The frame operator of a tight frame is the identity matrix times the frame bound: A=1.000, B=1.000 (600, 100) True """
if self.G.N > 2000: _logger.warning('Creating a big matrix. ' 'You should prefer the filter method.') # Filter one delta per vertex. s = np.identity(self.G.N) return self.filter(s, **kwargs).T.reshape(-1, self.G.N)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def complement(self, frame_bound=None): r"""Return the filter that makes the frame tight. The complementary filter is designed such that the union of a filter bank and its complementary filter forms a tight frame. Parameters frame_bound : float or None The desired frame bound :math:`A = B` of the resulting tight frame. The chosen bound should be larger than the sum of squared evaluations of all filters in the filter bank. If None (the default), the method chooses the smallest feasible bound. Returns ------- complement: Filter The complementary filter. See Also -------- estimate_frame_bounds: estimate the frame bounds Examples -------- A=0.200, B=1.971 A=1.971, B=1.971 """
def kernel(x, *args, **kwargs): y = self.evaluate(x) np.power(y, 2, out=y) y = np.sum(y, axis=0) if frame_bound is None: bound = y.max() elif y.max() > frame_bound: raise ValueError('The chosen bound is not feasible. ' 'Choose at least {}.'.format(y.max())) else: bound = frame_bound return np.sqrt(bound - y) return Filter(self.G, kernel)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inverse(self): r"""Return the pseudo-inverse filter bank. The pseudo-inverse of the *analysis filter bank* :math:`g` is the *synthesis filter bank* :math:`g^+` such that .. math:: g(L)^+ g(L) = I, where :math:`I` is the identity matrix, and the *synthesis operator* .. math:: g(L)^+ = (g(L)\top g(L))^{-1} g(L)^\top = (g_1(L)^+, \dots, g_F(L)^+) \in \mathbb{R}^{N \times NF} is the left pseudo-inverse of the analysis operator :math:`g(L)`. Note that :math:`g_i(L)^+` is the pseudo-inverse of :math:`g_i(L)`, :math:`N` is the number of vertices, and :math:`F` is the number of filters in the bank. The above relation holds, and the reconstruction is exact, if and only if :math:`g(L)` is a frame. To be a frame, the rows of :math:`g(L)` must span the whole space (i.e., :math:`g(L)` must have full row rank). That is the case if the lower frame bound :math:`A > 0`. If :math:`g(L)` is not a frame, the reconstruction :math:`g(L)^+ g(L) x` will be the closest to :math:`x` in the least square sense. While there exists infinitely many inverses of the analysis operator of a frame, the pseudo-inverse is unique and corresponds to the *canonical dual* of the filter kernel. The *frame operator* of :math:`g^+` is :math:`g(L)^+ (g(L)^+)^\top = (g(L)\top g(L))^{-1}`, the inverse of the frame operator of :math:`g`. Similarly, its *frame bounds* are :math:`A^{-1}` and :math:`B^{-1}`, where :math:`A` and :math:`B` are the frame bounds of :math:`g`. If :math:`g` is tight (i.e., :math:`A=B`), the canonical dual is given by :math:`g^+ = g / A` (i.e., :math:`g^+_i = g_i / A \ \forall i`). Returns ------- inverse : :class:`pygsp.filters.Filter` The pseudo-inverse filter bank, which synthesizes (or reconstructs) a signal from its coefficients using the canonical dual frame. 
See Also -------- estimate_frame_bounds: estimate the frame bounds Examples -------- error: 3e-14 A(g)*B(h) = 0.687 * 1.457 = 1.000 B(g)*A(h) = 1.994 * 0.501 = 1.000 """
        # Lower frame bound; A == 0 means the analysis operator is
        # rank-deficient and reconstruction is only least-squares.
        A, _ = self.estimate_frame_bounds()
        if A == 0:
            _logger.warning('The filter bank is not invertible as it is not '
                            'a frame (lower frame bound A=0).')

        def kernel(g, i, x):
            # Evaluate the whole bank at the eigenvalues x; after the
            # transpose, rows index x and columns index filters.
            y = g.evaluate(x).T
            # Per-eigenvalue pseudo-inverse of the column of kernel values:
            # this yields the canonical dual kernels evaluated at x.
            z = np.linalg.pinv(np.expand_dims(y, axis=-1)).squeeze(axis=-2)
            return z[:, i]  # Return one filter.

        # One dual kernel per filter of the original bank.
        kernels = [partial(kernel, self, i) for i in range(self.n_filters)]

        return Filter(self.G, kernels)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def grad(self, x): r"""Compute the gradient of a signal defined on the vertices. The gradient :math:`y` of a signal :math:`x` is defined as .. math:: y = \nabla_\mathcal{G} x = D^\top x, where :math:`D` is the differential operator :attr:`D`. The value of the gradient on the edge :math:`e_k = (v_i, v_j)` from :math:`v_i` to :math:`v_j` with weight :math:`W[i, j]` is .. math:: y[k] = D[i, k] x[i] + D[j, k] x[j] = \sqrt{\frac{W[i, j]}{2}} (x[j] - x[i]) for the combinatorial Laplacian, and .. math:: y[k] = \sqrt{\frac{W[i, j]}{2}} \left( \frac{x[j]}{\sqrt{d[j]}} - \frac{x[i]}{\sqrt{d[i]}} \right) for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters x : array_like Signal of length :attr:`n_vertices` living on the vertices. Returns ------- y : ndarray Gradient signal of length :attr:`n_edges` living on the edges. See Also -------- compute_differential_operator div : compute the divergence of an edge signal dirichlet_energy : compute the norm of the gradient Examples -------- Non-directed graph and combinatorial Laplacian: array([ 2., 2., -2.]) Directed graph and combinatorial Laplacian: array([ 1.41421356, 1.41421356, -1.41421356]) Non-directed graph and normalized Laplacian: array([ 1.41421356, 1.41421356, -0.82842712]) Directed graph and normalized Laplacian: array([ 1.41421356, 1.41421356, -0.82842712]) """
x = self._check_signal(x) return self.D.T.dot(x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def div(self, y): r"""Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \operatorname{div}_\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2 d[i]}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: array([-2. , 2.82842712, -1.41421356, 0. ]) Directed graph and normalized Laplacian: array([-2. , 2.82842712, -1.41421356, 0. ]) """
y = np.asanyarray(y) if y.shape[0] != self.Ne: raise ValueError('First dimension must be the number of edges ' 'G.Ne = {}, got {}.'.format(self.Ne, y.shape)) return self.D.dot(y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gft(self, s): r"""Compute the graph Fourier transform. The graph Fourier transform of a signal :math:`s` is defined as .. math:: \hat{s} = U^* s, where :math:`U` is the Fourier basis attr:`U` and :math:`U^*` denotes the conjugate transpose or Hermitian transpose of :math:`U`. Parameters s : array_like Graph signal in the vertex domain. Returns ------- s_hat : ndarray Representation of s in the Fourier domain. Examples -------- True """
s = self._check_signal(s) U = np.conjugate(self.U) # True Hermitian. (Although U is often real.) return np.tensordot(U, s, ([0], [0]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def igft(self, s_hat): r"""Compute the inverse graph Fourier transform. The inverse graph Fourier transform of a Fourier domain signal :math:`\hat{s}` is defined as .. math:: s = U \hat{s}, where :math:`U` is the Fourier basis :attr:`U`. Parameters s_hat : array_like Graph signal in the Fourier domain. Returns ------- s : ndarray Representation of s_hat in the vertex domain. Examples -------- True """
s_hat = self._check_signal(s_hat) return np.tensordot(self.U, s_hat, ([1], [0]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_cheby_coeff(f, m=30, N=None, *args, **kwargs): r""" Compute Chebyshev coefficients for a Filterbank. Parameters f : Filter Filterbank with at least 1 filter m : int Maximum order of Chebyshev coeff to compute (default = 30) N : int Grid order used to compute quadrature (default = m + 1) i : int Index of the Filterbank element to compute (default = 0) Returns ------- c : ndarray Matrix of Chebyshev coefficients """
G = f.G i = kwargs.pop('i', 0) if not N: N = m + 1 a_arange = [0, G.lmax] a1 = (a_arange[1] - a_arange[0]) / 2 a2 = (a_arange[1] + a_arange[0]) / 2 c = np.zeros(m + 1) tmpN = np.arange(N) num = np.cos(np.pi * (tmpN + 0.5) / N) for o in range(m + 1): c[o] = 2. / N * np.dot(f._kernels[i](a1 * num + a2), np.cos(np.pi * o * (tmpN + 0.5) / N)) return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cheby_op(G, c, signal, **kwargs): r""" Chebyshev polynomial of graph Laplacian applied to vector. Parameters G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering """
# Handle if we do not have a list of filters but only a simple filter in cheby_coeff. if not isinstance(c, np.ndarray): c = np.array(c) c = np.atleast_2d(c) Nscales, M = c.shape if M < 2: raise TypeError("The coefficients have an invalid shape") # thanks to that, we can also have 1d signal. try: Nv = np.shape(signal)[1] r = np.zeros((G.N * Nscales, Nv)) except IndexError: r = np.zeros((G.N * Nscales)) a_arange = [0, G.lmax] a1 = float(a_arange[1] - a_arange[0]) / 2. a2 = float(a_arange[1] + a_arange[0]) / 2. twf_old = signal twf_cur = (G.L.dot(signal) - a2 * signal) / a1 tmpN = np.arange(G.N, dtype=int) for i in range(Nscales): r[tmpN + G.N*i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur factor = 2/a1 * (G.L - a2 * sparse.eye(G.N)) for k in range(2, M): twf_new = factor.dot(twf_cur) - twf_old for i in range(Nscales): r[tmpN + G.N*i] += c[i, k] * twf_new twf_old = twf_cur twf_cur = twf_new return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cheby_rect(G, bounds, signal, **kwargs): r""" Fast filtering using Chebyshev polynomial for a perfect rectangle filter. Parameters G : Graph bounds : array_like The bounds of the pass-band filter signal : array_like Signal to filter order : int (optional) Order of the Chebyshev polynomial (default: 30) Returns ------- r : array_like Result of the filtering """
if not (isinstance(bounds, (list, np.ndarray)) and len(bounds) == 2): raise ValueError('Bounds of wrong shape.') bounds = np.array(bounds) m = int(kwargs.pop('order', 30) + 1) try: Nv = np.shape(signal)[1] r = np.zeros((G.N, Nv)) except IndexError: r = np.zeros((G.N)) b1, b2 = np.arccos(2. * bounds / G.lmax - 1.) factor = 4./G.lmax * G.L - 2.*sparse.eye(G.N) T_old = signal T_cur = factor.dot(signal) / 2. r = (b1 - b2)/np.pi * signal + 2./np.pi * (np.sin(b1) - np.sin(b2)) * T_cur for k in range(2, m): T_new = factor.dot(T_cur) - T_old r += 2./(k*np.pi) * (np.sin(k*b1) - np.sin(k*b2)) * T_new T_old = T_cur T_cur = T_new return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_jackson_cheby_coeff(filter_bounds, delta_lambda, m): r""" To compute the m+1 coefficients of the polynomial approximation of an ideal band-pass between a and b, between a range of values defined by lambda_min and lambda_max. Parameters filter_bounds : list [a, b] delta_lambda : list [lambda_min, lambda_max] m : int Returns ------- ch : ndarray jch : ndarray References :cite:`tremblay2016compressive` """
# Parameters check if delta_lambda[0] > filter_bounds[0] or delta_lambda[1] < filter_bounds[1]: _logger.error("Bounds of the filter are out of the lambda values") raise() elif delta_lambda[0] > delta_lambda[1]: _logger.error("lambda_min is greater than lambda_max") raise() # Scaling and translating to standard cheby interval a1 = (delta_lambda[1]-delta_lambda[0])/2 a2 = (delta_lambda[1]+delta_lambda[0])/2 # Scaling bounds of the band pass according to lrange filter_bounds[0] = (filter_bounds[0]-a2)/a1 filter_bounds[1] = (filter_bounds[1]-a2)/a1 # First compute cheby coeffs ch = np.empty(m+1, dtype=float) ch[0] = (2/(np.pi))*(np.arccos(filter_bounds[0])-np.arccos(filter_bounds[1])) for i in range(1, len(ch)): ch[i] = (2/(np.pi * i)) * \ (np.sin(i * np.arccos(filter_bounds[0])) - np.sin(i * np.arccos(filter_bounds[1]))) # Then compute jackson coeffs jch = np.empty(m+1, dtype=float) alpha = (np.pi/(m+2)) for i in range(len(jch)): jch[i] = (1/np.sin(alpha)) * \ ((1 - i/(m+2)) * np.sin(alpha) * np.cos(i * alpha) + (1/(m+2)) * np.cos(alpha) * np.sin(i * alpha)) # Combine jackson and cheby coeffs jch = ch * jch return ch, jch
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lanczos_op(f, s, order=30): r""" Perform the lanczos approximation of the signal s. Parameters f: Filter s : ndarray Signal to approximate. order : int Degree of the lanczos approximation. (default = 30) Returns ------- L : ndarray lanczos approximation of s """
    G = f.G
    Nf = len(f.g)  # Number of filters in the bank.

    # To have the right shape for the output array depending on the signal dim
    try:
        Nv = np.shape(s)[1]
        is2d = True
        c = np.zeros((G.N*Nf, Nv))
    except IndexError:
        Nv = 1
        is2d = False
        c = np.zeros((G.N*Nf))

    tmpN = np.arange(G.N, dtype=int)
    for j in range(Nv):
        # Build an orthonormal Krylov basis V and tridiagonal H with the
        # Lanczos iteration on the (densified) Laplacian.
        if is2d:
            V, H, _ = lanczos(G.L.toarray(), order, s[:, j])
        else:
            V, H, _ = lanczos(G.L.toarray(), order, s)

        # Eigendecomposition of the small tridiagonal matrix.
        Eh, Uh = np.linalg.eig(H)
        Eh[Eh < 0] = 0  # Clip negative Ritz values (numerical noise).
        fe = f.evaluate(Eh)
        V = np.dot(V, Uh)

        for i in range(Nf):
            # NOTE(review): fe[:][i] is equivalent to fe[i]; kept as-is.
            if is2d:
                c[tmpN + i*G.N, j] = np.dot(V, fe[:][i] * np.dot(V.T, s[:, j]))
            else:
                c[tmpN + i*G.N] = np.dot(V, fe[:][i] * np.dot(V.T, s))

    return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs): r"""Interpolate a graph signal. Parameters G : Graph f_subsampled : ndarray A graph signal on the graph G. keep_inds : ndarray List of indices on which the signal is sampled. order : int Degree of the Chebyshev approximation (default = 100). reg_eps : float The regularized graph Laplacian is $\bar{L}=L+\epsilon I$. A smaller epsilon may lead to better regularization, but will also require a higher order Chebyshev approximation. Returns ------- f_interpolated : ndarray Interpolated graph signal on the full vertex set of G. References See :cite:`pesenson2009variational` """
    # Regularized Laplacian \bar{L} = L + eps I (always invertible).
    L_reg = G.L + reg_eps * sparse.eye(G.N)
    # Reuse the Kron reduction and Green kernel cached on G.mr if present.
    # NOTE(review): getattr() assumes G.mr exposes them as attributes —
    # confirm against how the multiresolution routine stores them.
    K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds))
    green_kernel = getattr(G.mr, 'green_kernel',
                           filters.Filter(G, lambda x: 1. / (reg_eps + x)))

    alpha = K_reg.dot(f_subsampled)

    # Upsample: place the coefficients on the kept vertices, zeros elsewhere.
    try:
        Nv = np.shape(f_subsampled)[1]
        f_interpolated = np.zeros((G.N, Nv))
    except IndexError:
        f_interpolated = np.zeros((G.N))

    f_interpolated[keep_inds] = alpha

    # Smooth with the Green kernel 1/(eps + x), approximated to `order`.
    return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kron_reduction(G, ind): r"""Compute the Kron reduction. This function perform the Kron reduction of the weight matrix in the graph *G*, with boundary nodes labeled by *ind*. This function will create a new graph with a weight matrix Wnew that contain only boundary nodes and is computed as the Schur complement of the original matrix with respect to the selected indices. Parameters G : Graph or sparse matrix Graph structure or weight matrix ind : list indices of the nodes to keep Returns ------- Gnew : Graph or sparse matrix New graph structure or weight matrix References See :cite:`dorfler2013kron` """
    if isinstance(G, graphs.Graph):
        if G.lap_type != 'combinatorial':
            msg = 'Unknown reduction for {} Laplacian.'.format(G.lap_type)
            raise NotImplementedError(msg)

        if G.is_directed():
            msg = 'This method only work for undirected graphs.'
            raise NotImplementedError(msg)
        L = G.L

    else:
        # A bare (sparse) Laplacian/weight matrix was passed instead.
        L = G

    N = np.shape(L)[0]
    ind_comp = np.setdiff1d(np.arange(N, dtype=int), ind)

    # Block partition of L w.r.t. kept (ind) and eliminated (ind_comp) nodes.
    L_red = L[np.ix_(ind, ind)]
    L_in_out = L[np.ix_(ind, ind_comp)]
    L_out_in = L[np.ix_(ind_comp, ind)].tocsc()
    L_comp = L[np.ix_(ind_comp, ind_comp)].tocsc()

    # Schur complement: Lnew = L_red - L_in_out L_comp^{-1} L_out_in.
    Lnew = L_red - L_in_out.dot(linalg.spsolve(L_comp, L_out_in))

    # Make the laplacian symmetric if it is almost symmetric!
    if np.abs(Lnew - Lnew.T).sum() < np.spacing(1) * np.abs(Lnew).sum():
        Lnew = (Lnew + Lnew.T) / 2.

    if isinstance(G, graphs.Graph):
        # Suppress the diagonal ? This is a good question?
        # Recover a weight matrix from the reduced Laplacian.
        Wnew = sparse.diags(Lnew.diagonal(), 0) - Lnew
        Snew = Lnew.diagonal() - np.ravel(Wnew.sum(0))
        if np.linalg.norm(Snew, 2) >= np.spacing(1000):
            Wnew = Wnew + sparse.diags(Snew, 0)

        # Removing diagonal for stability
        # NOTE(review): subtracting a 1-D diagonal from a sparse matrix
        # broadcasts over rows — confirm this matches the intent of
        # "remove the diagonal".
        Wnew = Wnew - Wnew.diagonal()

        coords = G.coords[ind, :] if len(G.coords.shape) else np.ndarray(None)
        Gnew = graphs.Graph(Wnew, coords=coords,
                            lap_type=G.lap_type,
                            plotting=G.plotting)
    else:
        Gnew = Lnew

    return Gnew
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pyramid_analysis(Gs, f, **kwargs): r"""Compute the graph pyramid transform coefficients. Parameters Gs : list of graphs A multiresolution sequence of graph structures. f : ndarray Graph signal to analyze. h_filters : list A list of filter that will be used for the analysis and sythesis operator. If only one filter is given, it will be used for all levels. Default is h(x) = 1 / (2x+1) Returns ------- ca : ndarray Coarse approximation at each level pe : ndarray Prediction error at each level h_filters : list Graph spectral filters applied References See :cite:`shuman2013framework` and :cite:`pesenson2009variational`. """
    if np.shape(f)[0] != Gs[0].N:
        raise ValueError("PYRAMID ANALYSIS: The signal to analyze should have the same dimension as the first graph.")

    levels = len(Gs) - 1

    # check if the type of filters is right.
    # Default analysis kernel: h(x) = 1 / (2x + 1).
    h_filters = kwargs.pop('h_filters', lambda x: 1. / (2*x+1))

    if not isinstance(h_filters, list):
        if hasattr(h_filters, '__call__'):
            logger.warning('Converting filters into a list.')
            h_filters = [h_filters]
        else:
            # NOTE(review): this only logs and falls through — a later
            # step will fail if h_filters is neither list nor callable.
            # Also, other functions in this module use `_logger`; confirm
            # which name is actually defined at module scope.
            logger.error('Filters must be a list of functions.')

    if len(h_filters) == 1:
        # A single filter is reused at every level.
        h_filters = h_filters * levels

    elif len(h_filters) != levels:
        message = 'The number of filters must be one or equal to {}.'.format(levels)
        raise ValueError(message)

    ca = [f]  # Coarse approximations, finest level first.
    pe = []   # Prediction errors per level.

    for i in range(levels):
        # Low pass the signal
        s_low = _analysis(filters.Filter(Gs[i], h_filters[i]), ca[i], **kwargs)
        # Keep only the coefficient on the selected nodes
        ca.append(s_low[Gs[i+1].mr['idx']])
        # Compute prediction
        s_pred = interpolate(Gs[i], ca[i+1], Gs[i+1].mr['idx'], **kwargs)
        # Compute errors
        pe.append(ca[i] - s_pred)

    return ca, pe
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pyramid_synthesis(Gs, cap, pe, order=30, **kwargs): r"""Synthesize a signal from its pyramid coefficients. Parameters Gs : Array of Graphs A multiresolution sequence of graph structures. cap : ndarray Coarsest approximation of the original signal. pe : ndarray Prediction error at each level. use_exact : bool To use exact graph spectral filtering instead of the Chebyshev approximation. order : int Degree of the Chebyshev approximation (default=30). least_squares : bool To use the least squares synthesis (default=False). h_filters : ndarray The filters used in the analysis operator. These are required for least squares synthesis, but not for the direct synthesis method. use_landweber : bool To use the Landweber iteration approximation in the least squares synthesis. reg_eps : float Interpolation parameter. landweber_its : int Number of iterations in the Landweber approximation for least squares synthesis. landweber_tau : float Parameter for the Landweber iteration. Returns ------- reconstruction : ndarray The reconstructed signal. ca : ndarray Coarse approximations at each level """
least_squares = bool(kwargs.pop('least_squares', False)) def_ul = Gs[0].N > 3000 or Gs[0]._e is None or Gs[0]._U is None use_landweber = bool(kwargs.pop('use_landweber', def_ul)) reg_eps = float(kwargs.get('reg_eps', 0.005)) if least_squares and 'h_filters' not in kwargs: ValueError('h-filters not provided.') levels = len(Gs) - 1 if len(pe) != levels: ValueError('Gs and pe have different shapes.') ca = [cap] # Reconstruct each level for i in range(levels): if not least_squares: s_pred = interpolate(Gs[levels - i - 1], ca[i], Gs[levels - i].mr['idx'], order=order, reg_eps=reg_eps, **kwargs) ca.append(s_pred + pe[levels - i - 1]) else: ca.append(_pyramid_single_interpolation(Gs[levels - i - 1], ca[i], pe[levels - i - 1], h_filters[levels - i - 1], use_landweber=use_landweber, **kwargs)) ca.reverse() reconstruction = ca[0] return reconstruction, ca
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance', compute_full_eigen=False, root=None): r"""Compute a multiresolution of trees Parameters G : Graph Graph structure of a tree. Nlevel : Number of times to downsample and coarsen the tree root : int The index of the root of the tree. (default = 1) reduction_method : str The graph reduction method (default = 'resistance_distance') compute_full_eigen : bool To also compute the graph Laplacian eigenvalues for every tree in the sequence Returns ------- Gs : ndarray Ndarray, with each element containing a graph structure represent a reduced tree. subsampled_vertex_indices : ndarray Indices of the vertices of the previous tree that are kept for the subsequent tree. """
if not root: if hasattr(G, 'root'): root = G.root else: root = 1 Gs = [G] if compute_full_eigen: Gs[0].compute_fourier_basis() subsampled_vertex_indices = [] depths, parents = _tree_depths(G.A, root) old_W = G.W for lev in range(Nlevel): # Identify the vertices in the even depths of the current tree down_odd = round(depths) % 2 down_even = np.ones((Gs[lev].N)) - down_odd keep_inds = np.where(down_even == 1)[0] subsampled_vertex_indices.append(keep_inds) # There will be one undirected edge in the new graph connecting each # non-root subsampled vertex to its new parent. Here, we find the new # indices of the new parents non_root_keep_inds, new_non_root_inds = np.setdiff1d(keep_inds, root) old_parents_of_non_root_keep_inds = parents[non_root_keep_inds] old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds] # TODO new_non_root_parents = dsearchn(keep_inds, old_grandparents_of_non_root_keep_inds) old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W) i_inds = np.concatenate((new_non_root_inds, new_non_root_parents)) j_inds = np.concatenate((new_non_root_parents, new_non_root_inds)) new_N = np.sum(down_even) if reduction_method == "unweighted": new_weights = np.ones(np.shape(i_inds)) elif reduction_method == "sum": # TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds,old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]); old_weights_to_parents = old_W_weights[old_weights_to_parents_inds] # old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds); # TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds]) old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds] # old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds); new_weights = old_weights_to_parents + old_weights_parents_to_grandparents new_weights = np.concatenate((new_weights. 
new_weights)) elif reduction_method == "resistance_distance": # TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]) old_weights_to_parents = old_W_weight[sold_weights_to_parents_inds] # old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds); # TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds]) old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds] # old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds); new_weights = 1./(1./old_weights_to_parents + 1./old_weights_parents_to_grandparents) new_weights = np.concatenate(([new_weights, new_weights])) else: raise ValueError('Unknown graph reduction method.') new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)), shape=(new_N, new_N)) # Update parents new_root = np.where(keep_inds == root)[0] parents = np.zeros(np.shape(keep_inds)[0], np.shape(keep_inds)[0]) parents[:new_root - 1, new_root:] = new_non_root_parents # Update depths depths = depths[keep_inds] depths = depths/2. # Store new tree Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds], limits=G.limits, root=new_root) #Gs[lev].copy_graph_attributes(Gtemp, False) if compute_full_eigen: Gs[lev + 1].compute_fourier_basis() # Replace current adjacency matrix and root Gs.append(Gtemp) old_W = new_W root = new_root return Gs, subsampled_vertex_indices
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close_all(): r"""Close all opened windows."""
# Windows can be closed by releasing all references to them so they can be # garbage collected. May not be necessary to call close(). global _qtg_windows for window in _qtg_windows: window.close() _qtg_windows = [] global _qtg_widgets for widget in _qtg_widgets: widget.close() _qtg_widgets = [] global _plt_figures for fig in _plt_figures: _, plt, _ = _import_plt() plt.close(fig) _plt_figures = []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _plot_filter(filters, n, eigenvalues, sum, title, ax, **kwargs): r"""Plot the spectral response of a filter bank. Parameters n : int Number of points where the filters are evaluated. eigenvalues : boolean Whether to show the eigenvalues of the graph Laplacian. The eigenvalues should have been computed with :meth:`~pygsp.graphs.Graph.compute_fourier_basis`. By default, the eigenvalues are shown if they are available. sum : boolean Whether to plot the sum of the squared magnitudes of the filters. Default True if there is multiple filters. title : str Title of the figure. ax : :class:`matplotlib.axes.Axes` Axes where to draw the graph. Optional, created if not passed. Only available with the matplotlib backend. kwargs : dict Additional parameters passed to the matplotlib plot function. Useful for example to change the linewidth, linestyle, or set a label. Only available with the matplotlib backend. Returns ------- fig : :class:`matplotlib.figure.Figure` The figure the plot belongs to. Only with the matplotlib backend. ax : :class:`matplotlib.axes.Axes` The axes the plot belongs to. Only with the matplotlib backend. Notes ----- This function is only implemented for the matplotlib backend at the moment. Examples -------- """
if eigenvalues is None: eigenvalues = (filters.G._e is not None) if sum is None: sum = filters.n_filters > 1 if title is None: title = repr(filters) return _plt_plot_filter(filters, n=n, eigenvalues=eigenvalues, sum=sum, title=title, ax=ax, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _plot_spectrogram(G, node_idx): r"""Plot the graph's spectrogram. Parameters node_idx : ndarray Order to sort the nodes in the spectrogram. By default, does not reorder the nodes. Notes ----- This function is only implemented for the pyqtgraph backend at the moment. Examples -------- """
from pygsp import features

qtg, _, _ = _import_qtg()

# Lazily compute the spectrogram if it was not computed before.
if not hasattr(G, 'spectr'):
    features.compute_spectrogram(G)

M = G.spectr.shape[1]  # number of frequency bins
# Optionally reorder the nodes before flattening to a 1-D array.
spectr = G.spectr[node_idx, :] if node_idx is not None else G.spectr
spectr = np.ravel(spectr)
min_spec, max_spec = spectr.min(), spectr.max()

# 5-stop color map (RGBA, 8-bit per channel) over the normalized range.
pos = np.array([0., 0.25, 0.5, 0.75, 1.])
color = [[20, 133, 212, 255], [53, 42, 135, 255], [48, 174, 170, 255],
         [210, 184, 87, 255], [249, 251, 14, 255]]
color = np.array(color, dtype=np.ubyte)
cmap = qtg.ColorMap(pos, color)

# Rescale the spectrogram values to [0, 1] for the color map.
spectr = (spectr.astype(float) - min_spec) / (max_spec - min_spec)

w = qtg.GraphicsWindow()
w.setWindowTitle("Spectrogram of {}".format(G.__repr__(limit=4)))

label = 'frequencies {}:{:.2f}:{:.2f}'.format(0, G.lmax/M, G.lmax)
v = w.addPlot(labels={'bottom': 'nodes',
                      'left': label})
v.setAspectLocked()

# One square marker per (node, frequency) pair, colored by magnitude.
spi = qtg.ScatterPlotItem(np.repeat(np.arange(G.N), M),
                          np.ravel(np.tile(np.arange(M), (1, G.N))),
                          pxMode=False,
                          symbol='s',
                          size=1,
                          brush=cmap.map(spectr, 'qcolor'))
v.addItem(spi)

# Keep a module-level reference so the Qt window is not garbage-collected.
global _qtg_windows
_qtg_windows.append(w)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def classification_tikhonov(G, y, M, tau=0): r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- Create a ground truth signal: Construct a measurement signal from a binary mask: Solve the classification problem by reconstructing the signal: Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. """
y[M == False] = 0 Y = _to_logits(y.astype(np.int)) return regression_tikhonov(G, Y, M, tau)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regression_tikhonov(G, y, M, tau=0): r"""Solve a regression problem on graph via Tikhonov minimization. The function solves .. math:: \operatorname*{arg min}_x \| M x - y \|_2^2 + \tau \ x^T L x if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_x x^T L x \ \text{ s. t. } \ y = M x otherwise. Parameters G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- x : array, length G.n_vertices Recovered values :math:`x`. Examples -------- Create a smooth ground truth signal: Construct a measurement signal from a binary mask: Solve the regression problem by reconstructing the signal: Plot the results: """
if tau > 0: y[M == False] = 0 if sparse.issparse(G.L): def Op(x): return (M * x.T).T + tau * (G.L.dot(x)) LinearOp = sparse.linalg.LinearOperator([G.N, G.N], Op) if y.ndim > 1: sol = np.empty(shape=y.shape) res = np.empty(shape=y.shape[1]) for i in range(y.shape[1]): sol[:, i], res[i] = sparse.linalg.cg( LinearOp, y[:, i]) else: sol, res = sparse.linalg.cg(LinearOp, y) # TODO: do something with the residual... return sol else: # Creating this matrix may be problematic in term of memory. # Consider using an operator instead... if type(G.L).__module__ == np.__name__: LinearOp = np.diag(M*1) + tau * G.L return np.linalg.solve(LinearOp, M * y) else: if np.prod(M.shape) != G.n_vertices: raise ValueError("M should be of size [G.n_vertices,]") indl = M indu = (M == False) Luu = G.L[indu, :][:, indu] Wul = - G.L[indu, :][:, indl] if sparse.issparse(G.L): sol_part = sparse.linalg.spsolve(Luu, Wul.dot(y[indl])) else: sol_part = np.linalg.solve(Luu, np.matmul(Wul, y[indl])) sol = y.copy() sol[indu] = sol_part return sol
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_signal(self, signal, name): r"""Attach a signal to the graph. Attached signals can be accessed (and modified or deleted) through the :attr:`signals` dictionary. Parameters signal : array_like A sequence that assigns a value to each vertex. The value of the signal at vertex `i` is ``signal[i]``. name : String Name of the signal used as a key in the :attr:`signals` dictionary. Examples -------- {'mysignal': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])} """
# Validate the signal (shape must match the number of vertices), then
# register it under the given name; an existing signal is overwritten.
self.signals[name] = self._check_signal(signal)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subgraph(self, vertices): r"""Create a subgraph from a list of vertices. Parameters vertices : list Vertices to keep. Either a list of indices or an indicator function. Returns ------- subgraph : :class:`Graph` Subgraph. Examples -------- array([[0., 0., 3.], [0., 0., 4.], [3., 4., 0.]]) """
adjacency = self.W[vertices, :][:, vertices] try: coords = self.coords[vertices] except AttributeError: coords = None graph = Graph(adjacency, self.lap_type, coords, self.plotting) for name, signal in self.signals.items(): graph.set_signal(signal[vertices], name) return graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_components(self): r"""Split the graph into connected components. See :func:`is_connected` for the method used to determine connectedness. Returns ------- graphs : list A list of graph structures. Each having its own node list and weight matrix. If the graph is directed, add into the info parameter the information about the source nodes and the sink nodes. Examples -------- """
# Split the graph into its connected components with an iterative
# depth-first traversal over the (undirected) adjacency structure.
if self.A.shape[0] != self.A.shape[1]:
    self.logger.error('Inconsistent shape to extract components. '
                      'Square matrix required.')
    return None

if self.is_directed():
    raise NotImplementedError('Directed graphs not supported yet.')

graphs = []

# One flag per vertex: True once the vertex has been assigned to a component.
visited = np.zeros(self.A.shape[0], dtype=bool)
# indices = [] # Assigned but never used

while not visited.all():
    # pick a node not visted yet
    stack = set(np.nonzero(~visited)[0][[0]])
    comp = []

    while len(stack):
        v = stack.pop()
        if not visited[v]:
            comp.append(v)
            visited[v] = True

            # Add indices of nodes not visited yet and accessible from
            # v
            stack.update(set([idx for idx in self.A[v, :].nonzero()[1]
                              if not visited[idx]]))

    comp = sorted(comp)
    self.logger.info(('Constructing subgraph for component of '
                      'size {}.').format(len(comp)))
    # Each component becomes its own Graph; the original vertex indices
    # are kept in the `info` attribute for traceability.
    G = self.subgraph(comp)
    G.info = {'orig_idx': comp}
    graphs.append(G)

return graphs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_laplacian(self, lap_type='combinatorial'): r"""Compute a graph Laplacian. For undirected graphs, the combinatorial Laplacian is defined as .. math:: L = D - W, where :math:`W` is the weighted adjacency matrix and :math:`D` the weighted degree matrix. The normalized Laplacian is defined as .. math:: L = I - D^{-1/2} W D^{-1/2}, where :math:`I` is the identity matrix. For directed graphs, the Laplacians are built from a symmetrized version of the weighted adjacency matrix that is the average of the weighted adjacency matrix and its transpose. As the Laplacian is defined as the divergence of the gradient, it is not affected by the orientation of the edges. For both Laplacians, the diagonal entries corresponding to disconnected nodes (i.e., nodes with degree zero) are set to zero. Once computed, the Laplacian is accessible by the attribute :attr:`L`. Parameters lap_type : {'combinatorial', 'normalized'} The kind of Laplacian to compute. Default is combinatorial. Examples -------- Combinatorial and normalized Laplacians of an undirected graph. array([[ 2., -2., 0.], [-2., 3., -1.], [ 0., -1., 1.]]) array([[ 1. , -0.81649658, 0. ], [-0.81649658, 1. , -0.57735027], [ 0. , -0.57735027, 1. ]]) Combinatorial and normalized Laplacians of a directed graph. array([[ 2. , -2. , 0. ], [-2. , 2.5, -0.5], [ 0. , -0.5, 0.5]]) array([[ 1. , -0.89442719, 0. ], [-0.89442719, 1. , -0.4472136 ], [ 0. , -0.4472136 , 1. ]]) The Laplacian is defined as the divergence of the gradient. See :meth:`compute_differential_operator` for details. True The Laplacians have a bounded spectrum. True True """
if lap_type != self.lap_type:
    # Those attributes are invalidated when the Laplacian is changed.
    # Alternative: don't allow the user to change the Laplacian.
    self._lmax = None
    self._U = None
    self._e = None
    self._coherence = None
    self._D = None

self.lap_type = lap_type

if not self.is_directed():
    W = self.W
else:
    # The Laplacian is orientation-independent: build it from the
    # symmetrized adjacency (average of W and W^T).
    W = utils.symmetrize(self.W, method='average')

if lap_type == 'combinatorial':
    # L = D - W with D the diagonal weighted-degree matrix.
    D = sparse.diags(self.dw)
    self.L = D - W
elif lap_type == 'normalized':
    # L = I - D^{-1/2} W D^{-1/2}. Compute d = dw^{-1/2} only where the
    # degree is non-zero to avoid division by zero on isolated vertices.
    d = np.zeros(self.n_vertices)
    disconnected = (self.dw == 0)
    np.power(self.dw, -0.5, where=~disconnected, out=d)
    D = sparse.diags(d)
    self.L = sparse.identity(self.n_vertices) - D * W * D
    # Diagonal entries of disconnected vertices are set to zero
    # (instead of the 1 left by the identity above).
    self.L[disconnected, disconnected] = 0
    self.L.eliminate_zeros()
else:
    raise ValueError('Unknown Laplacian type {}'.format(lap_type))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_signal(self, s): r"""Check if signal is valid."""
s = np.asanyarray(s) if s.shape[0] != self.n_vertices: raise ValueError('First dimension must be the number of vertices ' 'G.N = {}, got {}.'.format(self.N, s.shape)) return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dirichlet_energy(self, x): r"""Compute the Dirichlet energy of a signal defined on the vertices. The Dirichlet energy of a signal :math:`x` is defined as .. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2 = \frac12 \sum_{i,j} W[i, j] (x[j] - x[i])^2 for the combinatorial Laplacian, and .. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2 = \frac12 \sum_{i,j} W[i, j] \left( \frac{x[j]}{d[j]} - \frac{x[i]}{d[i]} \right)^2 for the normalized Laplacian, where :math:`d` is the weighted degree :attr:`dw`, :math:`\nabla_\mathcal{G} x = D^\top x` and :math:`D` is the differential operator :attr:`D`. See :meth:`grad` for the definition of the gradient :math:`\nabla_\mathcal{G}`. Parameters x : array_like Signal of length :attr:`n_vertices` living on the vertices. Returns ------- energy : float The Dirichlet energy of the graph signal. See Also -------- grad : compute the gradient of a vertex signal Examples -------- Non-directed graph: 8.0 array([2., 0., 2., 0.]) Directed graph: 4.0 array([1.41421356, 0. , 1.41421356, 0. ]) """
x = self._check_signal(x) return x.T.dot(self.L.dot(x))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dw(self): r"""The weighted degree of vertices. For undirected graphs, the weighted degree of the vertex :math:`v_i` is defined as .. math:: d[i] = \sum_j W[j, i] = \sum_j W[i, j], where :math:`W` is the weighted adjacency matrix :attr:`W`. For directed graphs, the weighted degree of the vertex :math:`v_i` is defined as .. math:: d[i] = \frac12 (d^\text{in}[i] + d^\text{out}[i]) = \frac12 (\sum_j W[j, i] + \sum_j W[i, j]), i.e., as the average of the in and out degrees. Examples -------- Undirected graph: [1 2 1] [1 3 2] Directed graph: [0.5 1.5 1. ] [0.5 2.5 2. ] """
if self._dw is None: if not self.is_directed(): # Shortcut for undirected graphs. self._dw = np.ravel(self.W.sum(axis=0)) else: degree_in = np.ravel(self.W.sum(axis=0)) degree_out = np.ravel(self.W.sum(axis=1)) self._dw = (degree_in + degree_out) / 2 return self._dw
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lmax(self): r"""Largest eigenvalue of the graph Laplacian. Can be exactly computed by :func:`compute_fourier_basis` or approximated by :func:`estimate_lmax`. """
if self._lmax is None: self.logger.warning('The largest eigenvalue G.lmax is not ' 'available, we need to estimate it. ' 'Explicitly call G.estimate_lmax() or ' 'G.compute_fourier_basis() ' 'once beforehand to suppress the warning.') self.estimate_lmax() return self._lmax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_upper_bound(self): r"""Return an upper bound on the eigenvalues of the Laplacian."""
if self.lap_type == 'normalized':
    return 2  # Equal iff the graph is bipartite.
elif self.lap_type == 'combinatorial':
    # Collect several known upper bounds and return the tightest one.
    bounds = []
    # Equal for full graphs.
    bounds += [self.n_vertices * np.max(self.W)]
    # Gershgorin circle theorem. Equal for regular bipartite graphs.
    # Special case of the below bound.
    bounds += [2 * np.max(self.dw)]
    # Anderson, Morley, Eigenvalues of the Laplacian of a graph.
    # Equal for regular bipartite graphs.
    if self.n_edges > 0:
        sources, targets, _ = self.get_edge_list()
        bounds += [np.max(self.dw[sources] + self.dw[targets])]
    # Merris, A note on Laplacian graph eigenvalues.
    if not self.is_directed():
        W = self.W
    else:
        W = utils.symmetrize(self.W, method='average')
    m = W.dot(self.dw) / self.dw  # Mean degree of adjacent vertices.
    bounds += [np.max(self.dw + m)]
    # Good review: On upper bounds for Laplacian graph eigenvalues.
    return min(bounds)
else:
    raise ValueError('Unknown Laplacian type '
                     '{}'.format(self.lap_type))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_edge_list(self): r"""Return an edge list, an alternative representation of the graph. Each edge :math:`e_k = (v_i, v_j) \in \mathcal{E}` from :math:`v_i` to :math:`v_j` is associated with the weight :math:`W[i, j]`. For each edge :math:`e_k`, the method returns :math:`(i, j, W[i, j])` as `(sources[k], targets[k], weights[k])`, with :math:`i \in [0, |\mathcal{V}|-1], j \in [0, |\mathcal{V}|-1], k \in [0, |\mathcal{E}|-1]`. Returns ------- sources : vector of int Source node indices. targets : vector of int Target node indices. weights : vector of float Edge weights. Notes ----- The weighted adjacency matrix is the canonical form used in this package to represent a graph as it is the easiest to work with when considering spectral methods. Edge orientation (i.e., which node is the source or the target) is arbitrary for undirected graphs. The implementation uses the upper triangular part of the adjacency matrix, hence :math:`i \leq j \ \forall k`. Examples -------- Edge list of a directed graph. ([0, 1, 1], [1, 0, 2], [3, 3, 4]) Edge list of an undirected graph. ([0, 1], [1, 2], [3, 4]) """
if self.is_directed(): W = self.W.tocoo() else: W = sparse.triu(self.W, format='coo') sources = W.row targets = W.col weights = W.data assert self.n_edges == sources.size == targets.size == weights.size return sources, targets, weights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prox_tv(x, gamma, G, A=None, At=None, nu=1, tol=10e-4, maxit=200, use_matrix=True): r""" Total Variation proximal operator for graphs. This function computes the TV proximal operator for graphs. The TV norm is the one norm of the gradient. The gradient is defined in the function :meth:`pygsp.graphs.Graph.grad`. This function requires the PyUNLocBoX to be executed. This function solves: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \|x\|_{TV}` Parameters x: int Input signal gamma: ndarray Regularization parameter G: graph object Graphs structure A: lambda function Forward operator, this parameter allows to solve the following problem: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \| A x\|_{TV}` (default = Id) At: lambda function Adjoint operator. (default = Id) nu: float Bound on the norm of the operator (default = 1) tol: float Stops criterion for the loop. The algorithm will stop if : :math:`\frac{n(t) - n(t - 1)} {n(t)} < tol` where :math:`n(t) = f(x) + 0.5 \|x-y\|_2^2` is the objective function at iteration :math:`t` (default = :math:`10e-4`) maxit: int Maximum iteration. (default = 200) use_matrix: bool If a matrix should be used. (default = True) Returns ------- sol: solution Examples -------- """
if A is None: def A(x): return x if At is None: def At(x): return x tight = 0 l1_nu = 2 * G.lmax * nu if use_matrix: def l1_a(x): return G.Diff * A(x) def l1_at(x): return G.Diff * At(D.T * x) else: def l1_a(x): return G.grad(A(x)) def l1_at(x): return G.div(x) functions, _ = _import_pyunlocbox() functions.norm_l1(x, gamma, A=l1_a, At=l1_at, tight=tight, maxit=maxit, verbose=verbose, tol=tol)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_regular(self): r""" Troubleshoot a given regular graph. """
# Run all the structural checks and accumulate the failure phrases;
# every check is evaluated (no short-circuit), as before.
checks = [
    # symmetry
    (np.abs(self.A - self.A.T).sum() > 0, ' is not symmetric,'),
    # parallel edges
    (self.A.max(axis=None) > 1, ' has parallel edges,'),
    # d-regularity
    (np.min(self.d) != np.max(self.d), ' is not d-regular,'),
    # self-loops
    (bool(self.A.diagonal().any()), ' has self loop.'),
]
msg = 'The given matrix'
warn = False
for failed, phrase in checks:
    if failed:
        warn = True
        msg += phrase
if warn:
    # Drop the trailing comma (or period) and close the sentence.
    self.logger.warning('{}.'.format(msg[:-1]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _break_signals(self): r"""Break N-dimensional signals into N 1D signals."""
# Iterate over a snapshot of the keys: the dictionary is mutated below.
for name in list(self.signals.keys()):
    signal = self.signals[name]
    if signal.ndim != 2:
        continue
    # Split each column into its own 1-D signal, suffixed by dimension.
    for dim, column in enumerate(signal.T):
        self.signals['{}_{}'.format(name, dim)] = column
    del self.signals[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _join_signals(self): r"""Join N 1D signals into one N-dimensional signal."""
# Group signal names by their base name (the part before the last '_').
groups = dict()
for name in self.signals:
    base = name.rsplit('_', 1)[0]
    groups.setdefault(base, []).append(name)

# Stack each multi-member group back into one N-dimensional signal.
for base, members in groups.items():
    if len(members) > 1:
        members = sorted(members)  # ensure dim ordering (_0, _1, etc.)
        stacked = np.stack([self.signals[m] for m in members], axis=1)
        self.signals[base] = stacked
        for m in members:
            del self.signals[m]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_networkx(self): r"""Export the graph to NetworkX. Edge weights are stored as an edge attribute, under the name "weight". Signals are stored as node attributes, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Returns ------- graph : :class:`networkx.Graph` A NetworkX graph object. See Also -------- to_graphtool : export to graph-tool save : save to a file Examples -------- Name: Path Type: DiGraph Number of nodes: 4 Number of edges: 3 Average in degree: 0.7500 Average out degree: 0.7500 True NodeView((0, 1, 2, 3)) OutEdgeView([(0, 1), (1, 2), (2, 3)]) {'signal': 2.3} {'weight': 1.0} Another common goal is to use NetworkX to compute some properties to be be imported back in the PyGSP as signals. """
nx = _import_networkx()

def convert(number):
    # NetworkX accepts arbitrary python objects as attributes, but:
    # * the GEXF writer does not accept any NumPy types (on signals),
    # * the GraphML writer does not accept NumPy ints.
    if issubclass(number.dtype.type, (np.integer, np.bool_)):
        return int(number)
    else:
        return float(number)

def edges():
    # Yield (source, target, attributes) triplets for add_edges_from.
    for source, target, weight in zip(*self.get_edge_list()):
        yield int(source), int(target), {'weight': convert(weight)}

def nodes():
    # Yield (vertex, attributes) pairs; one attribute per graph signal.
    for vertex in range(self.n_vertices):
        signals = {name: convert(signal[vertex])
                   for name, signal in self.signals.items()}
        yield vertex, signals

# Split N-dimensional signals into 1-D ones before export. Note that
# the generators above are lazy: they see the broken-up signals because
# they are only consumed by the add_*_from calls below.
self._break_signals()

graph = nx.DiGraph() if self.is_directed() else nx.Graph()
graph.add_nodes_from(nodes())
graph.add_edges_from(edges())

graph.name = self.__class__.__name__

return graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_graphtool(self): r"""Export the graph to graph-tool. Edge weights are stored as an edge property map, under the name "weight". Signals are stored as vertex property maps, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Returns ------- graph : :class:`graph_tool.Graph` A graph-tool graph object. See Also -------- to_networkx : export to NetworkX save : save to a file Examples -------- True 2.3 1.0 Another common goal is to use graph-tool to compute some properties to be imported back in the PyGSP as signals. """
# See gt.value_types() for the list of accepted types. # See the definition of _type_alias() for a list of aliases. # Mapping from https://docs.scipy.org/doc/numpy/user/basics.types.html. convert = { np.bool_: 'bool', np.int8: 'int8_t', np.int16: 'int16_t', np.int32: 'int32_t', np.int64: 'int64_t', np.short: 'short', np.intc: 'int', np.uintc: 'unsigned int', np.long: 'long', np.longlong: 'long long', np.uint: 'unsigned long', np.single: 'float', np.double: 'double', np.longdouble: 'long double', } gt = _import_graphtool() graph = gt.Graph(directed=self.is_directed()) sources, targets, weights = self.get_edge_list() graph.add_edge_list(np.asarray((sources, targets)).T) try: dtype = convert[weights.dtype.type] except KeyError: raise TypeError("Type {} of the edge weights is not supported." .format(weights.dtype)) prop = graph.new_edge_property(dtype) prop.get_array()[:] = weights graph.edge_properties['weight'] = prop self._break_signals() for name, signal in self.signals.items(): try: dtype = convert[signal.dtype.type] except KeyError: raise TypeError("Type {} of signal {} is not supported." .format(signal.dtype, name)) prop = graph.new_vertex_property(dtype) prop.get_array()[:] = signal graph.vertex_properties[name] = prop return graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_networkx(cls, graph, weight='weight'): r"""Import a graph from NetworkX. Edge weights are retrieved as an edge attribute, under the name specified by the ``weight`` parameter. Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters graph : :class:`networkx.Graph` A NetworkX graph object. weight : string or None, optional The edge attribute that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- The nodes are ordered according to :meth:`networkx.Graph.nodes`. In NetworkX, node attributes need not be set for every node. If a node attribute is not set for a node, a NaN is assigned to the corresponding signal for that node. If the graph is a :class:`networkx.MultiGraph`, multiedges are aggregated by summation. See Also -------- from_graphtool : import from graph-tool load : load from a file Examples -------- NodeView((1, 2, 3, 4)) array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) {'sig': array([ nan, nan, nan, 3.1416])} """
nx = _import_networkx() from .graph import Graph adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight) graph_pg = Graph(adjacency) for i, node in enumerate(graph.nodes()): for name in graph.nodes[node].keys(): try: signal = graph_pg.signals[name] except KeyError: signal = np.full(graph_pg.n_vertices, np.nan) graph_pg.set_signal(signal, name) try: signal[i] = graph.nodes[node][name] except KeyError: pass # attribute not set for node graph_pg._join_signals() return graph_pg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_graphtool(cls, graph, weight='weight'): r"""Import a graph from graph-tool. Edge weights are retrieved as an edge property, under the name specified by the ``weight`` parameter. Signals are retrieved from node properties, and stored in the :attr:`signals` dictionary under the property name. `N`-dimensional signals that were broken during export are joined. Parameters graph : :class:`graph_tool.Graph` A graph-tool graph object. weight : string The edge property that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- If the graph has multiple edge connecting the same two nodes, a sum over the edges is taken to merge them. See Also -------- from_networkx : import from NetworkX load : load from a file Examples -------- array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) {'sig': PropertyArray([ nan, nan, nan, 3.1416])} """
gt = _import_graphtool()
import graph_tool.spectral
from .graph import Graph

# Edge property map holding the weights; None means every weight is 1.
weight = graph.edge_properties.get(weight, None)
# NOTE(review): the adjacency is transposed below, presumably to match
# the PyGSP edge-orientation convention — confirm against the
# graph_tool.spectral.adjacency documentation.
adjacency = gt.spectral.adjacency(graph, weight=weight)
graph_pg = Graph(adjacency.T)

# Import every vertex property map as a graph signal under its name.
for name, signal in graph.vertex_properties.items():
    graph_pg.set_signal(signal.get_array(), name)

# Re-assemble N-dimensional signals that were broken up on export.
graph_pg._join_signals()
return graph_pg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(cls, path, fmt=None, backend=None): r"""Load a graph from a file. Edge weights are retrieved as an edge attribute named "weight". Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters path : string Path to the file from which to load the graph. fmt : {'graphml', 'gml', 'gexf', None}, optional Format in which the graph is saved. Guessed from the filename extension if None. backend : {'networkx', 'graph-tool', None}, optional Library used to load the graph. Automatically chosen if None. Returns ------- graph : :class:`Graph` The loaded graph. See Also -------- save : save a graph to a file from_networkx : load with NetworkX then import in the PyGSP from_graphtool : load with graph-tool then import in the PyGSP Notes ----- A lossless round-trip is only guaranteed if the graph (and its signals) is saved and loaded with the same backend. Loading from other formats is possible by loading in NetworkX or graph-tool, and importing to the PyGSP. The proposed formats are however tested for faithful round-trips. Examples -------- """
if fmt is None: fmt = os.path.splitext(path)[1][1:] if fmt not in ['graphml', 'gml', 'gexf']: raise ValueError('Unsupported format {}.'.format(fmt)) def load_networkx(path, fmt): nx = _import_networkx() load = getattr(nx, 'read_' + fmt) graph = load(path) return cls.from_networkx(graph) def load_graphtool(path, fmt): gt = _import_graphtool() graph = gt.load_graph(path, fmt=fmt) return cls.from_graphtool(graph) if backend == 'networkx': return load_networkx(path, fmt) elif backend == 'graph-tool': return load_graphtool(path, fmt) elif backend is None: try: return load_networkx(path, fmt) except ImportError: try: return load_graphtool(path, fmt) except ImportError: raise ImportError('Cannot import networkx nor graph-tool.') else: raise ValueError('Unknown backend {}.'.format(backend))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, path, fmt=None, backend=None): r"""Save the graph to a file. Edge weights are stored as an edge attribute, under the name "weight". Signals are stored as node attributes, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Supported formats are: * GraphML_, a comprehensive XML format. `Wikipedia <https://en.wikipedia.org/wiki/GraphML>`_. Supported by NetworkX_, graph-tool_, NetworKit_, igraph_, Gephi_, Cytoscape_, SocNetV_. * GML_ (Graph Modelling Language), a simple non-XML format. `Wikipedia <https://wikipedia.org/wiki/Graph_Modelling_Language>`_. Supported by NetworkX_, graph-tool_, NetworKit_, igraph_, Gephi_, Cytoscape_, SocNetV_, Tulip_. * GEXF_ (Graph Exchange XML Format), Gephi's XML format. Supported by NetworkX_, NetworKit_, Gephi_, Tulip_, ngraph_. If unsure, we recommend GraphML_. .. _GraphML: http://graphml.graphdrawing.org .. _GML: http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html .. _GEXF: https://gephi.org/gexf/format .. _NetworkX: https://networkx.github.io .. _graph-tool: https://graph-tool.skewed.de .. _NetworKit: https://networkit.github.io .. _igraph: https://igraph.org .. _ngraph: https://github.com/anvaka/ngraph .. _Gephi: https://gephi.org .. _Cytoscape: https://cytoscape.org .. _SocNetV: https://socnetv.org .. _Tulip: http://tulip.labri.fr Parameters path : string Path to the file where the graph is to be saved. fmt : {'graphml', 'gml', 'gexf', None}, optional Format in which to save the graph. Guessed from the filename extension if None. backend : {'networkx', 'graph-tool', None}, optional Library used to load the graph. Automatically chosen if None. 
See Also -------- load : load a graph from a file to_networkx : export as a NetworkX graph, and save with NetworkX to_graphtool : export as a graph-tool graph, and save with graph-tool Notes ----- A lossless round-trip is only guaranteed if the graph (and its signals) is saved and loaded with the same backend. Saving in other formats is possible by exporting to NetworkX or graph-tool, and using their respective saving functionality. The proposed formats are however tested for faithful round-trips. Edge weights and signal values are rounded at the sixth decimal when saving in ``fmt='gml'`` with ``backend='graph-tool'``. Examples -------- """
# Guess the format from the file extension when not given explicitly.
if fmt is None:
    fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
    raise ValueError('Unsupported format {}.'.format(fmt))

def _via_networkx(graph, path, fmt):
    # Export to NetworkX, then save with its writer for this format.
    nx = _import_networkx()
    writer = getattr(nx, 'write_' + fmt)
    writer(graph.to_networkx(), path)

def _via_graphtool(graph, path, fmt):
    # Export to graph-tool, then save with its own writer.
    graph.to_graphtool().save(path, fmt=fmt)

if backend == 'networkx':
    _via_networkx(self, path, fmt)
elif backend == 'graph-tool':
    _via_graphtool(self, path, fmt)
elif backend is None:
    # Try NetworkX first, fall back to graph-tool.
    try:
        _via_networkx(self, path, fmt)
    except ImportError:
        try:
            _via_graphtool(self, path, fmt)
        except ImportError:
            raise ImportError('Cannot import networkx nor graph-tool.')
else:
    raise ValueError('Unknown backend {}.'.format(backend))
def loadmat(path):
    r"""Load a MATLAB data file shipped with the pygsp package.

    Parameters
    ----------
    path : string
        Path to the mat file from the data folder, without the .mat
        extension.

    Returns
    -------
    data : dict
        Dictionary with variable names as keys, and loaded matrices as
        values.
    """
    # Read the raw bytes from the installed package, then hand them to
    # scipy through an in-memory buffer.
    raw = pkgutil.get_data('pygsp', 'data/' + path + '.mat')
    buffer = io.BytesIO(raw)
    return scipy.io.loadmat(buffer)
def distanz(x, y=None):
    r"""Calculate the pairwise distances between two sets of column vectors.

    Parameters
    ----------
    x : ndarray
        First set of column vectors (one point per column). A 1-D array
        is treated as a single row.
    y : ndarray, optional
        Second set of column vectors. Defaults to ``x`` itself.

    Returns
    -------
    d : ndarray
        Matrix of Euclidean distances between the columns of x and y.

    Raises
    ------
    ValueError
        If x and y do not have the same number of rows (dimensions).
    """
    if x.ndim < 2:
        x = x.reshape(1, -1)
    if y is None:
        y = x
    elif y.ndim < 2:
        y = y.reshape(1, -1)

    rows_x, cols_x = x.shape
    rows_y, cols_y = y.shape
    if rows_x != rows_y:
        raise ValueError("The sizes of x and y do not fit")

    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed via broadcasting.
    sq_x = (x * x).sum(axis=0)
    sq_y = (y * y).sum(axis=0)
    cross = np.dot(x.T, y)
    sq_dist = abs(sq_x[:, np.newaxis] + sq_y[np.newaxis, :] - 2 * cross)
    return np.sqrt(sq_dist)
def resistance_distance(G):
    r"""Compute the resistance distances of a graph.

    Parameters
    ----------
    G : Graph or sparse matrix
        Graph structure or Laplacian matrix (L).

    Returns
    -------
    rd : sparse matrix
        Distance matrix.

    Raises
    ------
    ValueError
        If a Graph is passed whose Laplacian is not combinatorial.

    References
    ----------
    :cite:`klein1993resistance`
    """
    if sparse.issparse(G):
        # A raw Laplacian matrix was passed directly.
        L = G.tocsc()
    else:
        # A Graph object: the formula only holds for the combinatorial
        # Laplacian, not the normalized one.
        if G.lap_type != 'combinatorial':
            raise ValueError('Need a combinatorial Laplacian.')
        L = G.L.tocsc()
    try:
        pseudo = sparse.linalg.inv(L)
    except RuntimeError:
        # The Laplacian is singular; fall back to the dense pseudo-inverse.
        pseudo = sparse.lil_matrix(np.linalg.pinv(L.toarray()))
    N = np.shape(L)[0]
    d = sparse.csc_matrix(pseudo.diagonal())
    # rd[i, j] = pseudo[i, i] + pseudo[j, j] - pseudo[i, j] - pseudo[j, i],
    # built by replicating the diagonal along rows and columns.
    rd = sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))).T \
        + sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))) \
        - pseudo - pseudo.T
    return rd
def symmetrize(W, method='average'):
    r"""Symmetrize a square matrix.

    Parameters
    ----------
    W : array_like
        Square matrix to be symmetrized.
    method : string
        * 'average' : symmetrize by averaging with the transpose.
        * 'maximum' : symmetrize by taking the maximum with the transpose.
        * 'fill' : fill in the zeros in both triangular parts; ambiguous
          entries are resolved by averaging the values.
        * 'tril' : consider the lower triangular part only.
        * 'triu' : consider the upper triangular part only.

    Raises
    ------
    ValueError
        If the matrix is not square or the method is unknown.
    """
    if W.shape[0] != W.shape[1]:
        raise ValueError('Matrix must be square.')

    if method == 'average':
        return (W + W.T) / 2

    if method == 'maximum':
        if sparse.issparse(W):
            # Sparse matrices have no element-wise maximum with transpose;
            # overwrite entries where the transpose is larger.
            larger = (W.T > W)
            return W - W.multiply(larger) + W.T.multiply(larger)
        return np.maximum(W, W.T)

    if method == 'fill':
        nonzero = (W > 0)  # Boolean type.
        if sparse.issparse(W):
            mask = (nonzero + nonzero.T) - nonzero
            W = W + mask.multiply(W.T)
        else:
            # Numpy boolean subtract is deprecated, so use xor/or instead.
            mask = np.logical_xor(np.logical_or(nonzero, nonzero.T), nonzero)
            W = W + mask * W.T
        # Entries present on both sides are averaged to resolve ambiguity.
        return symmetrize(W, method='average')

    if method in ['tril', 'triu']:
        extract = getattr(sparse if sparse.issparse(W) else np, method)
        # Keep one triangle, then mirror it onto the other side.
        return symmetrize(extract(W), method='maximum')

    raise ValueError('Unknown symmetrization method {}.'.format(method))
def compute_log_scales(lmin, lmax, Nscales, t1=1, t2=2):
    r"""Compute logarithmically spaced scales for wavelets.

    Parameters
    ----------
    lmin : float
        Smallest non-zero eigenvalue.
    lmax : float
        Largest eigenvalue, i.e. :py:attr:`pygsp.graphs.Graph.lmax`.
    Nscales : int
        Number of scales.
    t1, t2 : float
        Multipliers setting the smallest and largest scale respectively.

    Returns
    -------
    scales : ndarray
        List of scales of length Nscales, in decreasing order.
    """
    # Scales span [t1 / lmax, t2 / lmin], equally spaced in log space,
    # returned from the largest scale down to the smallest.
    log_upper = np.log(t2 / lmin)
    log_lower = np.log(t1 / lmax)
    return np.exp(np.linspace(log_upper, log_lower, Nscales))
def import_modules(names, src, dst):
    """Import modules in package.

    Each module ``src.name`` is imported and bound as attribute ``name``
    on the (already imported) module ``dst``.
    """
    target = sys.modules[dst]
    for module_name in names:
        imported = importlib.import_module('{}.{}'.format(src, module_name))
        setattr(target, module_name, imported)
def import_classes(names, src, dst):
    """Import classes in package from their implementation modules.

    Each class ``name`` is looked up in the module
    ``pygsp.<src>.<lowercased name>`` and bound on ``pygsp.<dst>``.
    """
    for name in names:
        # By convention each class lives in a module named after it,
        # lowercased (e.g. Graph -> pygsp.graphs.graph).
        module = importlib.import_module('pygsp.' + src + '.' + name.lower())
        setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
def import_functions(names, src, dst):
    """Import functions in package from their implementation modules.

    Each function ``name`` is looked up on the module ``pygsp.<src>``
    and bound on ``pygsp.<dst>``.
    """
    for name in names:
        # Unlike import_classes, all functions live directly in pygsp.<src>.
        module = importlib.import_module('pygsp.' + src)
        setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
def _handle_api_result(result: Optional[Dict[str, Any]]) -> Any:
    """
    Retrieve the 'data' field from the API result object.

    :param result: API result received from the HTTP API
    :return: the 'data' field of the result, or None if the result is
             not a dict
    :raise ActionFailed: the 'status' field is 'failed'
    """
    if not isinstance(result, dict):
        return None
    if result.get('status') == 'failed':
        raise ActionFailed(retcode=result.get('retcode'))
    return result.get('data')
def reduce(self) -> None:
    """
    Remove redundant segments by merging adjacent text segments.

    Since this class is implemented based on list, this method may
    require O(n) time.
    """
    pos = 1
    while pos < len(self):
        previous, current = self[pos - 1], self[pos]
        if previous.type == 'text' and current.type == 'text':
            # Fold the current text into its predecessor and drop it.
            previous.data['text'] += current.data['text']
            del self[pos]
        else:
            pos += 1
def extract_plain_text(self, reduce: bool = False) -> str:
    """
    Extract text segments from the message, joined by single space.

    :param reduce: reduce the message before extracting
    :return: the joined string
    """
    if reduce:
        self.reduce()
    texts = [seg.data['text'] for seg in self if seg.type == 'text']
    return ' '.join(texts)
def send_html_mail(subject, message, message_html, from_email, recipient_list,
                   priority=None, fail_silently=False, auth_user=None,
                   auth_password=None, headers={}):
    """
    Queue an HTML e-mail (with a plain-text alternative) for later sending.

    The message is stored in the mailer queue rather than sent
    immediately.  ``fail_silently``, ``auth_user`` and ``auth_password``
    are accepted for API compatibility but not used here.

    Returns 1 (number of messages queued).
    """
    from django.utils.encoding import force_text
    from django.core.mail import EmailMultiAlternatives
    from mailer.models import make_message

    priority = get_priority(priority)
    # need to do this in case subject used lazy version of ugettext
    subject = force_text(subject)
    message = force_text(message)
    msg = make_message(subject=subject,
                       body=message,
                       from_email=from_email,
                       to=recipient_list,
                       priority=priority)
    email = msg.email
    # Rebuild as a multi-alternative message so the HTML part can be
    # attached alongside the plain-text body.
    email = EmailMultiAlternatives(
        email.subject,
        email.body,
        email.from_email,
        email.to,
        headers=headers
    )
    email.attach_alternative(message_html, "text/html")
    msg.email = email
    msg.save()
    return 1
def make_message(subject="", body="", from_email=None, to=None, bcc=None,
                 attachments=None, headers=None, priority=None):
    """
    Creates a simple message for the email parameters supplied.
    The 'to' and 'bcc' lists are filtered using DontSendEntry.

    If needed, the 'email' attribute can be set to any instance of
    EmailMessage if e-mails with attachments etc. need to be supported.

    Call 'save()' on the result when it is ready to be sent, and not
    before.
    """
    # Strip out recipients that are on the don't-send list.
    to = filter_recipient_list(to)
    bcc = filter_recipient_list(bcc)
    core_msg = EmailMessage(
        subject=subject,
        body=body,
        from_email=from_email,
        to=to,
        bcc=bcc,
        attachments=attachments,
        headers=headers
    )
    # Wrap in an (unsaved) queue entry; the email is pickled on save.
    db_msg = Message(priority=priority)
    db_msg.email = core_msg
    return db_msg
def has_address(self, address):
    """
    Is the given address on the don't-send list?

    The comparison is case-insensitive.
    """
    queryset = self.filter(to_address__iexact=address)
    return queryset.exists()
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent:
    all high-priority messages first, then medium, then low, re-checking
    for newly arrived higher-priority messages between sends.

    Terminates once no non-deferred messages remain.
    """
    while True:
        hp_qs = Message.objects.high_priority().using('default')
        mp_qs = Message.objects.medium_priority().using('default')
        lp_qs = Message.objects.low_priority().using('default')
        while hp_qs.count() or mp_qs.count():
            # Drain high priority completely before touching medium.
            while hp_qs.count():
                for message in hp_qs.order_by("when_added"):
                    yield message
            # Yield medium one at a time so new high-priority mail
            # preempts the remainder.
            while hp_qs.count() == 0 and mp_qs.count():
                yield mp_qs.order_by("when_added")[0]
        # Low priority only runs when both other queues are empty.
        # NOTE(review): each .count() issues a fresh query; this polls
        # the database heavily on busy queues.
        while hp_qs.count() == 0 and mp_qs.count() == 0 and lp_qs.count():
            yield lp_qs.order_by("when_added")[0]
        if Message.objects.non_deferred().using('default').count() == 0:
            break
def send_all():
    """
    Send all eligible messages in the queue.

    Acquires a lock to prevent concurrent runs, sends messages in
    priority order, defers messages on SMTP/socket failures, and stops
    early when the per-run limits are reached.
    """
    # The actual backend to use for sending, defaulting to the Django default.
    # To make testing easier this is not stored at module level.
    EMAIL_BACKEND = getattr(
        settings,
        "MAILER_EMAIL_BACKEND",
        "django.core.mail.backends.smtp.EmailBackend"
    )
    acquired, lock = acquire_lock()
    if not acquired:
        # Another process is already sending; do nothing.
        return
    start_time = time.time()
    deferred = 0
    sent = 0
    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '{0}' to {1}".format(
                    message.subject,
                    ", ".join(message.to_addresses))
                )
                email = message.email
                if email is not None:
                    email.connection = connection
                    if not hasattr(email, 'reply_to'):
                        # Compatability fix for EmailMessage objects
                        # pickled when running < Django 1.8 and then
                        # unpickled under Django 1.8
                        email.reply_to = []
                    ensure_message_id(email)
                    email.send()
                    # connection can't be stored in the MessageLog
                    email.connection = None
                    message.email = email  # For the sake of MessageLog
                    MessageLog.objects.log(message, RESULT_SUCCESS)
                    sent += 1
                else:
                    # The pickled email could not be restored from the DB.
                    logging.warning("message discarded due to failure in converting from DB. Added on '%s' with priority '%s'" % (message.when_added, message.priority))  # noqa
                    message.delete()
            except (socket_error, smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPDataError,
                    smtplib.SMTPAuthenticationError) as err:
                # Transient failure: defer the message for a later run.
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, RESULT_FAILURE,
                                       log_message=str(err))
                deferred += 1
                # Get new connection, it case the connection itself has an error.
                connection = None
            # Check if we reached the limits for the current run
            if _limits_reached(sent, deferred):
                break
            _throttle_emails()
    finally:
        release_lock(lock)
    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
def send_loop():
    """
    Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP
    and sending messages if any are on queue.
    """
    while True:
        # NOTE(review): evaluating the queryset's truthiness runs a
        # query on every pass, so this polls the database while idle.
        while not Message.objects.all():
            logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP)
            time.sleep(EMPTY_QUEUE_SLEEP)
        send_all()
def import_name(name):
    """Import the object named by a dotted path, or pass through non-strings.

    ``'pkg.mod.Attr'`` imports ``pkg.mod`` and returns its ``Attr``;
    a plain module name such as ``'json'`` returns the module itself
    (the previous implementation crashed on dotless names because it
    called ``__import__('')``). If ``name`` is not a string it is
    returned unchanged, so already-imported objects can be passed.

    :param name: dotted path string, or any non-string object
    :return: the imported module/attribute, or ``name`` itself
    :raises ImportError: if the module cannot be imported
    """
    if not isinstance(name, str):
        return name
    import importlib
    module_path, _, attribute = name.rpartition('.')
    if not module_path:
        # No dot: the whole name is a module.
        return importlib.import_module(name)
    module = importlib.import_module(module_path)
    try:
        return getattr(module, attribute)
    except AttributeError:
        # The last component may be a submodule that is not yet imported
        # (``__import__`` with a fromlist handled this implicitly).
        return importlib.import_module(name)
def copy_plan(modeladmin, request, queryset):
    """
    Admin command for duplicating plans preserving quotas and pricings.

    Each copy is created unavailable and non-default so it can be edited
    before being exposed to users.
    """
    for plan in queryset:
        plan_copy = deepcopy(plan)
        # Clearing the pk makes save() insert a new row.
        plan_copy.id = None
        plan_copy.available = False
        plan_copy.default = False
        plan_copy.created = None
        plan_copy.save(force_insert=True)

        # Re-create the pricing and quota through-rows against the copy.
        for pricing in plan.planpricing_set.all():
            pricing.id = None
            pricing.plan = plan_copy
            pricing.save(force_insert=True)

        for quota in plan.planquota_set.all():
            quota.id = None
            quota.plan = plan_copy
            quota.save(force_insert=True)
def recalculate(self, amount, billing_info):
    """
    Calculate and return a pre-filled, unsaved Order for the given
    amount and billing data.

    The tax rate for a (tax number, country) pair is cached in the
    session so that the rate shown on the order preview matches the one
    applied to the final order.

    :param amount: net amount of the order
    :param billing_info: object with optional ``country`` and
        ``tax_number`` attributes (may be None)
    :raises ImproperlyConfigured: if PLANS_TAXATION_POLICY is not set
    """
    order = Order(pk=-1)
    order.amount = amount
    order.currency = self.get_currency()
    country = getattr(billing_info, 'country', None)
    if country is not None:  # idiomatic form of `not country is None`
        country = country.code
    tax_number = getattr(billing_info, 'tax_number', None)

    # Calculating tax can be complex task (e.g. VIES webservice call)
    # To ensure that tax calculated on order preview will be the same on
    # final order tax rate is cached for a given billing data (as this
    # value only depends on it)
    tax_session_key = "tax_%s_%s" % (tax_number, country)
    tax = self.request.session.get(tax_session_key)
    if tax is None:
        taxation_policy = getattr(settings, 'PLANS_TAXATION_POLICY', None)
        if not taxation_policy:
            raise ImproperlyConfigured('PLANS_TAXATION_POLICY is not set')
        taxation_policy = import_name(taxation_policy)
        tax = str(taxation_policy.get_tax_rate(tax_number, country))
        # Because taxation policy could return None which clutters with
        # saving this value into cache, we use str() representation of
        # this value
        self.request.session[tax_session_key] = tax

    order.tax = Decimal(tax) if tax != 'None' else None
    return order
def get_all_context(self):
    """
    Retrieves Plan and Pricing for current order creation, storing them
    on ``self.plan_pricing``, ``self.plan`` and ``self.pricing``.

    Raises Http404 if the plan is unavailable, customized for another
    user, or if the user tries to order a different plan than their
    current non-expired one (they must use the plan-change view instead).
    """
    self.plan_pricing = get_object_or_404(
        PlanPricing.objects.all().select_related('plan', 'pricing'),
        Q(pk=self.kwargs['pk']) & Q(plan__available=True) & (
            Q(plan__customized=self.request.user) | Q(plan__customized__isnull=True)))

    # User is not allowed to create new order for Plan when he has different Plan
    # He should use Plan Change View for this kind of action
    if not self.request.user.userplan.is_expired() and self.request.user.userplan.plan != self.plan_pricing.plan:
        raise Http404

    self.plan = self.plan_pricing.plan
    self.pricing = self.plan_pricing.pricing
def create_proforma_invoice(sender, instance, created, **kwargs):
    """
    Post-save signal receiver for Order.

    For every newly created Order, creates a proforma invoice, which is
    an order confirmation document.
    """
    # Only on first save — updates to an existing order must not issue
    # another proforma.
    if created:
        Invoice.create(instance, Invoice.INVOICE_TYPES['PROFORMA'])
def get_quota_value(self, user, quota_dict=None):
    """Return the quota value for a given user.

    :param user: user whose quotas are looked up (only consulted when
        ``quota_dict`` is not supplied)
    :param quota_dict: optional pre-fetched mapping of quota codes to
        values; avoids an extra lookup when already available
    :return: the value for this quota's code, or the default value
    """
    quotas = get_user_quota(user) if quota_dict is None else quota_dict
    return quotas.get(self.code, self.default_quota_value)
def send_template_email(recipients, title_template, body_template, context, language):
    """Sends e-mail using templating system.

    Renders the subject and body templates with ``context`` (augmented
    with site name/domain), optionally activating ``language`` for the
    duration of the rendering and send. Does nothing when
    SEND_PLANS_EMAILS is False.

    :raises ImproperlyConfigured: if DEFAULT_FROM_EMAIL is not set
    """
    send_emails = getattr(settings, 'SEND_PLANS_EMAILS', True)
    if not send_emails:
        return

    site_name = getattr(settings, 'SITE_NAME', 'Please define settings.SITE_NAME')
    domain = getattr(settings, 'SITE_URL', None)

    if domain is None:
        # Fall back to the sites framework when SITE_URL is not set;
        # LookupError means the sites app is not installed.
        try:
            Site = apps.get_model('sites', 'Site')
            current_site = Site.objects.get_current()
            site_name = current_site.name
            domain = current_site.domain
        except LookupError:
            pass

    context.update({'site_name': site_name, 'site_domain': domain})

    if language is not None:
        translation.activate(language)

    mail_title_template = loader.get_template(title_template)
    mail_body_template = loader.get_template(body_template)
    title = mail_title_template.render(context)
    body = mail_body_template.render(context)

    try:
        email_from = getattr(settings, 'DEFAULT_FROM_EMAIL')
    except AttributeError:
        raise ImproperlyConfigured('DEFAULT_FROM_EMAIL setting needed for sending e-mails')

    mail.send_mail(title, body, email_from, recipients)

    if language is not None:
        translation.deactivate()

    email_logger.info(u"Email (%s) sent to %s\nTitle: %s\n%s\n\n" % (language, recipients, title, body))
def _calculate_day_cost(self, plan, period):
    """
    Finds most fitted plan pricing for a given period, and calculate day cost.

    Pricings are scanned from the longest period downwards; the first
    one whose period fits within ``period`` is used. Returns the
    per-day price quantized to two decimal places.

    :raises ValueError: if the plan has no pricings at all
    """
    plan_pricings = plan.planpricing_set.order_by('-pricing__period').select_related('pricing')
    selected_pricing = None
    for plan_pricing in plan_pricings:
        # Keep the last candidate even if none fit, so the shortest
        # pricing is used as a fallback.
        selected_pricing = plan_pricing
        if plan_pricing.pricing.period <= period:
            break

    if selected_pricing:
        return (selected_pricing.price / selected_pricing.pricing.period).quantize(Decimal('1.00'))

    raise ValueError('Plan %s has no pricings.' % plan)
def get_change_price(self, plan_old, plan_new, period):
    """
    Calculates total price of plan change. Returns None if no payment
    is required (period missing/expired, or the new plan is not more
    expensive per day than the old one).
    """
    # No remaining period means nothing to charge for.
    if period is None or period < 1:
        return None

    old_day_cost = self._calculate_day_cost(plan_old, period)
    new_day_cost = self._calculate_day_cost(plan_new, period)

    if new_day_cost > old_day_cost:
        # Charge only the per-day difference over the remaining period.
        return self._calculate_final_price(period, new_day_cost - old_day_cost)
    return self._calculate_final_price(period, None)
def comparator(operator):
    """Wrap a VersionInfo binary op method in a type-check.

    The wrapped method returns NotImplemented for operands that are
    neither VersionInfo nor dict, letting Python fall back to the
    reflected operation.
    """
    @wraps(operator)
    def _type_checked(self, other):
        if not isinstance(other, (VersionInfo, dict)):
            return NotImplemented
        return operator(self, other)

    return _type_checked
def set_sampled_topics(self, sampled_topics):
    """
    Allocate sampled topics to the documents rather than estimate them.

    Automatically generate term-topic and document-topic matrices.
    A 1-D array is treated as a single sample and reshaped to
    ``(1, len(sampled_topics))``.

    :param sampled_topics: integer array of shape (samples, tokens) or
        (tokens,)
    """
    # `np.int` was removed in NumPy 1.24; accept any integer dtype
    # (int32, int64, ...) via np.issubdtype instead.
    assert np.issubdtype(sampled_topics.dtype, np.integer) and \
        sampled_topics.ndim <= 2

    if sampled_topics.ndim == 1:
        self.sampled_topics = \
            sampled_topics.reshape(1, sampled_topics.shape[0])
    else:
        self.sampled_topics = sampled_topics

    self.samples = self.sampled_topics.shape[0]

    self.tt = self.tt_comp(self.sampled_topics)
    self.dt = self.dt_comp(self.sampled_topics)
def dt_comp(self, sampled_topics):
    """
    Compute document-topic matrix from sampled_topics.

    :param sampled_topics: integer array of shape (samples, tokens)
    :return: array of shape (D, K, samples) with one document-topic
        matrix per sample
    """
    samples = sampled_topics.shape[0]
    dt = np.zeros((self.D, self.K, samples))
    # Delegate the per-sample aggregation to the compiled sampler.
    for s in range(samples):
        dt[:, :, s] = \
            samplers_lda.dt_comp(self.docid, sampled_topics[s, :],
                                 self.N, self.K, self.D, self.alpha)
    return dt