text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def crps_gaussian(x, mu, sig, grad=False):
    """
    Compute the CRPS of observations ``x`` relative to normally distributed
    forecasts with mean ``mu`` and standard deviation ``sig``:

        CRPS(N(mu, sig^2); x)

    Formula taken from Equation (5) of: Calibrated Probabilistic Forecasting
    Using Ensemble Model Output Statistics and Minimum CRPS Estimation.
    Gneiting, Raftery, Westveld, Goldman. Monthly Weather Review 2004.
    http://journals.ametsoc.org/doi/pdf/10.1175/MWR2904.1

    Parameters
    ----------
    x : scalar or np.ndarray
        The observation or set of observations.
    mu : scalar or np.ndarray
        The mean of the forecast normal distribution.
    sig : scalar or np.ndarray
        The standard deviation of the forecast distribution.
    grad : boolean
        If True the gradient of the CRPS w.r.t. mu and sig is returned along
        with the CRPS.

    Returns
    -------
    crps : scalar or np.ndarray
        The CRPS of each observation x relative to mu and sig.  The shape of
        the output array is determined by numpy broadcasting rules.
    crps_grad : np.ndarray (optional)
        If grad=True the gradient of the crps is returned as a numpy array
        [grad_wrt_mu, grad_wrt_sig].  The same broadcasting rules apply.
    """
    x = np.asarray(x)
    mu = np.asarray(mu)
    sig = np.asarray(sig)
    # standardized observation
    z = (x - mu) / sig
    # precompute the pieces shared by the score and its gradient
    density = _normpdf(z)
    cumulative = _normcdf(z)
    inv_sqrt_pi = 1. / np.sqrt(np.pi)
    # closed-form CRPS for a Gaussian forecast
    crps = sig * (z * (2 * cumulative - 1) + 2 * density - inv_sqrt_pi)
    if not grad:
        return crps
    # analytic gradient with respect to mu and sig
    dmu = 1 - 2 * cumulative
    dsig = 2 * density - inv_sqrt_pi
    return crps, np.array([dmu, dsig])
def _discover_bounds(cdf, tol=1e-7):
    """
    Uses scipy's general continuous distribution methods
    which compute the ppf from the cdf, then use the ppf
    to find the lower and upper limits of the distribution.
    """
    # Wrap the bare cdf callable in an rv_continuous subclass so scipy's
    # generic machinery can invert it numerically.
    # NOTE(review): scipy's generic ppf inverts the private ``_cdf`` hook;
    # overriding only the public ``cdf`` may not feed into ``ppf`` on all
    # scipy versions -- confirm, and consider overriding ``_cdf`` instead.
    class DistFromCDF(stats.distributions.rv_continuous):
        def cdf(self, x):
            return cdf(x)
    dist = DistFromCDF()
    # the ppf is the inverse cdf
    lower = dist.ppf(tol)        # x such that cdf(x) ~= tol
    upper = dist.ppf(1. - tol)   # x such that cdf(x) ~= 1 - tol
    return lower, upper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _crps_cdf_single(x, cdf_or_dist, xmin=None, xmax=None, tol=1e-6): """ See crps_cdf for docs. """
# TODO: this function is pretty slow. Look for clever ways to speed it up. # allow for directly passing in scipy.stats distribution objects. cdf = getattr(cdf_or_dist, 'cdf', cdf_or_dist) assert callable(cdf) # if bounds aren't given, discover them if xmin is None or xmax is None: # Note that infinite values for xmin and xmax are valid, but # it slows down the resulting quadrature significantly. xmin, xmax = _discover_bounds(cdf) # make sure the bounds haven't clipped the cdf. if (tol is not None) and (cdf(xmin) >= tol) or (cdf(xmax) <= (1. - tol)): raise ValueError('CDF does not meet tolerance requirements at %s ' 'extreme(s)! Consider using function defaults ' 'or using infinities at the bounds. ' % ('lower' if cdf(xmin) >= tol else 'upper')) # CRPS = int_-inf^inf (F(y) - H(x))**2 dy # = int_-inf^x F(y)**2 dy + int_x^inf (1 - F(y))**2 dy def lhs(y): # left hand side of CRPS integral return np.square(cdf(y)) # use quadrature to integrate the lhs lhs_int, lhs_tol = integrate.quad(lhs, xmin, x) # make sure the resulting CRPS will be with tolerance if (tol is not None) and (lhs_tol >= 0.5 * tol): raise ValueError('Lower integral did not evaluate to within tolerance! ' 'Tolerance achieved: %f , Value of integral: %f \n' 'Consider setting the lower bound to -np.inf.' % (lhs_tol, lhs_int)) def rhs(y): # right hand side of CRPS integral return np.square(1. - cdf(y)) rhs_int, rhs_tol = integrate.quad(rhs, x, xmax) # make sure the resulting CRPS will be with tolerance if (tol is not None) and (rhs_tol >= 0.5 * tol): raise ValueError('Upper integral did not evaluate to within tolerance! \n' 'Tolerance achieved: %f , Value of integral: %f \n' 'Consider setting the upper bound to np.inf or if ' 'you already have, set warn_level to `ignore`.' % (rhs_tol, rhs_int)) return lhs_int + rhs_int
def _crps_ensemble_vectorized(observations, forecasts, weights=1):
    """
    An alternative but simpler implementation of CRPS for testing purposes

    This implementation is based on the identity:

    .. math::
        CRPS(F, x) = E_F|X - x| - 1/2 * E_F|X - X'|

    where X and X' denote independent random variables drawn from the forecast
    distribution F, and E_F denotes the expectation value under F.

    Hence it has runtime O(n^2) instead of O(n log(n)) where n is the number
    of ensemble members.

    Reference
    ---------
    Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
        prediction, and estimation, 2005. University of Washington Department
        of Statistics Technical Report no. 463R.
        https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
    """
    observations = np.asarray(observations)
    forecasts = np.asarray(forecasts)
    weights = np.asarray(weights)
    if weights.ndim > 0:
        # mask out the weights of NaN (missing) ensemble members, then
        # normalize so the weights along the realization axis average to one
        weights = np.where(~np.isnan(forecasts), weights, np.nan)
        weights = weights / np.nanmean(weights, axis=-1, keepdims=True)

    if observations.ndim == forecasts.ndim - 1:
        # sum over the last axis
        assert observations.shape == forecasts.shape[:-1]
        observations = observations[..., np.newaxis]
        with suppress_warnings('Mean of empty slice'):
            # first term of the identity: E_F|X - x|
            score = np.nanmean(weights * abs(forecasts - observations), -1)
        # insert new axes along last and second to last forecast dimensions so
        # forecasts_diff expands with the array broadcasting
        forecasts_diff = (np.expand_dims(forecasts, -1) -
                          np.expand_dims(forecasts, -2))
        weights_matrix = (np.expand_dims(weights, -1) *
                          np.expand_dims(weights, -2))
        with suppress_warnings('Mean of empty slice'):
            # second term: -1/2 * E_F|X - X'| via the full pairwise matrix
            score += -0.5 * np.nanmean(weights_matrix * abs(forecasts_diff),
                                       axis=(-2, -1))
        return score
    elif observations.ndim == forecasts.ndim:
        # there is no 'realization' axis to sum over (this is a deterministic
        # forecast)
        return abs(observations - forecasts)
    # NOTE(review): any other dimension combination falls through and returns
    # None silently -- confirm callers never reach this; an explicit
    # ValueError might be safer.
def clear(self):
    """Deletes the history"""
    # fresh, fully preallocated buffer for future points
    buffer_shape = (self.prealloc, self.dim)
    self._points = _np.empty(buffer_shape)
    # forget every recorded run
    self._slice_for_run_nr = []
    # the entire preallocated capacity is available again
    self.memleft = self.prealloc
def dispatch(self, request, *args, **kwargs):
    """Dispatch all HTTP methods to the proxy."""
    # stash the view arguments and wrap the raw request so downstream
    # code sees a uniform API
    self.args = args
    self.kwargs = kwargs
    self.request = DownstreamRequest(request)

    # fail fast on misconfiguration before building the middleware chain
    self._verify_config()
    self.middleware = MiddlewareSet(self.proxy_middleware)

    return self.proxy()
def proxy(self):
    """Retrieve the upstream content and build an HttpResponse."""
    # drop blacklisted request headers before forwarding upstream
    headers = self.request.headers.filter(self.ignored_request_headers)
    qs = self.request.query_string if self.pass_query_string else ''

    # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005
    if (self.request.META.get('CONTENT_LENGTH', None) == '' and
            get_django_version() == '1.10'):
        del self.request.META['CONTENT_LENGTH']

    # let middleware mutate the outgoing request kwargs before sending
    request_kwargs = self.middleware.process_request(
        self, self.request,
        method=self.request.method,
        url=self.proxy_url,
        headers=headers,
        data=self.request.body,
        params=qs,
        allow_redirects=False,
        verify=self.verify_ssl,
        cert=self.cert,
        timeout=self.timeout)

    result = request(**request_kwargs)

    response = HttpResponse(result.content, status=result.status_code)

    # Attach forwardable headers to response
    forwardable_headers = HeaderDict(result.headers).filter(
        self.ignored_upstream_headers)
    for header, value in iteritems(forwardable_headers):
        response[header] = value

    # middleware gets the last word on the downstream response
    return self.middleware.process_response(
        self, self.request, result, response)
def shell_out(cmd, stderr=STDOUT, cwd=None):
    """Friendlier version of check_output."""
    if cwd is None:
        from os import getcwd
        # TODO do I need to normalize this on Windows
        cwd = getcwd()
    raw = check_output(cmd, cwd=cwd, stderr=stderr,
                       universal_newlines=True)
    return _clean_output(raw)
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None):
    """Same as shell_out but doesn't raise if the cmd exits badly."""
    try:
        return shell_out(cmd, stderr=stderr, cwd=cwd)
    except CalledProcessError as exc:
        # a bad exit code still produced output; clean it and hand it back
        return _clean_output(exc.output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def from_dir(cwd): "Context manager to ensure in the cwd directory." import os curdir = os.getcwd() try: os.chdir(cwd) yield finally: os.chdir(curdir)
def text_filter(regex_base, value):
    """
    A text-filter helper, used in ``markdown_thumbnails``-filter and
    ``html_thumbnails``-filter. It can be used to build custom
    thumbnail text-filters.

    :param regex_base: A string with a regex that contains ``%(caption)s``
        and ``%(image)s`` where the caption and image should be.
    :param value: String of text in which the source URLs can be found.
    :return: A string ready to be put in a template.
    """
    from thumbnails import get_thumbnail
    # FIX: raw string literals -- the originals relied on invalid escape
    # sequences such as '\.', which Python only keeps literal by accident
    # and now flags with a SyntaxWarning.  The resulting regex values are
    # byte-identical.
    regex = regex_base % {
        'caption': r'[a-zA-Z0-9\.\,:;/_ \(\)\-\!\?\"]+',
        'image': r'[a-zA-Z0-9\.:/_\-\% ]+'
    }
    images = re.findall(regex, value)

    for i in images:
        # each match is a (caption, image-url) tuple; thumbnail the url
        image_url = i[1]
        image = get_thumbnail(
            image_url,
            **settings.THUMBNAIL_FILTER_OPTIONS
        )
        # swap the original url for the generated thumbnail url
        value = value.replace(i[1], image.url)

    return value
def eat(self, argv=None):
    """
    Eat the guacamole.

    :param argv:
        Command line arguments or None. None means that sys.argv is used

    :return:
        Whatever is returned by the first ingredient that agrees to perform
        the command dispatch.

    The eat method is called to run the application, as if it was invoked
    from command line directly.
    """
    # The setup phase, here KeyboardInterrupt is a silent sign to exit the
    # application. Any error that happens here will result in a raw
    # backtrace being printed to the user.
    try:
        self.context.argv = argv
        self._added()
        self._build_early_parser()
        self._preparse()
        self._early_init()
        self._build_parser()
        self._parse()
        self._late_init()
    except KeyboardInterrupt:
        # interrupted during setup: shut ingredients down and exit quietly
        self._shutdown()
        return
    # The execution phase. Here we differentiate SystemExit from all other
    # exceptions. SystemExit is just re-raised as that's what any piece of
    # code can raise to ask to exit the currently running application. All
    # other exceptions are recorded in the context and the failure-path of
    # the dispatch is followed. In other case, when there are no
    # exceptions, the success-path is followed. In both cases, ingredients
    # are shut down.
    try:
        return self._dispatch()
    except SystemExit:
        raise
    except BaseException:
        # record the failure in the context for ingredients to inspect
        (self.context.exc_type, self.context.exc_value,
         self.context.traceback) = sys.exc_info()
        self._dispatch_failed()
    else:
        self._dispatch_succeeded()
    finally:
        self._shutdown()
def clear(self):
    """Delete the history."""
    # wipe the local sampler's history first
    self.sampler.clear()
    # gather the (now empty) per-process samples on the master process
    gather = self._comm.gather
    self.samples_list = gather(self.sampler.samples, root=0)
    # importance samplers also carry weights; plain MCMC samplers do not
    if hasattr(self.sampler, 'weights'):
        self.weights_list = gather(self.sampler.weights, root=0)
    else:
        self.weights_list = None
def path(self, path):
    """
    Creates a path based on the location attribute of the backend and the
    path argument of the function. If the path argument is an absolute
    path the path is returned unchanged.

    :param path: The path that should be joined with the backends location.
    """
    # absolute paths bypass the backend location entirely
    return path if os.path.isabs(path) else os.path.join(self.location, path)
def run(self, eps=1e-4, kill=True, max_steps=50, verbose=False):
    r"""Perform the clustering on the input components updating the initial
    guess. The result is available in the member ``self.g``.

    Return the number of iterations at convergence, or None.

    :param eps:
        If relative change of distance between current and last step falls
        below ``eps``, declare convergence:

        .. math::
            0 < \frac{d^t - d^{t-1}}{d^t} < \varepsilon

    :param kill:
        If a component is assigned zero weight (no input components), it is
        removed.

    :param max_steps:
        Perform a maximum number of update steps.

    :param verbose:
        Output information on progress of algorithm.
    """
    # seed both distances with the largest float so the first comparison
    # can never report spurious convergence
    old_distance = np.finfo(np.float64).max
    new_distance = np.finfo(np.float64).max

    if verbose:
        print('Starting hierarchical clustering with %d components.' % len(self.g.components))
    converged = False
    for step in range(1, max_steps + 1):
        # one full update cycle: prune, reassign, refit
        self._cleanup(kill, verbose)
        self._regroup()
        self._refit()

        new_distance = self._distance()
        # NOTE(review): the message says "non-positive" but the check only
        # rejects negative values, and %d truncates the float -- consider
        # rewording and using %g.
        assert new_distance >= 0, 'Found non-positive distance %d' % new_distance

        if verbose:
            print('Distance in step %d: %g' % (step, new_distance))
        if new_distance == old_distance:
            # exact fixed point reached
            converged = True
            if verbose:
                print('Exact minimum found after %d steps' % step)
            break

        rel_change = (old_distance - new_distance) / old_distance
        # the distance must be monotonically non-increasing (up to rounding)
        assert not (rel_change < -1e-13), 'distance increased'

        if rel_change < eps and not converged and step > 0:
            converged = True
            if verbose and new_distance != old_distance:
                print('Close enough to local minimum after %d steps' % step)
            break

        # save distance for comparison in next step
        old_distance = new_distance

    # final prune after the loop so dead components don't linger
    self._cleanup(kill, verbose)
    if verbose:
        print('%d components remain.' % len(self.g.components))

    # implicit None is returned when max_steps was exhausted
    if converged:
        return step
def eventdata(payload):
    """
    Parse a Supervisor event.
    """
    # the first line carries the event headers, the remainder is the body
    header_line, body = payload.split('\n', 1)
    return get_headers(header_line), body
def supervisor_events(stdin, stdout):
    """
    An event stream from Supervisor.
    """
    while True:
        # tell supervisor we are ready for the next event
        stdout.write('READY\n')
        stdout.flush()

        # the header line announces how many payload bytes follow
        header_line = stdin.readline()
        headers = get_headers(header_line)
        payload = stdin.read(int(headers['len']))

        yield eventdata(payload)

        # acknowledge the event so supervisor sends the next one
        stdout.write('RESULT 2\nOK')
        stdout.flush()
def main():
    """
    Main application loop.
    """
    env = os.environ

    try:
        host = env['SYSLOG_SERVER']
        port = int(env['SYSLOG_PORT'])
        # anything other than 'udp' selects a TCP socket
        socktype = socket.SOCK_DGRAM if env['SYSLOG_PROTO'] == 'udp' \
            else socket.SOCK_STREAM
    except KeyError:
        sys.exit("SYSLOG_SERVER, SYSLOG_PORT and SYSLOG_PROTO are required.")

    handler = SysLogHandler(
        address=(host, port),
        socktype=socktype,
    )
    handler.setFormatter(PalletFormatter())

    # forward every supervisor event to syslog as an INFO record
    for event_headers, event_data in supervisor_events(sys.stdin, sys.stdout):
        event = logging.LogRecord(
            name=event_headers['processname'],
            level=logging.INFO,
            pathname=None,
            lineno=0,
            msg=event_data,
            args=(),
            exc_info=None,
        )
        # report the supervised process's pid, not our own
        event.process = int(event_headers['pid'])
        handler.handle(event)
def formatTime(self, record, datefmt=None):
    """
    Format time, including milliseconds.
    """
    base = super(PalletFormatter, self).formatTime(record, datefmt=datefmt)
    # append millisecond precision and a literal Zulu (UTC) marker
    return '%s.%03dZ' % (base, record.msecs)
def get_diff(original, fixed, file_name,
             original_label='original', fixed_label='fixed'):
    """Return text of unified diff between original and fixed.

    Parameters
    ----------
    original, fixed : str
        Full text of the two versions to compare.
    file_name : str
        File name joined onto each label to form the diff headers.
    original_label, fixed_label : str
        Directory-style prefixes for the two sides of the diff.

    Returns
    -------
    str
        The unified diff; the empty string when the inputs are identical.
    """
    original, fixed = original.splitlines(True), fixed.splitlines(True)
    newline = '\n'
    from difflib import unified_diff
    diff = unified_diff(original, fixed,
                        os.path.join(original_label, file_name),
                        os.path.join(fixed_label, file_name),
                        lineterm=newline)
    # PERF FIX: accumulate into a list and join once; repeated ``text += line``
    # is quadratic in the size of the diff.
    parts = []
    for line in diff:
        parts.append(line)
        # Work around missing newline (http://bugs.python.org/issue2142).
        if not line.endswith(newline):
            parts.append(newline + r'\ No newline at end of file' + newline)
    return ''.join(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run(self, N=1): '''Run the chain and store the history of visited points into the member variable ``self.samples``. Returns the number of accepted points during the run. .. seealso:: :py:class:`pypmc.tools.History` :param N: An int which defines the number of steps to run the chain. ''' if N == 0: return 0 # set the accept function if self.proposal.symmetric: get_log_rho = self._get_log_rho_metropolis else: get_log_rho = self._get_log_rho_metropolis_hastings # allocate an empty numpy array to store the run if self.target_values is not None: this_target_values = self.target_values.append(N) this_run = self.samples.append(N) accept_count = 0 for i_N in range(N): # propose new point proposed_point = self.proposal.propose(self.current_point, self.rng) proposed_eval = self.target(proposed_point) # log_rho := log(probability to accept point), where log_rho > 0 is meant to imply rho = 1 log_rho = get_log_rho(proposed_point, proposed_eval) # check for NaN if _np.isnan(log_rho): raise ValueError('encountered NaN') # accept if rho = 1 if log_rho >=0: accept_count += 1 this_run[i_N] = proposed_point self.current_point = proposed_point self.current_target_eval = proposed_eval # accept with probability rho elif log_rho >= _np.log(self.rng.rand()): accept_count += 1 this_run[i_N] = proposed_point self.current_point = proposed_point self.current_target_eval = proposed_eval # reject if not accepted else: this_run[i_N] = self.current_point #do not need to update self.current #self.current = self.current # save target value if desired if self.target_values is not None: this_target_values[i_N] = self.current_target_eval # ---------------------- end for -------------------------------- return accept_count
def set_adapt_params(self, *args, **kwargs):
    r"""Set the variables that control covariance adaptation in :meth:`.adapt`.

    Keyword arguments only; positional arguments raise ``TypeError``.

    The adaptation forces the chain's acceptance rate :math:`\alpha` into
    the window [``force_acceptance_min``, ``force_acceptance_max``] by
    (1) combining the previous covariance estimate :math:`\Sigma^{t-1}`
    with the latest sample covariance :math:`S^t` using the damped weight
    :math:`a^t = 1/t^\lambda`, i.e.
    :math:`\Sigma^t = (1-a^t)\Sigma^{t-1} + a^t S^t`, and
    (2) rescaling the proposal covariance to :math:`c\,\Sigma^t`, where
    ``c`` grows or shrinks by ``covar_scale_multiplier`` whenever the
    acceptance rate leaves the window.

    :param force_acceptance_max: Float in (0, 1]; upper limit of the
        acceptance-rate window. Default: 0.35
    :param force_acceptance_min: Float in [0, 1); lower limit of the
        acceptance-rate window. Default: 0.15
    :param damping: Float in [0, 1]; exponent :math:`\lambda` above,
        needed to assure convergence (see [HST01]_). Default: 0.5
    :param covar_scale_factor: Float ``c``; for a Gaussian proposal and
        target the optimum is :math:`2.38^2/d` in dimension :math:`d`.
        Default: :math:`2.38^2/d`
    :param covar_scale_multiplier: Float :math:`\beta`; ``c`` is multiplied
        by it above the window and divided by it below. Default: 1.5
    :param covar_scale_factor_max: Float; ``covar_scale_factor`` is kept
        below this value. Default: 100
    :param covar_scale_factor_min: Float; ``covar_scale_factor`` is kept
        above this value. Default: 1e-4
    """
    if args:
        raise TypeError('keyword args only; try set_adapt_parameters(keyword = value)')

    # pop each recognized keyword, keeping the current value as the default
    for name in ('covar_scale_multiplier', 'covar_scale_factor',
                 'covar_scale_factor_max', 'covar_scale_factor_min',
                 'force_acceptance_max', 'force_acceptance_min', 'damping'):
        setattr(self, name, kwargs.pop(name, getattr(self, name)))

    # anything left over was not a recognized parameter
    if not kwargs == {}:
        raise TypeError('unexpected keyword(s): ' + str(kwargs.keys()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _update_scale_factor(self, accept_rate): '''Private function. Updates the covariance scaling factor ``covar_scale_factor`` according to its limits ''' if accept_rate > self.force_acceptance_max and self.covar_scale_factor < self.covar_scale_factor_max: self.covar_scale_factor *= self.covar_scale_multiplier elif accept_rate < self.force_acceptance_min and self.covar_scale_factor > self.covar_scale_factor_min: self.covar_scale_factor /= self.covar_scale_multiplier
def create(self, original, size, crop, options=None):
    """
    Create a thumbnail: load the source image, scale it, crop it and
    finally normalize its color mode.

    :param original: source image reference accepted by ``engine_load_image``
    :param size: requested (width, height)
    :param crop: crop specification, or None for no cropping
    :param options: dict of engine options; defaults evaluated when None
    :return: the processed image object
    """
    if options is None:
        options = self.evaluate_options()
    image = self.engine_load_image(original)
    image = self.scale(image, size, crop, options)
    # translate the crop spec into pixel offsets for the scaled image
    resolved_crop = self.parse_crop(crop, self.get_image_size(image), size)
    image = self.crop(image, size, resolved_crop, options)
    return self.colormode(image, options)
def scale(self, image, size, crop, options):
    """
    Wrapper around ``engine_scale``: only rescales when the image must
    shrink, or when the ``scale_up`` option explicitly allows enlarging.

    :param image:
    :param size:
    :param crop:
    :param options:
    :return:
    """
    current_size = self.get_image_size(image)
    factor = self._calculate_scaling_factor(current_size, size, crop is not None)

    # nothing to do: the image would only grow and scale_up is disabled
    if factor >= 1 and not options['scale_up']:
        return image

    new_width = int(current_size[0] * factor)
    new_height = int(current_size[1] * factor)
    return self.engine_scale(image, new_width, new_height)
def crop(self, image, size, crop, options):
    """
    Wrapper for ``engine_crop``; a falsy crop (e.g. None) is a no-op.

    :param image:
    :param size:
    :param crop:
    :param options:
    :return:
    """
    if crop:
        return self.engine_crop(image, size, crop, options)
    return image
def colormode(self, image, options):
    """
    Wrapper for ``engine_colormode`` using the 'colormode' option.

    :param image:
    :param options:
    :return:
    """
    return self.engine_colormode(image, options['colormode'])
def parse_size(size):
    """
    Parses size string into a tuple.

    :param size: String on the form '100', 'x100 or '100x200'
    :return: Tuple of two values (int or None) for width and height
    :rtype: tuple
    """
    # leading 'x' means height only
    if size.startswith('x'):
        return None, int(size.replace('x', ''))
    # 'WxH' gives both dimensions
    if 'x' in size:
        parts = size.split('x')
        return int(parts[0]), int(parts[1])
    # bare number means width only
    return int(size), None
def parse_crop(self, crop, original_size, size):
    """
    Parses crop into a tuple usable by the crop function.

    :param crop: String with the crop settings.
    :param original_size: A tuple of size of the image that should be cropped.
    :param size: A tuple of the wanted size.

    :return: Tuple of two integers with crop settings
    :rtype: tuple
    """
    if crop is None:
        return None

    crop = crop.split(' ')
    if len(crop) == 1:
        crop = crop[0]
    # NOTE(review): when the string contains a space, ``crop`` remains a
    # list here, and the dict-membership tests below would raise TypeError
    # (unhashable list) -- confirm multi-part crop strings are handled (or
    # rejected) upstream.

    # percentage offsets default to a centered crop
    x_crop = 50
    y_crop = 50
    if crop in CROP_ALIASES['x']:
        x_crop = CROP_ALIASES['x'][crop]
    elif crop in CROP_ALIASES['y']:
        y_crop = CROP_ALIASES['y'][crop]

    # convert the percentages into pixel offsets
    x_offset = self.calculate_offset(x_crop, original_size[0], size[0])
    y_offset = self.calculate_offset(y_crop, original_size[1], size[1])
    return int(x_offset), int(y_offset)
def calculate_offset(percent, original_length, length):
    """
    Calculates crop offset based on percentage.

    :param percent: A percentage representing the size of the offset.
    :param original_length: The length the distance that should be cropped.
    :param length: The desired length.

    :return: The offset in pixels
    :rtype: int
    """
    anchor = percent * original_length / 100.0
    # keep the anchor inside the image so the crop window never overflows,
    # then center the window on it and clamp at the left/top edge
    clamped = min(anchor, original_length - length / 2)
    return int(max(0, clamped - length / 2))
def main():
    """Main function for pyfttt command line tool"""
    args = parse_arguments()

    # the secret key is mandatory; everything else has defaults
    if args.key is None:
        print("Error: Must provide IFTTT secret key.")
        sys.exit(1)

    # each failure mode maps to a distinct exit code (1-8)
    try:
        res = pyfttt.send_event(api_key=args.key, event=args.event,
                                value1=args.value1, value2=args.value2,
                                value3=args.value3)
    except requests.exceptions.ConnectionError:
        print("Error: Could not connect to IFTTT")
        sys.exit(2)
    except requests.exceptions.HTTPError:
        print("Error: Received invalid response")
        sys.exit(3)
    except requests.exceptions.Timeout:
        print("Error: Request timed out")
        sys.exit(4)
    except requests.exceptions.TooManyRedirects:
        print("Error: Too many redirects")
        sys.exit(5)
    except requests.exceptions.RequestException as reqe:
        # catch-all for any other requests failure
        print("Error: {e}".format(e=reqe))
        sys.exit(6)

    # a non-OK status carries a JSON error list from the IFTTT API
    if res.status_code != requests.codes.ok:
        try:
            j = res.json()
        except ValueError:
            print('Error: Could not parse server response. Event not sent')
            sys.exit(7)
        for err in j['errors']:
            print('Error: {}'.format(err['message']))
        sys.exit(8)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plot_responsibility(data, responsibility, cmap='nipy_spectral'): '''Classify the 2D ``data`` according to the ``responsibility`` and make a scatter plot of each data point with the color of the component it is most likely from. The ``responsibility`` is normalized internally such that each row sums to unity. :param data: matrix-like; one row = one 2D sample :param responsibility: matrix-like; one row = probabilities that sample n is from 1st, 2nd, ... component. The number of rows has to agree with ``data`` :param cmap: colormap; defines how component indices are mapped to the color of the data points ''' import numpy as np from matplotlib import pyplot as plt from matplotlib.cm import get_cmap data = np.asarray(data) responsibility = np.asarray(responsibility) assert data.ndim == 2 assert responsibility.ndim == 2 D = data.shape[1] N = data.shape[0] K = responsibility.shape[1] assert D == 2 assert N == responsibility.shape[0] # normalize responsibility so each row sums to one inv_row_sum = 1.0 / np.einsum('nk->n', responsibility) responsibility = np.einsum('n,nk->nk', inv_row_sum, responsibility) # index of the most likely component for each sample indicators = np.argmax(responsibility, axis=1) # same color range as in plot_mixture if K > 1: point_colors = indicators / (K - 1) * _max_color else: point_colors = np.zeros(N) plt.scatter(data.T[0], data.T[1], c=point_colors, cmap=cmap)
def import_string(dotted_path):
    """
    Import a dotted module path.

    Returns the attribute/class designated by the last name in the path.
    Raises ImportError if the import fails.

    :param dotted_path: e.g. ``'package.module.ClassName'``
    :return: the attribute named by the final path component
    :raises ImportError: when the path has no dot, the module cannot be
        imported, or the attribute does not exist on the module
    """
    from importlib import import_module

    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        # no dot at all: cannot split into module + attribute
        raise ImportError('%s doesn\'t look like a valid path' % dotted_path)

    # importlib.import_module is the documented, idiomatic replacement for
    # the low-level __import__(..., fromlist=...) trick.
    module = import_module(module_path)

    try:
        return getattr(module, class_name)
    except AttributeError:
        msg = 'Module "%s" does not define a "%s" attribute/class' % (
            dotted_path, class_name)
        raise ImportError(msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def ball(center, radius=1., bdy=True):
    '''Return the indicator function of a ball.

    :param center:

        A vector-like numpy array, defining the center of the ball.
        len(center) fixes the dimension.

    :param radius:

        Float or int, the radius of the ball

    :param bdy:

        Bool. When ``x`` is at the ball's boundary then
        ``ball_indicator(x)`` returns ``True`` if and only if ``bdy=True``.

    '''
    import operator

    center = _np.array(center)  # copy input parameter
    dim = len(center)

    # Boundary-inclusive (<=) or -exclusive (<) comparison, chosen once
    # up front instead of duplicating the whole closure per case.
    compare = operator.le if bdy else operator.lt

    def ball_indicator(x):
        if len(x) != dim:
            raise ValueError('input has wrong dimension (%i instead of %i)' % (len(x), dim))
        # bool() converts the numpy scalar back to a plain Python bool,
        # matching the literal True/False the original closures returned.
        return bool(compare(_np.linalg.norm(x - center), radius))

    # write docstring for ball_indicator
    ball_indicator.__doc__  = 'automatically generated ball indicator function:'
    ball_indicator.__doc__ += '\ncenter = ' + repr(center)[6:-1]
    ball_indicator.__doc__ += '\nradius = ' + str(radius)
    ball_indicator.__doc__ += '\nbdy = ' + str(bdy)

    return ball_indicator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def hyperrectangle(lower, upper, bdy=True):
    '''Return the indicator function of a hyperrectangle.

    :param lower:

        Vector-like numpy array, defining the lower boundary of the
        hyperrectangle. len(lower) fixes the dimension.

    :param upper:

        Vector-like numpy array, defining the upper boundary of the
        hyperrectangle.

    :param bdy:

        Bool. When ``x`` is at the hyperrectangle's boundary then
        ``hr_indicator(x)`` returns ``True`` if and only if ``bdy=True``.

    '''
    import operator

    # copy input
    lower = _np.array(lower)
    upper = _np.array(upper)
    dim = len(lower)

    if (upper <= lower).any():
        raise ValueError('invalid input; found upper <= lower')

    # Boundary-inclusive (<=) or -exclusive (<) comparison, chosen once
    # up front instead of duplicating the whole closure per case.
    compare = operator.le if bdy else operator.lt

    def hr_indicator(x):
        if len(x) != dim:
            raise ValueError('input has wrong dimension (%i instead of %i)' % (len(x), dim))
        # Elementwise compare against both boundaries; bool() converts the
        # numpy scalar back to a plain Python bool.
        return bool(compare(lower, x).all() and compare(x, upper).all())

    # write docstring for hr_indicator
    # (original comment said "ball_indicator" -- copy/paste slip)
    hr_indicator.__doc__  = 'automatically generated hyperrectangle indicator function:'
    hr_indicator.__doc__ += '\nlower = ' + repr(lower)[6:-1]
    hr_indicator.__doc__ += '\nupper = ' + repr(upper)[6:-1]
    hr_indicator.__doc__ += '\nbdy = ' + str(bdy)

    return hr_indicator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_thumbnail(original, size, **options): """ Creates or gets an already created thumbnail for the given image with the given size and options. :param original: File-path, url or base64-encoded string of the image that you want an thumbnail. :param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or ``x200``. :param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``. :param force: If set to ``True`` the thumbnail will be created even if it exists before. :param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while saving the thumbnail. :param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled up if necessary. :param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails. Supports all values supported by pillow. In other engines there is a best effort translation from pillow modes to the modes supported by the current engine. :param format: Overrides the format the thumbnail will be saved in. This will override both the detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``. :return: A Thumbnail object """
    engine = get_engine()
    cache = get_cache_backend()
    original = SourceFile(original)
    crop = options.get('crop', None)
    options = engine.evaluate_options(options)
    # Deterministic name derived from source + size + crop, used as cache key.
    thumbnail_name = generate_filename(original, size, crop)

    # Dummy mode never touches real files; return a stub straight away.
    if settings.THUMBNAIL_DUMMY:
        engine = DummyEngine()
        return engine.get_thumbnail(
            thumbnail_name, engine.parse_size(size), crop, options)

    cached = cache.get(thumbnail_name)

    force = options is not None and 'force' in options and options['force']
    if not force and cached:
        # Cache hit and no forced regeneration requested.
        return cached

    thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
    if force or not thumbnail.exists:
        size = engine.parse_size(size)
        thumbnail.image = engine.get_thumbnail(original, size, crop, options)
        thumbnail.save(options)

        # Also render the configured alternative (e.g. high-DPI) resolutions.
        for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
            resolution_size = engine.calculate_alternative_resolution_size(
                resolution, size)
            image = engine.get_thumbnail(original, resolution_size, crop, options)
            thumbnail.save_alternative_resolution(resolution, image, options)

    cache.set(thumbnail)
    return thumbnail
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def argsort_indices(a, axis=-1):
        """Like argsort, but returns an index suitable for sorting the
        original array even if that array is multidimensional
        """
    a = np.asarray(a)
    # Build an open-mesh index array for every axis, then replace the target
    # axis with the argsort order, so that a[result] yields the sorted array.
    ind = list(np.ix_(*[np.arange(d) for d in a.shape]))
    ind[axis] = a.argsort(axis)
    return tuple(ind)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_event(api_key, event, value1=None, value2=None, value3=None): """Send an event to the IFTTT maker channel Parameters: api_key : string Your IFTTT API key event : string The name of the IFTTT event to trigger value1 : Optional: Extra data sent with the event (default: None) value2 : Optional: Extra data sent with the event (default: None) value3 : Optional: Extra data sent with the event (default: None) """
    # IFTTT maker/webhooks trigger endpoint for this event/key pair.
    url = 'https://maker.ifttt.com/trigger/{e}/with/key/{k}/'.format(e=event, k=api_key)
    # IFTTT accepts at most three free-form values per trigger.
    payload = {'value1': value1, 'value2': value2, 'value3': value3}
    # Returns the requests.Response so callers can inspect the status.
    return requests.post(url, data=payload)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_localized_docstring(obj, domain): """Get a cleaned-up, localized copy of docstring of this class."""
    # Only classes that actually carry a docstring can be localized;
    # otherwise fall through and implicitly return None.
    if obj.__class__.__doc__ is not None:
        return inspect.cleandoc(
            # dgettext looks the text up in the given gettext translation domain
            gettext.dgettext(domain, obj.__class__.__doc__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_cmd_help(self): """ Get the single-line help of this command. :returns: ``self.help``, if defined :returns: The first line of the docstring, without the trailing dot, if present. :returns: None, otherwise """
    # An explicit ``help`` attribute wins, if the command defines one.
    try:
        return self.help
    except AttributeError:
        pass
    # Fall back to the first docstring line, lower-cased, trailing dot stripped.
    try:
        return get_localized_docstring(
            self, self.get_gettext_domain()
        ).splitlines()[0].rstrip('.').lower()
    except (AttributeError, IndexError, ValueError):
        # No usable docstring -- implicitly return None.
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_cmd_description(self): """ Get the leading, multi-line description of this command. :returns: ``self.description``, if defined :returns: A substring of the class docstring between the first line (which is discarded) and the string ``@EPILOG@``, if present, or the end of the docstring, if any :returns: None, otherwise The description string will be displayed after the usage string but before any of the detailed argument descriptions. Please consider following good practice by keeping the description line short enough not to require scrolling but useful enough to provide additional information that cannot be inferred from the name of the command or other arguments. Stating the purpose of the command is highly recommended. """
    # An explicit ``description`` attribute wins, if defined.
    try:
        return self.description
    except AttributeError:
        pass
    # Otherwise use the docstring minus the first (summary) line,
    # truncated at the @EPILOG@ marker when present.
    try:
        return '\n'.join(
            get_localized_docstring(
                self, self.get_gettext_domain()
            ).splitlines()[1:]
        ).split('@EPILOG@', 1)[0].strip()
    except (AttributeError, IndexError, ValueError):
        # No usable docstring -- implicitly return None.
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_cmd_epilog(self): """ Get the trailing, multi-line description of this command. :returns: ``self.epilog``, if defined :returns: A substring of the class docstring between the string ``@EPILOG`` and the end of the docstring, if defined :returns: None, otherwise The epilog is similar to the description string but it is instead printed after the section containing detailed descriptions of all of the command line arguments. Please consider following good practice by providing additional details about how the command can be used, perhaps an example or a reference to means of finding additional documentation. """
try: return self.source.epilog except AttributeError: pass try: return '\n'.join( get_localized_docstring( self, self.get_gettext_domain() ).splitlines()[1:] ).split('@EPILOG@', 1)[1].strip() except (AttributeError, IndexError, ValueError): pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(self, argv=None, exit=True): """ Shortcut for running a command. See :meth:`guacamole.recipes.Recipe.main()` for details. """
    # Delegate the whole lifecycle (parse, dispatch, exit) to the recipe.
    return CommandRecipe(self).main(argv, exit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ingredients(self): """Get a list of ingredients for guacamole."""
    # NOTE: the AutocompleteIngredient is listed before the ParserIngredient
    # on purpose -- completion must run before the parser consumes argv
    # (see AutocompleteIngredient.parse).
    return [
        cmdtree.CommandTreeBuilder(self.command),
        cmdtree.CommandTreeDispatcher(),
        argparse.AutocompleteIngredient(),
        argparse.ParserIngredient(),
        crash.VerboseCrashHandler(),
        ansi.ANSIIngredient(),
        log.Logging(),
    ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_arguments(self, parser): """ Guacamole method used by the argparse ingredient. :param parser: Argument parser (from :mod:`argparse`) specific to this command. """
    # Two mandatory positional integer arguments.
    parser.add_argument('x', type=int, help='the first value')
    parser.add_argument('y', type=int, help='the second value')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invoked(self, ctx): """Method called when the command is invoked."""
    # The whole demo is pointless without ANSI support; bail out early.
    if not ctx.ansi.is_enabled:
        print("You need color support to use this demo")
    else:
        # Clear the screen, then walk through each ANSI feature demo.
        print(ctx.ansi.cmd('erase_display'))
        self._demo_fg_color(ctx)
        self._demo_bg_color(ctx)
        self._demo_bg_indexed(ctx)
        self._demo_rgb(ctx)
        self._demo_style(ctx)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, thumbnail_name): """ Wrapper for ``_get``, which converts the thumbnail_name to String if necessary before calling ``_get`` :rtype: Thumbnail """
    # Thumbnail names may arrive as path segments; join them into one string.
    if isinstance(thumbnail_name, list):
        thumbnail_name = '/'.join(thumbnail_name)
    return self._get(thumbnail_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(dsn, parse_class=ParseResult, **defaults): """ parse a dsn to parts similar to parseurl :param dsn: string, the dsn to parse :param parse_class: ParseResult, the class that will be used to hold parsed values :param **defaults: dict, any values you want to have defaults for if they aren't in the dsn :returns: ParseResult() tuple-like instance """
    # parse_class does all the parsing work in its constructor.
    r = parse_class(dsn, **defaults)
    return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setdefault(self, key, val): """ set a default value for key this is different than dict's setdefault because it will set default either if the key doesn't exist, or if the value at the key evaluates to False, so an empty string or a None value will also be updated :param key: string, the attribute to update :param val: mixed, the attributes new value if key has a current value that evaluates to False """
if not getattr(self, key, None): setattr(self, key, val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geturl(self): """return the dsn back into url form"""
    # Reassemble the six urlparse components back into a URL string.
    return urlparse.urlunparse((
        self.scheme,
        self.netloc,
        self.path,
        self.params,
        self.query_str,
        self.fragment,
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preparse(self, context): """ Parse a portion of command line arguments with the early parser. This method relies on ``context.argv`` and ``context.early_parser`` and produces ``context.early_args``. The ``context.early_args`` object is the return value from argparse. It is the dict/object like namespace object. """
context.early_args, unused = ( context.early_parser.parse_known_args(context.argv))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_parser(self, context): """ Create the final argument parser. This method creates the non-early (full) argparse argument parser. Unlike the early counterpart it is expected to have knowledge of the full command tree. This method relies on ``context.cmd_tree`` and produces ``context.parser``. Other ingredients can interact with the parser up until :meth:`parse()` is called. """
context.parser, context.max_level = self._create_parser(context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, context): """ Optionally trigger argument completion in the invoking shell. This method is called to see if bash argument completion is requested and to honor the request, if needed. This causes the process to exit (early) without giving other ingredients a chance to initialize or shut down. Due to the way argcomple works, no other ingredient can print() anything to stdout prior to this point. """
    # argcomplete is an optional dependency; silently do nothing without it.
    try:
        import argcomplete
    except ImportError:
        return
    try:
        parser = context.parser
    except AttributeError:
        raise RecipeError(
            """
            The context doesn't have the parser attribute.

            The auto-complete ingredient depends on having a parser object
            to generate completion data for she shell. In a typical
            application this requires that the AutocompleteIngredient and
            ParserIngredient are present and that the auto-complete
            ingredient precedes the parser.
            """)
    else:
        # NOTE: this may exit the process early when shell completion is
        # actually being requested by the invoking shell.
        argcomplete.autocomplete(parser)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ansi_cmd(cmd, *args): """Get ANSI command code by name."""
    # Command codes live as ``cmd_<name>`` attributes on the ANSI object.
    try:
        obj = getattr(ANSI, str('cmd_{}'.format(cmd)))
    except AttributeError:
        raise ValueError(
            "incorrect command: {!r}".format(cmd))
    if isinstance(obj, type("")):
        # Plain string escape sequence -- return as-is.
        return obj
    else:
        # Parametrized command -- call it with the extra arguments.
        return obj(*args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_visible_color(color): """Get the visible counter-color."""
    if isinstance(color, (str, type(""))):
        # Named color: look up the pre-computed counter-color.
        try:
            return getattr(_Visible, str('{}'.format(color)))
        except AttributeError:
            raise ValueError("incorrect color: {!r}".format(color))
    elif isinstance(color, tuple):
        # RGB triplet: flip the high bit of each channel.
        return (0x80 ^ color[0], 0x80 ^ color[1], 0x80 ^ color[2])
    elif isinstance(color, int):
        # Indexed 256-color palette value, handled band by band.
        if 0 <= color <= 0x07:
            # Standard colors.
            index = color
            return 0xFF if index == 0 else 0xE8
        elif 0x08 <= color <= 0x0F:
            # High-intensity colors.
            index = color - 0x08
            return 0xFF if index == 0 else 0xE8
        elif 0x10 <= color <= 0xE7:
            # 6x6x6 color cube.
            index = color - 0x10
            if 0 <= index % 36 < 18:
                return 0xFF
            else:
                return 0x10
        elif 0xE8 <= color <= 0xFF:
            # Grayscale ramp.
            index = color - 0x0E8
            return 0xFF if 0 <= index < 12 else 0xE8
        else:
            raise ValueError("incorrect color: {!r}".format(color))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def using_git(cwd): """Test whether the directory cwd is contained in a git repository."""
    # Any failure (not a repo, git missing) means "not using git".
    try:
        git_log = shell_out(["git", "log"], cwd=cwd)
        return True
    except (CalledProcessError, OSError):  # pragma: no cover
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def using_hg(cwd): """Test whether the directory cwd is contained in a mercurial repository."""
    # Any failure (not a repo, hg missing) means "not using mercurial".
    try:
        hg_log = shell_out(["hg", "log"], cwd=cwd)
        return True
    except (CalledProcessError, OSError):
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def using_bzr(cwd): """Test whether the directory cwd is contained in a bazaar repository."""
    # Any failure (not a repo, bzr missing) means "not using bazaar".
    try:
        bzr_log = shell_out(["bzr", "log"], cwd=cwd)
        return True
    except (CalledProcessError, OSError):
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def which(cwd=None): # pragma: no cover """Try to find which version control system contains the cwd directory. Returns the VersionControl superclass e.g. Git, if none were found this will raise a NotImplementedError. """
    if cwd is None:
        cwd = os.getcwd()
    # Probe every using_* helper defined in this module.
    for (k, using_vc) in globals().items():
        if k.startswith('using_') and using_vc(cwd=cwd):
            # Strip the 'using_' prefix to recover the vc name (git, hg, bzr).
            return VersionControl.from_string(k[6:])

    # Not supported (yet)
    raise NotImplementedError("Unknown version control system, "
                              "or you're not in the project directory.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def modified_lines(self, r, file_name): """Returns the line numbers of a file which have been changed."""
    # Diff the file against revision r, then extract the changed line ranges.
    cmd = self.file_diff_cmd(r, file_name)
    diff = shell_out_ignore_exitcode(cmd, cwd=self.root)
    return list(self.modified_lines_from_diff(diff))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def modified_lines_from_diff(self, diff): """Returns the changed lines in a diff. - Potentially this is vc specific (if not using udiff). Note: this returns the line numbers in descending order. """
    # Delegate the udiff parsing to the shared diff module.
    from pep8radius.diff import modified_lines_from_udiff
    for start, end in modified_lines_from_udiff(diff):
        yield start, end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_filenames_diff(self, r): """Get the py files which have been changed since rev."""
    cmd = self.filenames_diff_cmd(r)
    diff_files = shell_out_ignore_exitcode(cmd, cwd=self.root)
    diff_files = self.parse_diff_filenames(diff_files)

    # Only python files are of interest.
    return set(f for f in diff_files if f.endswith('.py'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_diff_filenames(diff_files): """Parse the output of filenames_diff_cmd."""
# ? .gitignore # M 0.txt files = [] for line in diff_files.splitlines(): line = line.strip() fn = re.findall('[^ ]+\s+(.*.py)', line) if fn and not line.startswith('?'): files.append(fn[0]) return files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data(self): """ Helper class for parsing JSON POST data into a Python object. """
    if self.request.method == 'GET':
        # Query parameters already arrive parsed.
        return self.request.GET
    else:
        assert self.request.META['CONTENT_TYPE'].startswith('application/json')
        # Decode the raw body with the request (or project default) charset.
        charset = self.request.encoding or settings.DEFAULT_CHARSET
        return json.loads(self.request.body.decode(charset))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def options(self, request, *args, **kwargs): """ Implements a OPTIONS HTTP method function returning all allowed HTTP methods. """
    # A method is allowed iff a handler with its (lowercase) name exists.
    allow = []
    for method in self.http_method_names:
        if hasattr(self, method):
            allow.append(method.upper())

    r = self.render_to_response(None)
    r['Allow'] = ','.join(allow)
    return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(args=None, vc=None, cwd=None, apply_config=False): """PEP8 clean only the parts of the files touched since the last commit, a previous commit or branch."""
    # Entry point: returns a process exit code (0 success, 1 error, or 1
    # when changes were made and --error-status was requested).
    import signal

    try:  # pragma: no cover
        # Exit on broken pipe.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:  # pragma: no cover
        # SIGPIPE is not available on Windows.
        pass

    try:
        if args is None:
            args = []

        try:
            # Note: argparse on py 2.6 you can't pass a set
            # TODO neater solution for this!
            args_set = set(args)
        except TypeError:
            args_set = args  # args is a Namespace

        if '--version' in args_set or getattr(args_set, 'version', 0):
            print(version)
            return 0

        if '--list-fixes' in args_set or getattr(args_set, 'list_fixes', 0):
            from autopep8 import supported_fixes
            for code, description in sorted(supported_fixes()):
                print('{code} - {description}'.format(
                    code=code, description=description))
            return 0

        try:
            try:
                args = parse_args(args, apply_config=apply_config)
            except TypeError:
                pass  # args is already a Namespace (testing)

            if args.from_diff:  # pragma: no cover
                r = Radius.from_diff(args.from_diff.read(),
                                     options=args, cwd=cwd)
            else:
                r = Radius(rev=args.rev, options=args, vc=vc, cwd=cwd)
        except NotImplementedError as e:  # pragma: no cover
            print(e)
            return 1
        except CalledProcessError as c:  # pragma: no cover
            # cut off usage and exit
            output = c.output.splitlines()[0]
            print(output)
            return c.returncode

        any_changes = r.fix()
        if any_changes and args.error_status:
            return 1
        return 0

    except KeyboardInterrupt:  # pragma: no cover
        return 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_args(arguments=None, root=None, apply_config=False): """Parse the arguments from the CLI. If apply_config then we first look up and apply configs using apply_config_defaults. """
    if arguments is None:
        arguments = []
    parser = create_parser()
    args = parser.parse_args(arguments)
    if apply_config:
        # Re-parse so explicit CLI flags override the config-file defaults.
        parser = apply_config_defaults(parser, args, root=root)
        args = parser.parse_args(arguments)

    # sanity check args (from autopep8)
    if args.max_line_length <= 0:  # pragma: no cover
        parser.error('--max-line-length must be greater than 0')
    if args.select:
        args.select = _split_comma_separated(args.select)
    if args.ignore:
        args.ignore = _split_comma_separated(args.ignore)
    elif not args.select and args.aggressive:
        # Enable everything by default if aggressive.
        args.select = ['E', 'W']
    else:
        args.ignore = _split_comma_separated(DEFAULT_IGNORE)
    if args.exclude:
        args.exclude = _split_comma_separated(args.exclude)
    else:
        args.exclude = []

    return args
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_vint32(self): """ This seems to be a variable length integer ala utf-8 style """
    result = 0
    count = 0
    while True:
        if count > 4:
            # 5 groups of 7 bits already consumed; more would overflow 32 bits.
            raise ValueError("Corrupt VarInt32")

        b = self.read_byte()
        # The low 7 bits carry the payload, least-significant group first.
        result = result | (b & 0x7F) << (7 * count)
        count += 1

        # A clear high bit marks the final byte of the varint.
        if not b & 0x80:
            return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_message(self, message_type, compressed=False, read_size=True): """ Read a protobuf message """
    if read_size:
        # Length-prefixed message: the size comes first as a varint.
        size = self.read_vint32()
        b = self.read(size)
    else:
        # Otherwise consume the remainder of the stream.
        b = self.read()

    if compressed:
        b = snappy.decompress(b)

    # Instantiate and fill the requested protobuf type.
    m = message_type()
    m.ParseFromString(b)
    return m
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_hooks(self, packet): """ Run any additional functions that want to process this type of packet. These can be internal parser hooks, or external hooks that process information """
    # Internal (parser bookkeeping) hooks run before external ones.
    if packet.__class__ in self.internal_hooks:
        self.internal_hooks[packet.__class__](packet)

    if packet.__class__ in self.hooks:
        self.hooks[packet.__class__](packet)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_string_table(self, tables): """ Need to pull out player information from string table """
    self.info("String table: %s" % (tables.tables, ))

    for table in tables.tables:
        if table.table_name == "userinfo":
            for item in table.items:
                if len(item.data) > 0:
                    if len(item.data) == 140:
                        # Fixed-size C struct: copy the raw bytes straight
                        # into the ctypes PlayerInfo layout.
                        p = PlayerInfo()
                        ctypes.memmove(ctypes.addressof(p), item.data, 140)
                        p.str = item.str
                        self.run_hooks(p)

        if table.table_name == "CombatLogNames":
            # Map table index -> name for later combat-log lookups.
            self.combat_log_names = dict(enumerate(
                (item.str for item in table.items)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_game_event(self, event): """ So CSVCMsg_GameEventList is a list of all events that can happen. A game event has an eventid which maps to a type of event that happened """
    # Unknown event ids are silently ignored.
    if event.eventid in self.event_lookup:
        #Bash this into a nicer data format to work with
        event_type = self.event_lookup[event.eventid]
        ge = GameEvent(event_type.name)

        # Pair each value with the key name declared in the event type;
        # KEY_DATA_TYPES maps the wire type to the protobuf field to read.
        for i, key in enumerate(event.keys):
            key_type = event_type.keys[i]
            ge.keys[key_type.name] = getattr(key, KEY_DATA_TYPES[key.type])

        self.debug("|==========> %s" % (ge, ))
        self.run_hooks(ge)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self): """ Parse a replay """
    self.important("Parsing demo file '%s'" % (self.filename, ))

    with open(self.filename, 'rb') as f:
        reader = Reader(StringIO(f.read()))

        # 8-byte magic string followed by a 32-bit offset header.
        filestamp = reader.read(8)
        offset = reader.read_int32()
        if filestamp != "PBUFDEM\x00":
            raise ValueError("Invalid replay - incorrect filestamp")

        buff = StringIO(f.read())

        frame = 0
        more = True
        while more and reader.remaining > 0:
            cmd = reader.read_vint32()
            tick = reader.read_vint32()
            compressed = False

            # The compression flag is OR-ed into the command id.
            if cmd & demo_pb2.DEM_IsCompressed:
                compressed = True
                cmd = cmd & ~demo_pb2.DEM_IsCompressed

            if cmd not in messages.MESSAGE_TYPES:
                raise KeyError("Unknown message type found")

            message_type = messages.MESSAGE_TYPES[cmd]
            message = reader.read_message(message_type, compressed)

            self.info('%s: %s' % (frame, message_type))
            self.worthless(message)

            self.run_hooks(message)

            self.info('|%s' % ('-' * 79, ))
            frame += 1

            # Optional frame limit for partial parses.
            if self.frames and frame > self.frames:
                break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(data): """ Convert from unicode to native ascii """
try: st = basestring except NameError: st = str if isinstance(data, st): return str(data) elif isinstance(data, Mapping): return dict(map(convert, data.iteritems())) elif isinstance(data, Iterable): return type(data)(map(convert, data)) else: return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_type_by_schema(self, schema_obj, schema_type): """ Set property type by schema object Schema will create, if it doesn't exists in collection :param dict schema_obj: raw schema object :param str schema_type: """
    schema_id = self._get_object_schema_id(schema_obj, schema_type)
    # Create the schema on first sight; afterwards just reference it by id.
    if not self.storage.contains(schema_id):
        schema = self.storage.create_schema(
            schema_obj, self.name, schema_type, root=self.root)
        # create_schema must hand back the very id derived above.
        assert schema.schema_id == schema_id
    self._type = schema_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tablib_export_action(modeladmin, request, queryset, file_type="xls"): """ Allow the user to download the current filtered list of items :param file_type: One of the formats supported by tablib (e.g. "xls", "csv", "html", etc.) """
    dataset = SimpleDataset(queryset, headers=None)
    # e.g. "users.xls" -- derived from the model's plural verbose name.
    filename = '{0}.{1}'.format(
        smart_str(modeladmin.model._meta.verbose_name_plural), file_type)
    response_kwargs = {
        'content_type': get_content_type(file_type)
    }
    # tablib exposes each export format as an attribute on the dataset.
    response = HttpResponse(getattr(dataset, file_type), **response_kwargs)
    # Force a download rather than inline display.
    response['Content-Disposition'] = 'attachment; filename={0}'.format(
        filename)
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_type_properties(self, property_obj, name, additional_prop=False): """ Extend parents 'Get internal properties of property'-method """
    # Let the base class work out type / format / extra dict first.
    property_type, property_format, property_dict = \
        super(Schema, self).get_type_properties(property_obj, name,
                                                additional_prop=additional_prop)
    _schema = self.storage.get(property_type)
    if _schema and ('additionalProperties' in property_obj):
        # The property is a map; resolve the value type of the mapping.
        _property_type, _property_format, _property_dict = \
            super(Schema, self).get_type_properties(
                property_obj['additionalProperties'],
                '{}-mapped'.format(name),
                additional_prop=True)
        if _property_type not in PRIMITIVE_TYPES:
            # Non-primitive map values become nested, wrapped schemas.
            SchemaMapWrapper.wrap(self.storage.get(_property_type))
            _schema.nested_schemas.add(_property_type)
        else:
            _schema.type_format = _property_type
    return property_type, property_format, property_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generic_export(request, model_name=None): """ Generic view configured through settings.TABLIB_MODELS Usage: 1. Add the view to ``urlpatterns`` in ``urls.py``:: url(r'export/(?P<model_name>[^/]+)/$', "django_tablib.views.generic_export"), 2. Create the ``settings.TABLIB_MODELS`` dictionary using model names as keys the allowed lookup operators as values, if any:: TABLIB_MODELS = { 'myapp.simple': None, 'myapp.related': {'simple__title': ('exact', 'iexact')}, } 3. Open ``/export/myapp.simple`` or ``/export/myapp.related/?simple__title__iexact=test`` """
    # Only explicitly whitelisted models may be exported.
    if model_name not in settings.TABLIB_MODELS:
        raise Http404()

    model = get_model(*model_name.split(".", 2))
    if not model:
        raise ImproperlyConfigured(
            "Model {0} is in settings.TABLIB_MODELS but"
            " could not be loaded".format(model_name))

    qs = model._default_manager.all()

    # Filtering may be allowed based on TABLIB_MODELS:
    filter_settings = settings.TABLIB_MODELS[model_name]
    filters = {}

    for k, v in request.GET.items():
        try:
            # Allow joins (they'll be checked below) but chop off the trailing
            # lookup operator:
            rel, lookup_type = k.rsplit("__", 1)
        except ValueError:
            rel = k
            lookup_type = "exact"

        allowed_lookups = filter_settings.get(rel, None)

        if allowed_lookups is None:
            return HttpResponseBadRequest(
                "Filtering on {0} is not allowed".format(rel)
            )
        elif lookup_type not in allowed_lookups:
            return HttpResponseBadRequest(
                "{0} may only be filtered using {1}".format(
                    k, " ".join(allowed_lookups)))
        else:
            filters[str(k)] = v

    if filters:
        qs = qs.filter(**filters)

    return export(request, model=model, queryset=qs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sorted(collection): """ sorting dict by key, schema-collection by schema-name operations by id """
    # Empty collections are returned unchanged.
    if len(collection) < 1:
        return collection
    # Dicts are sorted by key into a list of (key, value) pairs.
    # NOTE(review): if this function is bound at module scope under the name
    # "sorted" it shadows the builtin, and the inner sorted(...) calls would
    # hit this function (and fail on the key= kwarg) — confirm it is accessed
    # as a method / under another name.
    if isinstance(collection, dict):
        return sorted(collection.items(), key=lambda x: x[0])
    # Dispatch the sort key on the type of the first element:
    # Operation objects sort by id, schema-id strings by schema name.
    if isinstance(list(collection)[0], Operation):
        key = lambda x: x.operation_id
    elif isinstance(list(collection)[0], str):
        key = lambda x: SchemaObjects.get(x).name
    else:
        raise TypeError(type(collection[0]))
    return sorted(collection, key=key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pre_save(self, instance, add): """ Updates the edtf value from the value of the display_field. If there's a valid edtf, then set the date values. """
    # Nothing to derive if no source text field is configured, or the EDTF
    # attribute is not present on this instance.
    if not self.natural_text_field or self.attname not in instance.__dict__:
        return
    # NOTE(review): this value is unconditionally overwritten by the
    # if/else below — dead store kept for byte-identity.
    edtf = getattr(instance, self.attname)
    # Update EDTF field based on latest natural text value, if any
    natural_text = getattr(instance, self.natural_text_field)
    if natural_text:
        edtf = text_to_edtf(natural_text)
    else:
        edtf = None
    # TODO If `natural_text_field` becomes cleared the derived EDTF field
    # value should also be cleared, rather than left at original value?
    # TODO Handle case where EDTF field is set to a string directly, not
    # via `natural_text_field` (this is a slightly unexpected use-case, but
    # is a very efficient way to set EDTF values in situations like for API
    # imports so we probably want to continue to support it?)
    if edtf and not isinstance(edtf, EDTFObject):
        # Parse raw string into an EDTF object; fail_silently yields None
        # on unparseable input rather than raising.
        edtf = parse_edtf(edtf, fail_silently=True)
    setattr(instance, self.attname, edtf)
    # set or clear related date fields on the instance
    for attr in DATE_ATTRS:
        field_attr = "%s_field" % attr
        g = getattr(self, field_attr, None)
        if g:
            if edtf:
                try:
                    target_field = instance._meta.get_field(g)
                except FieldDoesNotExist:
                    # Configured derived field does not exist on the model;
                    # skip rather than crash.
                    continue
                value = getattr(edtf, attr)()  # struct_time
                # Store as Julian Date float or as a plain date, depending
                # on the target field type.
                if isinstance(target_field, models.FloatField):
                    value = struct_time_to_jd(value)
                elif isinstance(target_field, models.DateField):
                    value = struct_time_to_date(value)
                else:
                    raise NotImplementedError(
                        u"EDTFField does not support %s as a derived data"
                        u" field, only FloatField or DateField" % type(target_field))
                setattr(instance, g, value)
            else:
                # No EDTF value: clear the derived field.
                setattr(instance, g, None)
    return edtf
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_delta(op, time_struct, delta): """ Apply a `relativedelta` to a `struct_time` data structure. `op` is an operator function, probably always `add` or `sub`tract to correspond to `a_date + a_delta` and `a_date - a_delta`. This function is required because we cannot use standard `datetime` module objects for conversion when the date/time is, or will become, outside the boundary years 1 AD to 9999 AD. """
    if not delta:
        return time_struct  # No work to do
    # Fast path: years within datetime's 1..9999 range convert directly.
    try:
        dt_result = op(datetime(*time_struct[:6]), delta)
        return dt_to_struct_time(dt_result)
    except (OverflowError, ValueError):
        # Year is not within supported 1 to 9999 AD range
        pass
    # Here we fake the year to one in the acceptable range to avoid having to
    # write our own date rolling logic
    # Adjust the year to be close to the 2000 millennium in 1,000 year
    # increments to try and retain accurate relative leap years
    actual_year = time_struct.tm_year
    millenium = int(float(actual_year) / 1000)
    millenium_diff = (2 - millenium) * 1000
    adjusted_year = actual_year + millenium_diff
    # Apply delta to the date/time with adjusted year
    dt = datetime(*(adjusted_year,) + time_struct[1:6])
    dt_result = op(dt, delta)
    # Convert result year back to its original millennium
    final_year = dt_result.year - millenium_diff
    return struct_time(
        (final_year,) + dt_result.timetuple()[1:6] + tuple(TIME_EMPTY_EXTRAS))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _strict_date(self, lean): """ Return a `time.struct_time` representation of the date. """
    # Date-only struct_time: time-of-day fields and the wday/yday/isdst
    # extras are filled with zeroed placeholder tuples.
    return struct_time(
        (
            self._precise_year(lean),
            self._precise_month(lean),
            self._precise_day(lean),
        ) + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def package(self): """Packages lambda data for deployment into a zip"""
    # Build the deployment zip entirely in memory.
    logger.info('Packaging lambda {}'.format(self.lambda_name))
    zfh = io.BytesIO()
    if os.path.exists(os.path.join(self.lambda_dir, '.env')):
        logger.warn(
            'A .env file exists in your Lambda directory - be '
            'careful that it does not contain any secrets you '
            'don\'t want uploaded to AWS!'
        )
    with zipfile.ZipFile(zfh, 'w') as zf:
        self.add_directory_to_zip(self.lambda_dir, zf)
        # Construct a .env file in the archive with our
        # needed environment variables.
        envinfo = zipfile.ZipInfo('.env')
        # Set unix permissions rw-r--r-- on the archived .env.
        # NOTE(review): 0644 / 16L literals are Python 2 only syntax.
        envinfo.external_attr = 0644 << 16L
        zf.writestr(
            envinfo,
            '\n'.join(
                '{} = {}'.format(key, yaep.env(key))
                for key in self.env_vars
            )
        )
        if 'requirements.txt' in os.listdir(self.lambda_dir):
            with TemporaryDirectory() as temp_dir:
                pip_args = [
                    'install',
                    '-r',
                    os.path.join(self.lambda_dir, 'requirements.txt'),
                    '-t',
                    temp_dir
                ]
                # Do pip install to temporary dir, then vendor the installed
                # packages into the zip alongside the lambda code.
                if pip.main(pip_args) == 0:
                    self.add_directory_to_zip(temp_dir, zf)
                else:
                    if sys.platform == 'darwin':
                        logger.error(
                            'A DistutilsOptionError about the prefix '
                            'can occur when you are on OS X and '
                            'installed Python via Homebrew.\nIf this '
                            'is you, please look at https://github.com'
                            '/Homebrew/brew/blob/master/share/doc/'
                            'homebrew/Homebrew-and-Python.md'
                            '#note-on-pip-install---user\n'
                            'If this is not you, please contact us '
                            ' for support.'
                        )
                    raise DependencyInstallationError(
                        'Failed to install dependencies of {}'.format(
                            self.lambda_name
                        )
                    )
    # Rewind so callers can read the archive from the start.
    zfh.seek(0)
    return zfh
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(self, *lambdas): """Deploys lambdas to AWS"""
    # An IAM role is mandatory for create_function.
    # NOTE(review): the *lambdas variadic parameter is never read in this
    # body — only self.lambda_name is deployed; confirm intended.
    if not self.role:
        logger.error('Missing AWS Role')
        raise ArgumentsError('Role required')
    logger.debug('Deploying lambda {}'.format(self.lambda_name))
    zfh = self.package()
    # Update in place if the function already exists, otherwise create it.
    if self.lambda_name in self.get_function_names():
        logger.info('Updating {} lambda'.format(self.lambda_name))
        response = self.client.update_function_code(
            FunctionName=self.lambda_name,
            ZipFile=zfh.getvalue(),
            Publish=True
        )
    else:
        logger.info('Adding new {} lambda'.format(self.lambda_name))
        # All create-time settings are overridable via environment variables.
        response = self.client.create_function(
            FunctionName=self.lambda_name,
            Runtime=yaep.env(
                'LAMBDA_RUNTIME',
                'python2.7'
            ),
            Role=self.role,
            Handler=yaep.env(
                'LAMBDA_HANDLER',
                'lambda_function.lambda_handler'
            ),
            Code={
                'ZipFile': zfh.getvalue(),
            },
            Description=yaep.env(
                'LAMBDA_DESCRIPTION',
                'Lambda code for {}'.format(self.lambda_name)
            ),
            Timeout=yaep.env(
                'LAMBDA_TIMEOUT',
                3,
                convert_booleans=False,
                type_class=int
            ),
            MemorySize=yaep.env(
                'LAMBDA_MEMORY_SIZE',
                128,
                convert_booleans=False,
                type_class=int
            ),
            Publish=True
        )
    status_code = response.get(
        'ResponseMetadata', {}
    ).get('HTTPStatusCode')
    # NOTE(review): 'Unkown' typo below is in a runtime log string; left
    # untouched here, flag for a behavior-changing fix.
    if status_code in [200, 201]:
        logger.info('Successfully deployed {} version {}'.format(
            self.lambda_name,
            response.get('Version', 'Unkown')
        ))
    else:
        logger.error('Error deploying {}: {}'.format(
            self.lambda_name,
            response
        ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list(self): """Lists already deployed lambdas"""
    # Pretty-print each deployed function's metadata, one log line per
    # line of the JSON dump so log formatting stays intact.
    for function in self.client.list_functions().get('Functions', []):
        lines = json.dumps(function, indent=4, sort_keys=True).split('\n')
        for line in lines:
            logger.info(line)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def date_to_jd(year,month,day): """ Convert a date to Julian Day. Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011. Parameters year : int Year as integer. Years preceding 1 A.D. should be 0 or negative. The year before 1 A.D. is 0, 10 B.C. is year -9. month : int Month as integer, Jan = 1, Feb. = 2, etc. day : float Day, may contain fractional part. Returns ------- jd : float Julian Day Examples -------- Convert 6 a.m., February 17, 1985 to Julian Day 2446113.75 """
    # January and February are treated as months 13 and 14 of the previous
    # year (standard trick in the Duffet-Smith algorithm).
    if month == 1 or month == 2:
        yearp = year - 1
        monthp = month + 12
    else:
        yearp = year
        monthp = month
    # this checks where we are in relation to October 15, 1582, the beginning
    # of the Gregorian calendar.
    if ((year < 1582) or
        (year == 1582 and month < 10) or
        (year == 1582 and month == 10 and day < 15)):
        # before start of Gregorian calendar
        B = 0
    else:
        # after start of Gregorian calendar
        A = math.trunc(yearp / 100.)
        B = 2 - A + math.trunc(A / 4.)
    # Day count from the start of the (possibly negative) year.
    if yearp < 0:
        C = math.trunc((365.25 * yearp) - 0.75)
    else:
        C = math.trunc(365.25 * yearp)
    D = math.trunc(30.6001 * (monthp + 1))
    jd = B + C + D + day + 1720994.5
    return jd
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jd_to_date(jd): """ Convert Julian Day to date. Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011. Parameters jd : float Julian Day Returns ------- year : int Year as integer. Years preceding 1 A.D. should be 0 or negative. The year before 1 A.D. is 0, 10 B.C. is year -9. month : int Month as integer, Jan = 1, Feb. = 2, etc. day : float Day, may contain fractional part. Examples -------- Convert Julian Day 2446113.75 to year, month, and day. (1985, 2, 17.25) """
    # Shift so the day boundary falls at midnight instead of noon.
    jd = jd + 0.5
    # F = fractional part (time of day), I = integer Julian Day number.
    F, I = math.modf(jd)
    I = int(I)
    A = math.trunc((I - 1867216.25)/36524.25)
    # Gregorian correction applies only after JD 2299160 (Oct 1582 switch).
    if I > 2299160:
        B = I + 1 + A - math.trunc(A / 4.)
    else:
        B = I
    C = B + 1524
    D = math.trunc((C - 122.1) / 365.25)
    E = math.trunc(365.25 * D)
    G = math.trunc((C - E) / 30.6001)
    # Day retains the fractional time-of-day component F.
    day = C - E + F - math.trunc(30.6001 * G)
    if G < 13.5:
        month = G - 1
    else:
        month = G - 13
    if month > 2.5:
        year = D - 4716
    else:
        year = D - 4715
    return year, month, day
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hmsm_to_days(hour=0,min=0,sec=0,micro=0): """ Convert hours, minutes, seconds, and microseconds to fractional days. Parameters hour : int, optional Hour number. Defaults to 0. min : int, optional Minute number. Defaults to 0. sec : int, optional Second number. Defaults to 0. micro : int, optional Microsecond number. Defaults to 0. Returns ------- days : float Fractional days. Examples -------- 0.25 """
    # Fold units inward: microseconds -> seconds -> minutes -> hours -> days.
    days = sec + (micro / 1.e6)
    days = min + (days / 60.)
    days = hour + (days / 60.)
    return days / 24.
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def days_to_hmsm(days): """ Convert fractional days to hours, minutes, seconds, and microseconds. Precision beyond microseconds is rounded to the nearest microsecond. Parameters days : float A fractional number of days. Must be less than 1. Returns ------- hour : int Hour number. min : int Minute number. sec : int Second number. micro : int Microsecond number. Raises ------ ValueError If `days` is >= 1. Examples -------- (2, 24, 0, 0) """
hours = days * 24. hours, hour = math.modf(hours) mins = hours * 60. mins, min = math.modf(mins) secs = mins * 60. secs, sec = math.modf(secs) micro = round(secs * 1.e6) return int(hour), int(min), int(sec), int(micro)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datetime_to_jd(date): """ Convert a `datetime.datetime` object to Julian Day. Parameters date : `datetime.datetime` instance Returns ------- jd : float Julian day. Examples -------- datetime.datetime(1985, 2, 17, 6, 0) 2446113.75 """
    # Combine the day-of-month with the fractional day derived from the
    # time-of-day fields, then convert via the date-based routine.
    days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
    return date_to_jd(date.year,date.month,days)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jd_to_datetime(jd): """ Convert a Julian Day to an `jdutil.datetime` object. Parameters jd : float Julian day. Returns ------- dt : `jdutil.datetime` object `jdutil.datetime` equivalent of Julian day. Examples -------- datetime(1985, 2, 17, 6, 0) """
    # Split the fractional day back into an integer day plus time-of-day.
    year, month, day = jd_to_date(jd)
    frac_days,day = math.modf(day)
    day = int(day)
    hour,min,sec,micro = days_to_hmsm(frac_days)
    return datetime(year,month,day,hour,min,sec,micro)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timedelta_to_days(td): """ Convert a `datetime.timedelta` object to a total number of days. Parameters td : `datetime.timedelta` instance Returns ------- days : float Total number of days in the `datetime.timedelta` object. Examples -------- datetime.timedelta(4, 43200) 4.5 """
seconds_in_day = 24. * 3600. days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day return days
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_schema(cls, obj, name, schema_type, root): """ Create Schema object :param dict obj: swagger schema object :param str name: schema name :param str schema_type: schema location. Can be ``inline``, ``definition`` or ``mapped`` :param BaseSwaggerObject root: root doc :return: new schema :rtype: Schema """
    # Mapped schemas get the wrapper subclass; all others use plain Schema.
    if schema_type == SchemaTypes.MAPPED:
        schema = SchemaMapWrapper(obj, storage=cls, name=name, root=root)
    else:
        schema = Schema(obj, schema_type, storage=cls, name=name, root=root)
    # Register in class-level storage before returning.
    cls.add_schema(schema)
    return schema
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_schemas(cls, schema_types=None, sort=True): """ Get schemas by type. If ``schema_type`` is None, return all schemas :param schema_types: list of schema types :type schema_types: list or None :param bool sort: sort by name :return: list of schemas :rtype: list """
    # Inline-array schemas are always excluded from listings.
    # NOTE(review): under Python 3, filter() returns an iterator, so with
    # sort=False the documented "list" return type would not hold — confirm
    # target interpreter (other code in this file uses Python 2 literals).
    result = filter(lambda x: not x.is_inline_array, cls._schemas.values())
    if schema_types:
        result = filter(lambda x: x.schema_type in schema_types, result)
    if sort:
        result = sorted(result, key=attrgetter('name'))
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trim_struct_time(st, strip_time=False): """ Return a `struct_time` based on the one provided but with the extra fields `tm_wday`, `tm_yday`, and `tm_isdst` reset to default values. If `strip_time` is set to true the time value are also set to zero: `tm_hour`, `tm_min`, and `tm_sec`. """
    # Keep either date-only (first 3 fields) or date+time (first 6 fields),
    # then pad with default extras (tm_wday/tm_yday/tm_isdst).
    if strip_time:
        return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
    else:
        return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def struct_time_to_jd(st): """ Return a float number representing the Julian Date for the given `struct_time`. NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored. """
    # Unpack the date and time-of-day fields (extras are ignored).
    year, month, day = st[:3]
    hours, minutes, seconds = st[3:6]
    # Convert time of day to fraction of day
    day += jdutil.hmsm_to_days(hours, minutes, seconds)
    return jdutil.date_to_jd(year, month, day)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jd_to_struct_time(jd): """ Return a `struct_time` converted from a Julian Date float number. WARNING: Conversion to then from Julian Date value to `struct_time` can be inaccurate and lose or gain time, especially for BC (negative) years. NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default values, not real ones. """
year, month, day = jdutil.jd_to_date(jd) # Convert time of day from fraction of day day_fraction = day - int(day) hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction) day = int(day) # This conversion can return negative values for items we do not want to be # negative: month, day, hour, minute, second. year, month, day, hour, minute, second = _roll_negative_time_fields( year, month, day, hour, minute, second) return struct_time( [year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_example_by_schema(cls, schema, ignored_schemas=None, paths=None, name=''): """ Get example by schema object :param Schema schema: current schema :param list ignored_schemas: list of previous schemas for avoid circular references :param list paths: list object paths (ex. #/definitions/Model.property) If nested schemas exists, custom examples checks in order from paths :param str name: name of property schema object if exists :return: dict or list (if schema is array) """
    # A hand-written example on the schema always wins.
    if schema.schema_example:
        return schema.schema_example
    # Avoid mutable default arguments: fresh lists per call.
    if ignored_schemas is None:
        ignored_schemas = []
    if paths is None:
        paths = []
    # Extend every candidate custom-example path with this property's name.
    if name:
        paths = list(map(lambda path: '.'.join((path, name)), paths))
    if schema.ref_path:
        paths.append(schema.ref_path)
    # Circular reference guard: emit an empty container instead of recursing.
    if schema.schema_id in ignored_schemas:
        result = [] if schema.is_array else {}
    else:
        schemas = ignored_schemas + [schema.schema_id]
        kwargs = dict(
            ignored_schemas=schemas,
            paths=paths
        )
        if schema.is_array:
            result = cls.get_example_for_array(
                schema.item, **kwargs)
        elif schema.type in PRIMITIVE_TYPES:
            result = cls.get_example_value_for_primitive_type(
                schema.type, schema.raw, schema.type_format, paths=paths
            )
        elif schema.all_of:
            # Merge examples of every composed schema.
            # NOTE(review): the loop rebinds `schema` while iterating
            # schema.all_of — safe because the iterator was created first,
            # but fragile; flag for cleanup.
            result = {}
            for _schema_id in schema.all_of:
                schema = SchemaObjects.get(_schema_id)
                result.update(cls.get_example_by_schema(schema, **kwargs))
        else:
            result = cls.get_example_for_object(
                schema.properties, nested=schema.nested_schemas, **kwargs)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_body_example(cls, operation): """ Get example for body parameter example by operation :param Operation operation: operation object """
    # Build the lookup path for a custom example of this operation's body
    # parameter; falls back to the literal name 'body' when unnamed.
    path = "#/paths/'{0.path}'/{0.method}/parameters/{name}".format(
        operation, name=operation.body.name or 'body')
    return cls.get_example_by_schema(operation.body, paths=[path])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_response_example(cls, operation, response): """ Get example for response object by operation object :param Operation operation: operation object :param Response response: response object """
    # Lookup path for a custom example of this specific response object.
    path = "#/paths/'{}'/{}/responses/{}".format(
        operation.path, operation.method, response.name)
    kwargs = dict(paths=[path])
    # Primitive responses are rendered directly; everything else goes
    # through the schema-based example builder.
    if response.type in PRIMITIVE_TYPES:
        result = cls.get_example_value_for_primitive_type(
            response.type, response.properties, response.type_format, **kwargs)
    else:
        schema = SchemaObjects.get(response.type)
        result = cls.get_example_by_schema(schema, **kwargs)
    return result