text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _check_stringify_year_row(self, row_index):
    '''Check whether the given row is a label row of years and, if so,
    convert the whole row to strings.

    Scans the interior columns of the row; bails out as soon as a cell
    fails the year test, otherwise stringifies the row at the end.
    '''
    table_row = self.table[row_index]
    # State trackers: last non-empty year seen, used to validate blanks.
    prior_year = None
    for column_index in range(self.start[1]+1, self.end[1]):
        current_year = table_row[column_index]
        # Quit if we see a cell that cannot be part of a year label row.
        if not self._check_years(current_year, prior_year):
            return
        # Only copy when we see a non-empty entry
        if current_year:
            prior_year = current_year
    # If we have a title of years, convert them to strings
    self._stringify_row(row_index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _check_stringify_year_column(self, column_index):
    '''Same as _check_stringify_year_row but for columns.

    Works on a transposed view of the table so the row logic carries
    over unchanged.
    '''
    table_column = TableTranspose(self.table)[column_index]
    # State trackers: last non-empty year seen, used to validate blanks.
    prior_year = None
    for row_index in range(self.start[0]+1, self.end[0]):
        current_year = table_column[row_index]
        # Quit if we see a cell that cannot be part of a year label column.
        if not self._check_years(current_year, prior_year):
            return
        # Only copy when we see a non-empty entry
        if current_year:
            prior_year = current_year
    # If we have a title of years, convert them to strings
    self._stringify_column(column_index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _check_years(self, cell, prior_year): ''' Helper method which defines the rules for checking for existence of a year indicator. If the cell is blank then prior_year is used to determine validity. ''' # Anything outside these values shouldn't auto # categorize to strings min_year = 1900 max_year = 2100 # Empty cells could represent the prior cell's title, # but an empty cell before we find a year is not a title if is_empty_cell(cell): return bool(prior_year) # Check if we have a numbered cell between min and max years return is_num_cell(cell) and cell > min_year and cell < max_year
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_hash(content): """Generate hash for file or string and avoid strings starting with "ad" to workaround ad blocks being over aggressiv. The current implementation is based on sha256. :param str|FileIO content: The content to hash, either as string or as file-like object """
h = hashlib.sha256()
if isinstance(content, bytes_type):
    # Raw bytes: hash in one shot.
    h.update(content)
else:
    # File-like object: stream in 1 MiB chunks; the final empty read
    # terminates the loop (updating with b'' is a no-op).
    data = True
    while data:
        data = content.read(1024 * 1024)
        h.update(data)
h_digest = h.digest()
# base64url
#   | char | substitute |
#   |  +   |     -      |
#   |  /   |     _      |
result = base64.b64encode(h_digest, altchars=b'-_')
# ensure this is a str object in 3.x
result = result.decode('ascii')
# Drop padding: '=' is not URL-safe and carries no information here.
result = result.rstrip('=')
if result[:2].lower() == 'ad':
    # workaround adblockers blocking everything starting with "ad"
    # by replacing the "d" with another character
    if result[1] == 'd':
        result = result[0] + '~' + result[2:]
    else:
        # upper case D
        result = result[0] + '.' + result[2:]
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init(scope, app, settings): """Plugin for serving static files in development mode"""
cfg = settings.get('rw.static', {})
static = Static()
scope['static'] = static
scope['template_env'].globals['static'] = static
for base_uri, sources in cfg.items():
    full_paths = []
    for source in sources:
        if isinstance(source, dict):
            # Explicit path entry; environment variables may be
            # interpolated via str.format(**os.environ).
            full_path = source['path']
            full_paths.append(full_path.format(**os.environ))
            continue
        elif ',' in source:
            # "module, subdir" form.
            module_name, path = [part.strip() for part in source.split(',')]
        else:
            # Bare module name: serve its conventional "static" dir.
            module_name = source
            path = 'static'
        full_path = pkg_resources.resource_filename(module_name, path)
        full_paths.append(full_path)
    # <h> is a hash path segment; see StaticHandler for its semantics.
    app.root.mount('/' + base_uri + '/<h>/<path:path>', StaticHandler,
                   {'path': full_paths},
                   name='static_' + base_uri.replace('.', '_'))
    static.handlers.append((base_uri, StaticHandler, full_paths))
static.setup()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_absolute_path(cls, roots, path): """Returns the absolute location of ``path`` relative to one of the ``roots``. ``roots`` is the path configured for this `StaticFileHandler` (in most cases the ``static_path`` `Application` setting). """
for root in roots:
    abspath = os.path.abspath(os.path.join(root, path))
    # NOTE(review): startswith(root) without a trailing os.sep also
    # accepts sibling dirs like "/root2" for root "/root" — confirm
    # whether stricter containment is intended.
    if abspath.startswith(root) and os.path.exists(abspath):
        return abspath
# XXX TODO: magic sentinel string returned instead of raising —
# callers must treat 'file-not-found' specially.
return 'file-not-found'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_file_to_bucket(self, bucket, file_path, key, is_public=False): """ Upload files to S3 Bucket """
with open(file_path, 'rb') as data:
    self.__s3.upload_fileobj(data, bucket, key)
if is_public:
    # Make the object world-readable after upload.
    self.__s3.put_object_acl(ACL='public-read', Bucket=bucket, Key=key)
bucket_location = self.__s3.get_bucket_location(Bucket=bucket)
# NOTE(review): for us-east-1 buckets, LocationConstraint is None,
# which would yield "https://s3-None.amazonaws.com/..." — verify.
file_url = "https://s3-{0}.amazonaws.com/{1}/{2}".format(
    bucket_location['LocationConstraint'],
    bucket,
    key)
return file_url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_file_from_bucket(self, bucket, file_path, key): """ Download file from S3 Bucket """
with open(file_path, 'wb') as data: self.__s3.download_fileobj(bucket, key, data) return file_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def next(self): """Move to the next valid locus. Will only return valid loci or exit via StopIteration exception """
# Advance the cursor and ask the datasource to fill this object in.
while True:
    self.cur_idx += 1
    if self.__datasource.populate_iteration(self):
        return self
    # NOTE(review): reconstructed placement — with the raise inside the
    # loop, iteration stops on the first invalid index; presumably
    # populate_iteration itself skips invalid loci. Confirm against the
    # original source. Also note this is a Py2-style next(); Py3
    # iterators need __next__.
    raise StopIteration
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None): """Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding first `SparseTensor`'s values. Args: parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape child_indices: 2D int64 `Tensor` with child `SparseTensor` indices child_values: 1D int64 `Tensor` with child `SparseTensor` values child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape name: A name for the operation (optional). Returns: `SparseTensor` with an additional dimension of size 1 added. """
with ops.name_scope(name, "CombineSparseSuccessor",
                    [parent_indices, parent_shape, child_indices,
                     child_values, child_shape]):
    # Delegate the actual index/value merge to the native op.
    indices, values, shape = ops_module.combine_sparse_successor(
        parent_indices, parent_shape, child_indices, child_values,
        child_shape
    )
    # Repackage the raw tensors as a SparseTensor for the caller.
    return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand_char_ngrams(source, minn, maxn, itself='ASIS', name=None): """Split unicode strings into char ngrams. Ngrams size configures with minn and max Args: source: `Tensor` or `SparseTensor` of any shape, strings to split minn: Minimum length of char ngram minn: Maximum length of char ngram itself: Scalar value, strategy for source word preserving. One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`. name: A name for the operation (optional). Returns: `SparseTensor` with an additional dimension of size 1 added. """
with ops.name_scope(name, "ExpandCharNgrams", [source]):
    source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
    if isinstance(source, tf.SparseTensor):
        # Expand only the stored values, then graft the result back
        # onto the parent's sparsity structure.
        child_indices, child_values, child_shape = ops_module.expand_char_ngrams(
            source.values, minn, maxn, itself)
        result = _combine_sparse_successor(
            source.indices, source.dense_shape,
            child_indices, child_values, child_shape)
    else:
        # Dense input: the native op returns sparse components directly.
        indices, values, shape = ops_module.expand_char_ngrams(
            source, minn, maxn, itself)
        result = tf.SparseTensor(indices=indices, values=values,
                                 dense_shape=shape)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_normalize_unicode(source, form, name=None): """Normalize unicode strings tensor. Args: source: `Tensor` or `SparseTensor` of any shape, strings to normalize. form: Scalar value, name of normalization algorithm. One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input. """
with ops.name_scope(name, "TransformNormalizeUnicode", [source]):
    source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
    if isinstance(source, tf.SparseTensor):
        # Normalize only the stored values; sparsity layout is unchanged.
        result = tf.SparseTensor(
            indices=source.indices,
            values=ops_module.transform_normalize_unicode(source.values, form),
            dense_shape=source.dense_shape
        )
    else:
        result = ops_module.transform_normalize_unicode(source, form)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_wrap_with(source, left, right, name=None): """Wrap source strings with "left" and "right" strings Args: source: `Tensor` or `SparseTensor` of any shape, strings to replace digits. left: Scalar string to add in the beginning right: Scalar string to add in the ending name: A name for the operation (optional). Returns: `SparseTensor` of same shape and size as input. """
with ops.name_scope(name, "TransformWrapWith", [source]):
    source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
    if isinstance(source, tf.SparseTensor):
        # Wrap only the stored values; sparsity layout is unchanged.
        result = tf.SparseTensor(
            indices=source.indices,
            values=ops_module.transform_wrap_with(source.values, left, right),
            dense_shape=source.dense_shape
        )
    else:
        result = ops_module.transform_wrap_with(source, left, right)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def specialRound(number, rounding): """A method used to round a number in the way that UsefulUtils rounds."""
temp = 0 if rounding == 0: temp = number else: temp = round(number, rounding) if temp % 1 == 0: return int(temp) else: return float(temp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Where_filter_gen(*data): """ Generate an sqlite "LIKE" filter generator based on the given data. This functions arguments should be a N length series of field and data tuples. """
where = [] def Fwhere(field, pattern): """Add where filter for the given field with the given pattern.""" where.append("WHERE {0} LIKE '{1}'".format(field, pattern)) def Fstring(field, string): """Add a where filter based on a string.""" Fwhere(field, "%{0}%".format(string if not isinstance(string, str) else str(string))) def Fdict(field, data): """Add where filters to search for dict keys and values.""" for key, value in data.items(): if value == '*': Fstring(field, key) else: Fstring(field, "{0}:%{1}".format(key, value if not isinstance(value, str) else str(value))) def Flist(field, data): """Add where filters to search for elements of a list.""" for elem in data: Fstring(field, elem if not isinstance(elem, str) else str(elem)) for field, data in data: if isinstance(data, str): Fstring(field, data) elif isinstance(data, dict): Fdict(field, data) elif isinstance(data, list): Flist(field, data) return ' AND '.join(where)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cache_card(self, card): """ Cache the card for faster future lookups. Removes the oldest card when the card cache stores more cards then this libraries cache limit. """
code = card.code
self.card_cache[code] = card
# Keep card_cache_list in least-recently-cached order: move an
# existing code to the back before appending.
if code in self.card_cache_list:
    self.card_cache_list.remove(code)
self.card_cache_list.append(code)
# Evict the oldest entry once the cache exceeds its limit.
if len(self.card_cache_list) > self.cachelimit:
    del self.card_cache[self.card_cache_list.pop(0)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_card(self, code, cache=True): """ Load a card with the given code from the database. This calls each save event hook on the save string before commiting it to the database. Will cache each resulting card for faster future lookups with this method while respecting the libraries cache limit. However only if the cache argument is True. Will return None if the card could not be loaded. """
# Fast path: serve from the in-memory cache when possible.
card = self.card_cache.get(code, None)
if card is None:
    # The DB stores codes as text; normalize before querying.
    code = code if isinstance(code, str) else str(code)
    with sqlite3.connect(self.dbname) as carddb:
        result = carddb.execute(
            "SELECT * FROM CARDS WHERE code = ?", (code,))
        loadrow = result.fetchone()
        if not loadrow:
            return None
        # Row order must match FIELDS for this zip to be correct.
        loaddict = dict(zip(FIELDS, loadrow))
        card = self.cardclass(loaddict=loaddict)
        if cache:
            self.cache_card(card)
return card
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_card(self, card, cache=False): """ Save the given card to the database. This calls each save event hook on the save string before commiting it to the database. """
if cache:
    self.cache_card(card)
carddict = card.save()
with sqlite3.connect(self.dbname) as carddb:
    # Upsert by delete-then-insert; both run inside the connection's
    # implicit transaction.
    carddb.execute("DELETE from CARDS where code = ?",
                   (carddict["code"],))
    # NOTE(review): 5 placeholders — assumes FIELDS has exactly five
    # entries; confirm against the schema.
    carddb.execute("INSERT INTO CARDS VALUES(?, ?, ?, ?, ?)",
                   [carddict[key] if isinstance(carddict[key], str)
                    else str(carddict[key]) for key in FIELDS])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve_all(self): """ A generator that iterates over each card in the library database. This is best used in for loops as it will only load a card from the library as needed rather then all at once. """
with sqlite3.connect(self.dbname) as carddb: for row in carddb.execute("SELECT code FROM CARDS"): yield self.load_card(row[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_search(self, code=None, name=None, abilities=None, attributes=None, info=None): """ Return a list of codes and names pertaining to cards that have the given information values stored. Can take a code integer, name string, abilities dict {phase: ability list/"*"}, attributes list, info dict {key, value list/"*"}. In the above argument examples "*" is a string that may be passed instead of a list as the dict value to match anything that stores that key. """
# Build the WHERE clause from whichever filters were supplied;
# Where_filter_gen ignores None entries.
command = "SELECT code, name FROM CARDS "
command += Where_filter_gen(("code", code),
                            ("name", name),
                            ("abilities", abilities),
                            ("attributes", attributes),
                            ("info", info))
with sqlite3.connect(self.dbname) as carddb:
    return carddb.execute(command).fetchall()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_unit(C, val, unit=None): """convert a string measurement to a Unum"""
# Split "12.5px"-style input into numeric and unit parts.
md = re.match(r'^(?P<num>[\d\.]+)(?P<unit>.*)$', val)
if md is not None:
    un = float(md.group('num')) * CSS.units[md.group('unit')]
    if unit is not None:
        # Convert to the requested unit when one is given.
        return un.asUnit(unit)
    else:
        return un
# Falls through (returns None) when the value does not parse.
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_stylesheets(Class, fn, *cssfns): """merge the given CSS files, in order, into a single stylesheet. First listed takes priority. """
stylesheet = Class(fn=fn) for cssfn in cssfns: css = Class(fn=cssfn) for sel in sorted(css.styles.keys()): if sel not in stylesheet.styles: stylesheet.styles[sel] = css.styles[sel] else: for prop in [prop for prop in css.styles[sel] if prop not in stylesheet.styles[sel]]: stylesheet.styles[sel][prop] = css.styles[sel][prop] return stylesheet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_selectors(Class, fn): """return a sorted list of selectors that occur in the stylesheet"""
selectors = []
# validate=False: accept vendor prefixes / non-standard properties.
cssparser = cssutils.CSSParser(validate=False)
css = cssparser.parseFile(fn)
# Only style rules carry selectors; @media, @import etc. are skipped.
for rule in [r for r in css.cssRules if type(r)==cssutils.css.CSSStyleRule]:
    selectors += [sel.selectorText for sel in rule.selectorList]
# De-duplicate and sort for a stable result.
selectors = sorted(list(set(selectors)))
return selectors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def selector_to_xpath(cls, selector, xmlns=None): """convert a css selector into an xpath expression. xmlns is option single-item dict with namespace prefix and href """
# Make bare class selectors explicit ("*.cls") so the translator
# produces the expected element wildcard.
selector = selector.replace(' .', ' *.')
if selector[0] == '.':
    selector = '*' + selector
log.debug(selector)
# Same treatment for id selectors.
if '#' in selector:
    selector = selector.replace('#', '*#')
    log.debug(selector)
if xmlns is not None:
    # Single-entry {prefix: href} dict; prefix every element token,
    # leaving the child combinator '>' untouched.
    prefix = list(xmlns.keys())[0]
    href = xmlns[prefix]
    selector = ' '.join([
        (n.strip() != '>' and prefix + '|' + n.strip() or n.strip())
        for n in selector.split(' ')
    ])
    log.debug(selector)
path = cssselect.GenericTranslator().css_to_xpath(selector)
# Post-process the generated XPath into the shorter "//" form.
path = path.replace("descendant-or-self::", "")
path = path.replace("/descendant::", "//")
path = path.replace('/*/', '//')
log.debug(' ==> %s' % path)
return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_to_add(self): """Transition to add"""
# Legal predecessors: the initial state (first add) or add itself
# (subsequent adds).
assert self.state in (AQStateMachineStates.init, AQStateMachineStates.add)
self.state = AQStateMachineStates.add
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_to_execute(self): """Transition to execute"""
# Execution may begin only after at least one add.
assert self.state in (AQStateMachineStates.add,)
self.state = AQStateMachineStates.execute
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_to_rollback(self): """Transition to rollback"""
# Rollback is reachable from a running or a completed execution.
assert self.state in (AQStateMachineStates.execute,
                      AQStateMachineStates.execute_complete)
self.state = AQStateMachineStates.rollback
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_to_execute_complete(self): """Transition to execute complete"""
# Completion is only valid while executing.
assert self.state in (AQStateMachineStates.execute,)
self.state = AQStateMachineStates.execute_complete
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_to_rollback_complete(self): """Transition to rollback complete"""
# Only a rollback in progress can complete.
assert self.state in (AQStateMachineStates.rollback,)
# "rollback_complate" (sic) is the attribute name actually referenced
# on AQStateMachineStates — kept to match the enum.
self.state = AQStateMachineStates.rollback_complate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, key, sort_key): """ Get an element in dictionary """
key = self.prefixed('{}:{}'.format(key, sort_key)) self.logger.debug('Storage - get {}'.format(key)) if key not in self.cache.keys(): return None return self.cache[key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, key, sort_key): primary_key = key key = self.prefixed('{}:{}'.format(key, sort_key)) """ Delete an element in dictionary """
self.logger.debug('Storage - delete {}'.format(key))
# Detach the sort_key from the primary key's membership list.
if sort_key is not None:
    self.cache[self.prefixed(primary_key)].remove(sort_key)
# Remove this key from any secondary index that references it.
for index in self._secondary_indexes:
    obj = json.loads(self.cache[key])
    if index in obj.keys():
        self.cache['secondary_indexes'][index][obj[index]].remove(
            key)
del(self.cache[key])
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_flask_metadata(app, version, repository, description, api_version="1.0", name=None, auth=None, route=None): """ Sets metadata on the application to be returned via metadata routes. Parameters app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. version: `str` Version of your microservice. repository: `str` URL of the repository containing your microservice's source code. description: `str` Description of the microservice. api_version: `str`, optional Version of the SQuaRE service API framework. Defaults to '1.0'. name : `str`, optional Microservice name. Defaults to the Flask app name. If set, changes the Flask app name to match. auth : `dict`, `str`, or `None` The 'auth' parameter must be None, the empty string, the string 'none', or a dict containing a 'type' key, which must be 'none', 'basic', or 'bitly-proxy'. If the type is not 'none', there must also be a 'data' key containing a dict which holds authentication information appropriate to the authentication type. The legal non-dict 'auth' values are equivalent to a 'type' key of 'none'. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. Raises ------ TypeError If arguments are not of the appropriate type. ValueError If arguments are the right type but have illegal values. Returns ------- Nothing, but sets `app` metadata and decorates it with `/metadata` and `/v{app_version}/metadata` routes. """
# The docstring doubles as the error message for every raise below.
errstr = set_flask_metadata.__doc__
if not isinstance(app, Flask):
    raise TypeError(errstr)
if name is None:
    name = app.name
app.config["NAME"] = name
# Renaming the Flask app keeps it consistent with the metadata.
if app.name != name:
    app.name = name
app.config["VERSION"] = version
app.config["REPOSITORY"] = repository
app.config["DESCRIPTION"] = description
app.config["API_VERSION"] = api_version
# NOTE(review): validation happens *after* app.config has already been
# populated above — a failed check leaves partial config behind.
if not (isinstance(name, str) and isinstance(description, str) and
        isinstance(repository, str) and isinstance(version, str) and
        isinstance(api_version, str)):
    raise TypeError(errstr)
if not (name and description and repository and version and api_version):
    raise ValueError(errstr)
# Normalize the "no auth" spellings (None, "", "none") to a dict.
if auth is None or (isinstance(auth, str) and ((auth == "none") or
                                               (auth == ""))):
    auth = {"type": "none", "data": None}
if not isinstance(auth, dict):
    raise TypeError(errstr)
if "type" not in auth:
    raise ValueError(errstr)
atp = auth["type"]
if atp == "none":
    app.config["AUTH"] = {"type": "none", "data": None}
else:
    # Non-trivial auth requires a recognized type and a data payload.
    if atp not in ["basic", "bitly-proxy"] or "data" not in auth:
        raise ValueError(errstr)
    app.config["AUTH"] = auth
add_metadata_route(app, route)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def raise_ise(text): """Turn a failed request response into a BackendError that represents an Internal Server Error. Handy for reflecting HTTP errors from farther back in the call chain as failures of your service. Parameters text: `str` Error text. Raises ------ :class:`apikit.BackendError` The `status_code` will be `500`, and the reason `Internal Server Error`. Its `content` will be the text you passed. """
if isinstance(text, Exception):
    # Just in case we are exuberantly passed the entire Exception and
    # not its textual representation.
    text = str(text)
raise BackendError(status_code=500,
                   reason="Internal Server Error",
                   content=text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def raise_from_response(resp): """Turn a failed request response into a BackendError. Handy for reflecting HTTP errors from farther back in the call chain. Parameters resp: :class:`requests.Response` Raises ------ :class:`apikit.BackendError` If `resp.status_code` is equal to or greater than 400. """
if resp.status_code < 400:
    # Request was successful. Or at least, not a failure.
    return
# Mirror the upstream failure as a BackendError.
raise BackendError(status_code=resp.status_code,
                   reason=resp.reason,
                   content=resp.text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_logger(file=None, syslog=False, loghost=None, level=None): """Creates a logging object compatible with Python standard logging, but which, as a `structlog` instance, emits JSON. Parameters file: `None` or `str` (default `None`) If given, send log output to file; otherwise, to `stdout`. syslog: `bool` (default `False`) If `True`, log to syslog. loghost: `None` or `str` (default `None`) If given, send syslog output to specified host, UDP port 514. level: `None` or `str` (default `None`) If given, and if one of (case-insensitive) `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`, log events of that level or higher. Defaults to `WARNING`. Returns ------- :class:`structlog.Logger` A logging object """
# Pick the handler: stdout (default), file, or syslog (optionally
# to a remote host on UDP 514).
if not syslog:
    if not file:
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = logging.FileHandler(file)
else:
    if loghost:
        handler = logging.handlers.SysLogHandler(loghost, 514)
    else:
        handler = logging.handlers.SysLogHandler()
root_logger = logging.getLogger()
if level:
    level = level.upper()
    lldict = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL
    }
    # Unknown level names are silently ignored (root default applies).
    if level in lldict:
        root_logger.setLevel(lldict[level])
root_logger.addHandler(handler)
# Route stdlib-style calls through structlog and render as JSON.
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.JSONRenderer()
    ],
    context_class=structlog.threadlocal.wrap_dict(dict),
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
log = structlog.get_logger()
return log
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getBody(self): """ Extract body json """
data = None
try:
    data = json.loads(self.request.body)
except:
    # Fallback: body may be URL-encoded JSON. NOTE(review): bare
    # except hides unrelated errors, and urllib.unquote_plus is the
    # Python 2 API (urllib.parse.unquote_plus on Python 3) — confirm
    # the target runtime.
    data = json.loads(urllib.unquote_plus(self.request.body))
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, obj): """ Print object on output """
accept = self.request.headers.get("Accept")
if "json" in accept:
    # Lazily build the shared Parser on first JSON response.
    if JsonDefaultHandler.__parser is None:
        JsonDefaultHandler.__parser = Parser()
    super(JsonDefaultHandler, self).write(
        JsonDefaultHandler.__parser.encode(obj))
    return
# If we are not in json mode, emit the object as-is.
super(JsonDefaultHandler, self).write(obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def raw_cookies(self):
    '''Parse the HTTP_COOKIE header into a SimpleCookie jar.

    Returns an empty jar when the request carries no cookie header.
    '''
    jar = SimpleCookie()
    header = self.environ.get('HTTP_COOKIE', '')
    if header:
        jar.load(header)
    return jar
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def cookies(self):
    '''Flatten the cookie jar into a plain {name: value} dict.'''
    jar = self.raw_cookies
    return {name: morsel.value for name, morsel in jar.items()}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_jpath(depth = 3): """ Generate random JPath with given node depth. """
chunks = [] while depth > 0: length = random.randint(5, 15) ident = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length)) if random.choice((True, False)): index = random.randint(0, 10) ident = "{:s}[{:d}]".format(ident, index) chunks.append(ident) depth -= 1 return ".".join(chunks)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def feed_fetch_force(request, id, redirect_to): """Forcibly fetch tweets for the feed"""
feed = Feed.objects.get(id=id) feed.fetch(force=True) msg = _("Fetched tweets for %s" % feed.name) messages.success(request, msg, fail_silently=True) return HttpResponseRedirect(redirect_to)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_fetch_force(request, id, redirect_to): """Forcibly fetch tweets for the search"""
search = Search.objects.get(id=id) search.fetch(force=True) msg = _("Fetched tweets for %s" % search.criteria) messages.success(request, msg, fail_silently=True) return HttpResponseRedirect(redirect_to)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __connect(): """ Connect to a redis instance. """
# Module-level singleton connection, selected by transport.
global redis_instance
if use_tcp_socket:
    redis_instance = redis.StrictRedis(host=hostname, port=port)
else:
    redis_instance = redis.StrictRedis(unix_socket_path=unix_socket)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log(level, message): """ Publish `message` with the `level` the redis `channel`. :param level: the level of the message :param message: the message you want to log """
# Lazily establish the redis connection on first use.
if redis_instance is None:
    __connect()
if level not in __error_levels:
    raise InvalidErrorLevel('You have used an invalid error level. \
Please choose in: ' + ', '.join(__error_levels))
if channel is None:
    raise NoChannelError('Please set a channel.')
# Messages are published on "<channel>.<level>".
c = '{channel}.{level}'.format(channel=channel, level=level)
redis_instance.publish(c, message)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_certs() -> str: """Find suitable certificates for ``httplib2``. Warning: The default behaviour is to fall back to the bundled certificates when no system certificates can be found. If you're packaging ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to disable this very much unwanted behaviour, but please maintain the option so that downstream users can inspect the configuration easily. See also: :pypi:`httplib2` Returns: Path to SSL certificates Raises: RuntimeError: When no suitable certificates are found """
bundle = path.realpath(path.dirname(httplib2.CA_CERTS)) # Some distros symlink the bundled path location to the system certs if not bundle.startswith(path.dirname(httplib2.__file__)): return bundle for platform, files in PLATFORM_FILES.items(): if sys.platform.startswith(platform): for cert_file in files: if path.exists(cert_file): return cert_file # An apparently common environment setting for macOS users to workaround # the lack of “standard” certs installation if path.exists(getenv('CURL_CA_BUNDLE', '')): return getenv('CURL_CA_BUNDLE') if ALLOW_FALLBACK: warnings.warn('No system certs detected, falling back to bundled', RuntimeWarning) return httplib2.CA_CERTS else: raise RuntimeError('No system certs detected!')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_absolute_url(self): """ If override_url was given, use that. Otherwise, if the content belongs to a blog, use a blog url. If not, use a regular article url. """
# Explicit override always wins.
if self.override_url:
    return self.override_url
# Blog entries get the blog URL scheme, everything else the article one.
if self.destination.is_blog:
    return reverse('blog_entry_detail',
                   args=[self.destination.slug, self.slug])
return reverse('article_detail', args=[self.slug])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ Store summary if none was given and created formatted version of body text. """
# Derive a summary from the body when the author supplied none.
if not self.summary:
    self.summary = truncatewords(self.body, 50)
# Keep the sanitized/rendered copy in sync with the raw body.
self.body_formatted = sanetize_text(self.body)
# Bug fix: forward *args/**kwargs (e.g. Django's using=, update_fields=)
# instead of silently dropping them.
super(Article, self).save(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_vads_payment_config(self): """ vads_payment_config can be set only after object saving. A custom payment config can be set once PaymentRequest saved (adding elements to the m2m relationship). As a consequence we set vads_payment_config just before sending data elements to payzen."""
# Combine the base payment config with any custom entries from the
# m2m relation (only available once the instance has a pk).
self.vads_payment_config = tools.get_vads_payment_config(
    self.payment_config, self.custom_payment_config.all())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ We set up vads_trans_id and theme according to payzen format. If fields values are explicitely set by user, we do not override their values. """
# Fill PayZen-formatted defaults only where the user has not set
# explicit values.
if not self.vads_trans_date:
    self.vads_trans_date = datetime.datetime.utcnow().replace(
        tzinfo=utc).strftime("%Y%m%d%H%M%S")
if not self.vads_trans_id:
    self.vads_trans_id = tools.get_vads_trans_id(
        self.vads_site_id, self.vads_trans_date)
if self.theme and not self.vads_theme_config:
    self.vads_theme_config = str(self.theme)
# First save obtains a pk so the m2m-backed payment config can be
# read; then compute config + signature and save again.
if not self.pk:
    super(PaymentRequest, self).save()
self.set_vads_payment_config()
self.set_signature()
super(PaymentRequest, self).save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def records(self):
    '''Return a list of dicts corresponding to the data returned by Factual.

    The result is fetched lazily on first access and memoized on the
    instance in ``self._records``.
    '''
    # Idiom fix: compare to None with `is`, not `==` (PEP 8); this also
    # guarantees an empty result list is cached rather than re-fetched.
    if self._records is None:
        self._records = self._get_records()
    return self._records
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GenerateConfigFile(load_hook, dump_hook, **kwargs) -> ConfigFile: """ Generates a ConfigFile object using the specified hooks. These hooks should be functions, and have one argument. When a hook is called, the ConfigFile object is passed to it. Use this to load your data from the fd object, or request, or whatever. This returns a ConfigFile object. """
def GenerateConfigFile(load_hook, dump_hook, **kwargs) -> ConfigFile:
    """Build a factory for ConfigFile objects bound to the given hooks.

    ``load_hook`` / ``dump_hook`` are one-argument callables; each receives
    the ConfigFile instance when invoked. Extra keyword arguments are
    forwarded to every ConfigFile the factory creates.
    """
    def ConfigFileGenerator(filename, safe_load: bool=True):
        # Bind the captured hooks and extra options into a fresh ConfigFile.
        return ConfigFile(fd=filename, load_hook=load_hook,
                          dump_hook=dump_hook, safe_load=safe_load, **kwargs)
    return ConfigFileGenerator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject: """ Generates a NetworkedConfigObject using the specified hooks. """
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook,
                                normal_class_dump_hook, **kwargs) -> NetworkedConfigObject:
    """Build a factory for NetworkedConfigObject instances using the hooks.

    NOTE(review): ``**kwargs`` is accepted but not forwarded to the
    constructed object — confirm whether that is intentional.
    """
    def NetworkedConfigObjectGenerator(url, safe_load: bool=True):
        return NetworkedConfigObject(
            url=url, load_hook=load_hook, safe_load=safe_load,
            normal_class_load_hook=normal_class_load_hook,
            normal_class_dump_hook=normal_class_dump_hook)
    return NetworkedConfigObjectGenerator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append(self, first, count): """ Add to the set a range of count consecutive ids starting at id first. """
def append(self, first, count):
    """Add a range of ``count`` consecutive ids starting at ``first``."""
    fresh_range = IdRange(first, count)
    self.__range.append(fresh_range)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(self, first, count): """ Remove a range of count consecutive ids starting at id first from all the ranges in the set. """
def remove(self, first, count):
    """Remove ``count`` consecutive ids starting at ``first`` from the set.

    Each stored range overlapping [first, first+count-1] is trimmed, possibly
    splitting it into a left and/or right remainder; disjoint ranges are kept.
    """
    # Nothing to do for empty or negative requests.
    if first < 0 or count < 1:
        return
    last = first + count - 1
    kept = []
    for rng in self.__range:
        if first <= rng.last and rng.first <= last:
            # Overlap: keep the pieces outside [first, last], if any.
            if rng.first < first:
                kept.append(IdRange(rng.first, first - rng.first))
            if last < rng.last:
                kept.append(IdRange(last + 1, rng.last - last))
        else:
            # Disjoint from the removed span: keep untouched.
            kept.append(rng)
    self.__range = kept
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def simplify(self): """ Reorganize the ranges in the set in order to ensure that each range is unique and that there is not overlap between to ranges. """
def simplify(self):
    """Merge the stored ranges so each id appears exactly once.

    Sorts the ranges, then sweeps left-to-right, extending the current merged
    range over overlaps and emitting it whenever a disjoint range begins.
    """
    self.__range.sort()
    merged = []
    cur_first = self.__range[0].first
    cur_count = self.__range[0].count
    for rng in self.__range:
        if rng.first == cur_first:
            # Same start: sorted order makes this the longest so far.
            cur_count = rng.count
        elif rng.first <= cur_first + cur_count:
            # Overlapping/adjacent: extend the current range if needed.
            if cur_first + cur_count - 1 < rng.last:
                cur_count = rng.last - cur_first + 1
        else:
            # Disjoint: flush the current range and start a new one.
            merged.append(IdRange(cur_first, cur_count))
            cur_first = rng.first
            cur_count = rng.count
    # Flush the final range.
    merged.append(IdRange(cur_first, cur_count))
    self.__range = merged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_fix_my_django_submission_url(self, tb_info, sanitized_tb): """ Links to the error submission url with pre filled fields """
def _get_fix_my_django_submission_url(self, tb_info, sanitized_tb):
    """Build the error-submission URL with pre-filled query-string fields."""
    create_path = '/create/'
    target = '{0}{1}'.format(base_url, create_path)
    parsed = tb_info['parsed_traceback']
    query = urlencode({
        'exception_type': clean_exception_type(parsed['exc_type']),
        'error_message': parsed['exc_msg'],
        'django_version': '{0[0]}.{0[1]}'.format(django.VERSION),
        'traceback': sanitized_tb,
    })
    return '{url}?{query}'.format(url=target, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
# Python 2 code (note the `except (...), err` syntax and ConfigParser module
# name) — left byte-identical. Reads and parses an INI config when the file
# exists; returns the parser, or None when the path is absent. Any read/parse
# failure is re-raised as ConfigError. NOTE(review): `inp` is never closed.
def _get_config(config_file): '''Find, read and parse the configuration file.''' parser = ConfigParser.SafeConfigParser() if os.path.lexists(config_file): try: log.info('Reading config: %s', config_file) inp = open(config_file) parser.readfp(inp) return parser except (IOError, ConfigParser.ParsingError), err: raise ConfigError("Failed to read configuration %s\n%s" % (config_file, err)) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getbool(key, default=False): """ Returns True or False for any TRUE, FALSE, 0, or 1. Other values return default. """
def getbool(key, default=False):
    """Read a boolean from the environment.

    'TRUE'/'1' map to True and 'FALSE'/'0' to False (case-insensitive);
    any other value — including an unset variable — yields ``default``.
    """
    raw = os.getenv(key)
    if not raw:
        return default
    lowered = raw.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    return default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
# Locate the .py file behind DJANGO_SETTINGS_MODULE (argument overrides the
# environment variable). Uses the deprecated `imp` module (removed in Python
# 3.12) to find the package root, then joins the dotted path into a file path.
# Returns the path when the file exists; implicitly returns None otherwise.
def _locate_settings(settings=''): "Return the path to the DJANGO_SETTINGS_MODULE" import imp import sys sys.path.append(os.getcwd()) settings = settings or os.getenv('DJANGO_SETTINGS_MODULE') if settings: parts = settings.split('.') f = imp.find_module(parts[0])[1] args = [f] + parts[1:] path = os.path.join(*args) path = path + '.py' if os.path.exists(path): return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_from_relations(self, query, aliases):
    '''Return the aliased names of every non-negated relation in the query.'''
    names = []
    for relation in query.get_relations():
        if relation.is_negated():
            continue
        names.append(aliases[relation.get_name()])
    return names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def photo(self, args): """ Retrieves metadata for a specific photo. flickr:(credsfile),photo,(photo_id) """
def photo(self, args):
    """Retrieve metadata for a specific photo.

    Usage: flickr:(credsfile),photo,(photo_id). Yields a single prepared
    photo record.
    """
    raw = self.flickr.photos_getInfo(photo_id=args[0])
    parsed = self._load_rsp(raw)
    yield self._prep(parsed['photo'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interesting(self, args=None): """ Gets interesting photos. flickr:(credsfile),interesting """
def interesting(self, args=None):
    """Get interesting photos. Usage: flickr:(credsfile),interesting.

    ``args`` optionally lists the 'extras' fields to request; a sensible
    default set is used otherwise.
    """
    if args:
        extras = ','.join(args)
    else:
        extras = 'last_update,geo,owner_name,url_sq'
    return self._paged_api_call(self.flickr.interestingness_getList,
                                {'extras': extras})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _paged_api_call(self, func, kwargs, item_type='photo'): """ Takes a Flickr API function object and dict of keyword args and calls the API call repeatedly with an incrementing page value until all contents are exhausted. Flickr seems to limit to about 500 items. """
def _paged_api_call(self, func, kwargs, item_type='photo'):
    """Yield all items from a paged Flickr API call.

    Calls ``func`` with an incrementing ``page`` keyword until the API
    reports a page number lower than the one requested (end of data).
    Flickr caps each page at roughly 500 items. On a non-ok response the
    raw response is yielded (wrapped in a list) and iteration stops.
    """
    pageno = 1
    while True:
        LOG.info("Fetching page %s" % pageno)
        kwargs['page'] = pageno
        rsp = self._load_rsp(func(**kwargs))
        if rsp["stat"] != "ok":
            # Error response: surface it to the caller and stop.
            yield [rsp]
            break
        plural = item_type + 's'
        if plural in rsp:
            items = rsp[plural]
            if int(items["page"]) < pageno:
                # Flickr echoes a smaller page number once we run past the end.
                LOG.info("End of Flickr pages (%s pages with %s per page)"
                         % (items["pages"], items["perpage"]))
                break
            for entry in items[item_type]:
                yield self._prep(entry)
        else:
            yield rsp
        pageno += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _prep(e): """ Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes. """
if 'lastupdate' in e: e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate'])) for k in ['farm', 'server', 'id', 'secret']: if not k in e: return e e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"]) return e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_rsp(rsp): """ Converts raw Flickr string response to Python dict """
first = rsp.find('(') + 1 last = rsp.rfind(')') return json.loads(rsp[first:last])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def styleProperties(Class, style): """return a properties dict from a given cssutils style """
def styleProperties(Class, style):
    """Return a properties dict from a given cssutils style.

    Keys are 'name:' strings. NOTE(review): when a property carries a
    priority, its entry is *overwritten* with ' !<priority>' rather than
    appended — confirm against callers whether that is intended.
    """
    properties = Dict()
    for prop in style.getProperties(all=True):
        key = prop.name + ':'
        properties[key] = prop.value
        if prop.priority != '':
            properties[key] = ' !' + prop.priority
    return properties
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None): """parse CSS text into a Styles object, using cssutils """
def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None):
    """Parse CSS text into a Styles object, using cssutils.

    Dispatches on the cssutils rule type: list-valued entries for
    @font-face/@import/@media/@page, a dict for @namespace, per-selector
    property dicts for style rules, and a scalar for @charset. Comments
    and variables rules are ignored; unknown rule types are logged.
    """
    styles = Class()
    sheet = cssutils.parseString(csstext, encoding=encoding, href=href,
                                 media=media, title=title, validate=validate)
    Rule = cssutils.css.CSSRule
    for rule in sheet.cssRules:
        if rule.type == Rule.FONT_FACE_RULE:
            if styles.get('@font-face') is None:
                styles['@font-face'] = []
            styles['@font-face'].append(Class.styleProperties(rule.style))
        elif rule.type == Rule.IMPORT_RULE:
            if styles.get('@import') is None:
                styles['@import'] = []
            styles['@import'].append("url(%s)" % rule.href)
        elif rule.type == Rule.NAMESPACE_RULE:
            if styles.get('@namespace') is None:
                styles['@namespace'] = {}
            styles['@namespace'][rule.prefix] = rule.namespaceURI
        elif rule.type == Rule.MEDIA_RULE:
            if styles.get('@media') is None:
                styles['@media'] = []
            styles['@media'].append(rule.cssText)
        elif rule.type == Rule.PAGE_RULE:
            if styles.get('@page') is None:
                styles['@page'] = []
            styles['@page'].append(rule.cssText)
        elif rule.type == Rule.STYLE_RULE:
            for selector in rule.selectorList:
                sel = selector.selectorText
                # First declaration of a selector wins.
                if sel not in styles:
                    styles[sel] = Class.styleProperties(rule.style)
        elif rule.type == Rule.CHARSET_RULE:
            styles['@charset'] = rule.encoding
        elif rule.type == Rule.COMMENT:
            # Comments are thrown away.
            pass
        elif rule.type == Rule.VARIABLES_RULE:
            pass
        else:
            log.warning("Unknown rule type: %r" % rule.cssText)
    return styles
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type): """Fetches an OAuth 2 token."""
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    """Fetch an OAuth 2 token and return the decoded JSON response."""
    payload = {
        'code': code,
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': grant_type,
    }
    # NOTE(review): verify=False disables TLS certificate validation while
    # transmitting the client secret — confirm this is intentional.
    response = requests.post(token_service, payload, verify=False)
    return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_auth_uri(auth_service, client_id, scope, redirect_uri, response_type, state, access_type, approval_prompt): """Generates an authorization uri."""
def get_auth_uri(auth_service, client_id, scope, redirect_uri, response_type,
                 state, access_type, approval_prompt):
    """Generate an OAuth 2 authorization URI.

    All parameters are validated first; every problem found is collected so
    a single ValueError reports them all at once.
    """
    problems = []
    if response_type not in VALID_RESPONSE_TYPES:
        problems.append(
            '{0} is not a valid response_type, must be {1}.'.format(
                response_type, VALID_RESPONSE_TYPES))
    if not client_id:
        problems.append('client_id is missing or empty.')
    if not redirect_uri:
        problems.append('redirect_uri is missing or empty.')
    if not scope:
        problems.append('scope is missing or empty.')
    if access_type not in VALID_ACCESS_TYPES:
        problems.append('access_type is invalid.')
    if approval_prompt not in VALID_APPROVAL_PROMPTS:
        problems.append('approval_prompt is invalid')
    if problems:
        raise ValueError('Invalid parameters: {0}'.format('\n'.join(problems)))
    params = {
        'response_type': response_type,
        'client_id': client_id,
        'redirect_uri': redirect_uri,
        'scope': scope,
        'access_type': access_type,
        'approval_prompt': approval_prompt,
        'state': state,
    }
    # NOTE(review): urllib.urlencode is the Python 2 location of urlencode.
    return '?'.join([auth_service, urllib.urlencode(params)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh_token(token_service, refresh_token, client_id, client_secret): """Refreshes a token."""
# Python 2 code (bare `print` statement) — left byte-identical. POSTs a
# refresh_token grant to the token service and returns the decoded JSON.
# NOTE(review): resp.json() is called twice (once for the debug print);
# with `requests` the body is cached so this is redundant, not a re-fetch.
data = { 'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': 'refresh_token', } resp = requests.post(token_service, data) print resp, 'refreshing', resp.json() return resp.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_local(client): """Starts a local web server and wait for a redirect."""
def run_local(client):
    """Open the auth URI in a browser, wait for the redirect, return the token."""
    webbrowser.open(client.get_auth_uri())
    auth_code = wait_for_redirect()
    return client.get_token(auth_code)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(argv): """Entry point for command line script to perform OAuth 2.0."""
# Python 2 code (bare `print args.scope`) — left byte-identical. CLI entry
# point: parses OAuth client options, validates that client credentials are
# provided all-or-nothing, requires a scope, then runs the local OAuth flow
# and prints the access token. Returns a process exit code (0/1).
# NOTE(review): `client_args` lists args.client_id twice and never checks
# args.redirect_uri — looks like a copy/paste bug in the all-or-nothing check.
p = argparse.ArgumentParser() p.add_argument('-s', '--scope', nargs='+') p.add_argument('-o', '--oauth-service', default='google') p.add_argument('-i', '--client-id') p.add_argument('-x', '--client-secret') p.add_argument('-r', '--redirect-uri') p.add_argument('-f', '--client-secrets') args = p.parse_args(argv) client_args = (args.client_id, args.client_secret, args.client_id) if any(client_args) and not all(client_args): print('Must provide none of client-id, client-secret and redirect-uri;' ' or all of them.') p.print_usage() return 1 print args.scope if not args.scope: print('Scope must be provided.') p.print_usage() return 1 config = WizardClientConfig() config.scope = ' '.join(args.scope) print(run_local(UserOAuth2(config))['access_token']) return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_and_create_directories(paths): """ Check and create directories. If a directory already exists, it will be removed and a new folder created. :type paths: Array of string or string :param paths: the location of directory """
def check_and_create_directories(paths):
    """Check and (re)create directories.

    If a directory exists it is removed first, so each path ends up as a
    fresh, empty directory.

    :type paths: list of str or str
    :param paths: the directory location(s)
    """
    # Bug fix: the documented contract allows a single string, but iterating
    # a bare str walks its characters — wrap it into a one-element list.
    if isinstance(paths, str):
        paths = [paths]
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_directories(paths): """ Delete directories. If a directory exists, it will be deleted along with the files it contains. :type paths: Array of string or string :param paths: the location of directory """
def delete_directories(paths):
    """Delete directories (including their contents) when they exist.

    :type paths: list of str or str
    :param paths: the directory location(s)
    """
    # Bug fix: honor the documented "string or list" contract — a bare str
    # would otherwise be iterated character by character.
    if isinstance(paths, str):
        paths = [paths]
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_files(paths): """ Delete files. If a file exists, it will be deleted. :type paths: Array of string or string :param paths: the location of file """
def delete_files(paths):
    """Delete files when they exist; missing paths are silently skipped.

    :type paths: list of str or str
    :param paths: the file location(s)
    """
    # Bug fix: honor the documented "string or list" contract — a bare str
    # would otherwise be iterated character by character.
    if isinstance(paths, str):
        paths = [paths]
    for path in paths:
        if os.path.exists(path):
            os.remove(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def truncate_table(self, tablename): """ Use 'TRUNCATE TABLE' to truncate the given table """
def truncate_table(self, tablename):
    """Empty ``tablename`` with TRUNCATE TABLE and commit.

    NOTE(review): the table name is interpolated straight into the SQL
    (identifiers cannot be bound parameters) — callers must never pass
    untrusted input here.
    """
    statement = 'TRUNCATE TABLE %s' % tablename
    self.cursor.execute(statement)
    self.db.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_schema(self, schema_name): """ Create schema. This method only implemented for this class """
def create_schema(self, schema_name):
    """Create a database schema (only implemented for this class).

    Commits in all cases — including after a failed CREATE — preserving the
    original behavior. NOTE(review): schema_name is interpolated straight
    into the SQL; never pass untrusted input.
    """
    try:
        self.cursor.execute('CREATE SCHEMA %s' % schema_name)
    except Exception:
        # Bug fix: bare `raise` re-raises with the original traceback intact,
        # unlike the previous `raise e` which truncated it.
        raise
    finally:
        self.db.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datatype(dbtype, description, cursor): """Google AppEngine Helper to convert a data type into a string."""
def datatype(dbtype, description, cursor):
    """Google AppEngine helper: render a DB column type as a string.

    Introspection may return either a plain string or a tuple whose first
    element is the type name.
    """
    field_type = cursor.db.introspection.get_field_type(dbtype, description)
    # Exact-type check kept on purpose: tuple subclasses pass through as-is.
    return field_type[0] if type(field_type) is tuple else field_type
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_filter_class(self, view, queryset=None): """ Return the django-filters `FilterSet` used to filter the queryset. """
def get_filter_class(self, view, queryset=None):
    """Return the django-filters ``FilterSet`` declared on the view, if any.

    Asserts (debug builds only) that the FilterSet's model matches the
    queryset's document class before handing the class back.
    """
    filter_class = getattr(view, 'filter_class', None)
    if not filter_class:
        return None
    filter_model = filter_class.Meta.model
    assert issubclass(queryset._document, filter_model), \
        'FilterSet model %s does not match queryset model %s' % \
        (filter_model, queryset.model)
    return filter_class
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Will tag the currently active git commit id with the next release tag id"""
# Tag the current HEAD with the next release tag (collapsed one-liner).
# If the tag already exists: error when it points at a different sha
# (version must be bumped), otherwise just log and skip. Only tags/pushes
# when not a dry run. NOTE(review): whether the push is nested under the
# dry_run guard cannot be confirmed from this collapsed form.
sha = VersionUtils.run_git_command(["rev-parse", "HEAD"], self.git_dir) tag = self.distribution.get_version() if self.has_tag(tag, sha): tags_sha = VersionUtils.run_git_command(["rev-parse", tag], self.git_dir) if sha != tags_sha: logger.error( "git tag {0} sha does not match the sha requesting to be tagged, you need to increment the version number, Skipped Tagging!".format( tag ) ) return else: logger.info( "git tag {0} already exists for this repo, Skipped Tagging!".format( tag ) ) return logger.info("Adding tag {0} for commit {1}".format(tag, sha)) if not self.dry_run: VersionUtils.run_git_command( ["tag", "-m", '""', tag, sha], self.git_dir, throw_on_error=True ) logger.info("Pushing tag {0} to remote {1}".format(tag, self.remote)) VersionUtils.run_git_command( ["push", self.remote, tag], self.git_dir, throw_on_error=True )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extendMarkdown(self, md, md_globals=None): """Initializes markdown extension components."""
def extendMarkdown(self, md, md_globals=None):
    """Initialize markdown extension components.

    Verifies every required upstream extension is already registered, then
    registers the attr_cols tree processor at priority 5.
    """
    for internal_name in self.REQUIRED_EXTENSION_INTERNAL_NAMES:
        if internal_name not in md.treeprocessors:
            # Error message reproduced verbatim from the original.
            raise RuntimeError(
                "The attr_cols markdown extension depends the following"
                " extensions which must preceded it in the extension"
                " list: %s" % ", ".join(self.REQUIRED_EXTENSIONS))
    processor = AttrColTreeProcessor(md, self.conf)
    md.treeprocessors.register(processor, 'attr_cols', 5)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_file(paths): """read config from path or list of paths :param str|list[str] paths: path or list of paths :return dict: loaded and merged config """
def read_file(paths):
    """Read config from a path or list of paths and merge them in order.

    :param str|list[str] paths: path or list of paths
    :return dict: loaded and merged config
    """
    if isinstance(paths, str):
        paths = [paths]
    merged = {}  # renamed from `re`, which shadowed the stdlib module
    for path in paths:
        # Bug fix: the original `yaml.load(open(path))` leaked the file
        # handle; a context manager guarantees it is closed.
        with open(path) as fp:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary object constructors — consider yaml.safe_load if the
            # configs never rely on full-loader tags.
            cfg = yaml.load(fp)
        merge(merged, cfg)
    return merged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_from_diybio_org(): """Scrapes data from diybio.org."""
def data_from_diybio_org():
    """Scrape the DIY-bio labs listing from diybio.org.

    Returns a BeautifulSoup document on success, or an error message string
    on any non-200 response.
    """
    resp = requests.get(diy_bio_labs_url)
    if resp.status_code != 200:
        return "There was an error while accessing data on diybio.org."
    # Strip non-breaking spaces that break the html source before parsing.
    cleaned = resp.text.replace(u'\xa0', u'')
    return BeautifulSoup(cleaned, "lxml")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(target): """Opens the target file or URL in the default application. **Attribution**: Written by user4815162342 and originally posted on `Stack Overflow <http://stackoverflow.com/a/17317468>`_. **Examples**: :: auxly.open("myfile.txt") auxly.open("https://www.github.com/") """
def open(target):
    """Open the target file or URL in the platform's default application.

    Uses ``os.startfile`` on Windows, ``open`` on macOS and ``xdg-open``
    elsewhere. (Intentionally shadows the builtin — this is ``auxly.open``.)

    **Examples**: ``auxly.open("myfile.txt")``, ``auxly.open("https://www.github.com/")``
    """
    if sys.platform == "win32":
        os.startfile(target)
        return
    launcher = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.call([launcher, target])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verbose(enabled): """Returns normal print function if enable, otherwise a dummy print function is returned which will suppress output."""
def verbose(enabled):
    """Return ``print``-like output when enabled, else a silent no-op.

    Both returned callables share the signature ``(msg, **kwargs)`` so
    callers can swap them freely.
    """
    if enabled:
        def _emit(msg, **kwargs):
            print(msg, **kwargs)
        return _emit

    def _silent(msg, **kwargs):
        pass
    return _silent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def callstop(*args, **kwargs): """Limits the number of times a function can be called. Can be used as a function decorator or as a function that accepts another function. If used as a function, it returns a new function that will be call limited. **Params**: - func (func) - Function to call. Only available when used as a function. **Examples**: :: call = callstop(myfunc, limit=3) call(myarg1, myarg2) """
def callstop(*args, **kwargs):
    """Limit how many times a function may be called.

    Usable as a plain decorator (``@callstop``), or as a factory:
    ``call = callstop(myfunc, limit=3)``. Once the limit is reached the
    wrapper returns None. The running count lives on ``wrapper.calls``.
    """
    limit = kwargs.get('limit', 1)

    def decor(func):
        def wrapper(*call_args, **call_kwargs):
            if wrapper.calls < limit:
                wrapper.calls += 1
                return func(*call_args, **call_kwargs)
        wrapper.calls = 0
        return wrapper

    # Direct-call form: first positional argument is the function itself.
    if args and callable(args[0]):
        return decor(args[0])
    return decor
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_instance(instance): """ wait for instance status to be 'running' in which case return True, False otherwise """
# Poll an EC2 instance until its status resolves (collapsed one-liner).
# Phase 1: retry instance.update() (swallowing EC2ResponseError) until a
# status appears; phase 2: sleep-poll while "pending"; returns True only when
# the final status is "running". NOTE(review): the placement of the
# "waiting for instance" print relative to the try/except cannot be
# confirmed from this collapsed form.
status = None print("getting status for instance {} ...".format(instance.id)) while status is None: try: status = instance.update() if status is None: time.sleep(2) except EC2ResponseError: time.sleep(2) print("waiting for instance {} ...".format(instance.id)) while status == "pending": time.sleep(2) status = instance.update() if status != "running": print("Invalid status when starting instance {}: {}".format(instance.id, status)) return False print("New instance {} started: {}".format(instance.id, instance.ip_address)) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def validate(datapackage, schema='base'):
    '''Validate a Data Package descriptor against a JSON Schema.

    Args:
        datapackage (str or dict): descriptor as a dict or JSON string.
        schema (str or dict): registry ID, local path, URL, schema JSON
            string, or the JSON Schema itself as a dict.

    Returns:
        None

    Raises:
        DataPackageValidateException: carries all validation errors in its
            ``.errors`` attribute.
    '''
    found = []
    dp = None
    schema_instance = None

    # Normalize the datapackage argument into a dict, recording any problem.
    if isinstance(datapackage, six.string_types):
        try:
            dp = json.loads(datapackage)
        except ValueError as e:
            found.append(DataPackageValidateException(e))
    elif isinstance(datapackage, dict):
        dp = datapackage
    else:
        msg = 'Data Package must be a dict or JSON string, but was a \'{0}\''
        found.append(
            DataPackageValidateException(msg.format(type(datapackage).__name__)))

    # Resolve the schema argument (JSON string is decoded opportunistically).
    try:
        if isinstance(schema, six.string_types):
            try:
                schema = json.loads(schema)
            except ValueError:
                pass
        schema_instance = Schema(schema)
    except (SchemaError, RegistryError) as e:
        found.append(e)

    # Only run the actual validation when both sides resolved cleanly.
    if dp is not None and schema_instance is not None:
        try:
            schema_instance.validate(dp)
        except ValidationError as e:
            found.append(e)

    if found:
        exception = DataPackageValidateException()
        exception.errors = found
        raise exception
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_request( ins, method, url, stripe_account=None, params=None, headers=None, **kwargs ): """ Return a deferred or handle error. For overriding in various classes. """
# Twisted inlineCallbacks-style generator (decorator outside this chunk) —
# left byte-identical because yield/returnValue ordering is behavior-critical.
# Flow: require an API key; build the absolute URL and Stripe auth headers
# (plus optional Stripe-Account / Stripe-Version); encode params as form data
# for POST, query params for GET/DELETE, reject other verbs; issue the
# request via treq; on HTTP >= 400 delegate to util.handle_api_error and
# bail; otherwise decode JSON and return a converted stripe object.
if txstripe.api_key is None: raise error.AuthenticationError( 'No API key provided. (HINT: set your API key using ' '"stripe.api_key = <API-KEY>"). You can generate API keys ' 'from the Stripe web interface. See https://stripe.com/api ' 'for details, or email support@stripe.com if you have any ' 'questions.') abs_url = '{}{}'.format(txstripe.api_base, url) ua = { 'lang': 'python', 'publisher': 'lextoumbourou', 'httplib': 'Twisted', } headers = headers or {} headers.update({ 'X-Stripe-Client-User-Agent': util.json.dumps(ua), 'User-Agent': 'txstripe', 'Authorization': 'Bearer %s' % (txstripe.api_key,) }) if stripe_account: headers['Stripe-Account'] = stripe_account if txstripe.api_version is not None: headers['Stripe-Version'] = txstripe.api_version if method == 'get' or method == 'delete': data = None elif method == 'post': data = {k: v for (k, v) in _api_encode(params)} params = None else: raise error.APIConnectionError( 'Unrecognized HTTP method %r. This may indicate a bug in the ' 'Stripe bindings.' % (method,)) resp = yield treq.request( method, abs_url, params=params, data=data, headers=headers, **kwargs) if resp.code >= 400: yield util.handle_api_error(resp) return body = yield resp.json() defer.returnValue( convert_to_stripe_object( body, txstripe.api_key, stripe_account))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_logger(self, args={}): """ Create and configure the program's logger object. Log levels: DEBUG - Log everything. Hidden unless --debug is used. INFO - information only ERROR - Critical error :param args: Object containing program's parsed command line arguments :return: None """
def create_logger(self, args={}):
    """Create and configure the program's logger object.

    Log levels: DEBUG (everything, only with --debug), INFO, ERROR.
    Always logs to stdout; additionally logs to a file when --log is given.
    The configured logger is stored on ``self.logger``.

    :param args: the program's parsed command-line arguments (docopt-style dict)
    """
    logger = logging.getLogger("SmartFileSorter")
    logger.level = logging.INFO
    if args.get('--debug') is True:
        logger.setLevel(logging.DEBUG)

    file_formatter = logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M:%S')
    console_formatter = logging.Formatter('%(message)s')

    # Always emit to stdout.
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setFormatter(console_formatter)
    logger.addHandler(console_handler)

    # Optionally also emit to a log file.
    log_path = args.get('--log')
    if log_path is not None:
        logfile = open(log_path, 'w')
        file_handler = logging.StreamHandler(stream=logfile)
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)

    if args.get('--dry-run') is True:
        logger.info('Running with --dry-run parameter. Actions will not be performed.')

    self.logger = logger
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_plugins(self, plugin_path): """ Loads plugins from modules in plugin_path. Looks for the config_name property in each object that's found. If so, adds that to the dictionary with the config_name as the key. config_name should be unique between different plugins. :param plugin_path: Path to load plugins from :return: dictionary of plugins by config_name """
# Dynamic plugin loader (collapsed one-liner) — left byte-identical because
# the nesting of the inner loop/conditionals and the inline comments cannot
# be reconstructed with certainty. Scans plugin_path for .py/.pyc modules,
# imports each, and registers every member exposing a non-None `config_name`
# (base classes set it to None and are skipped). Import failures are logged
# and skipped. Returns {config_name: plugin_class}. NOTE(review): duplicate
# config_names silently overwrite earlier entries (see the inline Todo).
self.logger.debug('Loading plugins from {0}'.format(plugin_path)) plugins = {} plugin_dir = os.path.realpath(plugin_path) sys.path.append(plugin_dir) for f in os.listdir(plugin_dir): if f.endswith(".py"): name = f[:-3] elif f.endswith(".pyc"): name = f[:-4] # Possible support for plugins inside directories - worth doing? # elif os.path.isdir(os.path.join(plugin_dir, f)): # name = f else: continue try: self.logger.debug('Adding plugin from: {0}'.format(f)) mod = __import__(name, globals(), locals(), [], 0) for plugin_class in inspect.getmembers(mod): if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc continue if hasattr(plugin_class[1], 'config_name'): if plugin_class[1].config_name is not None: # Skip plugins where config_name is None, like the base classes plugins[plugin_class[1].config_name] = plugin_class[1] self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name)) # Todo: Add error checking here. If a plugin with that name already exists, # log an error. Quit or continue? except ImportError as e: self.logger.error(e) pass # problem importing self.logger.debug('Done loading plugins') return plugins
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deserialize_request(self, req): """ Uses the deserializers declared on the action method and its extensions to deserialize the request. Returns the result of the deserialization. Raises `webob.HTTPUnsupportedMediaType` if the media type of the request is unsupported. """
def deserialize_request(self, req):
    """Deserialize the request body via the action method's deserializers.

    Returns the deserialized value, or None for an empty body. Raises
    ``webob.exc.HTTPUnsupportedMediaType`` when no deserializer handles the
    request's content type. Extension deserializers are attached when the
    primary one supports it.
    """
    # An empty body has nothing to deserialize.
    if req.content_length == 0:
        return None

    try:
        primary = self.method.deserializers(req.content_type)
    except KeyError:
        raise webob.exc.HTTPUnsupportedMediaType()

    # Attach extension deserializers where supported; extensions lacking a
    # deserializer for this content type are simply skipped.
    if hasattr(primary, 'attach'):
        for ext in self.extensions:
            try:
                primary.attach(ext.deserializers(req.content_type))
            except KeyError:
                pass

    # A deserializer is simply a callable.
    return primary(req.body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serializer(self, req): """ Selects and returns the serializer to use, based on the serializers declared on the action method and its extensions. The returned content type is selected based on the types available and the best match generated from the HTTP `Accept` header. Raises `HTTPNotAcceptable` if the request cannot be serialized to an acceptable media type. Returns a tuple of the content type and the serializer. """
def serializer(self, req):
    """Select the serializer matching the request's Accept header.

    Returns a ``(content_type, serializer)`` tuple. Raises
    ``webob.exc.HTTPNotAcceptable`` when no declared serializer produces an
    acceptable media type. Extension serializers are attached in reverse
    extension order when the primary serializer supports it.
    """
    available = self.method.serializers.get_types()
    content_type = req.accept.best_match(available)
    if content_type is None:
        raise webob.exc.HTTPNotAcceptable()

    try:
        chosen = self.method.serializers(content_type)
    except KeyError:
        raise webob.exc.HTTPNotAcceptable()

    if hasattr(chosen, 'attach'):
        for ext in reversed(self.extensions):
            try:
                chosen.attach(ext.serializers(content_type))
            except KeyError:
                pass

    return content_type, chosen
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wrap(self, req, result): """ Wrap method return results. The return value of the action method and of the action extensions is passed through this method before being returned to the caller. Instances of `webob.Response` are thrown, to abort the rest of action and extension processing; otherwise, objects which are not instances of ResponseObject will be wrapped in one. """
def wrap(self, req, result):
    """Wrap an action/extension return value into a ResponseObject.

    webob HTTP exceptions are re-raised to propagate upstream; plain webob
    Responses abort further processing via AppathyResponse; existing
    ResponseObjects are bound to this descriptor; anything else is wrapped
    in a fresh, bound ResponseObject.
    """
    if isinstance(result, webob.exc.HTTPException):
        # webob HTTP exception: bail out immediately and pass it upstream.
        raise result
    if isinstance(result, webob.Response):
        # Straight-up webob Response: abort via AppathyResponse.
        raise exceptions.AppathyResponse(result)
    if isinstance(result, response.ResponseObject):
        # Already a ResponseObject: bind it to this descriptor.
        result._bind(self)
        return result
    # Anything else: wrap in a new, bound ResponseObject.
    return self.resp_type(req, result, _descriptor=self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def active(self): """Returns all outlets that are currently active and have sales."""
def active(self):
    """Return all outlets that are currently active and have sales.

    Active means: start_date unset or not after today, AND end_date unset
    or not before today.
    """
    queryset = self.get_queryset()
    started = (models.Q(start_date__isnull=True) |
               models.Q(start_date__lte=now().date()))
    not_ended = (models.Q(end_date__isnull=True) |
                 models.Q(end_date__gte=now().date()))
    return queryset.filter(models.Q(started) & models.Q(not_ended)).distinct()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def future(self): """Returns all outlets that are or will be active."""
def future(self):
    """Return all outlets that are or will be active (end date unset or today+)."""
    not_ended = (models.Q(end_date__isnull=True) |
                 models.Q(end_date__gte=now().date()))
    return self.get_queryset().filter(not_ended)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ensure_chambers(): """ Ensures chambers are created """
def ensure_chambers():
    """Ensure the French AN and SEN chambers exist in the database."""
    france = Country.objects.get(name="France")
    for variant_key in ('AN', 'SEN'):
        variant = FranceDataVariants[variant_key]
        Chamber.objects.get_or_create(
            name=variant['chamber'],
            abbreviation=variant['abbreviation'],
            country=france)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def touch_model(self, model, **data):
    """
    Get or create an instance of ``model`` matching ``data``.

    If the instance already existed and was last saved before this
    import run began, re-save it so its ``updated`` timestamp is
    refreshed. Returns the ``(instance, created)`` pair from
    ``get_or_create``.
    """
    instance, created = model.objects.get_or_create(**data)
    # Pre-existing rows untouched since the import started get a no-op
    # save, which bumps the auto-updated ``updated`` field.
    needs_touch = not created and instance.updated < self.import_start_datetime
    if needs_touch:
        instance.save()
    return (instance, created)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_mandates(self, representative, rep_json): ''' Create mandates from rep data based on variant configuration ''' # Mandate in country group for party constituency if rep_json.get('parti_ratt_financier'): constituency, _ = Constituency.objects.get_or_create( name=rep_json.get('parti_ratt_financier'), country=self.france) group, _ = self.touch_model(model=Group, abbreviation=self.france.code, kind='country', name=self.france.name) _create_mandate(representative, group, constituency, 'membre') # Configurable mandates for mdef in self.variant['mandates']: if mdef.get('chamber', False): chamber = self.chamber else: chamber = None if 'from' in mdef: elems = mdef['from'](rep_json) else: elems = [rep_json] for elem in elems: name = _get_mdef_item(mdef, 'name', elem, '') abbr = _get_mdef_item(mdef, 'abbr', elem, '') group, _ = self.touch_model(model=Group, abbreviation=abbr, kind=mdef['kind'], chamber=chamber, name=name) role = _get_mdef_item(mdef, 'role', elem, 'membre') start = _get_mdef_item(mdef, 'start', elem, None) if start is not None: start = _parse_date(start) end = _get_mdef_item(mdef, 'end', elem, None) if end is not None: end = _parse_date(end) _create_mandate(representative, group, self.ch_constituency, role, start, end) logger.debug( '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'], mdef['kind'], role, name, abbr, start, end))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def env(var_name, default=False): """ Get the environment variable. If not found use a default or False, but print to stderr a warning about the missing env variable."""
try: value = os.environ[var_name] if str(value).strip().lower() in ['false', 'f', 'no', 'off' '0', 'none', 'null', '', ]: return None return value except: from traceback import format_exc msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default) sys.stderr.write(format_exc()) sys.stderr.write(msg) return default