text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_to_file(self, filename: str) -> ConfigFile: """ This converts the NetworkedConfigFile into a normal ConfigFile object. This requires the normal class hooks to be provided. """
newclass = ConfigFile(fd=filename, load_hook=self.normal_class_hook[0], dump_hook=self.normal_class_hook[1], safe_load=self.safe_load) return newclass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_args(s): """ parse a string into args and kwargs the input is a blank-delimited set of tokens, which may be grouped as strings (tick or double tick delimited) with embedded blanks. a non-string equal (=) acts as a delimiter between key-value pairs. the initial tokens are treated as args, followed by key-value pairs. Example: one 'two three' four=5 six='seven eight' parses to: args = ['one', 'two three'] kwargs = {'four': 5, 'six': 'seven eight'} Return: args as list kwargs as dict Notes: 1. Does not enforce args and keywords as valid python. 2. String delimiters can be escaped (\) within strings. 3. Key-value delimiters (=) can be surrounded by blanks. 4. Non-string integer kwarg values will be int; all other values are str. 5. Designed for functionality, not speed """
args = [] kwargs = {} state = 'arg' for token in to_tokens(s): if state == 'arg': if token.is_key: key = token.value state = 'value' else: args.append(token.value) elif state == 'key': if not token.is_key: raise ExpectingKey(token.value) key = token.value if key in kwargs: raise DuplicateKey(key) state = 'value' elif state == 'value': if token.is_key: raise ConsecutiveKeys(token.value) kwargs[key] = token.value state = 'key' if state == 'value': raise IncompleteKeyValue(key) return args, kwargs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dispatch(self, req): """ Called by the Routes middleware to dispatch the request to the appropriate controller. If a webob exception is raised, it is returned; if some other exception is raised, the webob `HTTPInternalServerError` exception is raised. Otherwise, the return value of the controller is returned. """
# Grab the request parameters params = req.environ['wsgiorg.routing_args'][1] # What controller is authoritative? controller = params.pop('controller') # Determine its name cont_class = controller.__class__ cont_name = "%s:%s" % (cont_class.__module__, cont_class.__name__) # Determine the origin of the request origin = req.remote_addr if req.remote_addr else '[local]' if req.remote_user: origin = '%s (%s)' % (origin, req.remote_user) # Log that we're processing the request LOG.info("%s %s %s (controller %r)" % (origin, req.method, req.url, cont_name)) # Call into that controller try: return controller(req, params) except webob.exc.HTTPException as e: # Return the HTTP exception directly return e except exceptions.AppathyResponse as e: # Return the webob.Response directly return e.response except Exception as e: # Log the controller exception LOG.exception("Exception occurred in controller %r" % cont_name) # These exceptions result in a 500. Note we're # intentionally not including the exception message, since # it could contain sensitive data. return webob.exc.HTTPInternalServerError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(self, ns, selector='*'): """ Query the label store for labels :param ns: Label namespace (`bind_pwd` for example) :type ns: str :param selector: Target selector (`test` or `test.guest` for example) :type selector: str """
q, r = HicaLabelStore.PREFIX + '.' + ns, [] for (key, value) in self.items: if not selector and key == q: r.append((key, value)) if key.startswith(q) and key != q: sub = key[len(q):] m = re.match('.' + selector, sub) if m: r.append((key, value)) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_value(self, label): """ Get value from a single fully-qualified name """
for (key, value) in self.items: if key == label: return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initPatterns(ptc): """ Helper function to take the different localized bits from ptc and create the regex strings. """
# TODO add code to parse the date formats and build the regexes up from sub-parts # TODO find all hard-coded uses of date/time seperators ptc.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?) (?P<mthname>(%(months)s|%(shortmonths)s))\s? (?P<year>\d\d(\d\d)?)? ) )''' % ptc.re_values # I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16 # I suspect the final line was for a trailing time - but testing shows it's not needed # ptc.RE_DATE3 = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s? # ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)? # (,\s?(?P<year>\d\d(\d\d)?))?)) # (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_DATE3 = r'''(?P<date>( (((?P<mthname>(%(months)s|%(shortmonths)s))| ((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2} ((,)?(\s)?(?P<year>\d\d(\d\d)?))? ) )''' % ptc.re_values ptc.RE_MONTH = r'''(\s?|^) (?P<month>( (?P<mthname>(%(months)s|%(shortmonths)s)) (\s?(?P<year>(\d\d\d\d)))? )) (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_WEEKDAY = r'''(\s?|^) (?P<weekday>(%(days)s|%(shortdays)s)) (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values ptc.RE_UNITS = r'''(?P<qty>(-?\d+\s* (?P<units>((%(units)s)s?)) ))''' % ptc.re_values ptc.RE_QUNITS = r'''(?P<qty>(-?\d+\s? (?P<qunits>%(qunits)s) (\s?|,|$) ))''' % ptc.re_values ptc.RE_MODIFIER = r'''(\s?|^) (?P<modifier> (previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values ptc.RE_MODIFIER2 = r'''(\s?|^) (?P<modifier> (from|before|after|ago|prior)) (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_TIMEHMS = r'''(\s?|^) (?P<hours>\d\d?) (?P<tsep>%(timeseperator)s|) (?P<minutes>\d\d) (?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values ptc.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?)) ((?P<tsep>%(timeseperator)s|) (?P<minutes>(\d\d?)) (?:(?P=tsep) (?P<seconds>\d\d? 
(?:[.,]\d+)?))?)?''' % ptc.re_values if 'meridian' in ptc.re_values: ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values dateSeps = ''.join(ptc.dateSep) + '.' ptc.RE_DATE = r'''(\s?|^) (?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?)) (\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps) ptc.RE_DATE2 = r'[%s]' % dateSeps ptc.RE_DAY = r'''(\s?|^) (?P<day>(today|tomorrow|yesterday)) (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s) ''' % ptc.re_values ptc.RE_TIME = r'''(\s?|^) (?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now)) (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values ptc.RE_REMAINING = r'\s+' # Regex for date/time ranges ptc.RE_RTIMEHMS = r'''(\s?|^) (\d\d?)%(timeseperator)s (\d\d) (%(timeseperator)s(\d\d))? (\s?|$)''' % ptc.re_values ptc.RE_RTIMEHMS2 = r'''(\s?|^) (\d\d?) (%(timeseperator)s(\d\d?))? (%(timeseperator)s(\d\d?))?''' % ptc.re_values if 'meridian' in ptc.re_values: ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values ptc.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps ptc.RE_RDATE3 = r'''((((%(months)s))\s? ((\d\d?) (\s?|%(daysuffix)s|$)+)? (,\s?\d\d\d\d)?))''' % ptc.re_values # "06/07/06 - 08/09/06" ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values # "march 31 - june 1st, 2006" ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3 ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values # "march 1rd -13th" ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?' ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values # "4:00:55 pm - 5:90:44 am", '4p-5p' ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2 ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values # "4:00 - 5:90 ", "4:55:55-3:44:55" ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' 
+ ptc.RE_RTIMEHMS ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values # "4-5pm " ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2 ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values # "4:30-5pm " ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2 ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initConstants(ptc): """ Create localized versions of the units, week and month names """
# build weekday offsets - yes, it assumes the Weekday and shortWeekday # lists are in the same order and Mon..Sun (Python style) ptc.WeekdayOffsets = {} o = 0 for key in ptc.Weekdays: ptc.WeekdayOffsets[key] = o o += 1 o = 0 for key in ptc.shortWeekdays: ptc.WeekdayOffsets[key] = o o += 1 # build month offsets - yes, it assumes the Months and shortMonths # lists are in the same order and Jan..Dec ptc.MonthOffsets = {} o = 1 for key in ptc.Months: ptc.MonthOffsets[key] = o o += 1 o = 1 for key in ptc.shortMonths: ptc.MonthOffsets[key] = o o += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_to_file(self, file_path: str) -> None: """ Serialize and write the data into a JSON file. """
data = self.encode() with open(file_path, "w") as f: json.dump(data, f, indent=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_from_file(cls, file_path: str): """ Read and reconstruct the data from a JSON file. """
with open(file_path, "r") as f: data = json.load(f) item = cls.decode(data=data) return item
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gen_component_name(self, basename, postfix_length=13): """ Creates a resource identifier with a random postfix. This is an attempt to minimize name collisions in provider namespaces. :param str basename: The string that will be prefixed with the stack name, and postfixed with some random string. :param int postfix_length: The length of the postfix to be appended. """
def newcname(): postfix = ''.join( random.choice(_AWS_NAME_CHARS) for i in xrange(postfix_length) ) return '%s-%s' % (basename, postfix) cname = newcname() while cname in self.component_names: cname = newcname() self.component_names.append(cname) return cname
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_slugs(apps, schema_editor): """ Include birthdate in slugs """
# Get model managers Representative = apps.get_model("representatives", "Representative") for rep in Representative.objects.all(): rep.slug = '%s-%s' % (rep.slug, rep.birth_date) rep.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_parl_websites(apps, schema_editor): """ Prepare for remote_id removal by creating WebSite entities from it. """
# Get model managers Representative = apps.get_model("representatives", "Representative") WebSite = apps.get_model("representatives", "WebSite") today = datetime.date.today() # EP ep_url = 'http://www.europarl.europa.eu/meps/en/%s/_home.html' qs = Representative.objects.filter( models.Q(mandates__end_date__gte=today) | models.Q(mandates__end_date__isnull=True), mandates__group__chamber__abbreviation='EP' ) for rep in qs: changed = False url = ep_url % rep.remote_id try: site = WebSite.objects.get(representative=rep, kind='EP') except WebSite.DoesNotExist: site = WebSite(representative=rep, kind='EP', url=url) changed = True if site.url != url: site.url = url changed = True if changed: site.save() # AN/SEN for chamber in ['AN', 'SEN']: qs = Representative.objects.filter( models.Q(mandates__end_date__gte=today) | models.Q(mandates__end_date__isnull=True), mandates__group__chamber__abbreviation=chamber ) for rep in qs: changed = False url = rep.remote_id try: site = WebSite.objects.get(representative=rep, kind=chamber) except WebSite.DoesNotExist: site = WebSite(representative=rep, kind=chamber, url=url) changed = True if site.url != url: site.url = url changed = True if changed: site.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def determine_file_extension_based_on_format(format_specifier): """ returns file extension string """
if format_specifier == FMT_INI: return 'ini' if format_specifier == FMT_DELIMITED: return '' if format_specifier == FMT_XML: return 'xml' if format_specifier == FMT_JSON: return 'json' if format_specifier == FMT_YAML: return 'yml' raise ValueError('invalid format specifier: {}'.format(format_specifier))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_element_using_argtuple(self, argtuple): """ takes a tuple of keys returns node found in cfg_dict found by traversing cfg_dict by successive application of keys from element_path """
# doesn't support DELIMITED, only dict-based formats if self.format == FMT_DELIMITED: return None node = self.cfg_dict for key in argtuple: node = node[key] return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def create_arc(self, mace_type, name): ''' Creates the story arc and initial tree for that arc for the current outline. Returns the resulting Arc instance. ''' arc = Arc(mace_type=mace_type, outline=self, name=name) arc.save() milestone_count = arc.generate_template_arc_tree() if milestone_count == 7: # pragma: no cover arc.refresh_from_db() return arc else: raise ArcIntegrityError('Something went wrong during arc template generation')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def generate_template_arc_tree(self): ''' Generate a seven point template in this arc. Arc must be empty. ''' arc_root = self.arc_root_node if not arc_root: arc_root = ArcElementNode.add_root( arc_element_type='root', description='root of arc %s' % self.name, arc=self ) if arc_root.get_children(): raise ArcIntegrityError(_("This arc already has elements. You cannot build a template on top of it")) for key, value in ARC_NODE_ELEMENT_DEFINITIONS.items(): if value['milestone']: arc_root.add_child(arc_element_type=key, description=value['template_description']) arc_root.refresh_from_db() return ArcElementNode.objects.get(pk=arc_root.pk).get_children().count()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def fetch_arc_errors(self): ''' Evaluates the current tree of the arc and provides a list of errors that the user should correct. ''' error_list = [] hnode = self.validate_first_element() if hnode: error_list.append({'hook_error': hnode}) rnode = self.validate_last_element() if rnode: error_list.append({'reso_error': rnode}) try: self.validate_generations() except ArcGenerationError as ag: error_list.append({'generation_error': str(ag)}) milecheck = self.validate_milestones() if milecheck: error_list.append({'mseq_error': milecheck}) return error_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def validate_generations(self): ''' Make sure that the descendent depth is valid. ''' nodes = self.arc_root_node.get_descendants() for node in nodes: logger.debug("Checking parent for node of type %s" % node.arc_element_type) parent = ArcElementNode.objects.get(pk=node.pk).get_parent(update=True) if 'mile' in node.arc_element_type and parent.get_depth() > 1: logger.debug("Milestone node... with leaf parent") raise ArcGenerationError(_("Milestones cannot be descendants of anything besides the root!")) if (parent.get_depth() > 1 and parent.arc_element_type not in ARC_NODE_ELEMENT_DEFINITIONS[node.arc_element_type]['allowed_parents']): raise ArcGenerationError(_("Node %s cannot be a descendant of node %s" % (node, parent))) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def validate_milestones(self): ''' Reviews the arc element tree to ensure that milestones appear in the right order. ''' milestones = self.arc_root_node.get_children().filter(arc_element_type__contains='mile') current_cursor = 0 for mile in milestones: seq = mile.milestone_seq if seq < current_cursor: return mile current_cursor = seq return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def all_characters(self): ''' Returns a queryset of all characters associated with this node and its descendants, excluding any duplicates. ''' qs = self.assoc_characters.all() for node in self.get_descendants(): qs2 = node.assoc_characters.all() qs = qs.union(qs2).distinct('pk') return qs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def impact_rating(self): ''' Returns the impact rating for this node. Impact rating is a measure of how powerful this moment in the story is by evaluting how many simultaneous arc elements are associated with it. There is also a generational bleed element, where the impact score creates shockwaves throughout their direct ancestor and descendant nodes. This echo fades fast, but the bigger the impact, the farther it goes. Currently, the impact bleed does not extend to sibling nodes. WARNING: Here be dragons. ''' if self.depth == 1: logger.debug('Root node. Skipping.') return 0 # pragma: no cover impact_bleed = { 'mile': 0.5, # A milestone extends it's influence by 50% per generation 'tf_beat': 0.25, } inherited_impact = 0 base_impact, add_impact, mile_impact = self._local_impact_rating() local_impact = base_impact + add_impact + mile_impact logger.debug("Local impact is %f" % local_impact) parents = self.get_ancestors().filter(depth__gt=1) children = self.get_descendants() logger.debug('Found %d parents and %d children' % (parents.count(), children.count())) for node in parents | children: if node.depth == 1: logger.debug("Skipping root node...") else: logger.debug('Checking a related node...') b, a, m = node._local_impact_rating() logger.debug('Related node has %f of additional impact and %f of milestone impact.' % (a, m)) if (a + m) > 0: if node.depth > self.depth: depth_diff = node.depth - self.depth else: depth_diff = self.depth - node.depth logger.debug('There is a generational difference of %f. Adjusting impact bleed.' % depth_diff) for x in range(depth_diff): a = a * impact_bleed['tf_beat'] m = m * impact_bleed['mile'] logger.debug('Additional impact bleed of %f. Milestone impact bleed of %f' % (a, m)) inherited_impact += a + m logger.debug('Final impact bleed of %f. Adding to inherited impact.' % inherited_impact) else: logger.debug('Node had 0 bleedworthy impact. Skipping...') logger.debug('Inherited impact of %f. 
Adding to local impact of %f' % (inherited_impact, local_impact)) return local_impact + inherited_impact
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def all_locations(self): ''' Returns a queryset of all locations associated with this node and its descendants, excluding any duplicates. ''' qs = self.assoc_locations.all() for node in self.get_descendants(): qs2 = node.assoc_locations.all() qs = qs.union(qs2).distinct('pk') return qs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def move(self, target, pos=None): ''' An override of the treebeard api in order to send a signal in advance. ''' if self.outline != target.outline: raise IntegrityError('Elements must be from the same outline!') tree_manipulation.send( sender=self.__class__, instance=self, action='move', target_node_type=None, target_node=target, pos=pos ) return super().move(target, pos)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_by_enclosures(self, enclosure_urls): """ Get bitlove data for a list of enclosure URLs """
# prepare URLs enclosure_urls = map(str.strip, enclosure_urls) enclosure_urls = filter(None, enclosure_urls) return BitloveResponse(self.opener, enclosure_urls)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, url): """ Get the response for the given enclosure URL """
self._query() return Enclosure(self._resp.get(url), url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query(self): """ perform a request to the API, only when necessary """
if not self.urls: self._resp = {} elif self._resp is None: params = [ ('url', url) for url in self.urls] query = urllib.urlencode(params) # query API r = self.opener.open(BITLOVE_ENCLOSURE_API + query) self._resp = json.loads(r.read())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_parser(): """ Return a command-line arguments parser. """
parser = argparse.ArgumentParser(description='Search you Azure AD contacts from mutt or the command-line.') parser.add_argument('-c', '--config', help='Specify alternative configuration file.', metavar="FILE") parser.add_argument('-v', '--verbose', dest="log_level", action='store_const', const=logging.INFO, help='Be verbose about what is going on (stderr).') parser.add_argument('-V', '--version', action='version', version='%%(prog)s %s' % pkg_resources.get_distribution("aadbook").version, help="Print version and exit") parser.add_argument('-d', '--debug', dest="log_level", action='store_const', const=logging.DEBUG, help='Output debug info (stderr).') parser.set_defaults(config=CONFIG_FILE, log_level=logging.ERROR) subparsers = parser.add_subparsers() parser_config_template = subparsers.add_parser('config-template', description='Prints a template for .aadbookrc to stdout') parser_config_template.set_defaults(func=do_config_template) parser_reload = subparsers.add_parser('authenticate', description='Azure AD authentication.') parser_reload.set_defaults(func=do_authenticate) parser_reload = subparsers.add_parser('reload', description='Force reload of the cache.') parser_reload.set_defaults(func=do_reload) parser_query = subparsers.add_parser('query', description='Search contacts using query (regex).') parser_query.add_argument('query', help='regex to search for.', metavar='QUERY') parser_query.set_defaults(func=do_query) return parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_packages_under(path): """ Recursive list all of the packages under a specific package."""
all_packages = setuptools.find_packages() packages = [] for package in all_packages: package_split = package.split(".") if package_split[0] == path: packages.append(package) return packages
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_version_to_package(path): """ Copy the single source of truth version number into the package as well. """
init_file = os.path.join(path, "__init__.py") with open(init_file, "r") as original_file: lines = original_file.readlines() with open(init_file, "w") as new_file: for line in lines: if "__version__" not in line: new_file.write(line) else: new_file.write("__version__ = \"{}\"\n".format(VERSION))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self): """Validation of configuration to check for required values"""
if not self.server.enabled: if self.security.signature_certificate_file is self.security.defaults['signature_certificate_file']: print("ISSUE: If you are not configuring a server, you need to set 'signature_certificate_file'") if self.security.signature_private_key_file is self.security.defaults['signature_private_key_file']: print("ISSUE: If you are not configuring a server, you need to set 'signature_private_key_file'") else: if self.client.enabled: print("ISSUE: Client and server enabled at the same time?") if self.server.protocol is self.server.defaults['protocol']: if self.server.server_certificate_file is self.server.defaults['server_certificate_file'] or \ self.server.server_private_key_file is self.server.defaults['server_private_key_file']: print("ISSUE: 'server_certificate_file' and/or 'server_private_key_file' are not configured and will be auto-generated.") if self.server.certification_authority_certificate_file is self.server.defaults['certification_authority_certificate_file'] or \ self.server.certification_authority_private_key_file is self.server.defaults['certification_authority_private_key_file']: print("ISSUE: 'certification_authority_certificate_file' and/or 'certification_authority_private_key_file' are not configured and will be auto-generated - this is NOT recommended.") if self.server.authentication_script is self.server.defaults['authentication_script']: print("ISSUE: No 'authentication_script' has been provided and all authentication requests will be rejected!") if self.client.enabled: if self.client.server_endpoint is self.client.defaults['server_endpoint']: print("ISSUE: You are running in client mode, but you are using a default server address.") if not self.client.disable_peer_verification is self.client.defaults['disable_peer_verification'] or \ not self.client.disable_host_verification is self.client.defaults['disable_host_verification']: print("ISSUE: Disabling peer/host verification is NOT recommended - AT ALL.") if 
self.client.username is self.client.defaults['username'] or \ self.client.password is self.client.defaults['password']: print("ISSUE: No username and/or password has been configured for a client.") if self.fscp.contact is self.fscp.defaults['contact']: if not self.server.enabled and not self.client.enabled: print("ISSUE: You have not defined any contact points while you are neither running as server nor client.") ## hostname_resolution_protocol=ipv4/ipv6 ## ipv4_address_prefix_length=9.0.0.1/24 ## ipv6_address_prefix_length=2aa1::1/8 if self.security.authority_certificate_file is self.security.defaults['authority_certificate_file']: print("ISSUE: You need to set 'authority_certificate_file'") if self.tap_adapter.ipv4_address_prefix_length is self.tap_adapter.defaults['ipv4_address_prefix_length']: print("ISSUE: You are using the default network address - make sure you set a different ip for every machine 'ipv4_address_prefix_length'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_admin(app, admin): """Called on app initialization to register administration interface."""
category = 'Knowledge' admin.category_icon_classes[category] = "fa fa-mortar-board" admin.add_view( KnowledgeAdmin(app, KnwKB, db.session, name='Knowledge Base', category=category, endpoint="kb") ) admin.add_view( KnwKBRVALAdmin(app, KnwKBRVAL, db.session, name="Knowledge Mappings", category=category, endpoint="kbrval") )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_model_change(self, form, model, is_created): """Save model."""
super(KnowledgeAdmin, self).after_model_change(form, model, is_created) from invenio_collections.models import Collection if form.kbtype.data == KnwKB.KNWKB_TYPES['dynamic']: id_collection = form.id_collection.data or None collection = Collection.query.filter_by( id=id_collection).one() if id_collection else None model.set_dyn_config( field=form.output_tag.data, expression=form.search_expression.data, collection=collection) if form.kbtype.data == KnwKB.KNWKB_TYPES['taxonomy']: if form.tfile.data: file_name = model.get_filename() file_data = request.files[form.tfile.name].read() with open(file_name, 'w') as f: f.write(file_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edit_form(self, obj=None): """Edit form."""
kbtype = request.args['kbtype'] if 'kbtype' in request.args else 'w' if kbtype == KnwKB.KNWKB_TYPES['written_as']: self.form = WrittenAsKnowledgeForm elif kbtype == KnwKB.KNWKB_TYPES['dynamic']: self.form = DynamicKnowledgeForm else: self.form = TaxonomyKnowledgeForm form = self.form(obj=obj) if not form.is_submitted(): # load extra data: obj => form if kbtype == KnwKB.KNWKB_TYPES['dynamic']: if obj.kbdefs: form.id_collection.data = obj.kbdefs.id_collection form.output_tag.data = obj.kbdefs.output_tag form.search_expression.data = obj.kbdefs.search_expression if kbtype == KnwKB.KNWKB_TYPES['taxonomy']: file_name = obj.get_filename() if os.path.isfile(file_name): form.tfile.label.text = form.tfile.label.text + " *" # TODO add the possibility to download the file form.tfile.description = _("Already uploaded %(name)s", name=obj.get_filename()) form.kbtype.data = kbtype return form
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_form(self, obj=None): """Create form."""
kbtype = request.args['kbtype'] if 'kbtype' in request.args else 'w' if kbtype == KnwKB.KNWKB_TYPES['written_as']: self.form = WrittenAsKnowledgeForm elif kbtype == KnwKB.KNWKB_TYPES['dynamic']: self.form = DynamicKnowledgeForm else: self.form = TaxonomyKnowledgeForm form = self.form() form.kbtype.data = kbtype return form
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _deep_value(*args, **kwargs): """ Drills down into tree using the keys """
node, keys = args[0], args[1:] for key in keys: node = node.get(key, {}) default = kwargs.get('default', {}) if node in ({}, [], None): node = default return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_pk(abspath, compress=False, enable_verbose=True): """Load Python Object from Pickle file. :param abspath: File path. Use absolute path as much as you can. File extension has to be ``.pickle`` or ``.gz``. (for compressed Pickle) :type abspath: string :param compress: (default False) Load from a gzip compressed Pickle file. Check :func:`dump_pk()<dump_pk>` function for more information. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: Complete! Elapse 0.000272 sec. {'a': 1, 'b': 2} **中文文档** 从Pickle文件中读取数据 参数列表 :param abspath: 文件路径, 扩展名需为 ``.pickle`` 或 ``.gz`` :type abspath: ``字符串`` :param compress: (默认 False) 是否从一个gzip压缩过的Pickle文件中读取数据。 请 参考 :func:`dump_pk()<dump_pk>` 获得更多信息. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值`` """
def load_pk(abspath, compress=False, enable_verbose=True):
    """Load a Python object from a Pickle file.

    :param abspath: file path; extension must be ``.pickle`` (or ``.gz``
        when ``compress`` is True).
    :param compress: read a gzip-compressed pickle when True.
    :param enable_verbose: print progress messages when True.
    :return: the unpickled object.
    :raises Exception: when the file extension does not match the mode.
    """
    abspath = str(abspath)  # try stringlize
    msg = Messenger(enable_verbose=enable_verbose)
    ext = os.path.splitext(abspath)[1]
    if compress:  # check extension name
        if ext != ".gz":
            raise Exception("compressed pickle has to use extension '.gz'!")
    else:
        if ext != ".pickle":
            raise Exception("file extension are not '.pickle'!")
    msg.show("\nLoading from %s..." % abspath)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # portable monotonic replacement.
    st = time.perf_counter()
    if compress:
        with gzip.open(abspath, "rb") as f:
            obj = pickle.loads(f.read())
    else:
        with open(abspath, "rb") as f:
            obj = pickle.load(f)
    if enable_verbose:
        msg.show("    Complete! Elapse %.6f sec" % (time.perf_counter() - st))
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dump_pk(obj, abspath, pk_protocol=pk_protocol, replace=False, compress=False, enable_verbose=True): """Dump Picklable Python Object to file. Provides multiple choice to customize the behavior. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param replace: (default False) If ``True``, when you dump Pickle to a existing path, it silently overwrite it. If False, an exception will be raised. Default False setting is to prevent overwrite file by mistake. :type replace: boolean :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: Complete! Elapse 0.001763 sec **中文文档** 将Python对象以Pickle的方式序列化, 保存至本地文件。(有些自定义类无法被序列化) 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖 原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。 :type replace: ``布尔值`` :param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值`` """
def _check_pk_extension(abspath, compress):
    """Validate the pickle file extension, allowing a trailing '.tmp' layer."""
    root, ext = os.path.splitext(abspath)
    if ext == ".tmp":
        # '.tmp' files (used by safe_dump_pk) are checked one level deeper.
        root, ext = os.path.splitext(root)
    if compress:
        if ext != ".gz":
            raise Exception(
                "compressed pickle has to use extension '.gz'!")
    else:
        if ext != ".pickle":
            raise Exception("file extension are not '.pickle'!")


def dump_pk(obj, abspath, pk_protocol=pk_protocol, replace=False,
            compress=False, enable_verbose=True):
    """Dump a picklable Python object to file.

    :param obj: picklable Python object.
    :param abspath: target path; extension must be ``.pickle`` (or ``.gz``
        when ``compress`` is True), optionally with a ``.tmp`` suffix.
    :param pk_protocol: pickle protocol; use 2 for py2/py3 compatibility.
    :param replace: overwrite an existing file when True, otherwise raise.
    :param compress: gzip-compress the output when True.
    :param enable_verbose: print progress messages when True.
    :raises Exception: on a bad extension or a refused overwrite.
    """
    abspath = str(abspath)  # try stringlize
    msg = Messenger(enable_verbose=enable_verbose)
    _check_pk_extension(abspath, compress)
    msg.show("\nDumping to %s..." % abspath)
    # time.clock() was removed in Python 3.8; perf_counter() replaces it.
    st = time.perf_counter()
    # Refuse to clobber an existing file unless explicitly allowed; the
    # write logic itself is now shared by both branches (was duplicated).
    if os.path.exists(abspath) and not replace:
        raise Exception("\tCANNOT WRITE to %s, "
                        "it's already exists" % abspath)
    if compress:
        with gzip.open(abspath, "wb") as f:
            f.write(pickle.dumps(obj, protocol=pk_protocol))
    else:
        with open(abspath, "wb") as f:
            pickle.dump(obj, f, protocol=pk_protocol)
    msg.show("    Complete! Elapse %.6f sec" % (time.perf_counter() - st))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False, enable_verbose=True): """A stable version of dump_pk, silently overwrite existing file. When your program been interrupted, you lose nothing. Typically if your program is interrupted by any reason, it only leaves a incomplete file. If you use replace=True, then you also lose your old file. So a bettr way is to: 1. dump pickle to a temp file. 2. when it's done, rename it to #abspath, overwrite the old one. This way guarantee atomic write. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: Complete! Elapse 0.001763 sec **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式 写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部 都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会 影响原文件。 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz`` , 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. 
:type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值`` """
def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False,
                 enable_verbose=True):
    """Atomic variant of :func:`dump_pk`.

    Writes to ``<abspath>.tmp`` first and only then renames over the
    target, so an interrupted run never leaves a half-written file in
    place of the old one.
    """
    abspath = str(abspath)  # accept path-like input
    tmp_path = abspath + ".tmp"
    dump_pk(obj, tmp_path,
            pk_protocol=pk_protocol,
            replace=True,
            compress=compress,
            enable_verbose=enable_verbose)
    shutil.move(tmp_path, abspath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obj2str(obj, pk_protocol=pk_protocol): """Convert arbitrary object to utf-8 string, using base64encode algorithm. Usage:: 'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg==' **中文文档** 将可Pickle化的Python对象转化为utf-8编码的"字符串" """
def obj2str(obj, pk_protocol=pk_protocol):
    """Pickle *obj* and return it as a base64-encoded utf-8 string."""
    raw = pickle.dumps(obj, protocol=pk_protocol)
    return base64.b64encode(raw).decode("utf-8")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Run analysis. The basic idea is to recursively find all script files in specific programming language, and analyze each file then sum it up. """
def run(self):
    """Analyze every matching source file under the workspace.

    Sums code/comment/docstring/pure-code line counts across all files
    accepted by ``self.filter``; files that fail to read or parse are
    tallied as "other".  Prints and returns the summary text.
    """
    target_count, other_count = 0, 0
    total = {"code": 0, "comment": 0, "docstr": 0, "purecode": 0}
    collection = FileCollection.from_path_except(self.workspace, self.ignore)
    matched, skipped = collection.select(self.filter, keepboth=True)
    other_count += len(skipped)
    for abspath in matched:
        try:
            with open(abspath, "rb") as f:
                text = f.read().decode("utf-8")
            counts = self.analyzer(text)
            for key, value in zip(("code", "comment", "docstr", "purecode"),
                                  counts):
                total[key] += value
            target_count += 1
        except Exception:
            # unreadable / undecodable file: count it as "other"
            other_count += 1
    lines = [
        "Code statistic result for '%s'" % self.workspace,
        "    %r %r files, %r other files." % (
            target_count, self.language, other_count),
        "    code line: %s" % total["code"],
        "    comment line: %s" % total["comment"],
        "    docstr line: %s" % total["docstr"],
        "    purecode line: %s" % total["purecode"],
    ]
    message = "\n".join(lines)
    print(message)
    return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def analyzePython(code_text): """Count how many line of code, comment, dosstr, purecode in one Python script file. """
def analyzePython(code_text):
    """Count code, comment, docstring and pure-code lines in Python source.

    :param code_text: full source text of one Python file.
    :return: tuple ``(code, comment, docstr, purecode)`` where
        ``purecode = code - docstr``.  Blank lines are ignored; a line is a
        comment when it starts with ``#`` after stripping.
    """
    docstr_count = 0
    for quote in ('"""', "'''"):
        pattern = r"""(?<=%s)[\s\S]*?(?=%s)""" % (quote, quote)
        # matches alternate between docstring bodies and the text in
        # between; every second match is a docstring body
        for body in re.findall(pattern, code_text)[::2]:
            docstr_count += sum(
                1 for ln in body.split("\n") if ln.strip())
    stripped = [ln.strip() for ln in code_text.split("\n") if ln.strip()]
    comment_count = sum(1 for ln in stripped if ln.startswith("#"))
    code_count = len(stripped) - comment_count
    purecode_count = code_count - docstr_count
    return code_count, comment_count, docstr_count, purecode_count
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_default(__func: Callable, __arg: str) -> str: """Fetch default value for a function argument Args: __func: Function to inspect __arg: Argument to extract default value for """
def get_default(__func: Callable, __arg: str) -> str:
    """Fetch the default value declared for one argument of a function.

    Args:
        __func: Function to inspect
        __arg: Argument to extract default value for
    """
    params = signature(__func).parameters
    return params[__arg].default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_(name: str, local: bool, package: str, section: str, key: Optional[str]): """Extract or list values from config."""
def config_(name: str, local: bool, package: str, section: str,
            key: Optional[str]):
    """Print one config value, or list every option in *section*.

    Missing sections/options are silently ignored.
    """
    cfg = config.read_configs(package, name, local=local)
    if key:
        with suppress(NoOptionError, NoSectionError):
            echo(cfg.get(section, key))
        return
    with suppress(NoSectionError):
        for option in cfg.options(section):
            colourise.pinfo(option)
            echo(' {}'.format(cfg.get(section, option)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_tag(match: str, strict: bool, directory: str): """Find tag for git repository."""
def find_tag(match: str, strict: bool, directory: str):
    """Print the matching tag of the git repository in *directory*.

    A failing ``git`` invocation is silently ignored.
    """
    with suppress(CalledProcessError):
        tag = git.find_tag(match, strict=strict, git_dir=directory)
        echo(tag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty_time(timestamp: str): """Format timestamp for human consumption."""
def pretty_time(timestamp: str):
    """Print *timestamp* in a human-readable form.

    Accepts an ISO-8601 datetime; otherwise the input is parsed as a
    delta (ISO-8601 first, then via ``human_time.parse_timedelta``) and
    subtracted from the current UTC time.
    """
    try:
        moment = iso_8601.parse_datetime(timestamp)
    except ValueError:
        # not an absolute time -- treat it as an offset from "now" (UTC)
        utc_now = datetime.utcnow().replace(tzinfo=timezone.utc)
        try:
            offset = iso_8601.parse_delta(timestamp)
        except ValueError:
            offset = human_time.parse_timedelta(timestamp)
        moment = utc_now - offset
    echo(human_time.human_timestamp(moment))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gen_text(env: TextIOBase, package: str, tmpl: str): """Create output from Jinja template."""
def gen_text(env: TextIOBase, package: str, tmpl: str):
    """Render Jinja template *tmpl* from *package* and print the result.

    The template context is read from *env* as JSON (empty when no file
    is supplied).
    """
    env_args = json_datetime.load(env) if env else {}
    jinja_env = template.setup(package)
    rendered = jinja_env.get_template(tmpl).render(**env_args)
    echo(rendered)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def time(ctx: Context, command: str): """Time the output of a command."""
def time(ctx: Context, command: str):
    """Run *command* in a shell, report its timing, and exit with its
    return code."""
    with timer.Timing(verbose=True):
        result = run(command, shell=True)
    ctx.exit(result.returncode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(cls, name, type_): """ Register a new type for an entry-type. The 2nd argument has to be a subclass of structures.Entry. """
def register(cls, name, type_):
    """Register *type_* under the lowercased *name*.

    :raises exceptions.InvalidEntryType: when *type_* is not an
        :class:`Entry` subclass.
    """
    if not issubclass(type_, Entry):
        raise exceptions.InvalidEntryType(
            "%s is not a subclass of Entry" % str(type_))
    cls._registry[name.lower()] = type_
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, **kwargs): """ Validates each entry (passing the provided arguments down to them and also tries to resolve all cross-references between the entries. """
def validate(self, **kwargs):
    """Resolve cross-references, then validate every contained entry,
    forwarding *kwargs* to each entry's own ``validate``."""
    self.check_crossrefs()
    for entry in self.values():
        entry.validate(**kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, raise_unsupported=False): """ Checks if the Entry instance includes all the required fields of its type. If ``raise_unsupported`` is set to ``True`` it will also check for potentially unsupported types. If a problem is found, an InvalidStructure exception is raised. """
def validate(self, raise_unsupported=False):
    """Check that this entry carries all of its required fields.

    A required field given as a list/tuple is a set of alternatives, any
    one of which satisfies the requirement.  With ``raise_unsupported``
    the entry may also contain no fields beyond the required and optional
    ones.

    :raises exceptions.InvalidStructure: when validation fails.
    """
    present = set(self.keys())
    satisfied = set()   # required fields actually found on the entry
    missing = []
    for requirement in self.required_fields:
        if isinstance(requirement, (list, tuple)):
            alternatives = list(requirement)
        else:
            alternatives = [requirement]
        hits = [alt for alt in alternatives if alt in present]
        satisfied.update(hits)
        if not hits:
            missing.append(requirement)
    unsupported = present - satisfied - set(self.optional_fields)
    if missing or (raise_unsupported and unsupported):
        raise exceptions.InvalidStructure(
            "Missing or unsupported fields found",
            required_fields=missing,
            unsupported_fields=unsupported)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(file_name, key): """ Print a value for the specified key. If key is not found xon_db exists with code 1. """
def get(file_name, key):
    """Print the value stored under *key*; exit with status 1 when the
    key is absent."""
    database = XonoticDB.load_path(file_name)
    value = database.get(key)
    if value is None:
        sys.exit(1)
    click.echo(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(file_name, key, value, new): """ Set a new value for the specified key. """
def set(file_name, key, value, new):
    """Store *value* under *key* and save the database.

    Unknown keys are rejected (stderr message, exit code 1) unless *new*
    is set.
    """
    database = XonoticDB.load_path(file_name)
    if key in database or new:
        database[key] = value
        database.save(file_name)
    else:
        click.echo('Key %s is not found in the database' % key,
                   file=sys.stderr)
        sys.exit(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_cts_record(file_name, map, position): """ Remove cts record on MAP and POSITION """
def remove_cts_record(file_name, map, position):
    """Delete the CTS record at *position* on *map* and save the database."""
    database = XonoticDB.load_path(file_name)
    database.remove_cts_record(map, position)
    database.save(file_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_all_cts_records_by(file_name, crypto_idfp): """ Remove all cts records set by player with CRYPTO_IDFP """
def remove_all_cts_records_by(file_name, crypto_idfp):
    """Delete every CTS record set by the player identified by
    *crypto_idfp*, then save the database."""
    database = XonoticDB.load_path(file_name)
    database.remove_all_cts_records_by(crypto_idfp)
    database.save(file_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_cts_records(file_name, crypto_idfp, crypto_idfps): """ Merge cts records made by CRYPTO_IDFPS to CRYPTO_IDFP """
def merge_cts_records(file_name, crypto_idfp, crypto_idfps):
    """Fold the CTS records of *crypto_idfps* into *crypto_idfp* and save
    the database."""
    database = XonoticDB.load_path(file_name)
    database.merge_cts_records(crypto_idfp, crypto_idfps)
    database.save(file_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_rotation(rotation): """checks rotation parameter if illegal value raises exception"""
def check_rotation(rotation):
    """Validate *rotation* against the ALLOWED_ROTATION whitelist.

    :raises UnsupportedRotation: when the value is not allowed.
    """
    if rotation not in ALLOWED_ROTATION:
        allowed_rotation = ', '.join(ALLOWED_ROTATION)
        # Fixed typo in the user-facing message ("allwoed" -> "allowed").
        raise UnsupportedRotation('Rotation %s is not allowed. Allowed are %s'
                                  % (rotation, allowed_rotation))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_resize(resize): """checks resize parameter if illegal value raises exception"""
def check_resize(resize):
    """Validate a resize specification.

    Accepted forms: ``None`` (no resize), ``'{width}x{height}'`` with
    integer parts, or ``'{percentage}%'`` with 1 <= percentage <= 1000.

    :raises PercentageOutOfRange: percentage outside 1..1000.
    :raises MallformedResize: any other malformed value.
    """
    if resize is None:
        return
    resize = resize.lower().strip()
    if 'x' in resize:
        # Fixed: the original assigned ``resize.lower().split('x')`` and
        # immediately overwrote it -- dead statement removed.
        parts = [part.strip() for part in resize.split('x')]
        if len(parts) == 2 and parts[0].isdigit() and parts[1].isdigit():
            return
    elif '%' in resize:
        percent = resize.split('%')[0]
        if percent.isnumeric():
            value = int(percent)
            if 1 <= value <= 1000:
                return
            raise PercentageOutOfRange(
                "percentage must be between 1 and 1000")
    raise MallformedResize('Resize value "%s" is mallformed. '
                           'Desired format is: {width}x{height} or {percentage}%%'
                           % resize)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_response(response): """ checks the response if the server returned an error raises an exception. """
def check_response(response):
    """Inspect an API *response* and raise on any reported error.

    :param response: a response object exposing ``status_code`` / ``text``.
    :raises ServerError: non-success status code or a non-JSON body.
    :raises EmptyResponse: the decoded JSON body is empty.
    :raises UnsupportedFormat: the server rejected the file type.
    :raises UnkownError: any other server-reported failure.
    """
    # NOTE(review): status 300 passes this check (< 200 or > 300) --
    # confirm that is intended rather than ``>= 300``.
    if response.status_code < 200 or response.status_code > 300:
        raise ServerError('API requests returned with error: %s'
                          % response.status_code)
    try:
        payload = loads(response.text)
    except ValueError:
        # Fixed grammar of the user-facing message ("did not returned").
        raise ServerError('The API did not return a JSON string.')
    if not payload:
        raise EmptyResponse()
    if 'failure' in payload:
        if payload['failure'] == 'Falscher Dateityp':
            # Fixed typo in the user-facing message ("witch" -> "which").
            raise UnsupportedFormat('Please look at picflash.org '
                                    'which formats are supported')
        raise UnkownError(payload['failure'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_if_redirect(url): """ checks if server redirects url """
def check_if_redirect(url):
    """Return the redirect target of *url*, or ``None`` when the server
    does not answer with a 3xx status."""
    response = head(url, headers={'User-Agent': USER_AGENT})
    if 300 <= response.status_code < 400:
        return response.headers['location']
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_backup(configuration, backup_data): """Celery task. It will extract the backup archive into a unique folder in the temporary directory specified in the configuration. Once extracted, a Docker container will be started and will start a restoration procedure. The worker will wait for the container to exit and retrieve its return code. A notification is sent if the return code is != 0. If the return code == 0, the container will be removed. Lastly, it will remove the temporary workdir. """
# Unpack the uploaded backup archive into this job's working directory.
extract_archive(backup_data['archive_path'], backup_data['workdir'])
docker_client = Client(configuration['docker']['url'])
# Start the restoration container and block until it exits.
container = run_container(docker_client, backup_data)
return_code = docker_client.wait(container)
print('Container return code: {}'.format(return_code))
if return_code != 0:
    # Restoration failed: keep the container around for inspection and
    # mail a failure report.
    notifier = MailNotifier(configuration['mail'])
    report = {'archive': backup_data['archive_path'],
              'image': backup_data['image'],
              'container_id': container.get('Id')}
    notifier.send_report(report)
else:
    # Success: the container is no longer needed.
    docker_client.remove_container(container)
# NOTE(review): remove_file() is given the extracted workdir (a directory
# populated by extract_archive) -- confirm it handles directories.
remove_file(backup_data['workdir'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def module_files(module, dependencies_dict=None): """ Scan a module and its entire dependency tree to create a dict of all files and their modified time. @param module: A <module> object @param dependencies_dict: Pass an existing dict to add only unscanned files or None to create a new file dict @return: A dict containing filenames as keys with their modified time as value """
def module_files(module, dependencies_dict=None):
    """Recursively collect the files backing *module* and everything it
    references.

    @param module: A <module> object
    @param dependencies_dict: mapping to extend, or None to start fresh
    @return: dict of ``{filename: modified_time}``
    """
    if dependencies_dict is None:
        dependencies_dict = {}
    if hasattr(module, '__file__'):
        filename = module.__file__
        if filename not in dependencies_dict:
            realname, modified_time = _get_filename_and_modified(filename)
            if realname and realname not in dependencies_dict:
                dependencies_dict[realname] = modified_time
    for name in dir(module):
        try:
            member = getattr(module, name)
            if hasattr(member, '__file__'):
                # the member is itself a module: recurse into it
                module_files(member, dependencies_dict)
            elif hasattr(member, '__module__'):
                # otherwise follow the member back to its defining module
                owner = sys.modules[getattr(member, '__module__')]
                if hasattr(owner, '__file__'):
                    module_files(owner, dependencies_dict)
        except (AttributeError, KeyError):
            pass
    return dependencies_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def files(): """ Scan all modules in the currently running app to create a dict of all files and their modified time. @note The scan only occurs the first time this function is called. Subsequent calls simply return the global dict. @return: A dict containing filenames as keys with their modified time as value """
# _scanned / _process_files are module-level cache state.
# NOTE(review): _scanned is only read here, never set -- presumably it is
# flipped elsewhere in the module; confirm, otherwise the scan repeats.
if not _scanned:
    # Prefer walking the dependency tree rooted at __main__ ...
    if not module_files(sys.modules['__main__'], _process_files):
        # ... and fall back to every currently loaded module when that
        # yields nothing.
        for module in sys.modules.values():
            if hasattr(module, '__file__'):
                filename = module.__file__
                if filename not in _process_files:
                    realname, modified_time = _get_filename_and_modified(filename)
                    if realname and realname not in _process_files:
                        _process_files[realname] = modified_time
return _process_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False): """ Register a signal handler for `signal.SIGHUP` that checks for modified files and only acts if at least one modified file is found. @type signal_or_callable: str, int or callable @param signal_or_callable: You can pass either a signal or a callable. The signal can be specified by name or number. If specifying by name, the 'SIG' portion is optional. For example, valid values for SIGINT include 'INT', 'SIGINT' and `signal.SIGINT`. Alternatively, you can pass a callable that will be called with the list of changed files. So the call signature should be `func(list)`. The return value of the callable is ignored. @type verbose: bool or callable @param verbose: Defaults to False. True indicates that a message should be printed. You can also pass a callable such as log.info. """
#noinspection PyUnusedLocal def handle_hup(signum, frame): changed = modified() if changed: if callable(signal_or_callable): func = signal_or_callable args = (changed,) op = 'Calling' try: name = signal_or_callable.__name__ except Exception: name = str(signal_or_callable) else: if isinstance(signal_or_callable, int): name = str(signal_or_callable) signum = signal_or_callable if verbose: for item in dir(signal): if item.startswith('SIG') and getattr(signal, item) == signal_or_callable: name = item break else: name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable signum = getattr(signal, name) func = os.kill args = (os.getpid(), signum) op = 'Sending' if verbose: more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else '' message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more) if callable(verbose): #noinspection PyCallingNonCallable verbose(message) else: print(message) func(*args) files() signal.signal(signal.SIGHUP, handle_hup) signal.siginterrupt(signal.SIGHUP, False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_segment_ended(self):
    """Process the end of the current segment based on its template.

    Emits the segment's end event (when defined), unwinds the parse
    stack, and restores the map/property bookkeeping of the parent
    segment.  Fixed: integer comparisons used ``is`` on literals
    (``multiplier is 2``, ``len(...) is 0``), which relies on CPython's
    small-int cache and raises SyntaxWarning on 3.8+; replaced with
    ``==`` / truthiness.
    """
    template = self._state[1].value
    if template.endevent is not None:
        self.events.append((template.endevent, self._state[0], None))
    # multiplier == 2 marks a map-typed segment (key/value pairs)
    if template.multiplier == 2:
        self.parentismap = 0
        self.waitingforprop = 0
    if not self._stack:
        self._scstate = ScannerState.IDLE
        return
    if template.valuetype is not ValueType.RAW:
        # one fewer child remains to be consumed by the parent segment
        self._stack[-1][3] = self._stack[-1][3] - 1
    if self._stack[-1][3] == 0:
        # parent is complete as well: pop it and recurse
        self._scstate = ScannerState.SEGMENT_ENDED
        self._state = self._stack.pop()
        if self._state[1].value.multiplier == 2:
            self.parentismap = 1
            self.waitingforprop = 1
        self.handle_segment_ended()
    else:
        if self._stack[-1][1].value.multiplier == 2:
            self.parentismap = 1
            self.waitingforprop = 1
        self._scstate = ScannerState.WAITING_FOR_HEADER
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ Do the things! Return: 0 Exceptions: """
def main():
    """Interactively compose a letter and send it via Gmail or local SMTP.

    Return:
        0
    """
    parser = argparse.ArgumentParser(
        description='Letter - a commandline interface')
    parser.add_argument('--gmail', action='store_true',
                        help='Send via Gmail')
    args = parser.parse_args()

    to = raw_input('To address > ')
    subject = raw_input('Subject > ')
    body = raw_input('Your Message > ')

    if args.gmail:
        user = fromaddr = raw_input('Gmail Address > ')
        pw = getpass.getpass()
        postie = letter.GmailPostman(user=user, pw=pw)
    else:
        # Unauthorized SMTP, localhost:25
        postie = letter.Postman()
        fromaddr = raw_input('From address > ')

    class Message(letter.Letter):
        Postie = postie
        From = fromaddr
        To = to
        Subject = subject
        Body = body

    return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send(self, jsonstr): """ Send jsonstr to the UDP collector """
def send(self, jsonstr):
    """Send *jsonstr* to the UDP collector at ``self.addr``.

    Fixed: the datagram socket was never closed, leaking one file
    descriptor per call.
    """
    udp_sock = socket(AF_INET, SOCK_DGRAM)
    try:
        udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
    finally:
        udp_sock.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_file(path): """Read a UTF-8 file from the package. Takes a list of strings to join to make the path"""
def read_file(path):
    """Read a UTF-8 file from the package.

    *path* is a list of path segments joined under the module-level
    ``here`` directory.
    """
    full_path = os.path.join(here, *path)
    with open(full_path, encoding="utf-8") as handle:
        return handle.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exec_file(path, name): """Extract a constant from a python file by looking for a line defining the constant and executing it."""
def exec_file(path, name):
    """Extract constant *name* from a python file by executing only the
    lines that define it.

    NOTE(review): relies on ``exec`` -- safe only for trusted, in-repo
    sources.
    """
    namespace = {}
    source = read_file(path)
    defining = [ln for ln in source.split('\n') if ln.startswith(name)]
    exec("\n".join(defining), namespace)
    return namespace[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pg_dump(db_name, backup_path): """Dump db_name to backup_path"""
def pg_dump(db_name, backup_path):
    """Dump the public schema of *db_name* into *backup_path* using the
    ``pg_dump`` CLI, returning the shell helper's result."""
    logger.info("Dumping %s to %s", repr(db_name), repr(backup_path))
    command = (
        'pg_dump "{db_name}" -U "{USER}" -h "{HOST}" '
        "--schema=public --file={backup_path}"
    ).format(db_name=db_name, backup_path=backup_path, **DB)
    return shell(command)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_file_name(source, destination): """Create a filename for the destination zip file."""
number = 1 if os.path.exists(os.path.join(destination, os.path.basename(source) + '.zip')): while True: zip_filename = os.path.join(destination, os.path.basename(source) + '_' + str(number) + '.zip') if not os.path.exists(zip_filename): break number = number + 1 else: zip_filename = os.path.join(destination, os.path.basename(source) + '.zip') return zip_filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backup_compresslevel(self, dirs): """Create a backup file with a compresslevel parameter."""
def _backup_compresslevel(self, dirs):
    """Write *dirs* into the zip honouring ``self.compress_level``.

    The ``compresslevel`` keyword requires Python 3.7+.
    """
    with ZipFile(self.zip_filename, 'w',
                 compresslevel=self.compress_level) as archive:
        for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
            archive.write(path, path[len(self.source):len(path)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backup_pb_gui(self, dirs): """Create a zip backup with a GUI progress bar."""
import PySimpleGUI as sg # Legacy support with ZipFile(self.zip_filename, 'w') as backup_zip: for count, path in enumerate(dirs): backup_zip.write(path, path[len(self.source):len(path)]) if not sg.OneLineProgressMeter('Writing Zip Files', count + 1, len(dirs) - 1, 'Files'): break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backup_pb_tqdm(self, dirs): """Create a backup with a tqdm progress bar."""
# Console progress-bar variant: same zip layout as the other backends.
with ZipFile(self.zip_filename, 'w') as archive:
    progress = tqdm(dirs, desc='Writing Zip Files', total=len(dirs))
    for file_path in progress:
        # Entry name is the path relative to the backup source root.
        archive.write(file_path, file_path[len(self.source):])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def backup(self, paths=None): """Backup method driver."""
# Collect the file list lazily if the caller did not supply one.
if not paths:
    paths = self._get_paths()
# Backend selection by exception fallback:
#   1. compresslevel variant (TypeError on Python < 3.7),
#   2. PySimpleGUI progress bar (ImportError when not installed),
#   3. tqdm progress bar as the final fallback.
# NOTE(review): any TypeError raised while writing (not just the
# unsupported keyword) also triggers the fallback -- confirm intended.
try:
    self._backup_compresslevel(paths)
except TypeError:
    try:
        self._backup_pb_gui(paths)
    except ImportError:
        self._backup_pb_tqdm(paths)
# Delete source if specified
if self.delete_source:
    shutil.rmtree(self.source)
return self.zip_filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_uris(config): """ returns a tuple of total file size in bytes, and the list of files """
file_names = []
# INPUT_DATA is mandatory; exit loudly when the config omits it.
if config.INPUT_DATA is None:
    sys.stderr.write("you need to provide INPUT_DATA in config\n")
    sys.exit(1)
# Accept a single URI as well as a list (basestring => Python 2 code).
# NOTE: this mutates the caller's config in place.
if isinstance(config.INPUT_DATA, basestring):
    config.INPUT_DATA = [config.INPUT_DATA]
file_size = 0
for uri in config.INPUT_DATA:
    # The first scheme whose regex matches handles the URI; its handler
    # appends the matching files to file_names and returns their size.
    for regex, uri_method, _, _ in URI_REGEXES:
        m = regex.match(uri)
        if m is not None:
            file_size += uri_method(m, file_names, config)
            break
print("going to process {} files...".format(len(file_names)))
return file_size, file_names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buildschema(_cls=None, **kwargs): """Class decorator used to build a schema from the decorated class. :param type _cls: class to decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema class. """
if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def funcschema(default=None, *args, **kwargs): """Decorator to use in order to transform a function into a schema."""
# Bare usage (@funcschema) passes the function positionally as `default`;
# parametrized usage (@funcschema(...)) leaves default=None and returns a
# decorator awaiting the function.
if default is None:
    # The lambda's own `default` parameter deliberately shadows the outer
    # name: it receives the decorated function on the second call.
    # NOTE(review): forwarding positional *args together with the
    # `default=` keyword would collide on the first positional slot if
    # args is ever non-empty -- verify no caller does that.
    return lambda default: funcschema(default=default, *args, **kwargs)
return FunctionSchema(default=default, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_wiki_json(path='wikipedia_crawler_data.json', model=WikiItem, batch_len=100, db_alias='default', verbosity=2): """Read json file and create the appropriate records according to the given database model."""
return djdb.import_json(path=path, model=model, batch_len=batch_len, db_alias=db_alias, verbosity=verbosity)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_item(self, item, crawler='wiki', truncate_strings=True, verbosity=0): """Import a single record from a Scrapy Item dict >> WikiItem().import_item({'url': 'http://test.com', 'modified': '13 January 2014 00:15', 'crawler': 'more than thirty characters in this silly name'}) # doctest: +ELLIPSIS <WikiItem: WikiItem('more than thirty characters in', u'http://test.com', '', datetime.datetime(2014, 1, 13, 0, 15), '')> """
# Copy so we never mutate the Scrapy Item passed by the caller.
item = dict(item)
self.crawler = str(crawler)
# _item_mapping: item key -> {'name': model field, 'type': converter,
# 'default': fallback}. (iteritems/basestring => Python 2 code.)
for k, v in self._item_mapping.iteritems():
    if verbosity > 2:
        print('%r: %r' % (k, v))
    value = item.get(k, v['default'])
    if value is None:
        continue
    # Best-effort type coercion; keep the raw value on any failure.
    try:
        value = v['type'](value)
    except:
        pass
    field = self.__class__._meta.get_field_by_name(v['name'])[0]
    # Enforce the model's max_length for string fields.
    if isinstance(value, basestring):
        max_length = getattr(field, 'max_length', None)
        if max_length and len(value) > max_length:
            if truncate_strings:
                value = value[:max_length]
            else:
                raise RuntimeError('String loaded from json is length %s and destination field max_length is %s.' % (len(value), max_length))
    # Wiki timestamps arrive as free-form text; normalize for date fields.
    if isinstance(field, (models.DateTimeField, models.DateField)):
        value = util.clean_wiki_datetime(value)
    setattr(self, v['name'], value)
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def urlencode(self): """ Convert dictionary into a query string; keys are assumed to always be str """
output = ('%s=%s' % (k, quote(v)) for k, v in self.items()) return '&'.join(output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid(self): """ Error reporting is triggered when a form is checked for validity """
is_valid = super(GAErrorReportingMixin, self).is_valid() if self.is_bound and not is_valid: try: self.report_errors_to_ga(self.errors) except: # noqa: E722 logger.exception('Failed to report form errors to Google Analytics') return is_valid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ga_event_category(self): """ Event category, defaults to form class name """
return self.ga_event_category or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_ga_hit(self, field_name, error_message): """ Format a single hit """
tracking_id = self.get_ga_tracking_id() if not tracking_id: warnings.warn('Google Analytics tracking ID is not set') return None query_dict = self.get_ga_query_dict() query_dict['tid'] = tracking_id query_dict['cid'] = self.get_ga_client_id() query_dict['ec'] = self.get_ga_event_category() query_dict['ea'] = field_name query_dict['el'] = error_message return query_dict.urlencode()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ga_tracking_id(self): """ Retrieve tracking ID from settings """
if hasattr(settings, self.ga_tracking_id_settings_key): return getattr(settings, self.ga_tracking_id_settings_key) return super(GARequestErrorReportingMixin, self).get_ga_tracking_id()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ga_client_id(self): """ Retrieve the client ID from the Google Analytics cookie, if available, and save in the current session """
request = self.get_ga_request()
# Without a request/session we cannot persist an ID; defer to the chain.
if not request or not hasattr(request, 'session'):
    return super(GARequestErrorReportingMixin, self).get_ga_client_id()
if 'ga_client_id' not in request.session:
    # Pull the cid out of the "_ga" cookie via ga_cookie_re (named group
    # 'cid'); fall back to a fresh random UUID when absent/malformed.
    client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
    client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
    # Cache in the session so all hits from this visitor share one cid.
    request.session['ga_client_id'] = client_id
return request.session['ga_client_id']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ga_query_dict(self): """ Adds user agent and IP to the default hit parameters """
query_dict = super(GARequestErrorReportingMixin, self).get_ga_query_dict() request = self.get_ga_request() if not request: return query_dict user_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', '')) user_ip = user_ip.split(',')[0].strip() user_agent = request.META.get('HTTP_USER_AGENT') user_language = request.META.get('HTTP_ACCEPT_LANGUAGE') if user_ip: query_dict['uip'] = user_ip if user_agent: query_dict['ua'] = user_agent if user_language: query_dict['ul'] = user_language return query_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_js(ctx, force=False): """Build all javascript files. """
# Transpile each JSX source into the Django static tree via babel.
src_prefix = '{pkg.source_js}/'
dst_prefix = '{pkg.django_static}/{pkg.name}/js/'
for fname in JSX_FILENAMES:
    jstools.babel(
        ctx,
        src_prefix + fname,
        dst_prefix + fname + '.js',
        force=force
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build(ctx, less=False, docs=False, js=False, force=False): """Build everything and collectstatic. """
# With no explicit target flags, build everything.
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
    # path.py-style arithmetic: (source_less / name) + '.less'
    less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
    if less_fname.exists():
        lessc.LessRule(
            ctx,
            src='{pkg.source_less}/{pkg.name}.less',
            dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
            force=force
        )
    elif less:
        # Only warn when --less was explicitly requested.
        print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
    if WARN_ABOUT_SETTINGS:
        warnings.warn(
            "autodoc might need a dummy settings file in the root of "
            "your package. Since it runs in a separate process you cannot"
            "use settings.configure()"
        )
    doctools.build(ctx, force=force)
if buildall or js:
    build_js(ctx, force)
# Re-collect static files only when something under django_static changed.
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
    collectstatic(ctx, DJANGO_SETTINGS_MODULE)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def watch(ctx): """Automatically run build whenever a relevant file changes. """
watcher = Watcher(ctx)
# One watch rule per asset type: (directory template, extension, build flag).
rules = [
    ('{pkg.source_less}', '.less', dict(less=True)),
    ('{pkg.source_js}', '.jsx', dict(js=True)),
    ('{pkg.docs}', '.rst', dict(docs=True)),
]
for directory, extension, flags in rules:
    watcher.watch_directory(
        path=directory,
        ext=extension,
        # Bind flags as a default argument to avoid late-binding in the loop.
        action=lambda e, _flags=flags: build(ctx, **_flags)
    )
watcher.start()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _force_float(v): """ Converts given argument to float. On fail logs warning and returns 0.0. Args: v (any): value to convert to float Returns: float: converted v or 0.0 if conversion failed. """
try: return float(v) except Exception as exc: return float('nan') logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dict(self): """Return a dict that can be passed into the ColumnStats constructor"""
# Skewness/kurtosis divide by the variance; an all-constant column makes
# that zero, so degrade to NaN instead of crashing.
try:
    skewness = self.skewness
    kurtosis = self.kurtosis
except ZeroDivisionError:
    skewness = kurtosis = float('nan')
# Always-present identification / cardinality columns.
base_cols = [
    ('name', self.column_name),
    ('flags', self.flags),
    ('type', self.type.__name__),
    ('lom', self.lom),
    ('count', self.n),
    ('nuniques', self.nuniques),
    ('width', self.size),
]
# Summary statistics, included only when self.descriptive is set.
descriptive_cols = [
    ('mean', self.mean),
    ('std', self.stddev),
    ('min', self.min),
    ('p25', self.p25),
    ('p50', self.p50),
    ('p75', self.p75),
    ('max', self.max)
]
# Shape/histogram info, included only when self.distribution is set.
distribution_cols = [
    ('skewness', skewness),
    ('kurtosis', kurtosis),
    ('hist', self.bins),
    ('text_hist', text_hist(self.bins)),
]
# Example values, included only when self.sample_values is set.
sample_values_cols = [
    ('uvalues', self.uvalues)
]
return OrderedDict(
    base_cols +
    (descriptive_cols if self.descriptive else []) +
    (distribution_cols if self.distribution else []) +
    (sample_values_cols if self.sample_values else [])
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Run the stats. The source must yield Row proxies. """
self._func, self._func_code = self.build() def process_row(row): try: self._func(self._stats, row) except TypeError as e: raise TypeError("Failed for '{}'; {}".format(self._func_code, e)) except KeyError: raise KeyError( 'Failed to find key in row. headers = "{}", code = "{}" ' .format(list(row.keys()), self._func_code)) except Exception as e: raise type(e)( 'General exception in stats. headers = "{}", code = "{}": {} ' .format(list(row.keys()), self._func_code, e)) # Use all of the rows in the source if self._sample_size is None: for i, row in enumerate(self._source): process_row(row) # Use a sample of rows, evenly distributed though the source else: skip_rate = self._sample_size / self._n_rows i = 0 skip = skip_rate for j, row in enumerate(self._source): skip += skip_rate if skip >= 1: skip -= 1 i += 1 process_row(row) if i < 5000: # Since the hist bins aren't built until 5K row for k, v in self._stats.items(): v._build_hist_bins() return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_labs(format): """Gets Hackerspaces data from hackaday.io."""
hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url) hackerspaces = {} # Load all the Hackerspaces for i in hackerspaces_json: current_lab = Hackerspace() current_lab.id = i["id"] current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id current_lab.name = i["name"] if len(i["description"]) != 0: current_lab.description = i["description"] elif len(i["summary"]) != 0: current_lab.description = i["summary"] current_lab.created_at = i["moments"]["exact"] # Check if there are coordinates if i["latlon"] is not None: latlon = json.loads(i["latlon"]) current_lab.latitude = latlon["lat"] current_lab.longitude = latlon["lng"] # Get country, county and city from them country = geolocator.reverse( [latlon["lat"], latlon["lng"]]) current_lab.country = country.raw[ "address"]["country"] current_lab.address = country.raw["display_name"] current_lab.address_1 = country.raw["display_name"] current_lab.country_code = country.raw[ "address"]["country_code"] current_lab.county = country.raw[ "address"]["state_district"] current_lab.city = country.raw[ "address"]["city"] current_lab.postal_code = country.raw[ "address"]["postcode"] else: # For labs without a location or coordinates # add 0,0 as coordinates current_lab.latitude = 0.0 current_lab.longitude = 0.0 # Add the lab hackerspaces[i["name"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in hackerspaces: single = hackerspaces[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in hackerspaces: output[j] = 
hackerspaces[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = hackerspaces # Default: return an oject else: output = hackerspaces # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_ramp_for_errors(ramp_data): """Checks ramp for errors. This is experiment specific checklist."""
error_list = []
keyframe_list = ramps.KeyFrameList(ramp_data['keyframes'])
sorted_key_list = keyframe_list.sorted_key_list()
channel_list = [ramps.Channel(ch_name, ramp_data['channels'][ch_name],
                              keyframe_list)
                for ch_name in ramp_data['channels']]
sorted_absolute_times = [keyframe_list.get_absolute_time(sk)
                         for sk in sorted_key_list]
ramp_properties = ramp_data['properties']
# Smallest time step the hardware can resolve; all keyframe times must
# fall on multiples of this.
jump_resolution = ramp_properties['jump_resolution']
for key_name, abs_time in zip(sorted_key_list, sorted_absolute_times):
    # check if all times are +ve
    if abs_time < 0.0:
        error_fmt = "keyframe \'{0}\' has negative absolute time {1}"
        error_str = error_fmt.format(key_name, abs_time)
        error_list.append(error_str)
    # check if all times are a multiple of minimum resolution
    # (0.0001-step tolerance absorbs floating-point rounding)
    steps_float = abs_time / jump_resolution
    steps_residue = steps_float - round(steps_float)
    if steps_residue > 0.0001:
        error_fmt = ("keyframe \'{0}\' has absolute time {1} which is not"
                     " a multiple of jump_resolution {2}")
        error_str = error_fmt.format(key_name, abs_time, jump_resolution)
        error_list.append(error_str)
# find missing channels: every expected hardware channel id must appear
# exactly once in the ramp's channel definitions.
ch_ids = digital_channel_ids()
ch_ids += dev2_analog_ids()
# ignore p31, since we used that for Dev1 timing
for ch_id in ch_ids:
    n_found = 0
    for ch in channel_list:
        if ch.dct['id'] == ch_id:
            n_found += 1
    if n_found != 1:
        error_fmt = '{0} copies of {1} found. There should only be 1'
        error_str = error_fmt.format(n_found, ch_id)
        error_list.append(error_str)
# check for timing overlap in keyframelist
error_keyname = keyframe_list.do_keyframes_overlap()
if error_keyname is not None:
    error_fmt = '{0} overlaps with the next keyframe'
    error_str = error_fmt.format(error_keyname)
    error_list.append(error_str)
return error_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _colourise(text: str, colour: str) -> str: """Colour text, if possible. Args: text: Text to colourise colour: Colour to display text in Returns: Colourised text, if possible """
if COLOUR: text = style(text, fg=colour, bold=True) return text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _apply(self, ctx: ExtensionContext) -> AugmentedDict: """ Performs the actual loading of an external resource into the current model. Args: ctx: The processing context. Returns: Returns a dictionary that gets incorporated into the actual model. """
def process(pattern: Pattern[str], _str: str) -> Any:
    # Substitute the extension placeholder in _str with the contents of
    # the external file it references.
    _match = pattern.match(_str)
    if _match is None:
        return _str  # pragma: no cover
    # We got a match
    # Group 0: Whole match; Group 1: Our placeholder;
    # Group 2: file path to external resource
    placeholder, external_path = _match.group(1), _match.group(2)
    # Resolve the path relative to the current document when that
    # document is a real file on disk.
    with open(self.locator(
            external_path,
            cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
    )) as fhandle:
        # Json does not support line breaks. We will have to mask them
        # NOTE(review): no masking actually happens here -- the file
        # content is inserted verbatim; confirm whether that comment is
        # stale or masking is still a TODO.
        content = fhandle.read()
    return _str.replace(placeholder, content)

node_key, node_value = ctx.node
# __pattern__ is the subclass-defined placeholder regex.
_pattern = re.compile(self.__pattern__)
return {node_key: process(_pattern, node_value)}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _apply(self, ctx: ExtensionContext) -> Any: """ Loads a yaml fragment from an external file. Args: ctx: The processing context. Returns: The external resource as a python dictionary. The fragment is already send through the processor as well. """
_, external_path = ctx.node return ctx.mentor.load_yaml(self.locator( external_path, cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def preprocess(self):
    '''
    Performs initial cell conversions to standard types. This will strip
    units, scale numbers, and identify numeric data where it's
    convertible. Populates processed_tables, flags_by_table and
    units_by_table (one entry each per worksheet) and returns the
    processed tables.
    '''
    processed = []
    flag_sets = []
    unit_sets = []
    for sheet_index, raw_table in enumerate(self.raw_tables):
        table, table_flags, table_units = self.preprocess_worksheet(
            raw_table, sheet_index)
        processed.append(table)
        flag_sets.append(table_flags)
        unit_sets.append(table_units)
    self.processed_tables = processed
    self.flags_by_table = flag_sets
    self.units_by_table = unit_sets
    return self.processed_tables
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def generate_blocks(self, assume_complete_blocks=None): ''' Identifies and extracts all blocks from the input tables. These blocks are logical identifiers for where related information resides in the original table. Any block can be converted into a row-titled table which can then be stitched together with other tables from other blocks to form a fully converted data set. Args: assume_complete_blocks: Optimizes block loopups by not allowing titles to be extended. Blocks should be perfectly dense to be found when active. Optional, defaults to constructor value. ''' # Store this value to restore object settings later _track_assume_blocks = self.assume_complete_blocks try: if assume_complete_blocks != None: self.assume_complete_blocks = assume_complete_blocks if self.processed_tables == None: self.preprocess() self.processed_blocks = [] for worksheet in range(len(self.processed_tables)): ptable = self.processed_tables[worksheet] flags = self.flags_by_table[worksheet] units = self.units_by_table[worksheet] if not self.assume_complete_blocks: self.fill_in_table(ptable, worksheet, flags) self.processed_blocks.extend(self._find_blocks(ptable, worksheet, flags, units, { 'worksheet': worksheet })) return self.processed_blocks finally: # After execution, reset assume_complete_blocks back self.assume_complete_blocks = _track_assume_blocks
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def preprocess_worksheet(self, table, worksheet):
    ''' Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell.
        Returns a (converted_table, flags, units) triple for the sheet. '''
    table_conversion = []
    flags = {}
    units = {}
    for rind, row in enumerate(table):
        conversion_row = []
        table_conversion.append(conversion_row)
        # Rows configured as skippable are flagged and left empty.
        if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]:
            self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row'])
            continue
        for cind, cell in enumerate(row):
            position = (rind, cind)
            # Columns configured as skippable become None placeholders.
            if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]:
                conversion = None
                self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column'])
            else:
                # Do the heavy lifting in pre_process_cell
                conversion = auto_convert_cell(self, cell, position, worksheet, flags, units, parens_as_neg=self.parens_as_neg)
            conversion_row.append(conversion)
    # Give back our conversions, type labeling, and conversion flags
    return table_conversion, flags, units