text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_tweet(self, tweet, user_stream): """ Check if a tweet matches the defined criteria :param tweet: The tweet in question :type tweet: :class:`~responsebot.models.Tweet` :return: True if matched, False otherwise """
def match_tweet(self, tweet, user_stream):
    """
    Check if a tweet matches the defined criteria

    :param tweet: The tweet in question
    :type tweet: :class:`~responsebot.models.Tweet`
    :return: True if matched, False otherwise
    """
    if not user_stream:
        # Public stream: a hit on either track terms or followed users counts.
        return self.is_tweet_match_track(tweet) or self.is_tweet_match_follow(tweet)
    # User stream: only filter by track terms when any are configured.
    if len(self.track) > 0:
        return self.is_tweet_match_track(tweet)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connectMSExchange(server): """ Creates a connection for the inputted server to a Microsoft Exchange server. :param server | <smtplib.SMTP> :return (<bool> success, <str> reason) """
def connectMSExchange(server):
    """
    Creates a connection for the inputted server to a Microsoft Exchange server.

    :param server | <smtplib.SMTP>

    :return (<bool> success, <str> reason)
    """
    if not sspi:
        return False, 'No sspi module found.'

    # send the SMTP EHLO command
    code, response = server.ehlo()
    if code != SMTP_EHLO_OKAY:
        return False, 'Server did not respond to EHLO command.'

    sspi_client = sspi.ClientAuth('NTLM')

    # generate NTLM Type 1 message
    sec_buffer = None
    err, sec_buffer = sspi_client.authorize(sec_buffer)

    # base64.encodestring was deprecated and removed in Python 3.9;
    # encodebytes is the supported equivalent.  The buffer is bytes, so the
    # newline strip must also operate on bytes before decoding to str.
    # noinspection PyShadowingBuiltins
    buffer = sec_buffer[0].Buffer
    ntlm_message = base64.encodebytes(buffer).replace(b'\n', b'').decode('ascii')

    # send NTLM Type 1 message -- Authentication Request
    code, response = server.docmd('AUTH', 'NTLM ' + ntlm_message)

    # verify the NTLM Type 2 response -- Challenge Message
    if code != SMTP_AUTH_CHALLENGE:
        msg = 'Server did not respond as expected to NTLM negotiate message'
        return False, msg

    # generate NTLM Type 3 message
    err, sec_buffer = sspi_client.authorize(base64.decodebytes(response))
    # noinspection PyShadowingBuiltins
    buffer = sec_buffer[0].Buffer
    ntlm_message = base64.encodebytes(buffer).replace(b'\n', b'').decode('ascii')

    # send the NTLM Type 3 message -- Response Message
    code, response = server.docmd('', ntlm_message)
    if code != SMTP_AUTH_OKAY:
        return False, response

    return True, ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_entries(self, entries: List[Tuple[str, str]], titles, resources): """ Provide the template the data for the toc entries """
def set_entries(self, entries: List[Tuple[str, str]], titles, resources):
    """ Provide the template the data for the toc entries """
    self.entries = []
    for flag, pagename in entries:
        title = titles[pagename].children[0]
        resource = resources.get(pagename, None)
        # Unpublished resources are skipped entirely.
        if resource and hasattr(resource, 'is_published') and not resource.is_published:
            continue
        # Entries without a backing resource still come from the toctree info.
        self.entries.append(
            dict(title=title, href=pagename, resource=resource)
        )
    self.result_count = len(self.entries)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render(self, builder, context, sphinx_app: Sphinx): """ Given a Sphinx builder and context with site in it, generate HTML """
def render(self, builder, context, sphinx_app: Sphinx):
    """ Given a Sphinx builder and context with site in it, generate HTML """
    context['sphinx_app'] = sphinx_app
    context['toctree'] = self
    template_name = self.template + '.html'
    return builder.templates.render(template_name, context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_code(url): """ Parse the code parameter from a URL :param str url: URL to parse :return: code query parameter :rtype: str """
def parse_code(url):
    """
    Parse the code parameter from a URL

    :param str url: URL to parse
    :return: code query parameter
    :rtype: str
    """
    result = urlparse(url)
    query = parse_qs(result.query)
    # parse_qs maps every key to a *list* of values; the documented return
    # type is a single string, so take the first (in practice only) value.
    return query['code'][0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_token(scopes, client_id=None, client_secret=None, redirect_uri=None): """ Generate a user access token :param List[str] scopes: Scopes to get :param str client_id: Spotify Client ID :param str client_secret: Spotify Client secret :param str redirect_uri: Spotify redirect URI :return: Generated access token :rtype: User """
def user_token(scopes, client_id=None, client_secret=None, redirect_uri=None):
    """
    Generate a user access token

    :param List[str] scopes: Scopes to get
    :param str client_id: Spotify Client ID
    :param str client_secret: Spotify Client secret
    :param str redirect_uri: Spotify redirect URI
    :return: Generated access token
    :rtype: User
    """
    # raw_input() only exists on Python 2 and raises NameError on Python 3;
    # fall back to input() so the prompt works on both interpreters.
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    webbrowser.open_new(authorize_url(client_id=client_id,
                                      redirect_uri=redirect_uri,
                                      scopes=scopes))
    code = parse_code(prompt('Enter the URL that you were redirected to: '))
    return User(code, client_id=client_id, client_secret=client_secret,
                redirect_uri=redirect_uri)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consume_file(self, infile): """Load the specified GFF3 file into memory."""
def consume_file(self, infile):
    """Load the specified GFF3 file into memory."""
    gff3_reader = tag.reader.GFF3Reader(infilename=infile)
    self.consume(gff3_reader)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consume(self, entrystream): """ Load a stream of entries into memory. Only Feature objects and sequence-region directives are loaded, all other entries are discarded. """
def consume(self, entrystream):
    """
    Load a stream of entries into memory.

    Only Feature objects and sequence-region directives are loaded, all
    other entries are discarded.
    """
    for record in entrystream:
        is_seqreg_directive = (isinstance(record, tag.directive.Directive)
                               and record.type == 'sequence-region')
        if is_seqreg_directive:
            self.consume_seqreg(record)
        elif isinstance(record, tag.feature.Feature):
            self.consume_feature(record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(self, seqid, start, end, strict=True): """ Query the index for features in the specified range. :param seqid: ID of the sequence to query :param start: start of the query interval :param end: end of the query interval :param strict: indicates whether query is strict containment or overlap (:code:`True` and :code:`False`, respectively) """
def query(self, seqid, start, end, strict=True):
    """
    Query the index for features in the specified range.

    :param seqid: ID of the sequence to query
    :param start: start of the query interval
    :param end: end of the query interval
    :param strict: indicates whether query is strict containment or overlap
                   (:code:`True` and :code:`False`, respectively)
    """
    hits = self[seqid].search(start, end, strict)
    return sorted(hit.data for hit in hits)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cli(ctx, stage): """Show the functions that are available, bubble system and custom."""
def cli(ctx, stage):
    """Show the functions that are available, bubble system and custom."""
    if not ctx.bubble:
        ctx.say_yellow('There is no bubble present, '
                       'will not show any transformer functions')
        raise click.Abort()

    rule_functions = get_registered_rule_functions()
    ctx.gbc.say('before loading functions:' + str(len(rule_functions)))
    load_rule_functions(ctx)
    ctx.gbc.say('after loading functions:' + str(len(rule_functions)))
    ctx.gbc.say('rule_functions:', stuff=rule_functions, verbosity=10)
    rule_functions.set_parent(ctx.gbc)

    for fun_name in rule_functions:
        ctx.say('fun: ' + fun_name, verbosity=1)
    ctx.gbc.say('funs: ',
                stuff=rule_functions.get_rule_functions(),
                verbosity=100)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_utc(a_datetime, keep_utc_tzinfo=False): """ Convert a time awared datetime to utc datetime. :param a_datetime: a timezone awared datetime. (If not, then just returns) :param keep_utc_tzinfo: whether to retain the utc time zone information. **中文文档** 将一个带时区的时间转化成UTC时间。而对于UTC时间而言, 有没有时区信息都无所谓了。 """
def to_utc(a_datetime, keep_utc_tzinfo=False):
    """
    Convert a timezone-aware datetime to a UTC datetime.

    A naive (tzinfo-less) datetime is returned unchanged, since for a UTC
    value the timezone information is redundant anyway.

    :param a_datetime: a timezone-aware datetime (naive values pass through).
    :param keep_utc_tzinfo: whether to retain the UTC tzinfo on the result.
    """
    if not a_datetime.tzinfo:
        return a_datetime
    converted = a_datetime.astimezone(utc)  # convert to utc time
    if keep_utc_tzinfo is False:
        converted = converted.replace(tzinfo=None)
    return converted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False): """ Convert a UTC datetime to a time awared local time :param utc_datetime: :param tzinfo: :param keep_tzinfo: """
def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False):
    """
    Convert a UTC datetime to a timezone-aware local datetime.

    :param utc_datetime: naive datetime assumed to represent UTC time
    :param tzinfo: target timezone
    :param keep_tzinfo: whether to retain tzinfo on the result
    """
    localized = utc_datetime.replace(tzinfo=utc).astimezone(tzinfo)
    if keep_tzinfo is False:
        localized = localized.replace(tzinfo=None)
    return localized
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def repr_data_size(size_in_bytes, precision=2): # pragma: no cover """Return human readable string represent of a file size. Doesn"t support size greater than 1EB. For example: - 100 bytes => 100 B - 100,000 bytes => 97.66 KB - 100,000,000 bytes => 95.37 MB - 100,000,000,000 bytes => 93.13 GB - 100,000,000,000,000 bytes => 90.95 TB - 100,000,000,000,000,000 bytes => 88.82 PB Magnitude of data:: 1000 kB kilobyte 1000 ** 2 MB megabyte 1000 ** 3 GB gigabyte 1000 ** 4 TB terabyte 1000 ** 5 PB petabyte 1000 ** 6 EB exabyte 1000 ** 7 ZB zettabyte 1000 ** 8 YB yottabyte """
def repr_data_size(size_in_bytes, precision=2):  # pragma: no cover
    """Return human readable string represent of a file size.

    Doesn't support size greater than 1EB.

    For example:

    - 100 bytes => 100 B
    - 100,000 bytes => 97.66 KB
    - 100,000,000 bytes => 95.37 MB
    - 100,000,000,000 bytes => 93.13 GB
    - 100,000,000,000,000 bytes => 90.95 TB
    - 100,000,000,000,000,000 bytes => 88.82 PB

    Magnitude of data: B, KB, MB, GB, TB, PB, EB, ZB, YB
    (each step is a factor of 1024).
    """
    if size_in_bytes < 1024:
        return "%s B" % size_in_bytes

    units = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    index = 0
    remainder = 0
    while True:
        index += 1
        size_in_bytes, remainder = divmod(size_in_bytes, 1024)
        if size_in_bytes < 1024:
            break
    value = size_in_bytes + remainder / 1024.0
    return "{0:.{1}f} {2}".format(value, precision, units[index])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_toctrees(kb_app: kb, sphinx_app: Sphinx, doctree: doctree, fromdocname: str): """ Look in doctrees for toctree and replace with custom render """
def render_toctrees(kb_app: kb, sphinx_app: Sphinx, doctree: doctree,
                    fromdocname: str):
    """ Look in doctrees for toctree and replace with custom render """
    # Only do any of this if toctree support is turned on in KaybeeSettings.
    # By default, this is off.
    settings: KaybeeSettings = sphinx_app.config.kaybee_settings
    if not settings.articles.use_toctree:
        return

    # Setup a template and context
    builder: StandaloneHTMLBuilder = sphinx_app.builder
    env: BuildEnvironment = sphinx_app.env

    # Toctree support. First, get the registered toctree class, if any
    registered_toctree = ToctreeAction.get_for_context(kb_app)
    for node in doctree.traverse(toctree):
        if node.attributes['hidden']:
            continue
        custom_toctree = registered_toctree(fromdocname)
        context = builder.globalcontext.copy()
        context['sphinx_app'] = sphinx_app

        # Only one level of depth is handled for now.  Going further means
        # recursing like sphinx's adapters.toctree._toctree_add_classes.
        entries = node.attributes['entries']

        # Some toctree items may not be resources in our "database", so ask
        # Sphinx for the titles.
        custom_toctree.set_entries(entries, env.titles,
                                   sphinx_app.env.resources)
        output = custom_toctree.render(builder, context, sphinx_app)

        # Put the output into the node contents
        listing = [nodes.raw('', output, format='html')]
        node.replace_self(listing)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stamp_excerpt(kb_app: kb, sphinx_app: Sphinx, doctree: doctree): """ Walk the tree and extract excerpt into resource.excerpt """
def stamp_excerpt(kb_app: kb, sphinx_app: Sphinx, doctree: doctree):
    """ Walk the tree and extract excerpt into resource.excerpt """
    # First, find out which resource this is. Won't be easy.
    resources = sphinx_app.env.resources
    confdir = sphinx_app.confdir
    source = PurePath(doctree.attributes['source'])

    # The relative path inside the docs dir, without .rst, is the docname.
    docname = str(source.relative_to(confdir)).split('.rst')[0]
    resource = resources.get(docname)
    if not resource:
        return

    # Stamp the excerpt on the resource
    excerpt = getattr(resource.props, 'excerpt', False)
    auto_excerpt = getattr(resource.props, 'auto_excerpt', False)
    if excerpt:
        resource.excerpt = excerpt
    elif not auto_excerpt:
        resource.excerpt = None
    else:
        # Extract the excerpt based on the number of paragraphs
        # in auto_excerpt
        resource.excerpt = get_rst_excerpt(doctree, auto_excerpt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bitfieldify(buff, count): """Extract a bitarray out of a bytes array. Some hardware devices read from the LSB to the MSB, but the bit types available prefer to put pad bits on the LSB side, completely changing the data. This function takes in bytes and the number of bits to extract starting from the LSB, and produces a bitarray of those bits. """
def bitfieldify(buff, count):
    """Extract a bitarray out of a bytes array.

    Some hardware devices read from the LSB to the MSB, but the bit types
    available prefer to put pad bits on the LSB side, completely changing
    the data.  This function takes in bytes and the number of bits to
    extract starting from the LSB, and produces a bitarray of those bits.
    """
    bits = bitarray()
    bits.frombytes(buff)
    # Keep only the trailing `count` bits (the LSB side of the buffer).
    return bits[len(bits) - count:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_byte_align_buff(bits): """Pad the left side of a bitarray with 0s to align its length with byte boundaries. Args: bits: A bitarray to be padded and aligned. Returns: A newly aligned bitarray. """
def build_byte_align_buff(bits):
    """Pad the left side of a bitarray with 0s to align its length with
    byte boundaries.

    Args:
        bits: A bitarray to be padded and aligned.

    Returns:
        A newly aligned bitarray.
    """
    remainder = len(bits) % 8
    if remainder:
        # KEEP bitarray
        padding = bitarray(8 - remainder)
        padding.setall(False)
    else:
        padding = bitarray()
    return padding + bits
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, name, cidr, **kwargs): """This function will create a user network. Within OpenStack, it will create a network and a subnet Within AWS, it will create a VPC and a subnet :param name: string :param cidr: string E.x: "10.0.0.0/24" :param kwargs: dict :return: dict """
def create(self, name, cidr, **kwargs):
    """This function will create a user network.

    Within OpenStack, it will create a network and a subnet.
    Within AWS, it will create a VPC and a subnet.

    :param name: string
    :param cidr: string E.x: "10.0.0.0/24"
    :param kwargs: dict
    :return: dict
    """
    driver = self.driver
    return driver.create(name, cidr, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_whole_word(w): """ Scan through string looking for a location where this word produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern; note that this is different from finding a zero-length match at some point in the string. """
def find_whole_word(w):
    """
    Return a callable that scans a string for *w* as a whole word
    (case-insensitively) and returns the corresponding MatchObject,
    or None when no position in the string matches.
    """
    pattern = re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE)
    return pattern.search
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetCompressedFilesInDir(fileDir, fileList, ignoreDirList, supportedFormatList = ['.rar',]): """ Get all supported files from given directory folder. Appends to given file list. Parameters fileDir : string File directory to search. fileList : list List which any file matches will be added to. ignoreDirList : list List of directories to ignore in recursive lookup (currently unused). supportedFormatList : list [optional : default = ['.rar',]] List of supported file formats to search for. """
def GetCompressedFilesInDir(fileDir, fileList, ignoreDirList, supportedFormatList=None):
    """
    Get all supported files from given directory folder. Appends to given
    file list.

    Parameters
    ----------
    fileDir : string
        File directory to search.

    fileList : list
        List which any file matches will be added to.

    ignoreDirList : list
        List of directories to ignore in recursive lookup (currently unused).

    supportedFormatList : list [optional : default = ['.rar',]]
        List of supported file formats to search for.
    """
    # A mutable default argument ([...]) is shared across calls; use None as
    # the default and substitute the real default inside the body instead.
    if supportedFormatList is None:
        supportedFormatList = ['.rar', ]
    goodlogging.Log.Info("EXTRACT", "Parsing file directory: {0}".format(fileDir))
    if os.path.isdir(fileDir) is True:
        for globPath in glob.glob(os.path.join(fileDir, '*')):
            if os.path.splitext(globPath)[1] in supportedFormatList:
                fileList.append(globPath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, otherPartFilePath = None): """ Archive all parts of multi-part compressed file. If file has been extracted (via part1) then move all subsequent parts directly to archive directory. If file has not been extracted then if part >1 add to other part skipped list and only archive when the first part is sent for archiving. Parameters firstPartExtractList : list File directory to search. otherPartSkippedList : list List which any file matches will be added to. archiveDir : list List of directories to ignore in recursive lookup (currently unused). otherPartFilePath : list [optional : default = None] List of supported file formats to search for. """
def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, otherPartFilePath = None):
    """
    Archive all parts of multi-part compressed file.

    If file has been extracted (via part1) then move all subsequent parts
    directly to archive directory.  If file has not been extracted then if
    part >1 add to other part skipped list and only archive when the first
    part is sent for archiving.

    Parameters
    ----------
    firstPartExtractList : list
        Base names whose first part has been extracted.

    otherPartSkippedList : list
        Parts deferred until their first part is archived.

    archiveDir : string
        Directory to archive into.

    otherPartFilePath : string [optional : default = None]
        Path of a single non-first part; when None, retry everything in
        otherPartSkippedList instead.
    """
    if otherPartFilePath is None:
        # Retry each previously skipped part (iterate over a copy, since
        # the recursive call may remove entries).
        for filePath in list(otherPartSkippedList):
            MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, filePath)
        return

    baseFileName = re.findall("(.+?)[.]part.+?rar", otherPartFilePath)[0]
    if baseFileName in firstPartExtractList:
        util.ArchiveProcessedFile(otherPartFilePath, archiveDir)
        if otherPartFilePath in otherPartSkippedList:
            otherPartSkippedList.remove(otherPartFilePath)
    elif otherPartFilePath not in otherPartSkippedList:
        otherPartSkippedList.append(otherPartFilePath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DoRarExtraction(rarArchive, targetFile, dstDir): """ RAR extraction with exception catching Parameters rarArchive : RarFile object RarFile object to extract. targetFile : string Target file name. dstDir : string Target directory. Returns boolean False if rar extraction failed, otherwise True. """
def DoRarExtraction(rarArchive, targetFile, dstDir):
    """
    RAR extraction with exception catching

    Parameters
    ----------
    rarArchive : RarFile object
        RarFile object to extract.

    targetFile : string
        Target file name.

    dstDir : string
        Target directory.

    Returns
    ----------
    boolean
        False if rar extraction failed, otherwise True.
    """
    try:
        rarArchive.extract(targetFile, dstDir)
    except BaseException as ex:
        goodlogging.Log.Info("EXTRACT", "Extract failed - Exception: {0}".format(ex))
        return False
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetRarPassword(skipUserInput): """ Get password for rar archive from user input. Parameters skipUserInput : boolean Set to skip user input. Returns string or boolean If no password is given then returns False otherwise returns user response string. """
def GetRarPassword(skipUserInput):
    """
    Get password for rar archive from user input.

    Parameters
    ----------
    skipUserInput : boolean
        Set to skip user input.

    Returns
    ----------
    string or boolean
        If no password is given then returns False otherwise returns user
        response string.
    """
    goodlogging.Log.Info("EXTRACT", "RAR file needs password to extract")
    if skipUserInput is False:
        prompt = "Enter password, 'x' to skip this file or 'exit' to quit this program: "
        response = util.CheckEmptyResponse(goodlogging.Log.Input("EXTRACT", prompt))
    else:
        response = 'x'

    answer = response.lower()
    if answer == 'x':
        goodlogging.Log.Info("EXTRACT", "File extraction skipped without password")
        return False
    if answer == 'exit':
        # Fatal is expected to terminate; falling through returns None.
        goodlogging.Log.Fatal("EXTRACT", "Program terminated by user 'exit'")
        return None
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def CheckPasswordReuse(skipUserInput): """ Check with user for password reuse. Parameters skipUserInput : boolean Set to skip user input. Returns int Integer from -1 to 2 depending on user response. """
def CheckPasswordReuse(skipUserInput):
    """
    Check with user for password reuse.

    Parameters
    ----------
    skipUserInput : boolean
        Set to skip user input.

    Returns
    ----------
    int
        Integer from -1 to 2 depending on user response.
    """
    goodlogging.Log.Info("EXTRACT", "RAR files needs password to extract")
    if skipUserInput is False:
        prompt = "Enter 't' to reuse the last password for just this file, " \
                 "'a' to reuse for all subsequent files, " \
                 "'n' to enter a new password for this file " \
                 "or 's' to enter a new password for all files: "
        response = goodlogging.Log.Input("EXTRACT", prompt)
        response = util.ValidUserResponse(response, ('t', 'a', 'n', 's'))
    else:
        response = 'a'

    # Map the validated single-letter response onto the documented codes.
    outcome = {'s': -1, 'n': 0, 't': 1, 'a': 2}
    return outcome.get(response.lower())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(self, func, singleton=False, threadlocal=False, name=None): """ Register a dependency function """
def register(self, func, singleton=False, threadlocal=False, name=None):
    """ Register a dependency function """
    # Tag the factory so get_value() knows its caching policy.
    func._giveme_singleton = singleton
    func._giveme_threadlocal = threadlocal

    if name is None:
        name = func.__name__
    self._registered[name] = func
    return func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_value(self, name): """ Get return value of a dependency factory or a live singleton instance. """
def get_value(self, name):
    """
    Get return value of a dependency factory or
    a live singleton instance.
    """
    factory = self._registered.get(name)
    if not factory:
        raise KeyError('Name not registered')

    if factory._giveme_singleton:
        # Cache the first result for the process lifetime.
        if name not in self._singletons:
            self._singletons[name] = factory()
        return self._singletons[name]

    if factory._giveme_threadlocal:
        # Cache the first result per thread.
        if not hasattr(self._threadlocals, name):
            setattr(self._threadlocals, name, factory())
        return getattr(self._threadlocals, name)

    return factory()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trace(fun, *a, **k): """ define a tracer for a rule function for log and statistic purposes """
def trace(fun, *a, **k):
    """ define a tracer for a rule function for log and statistic purposes """
    @wraps(fun)
    def tracer(*args, **kwargs):
        result = fun(*args, **kwargs)
        print('trace:fun: %s\n ret=%s\n a=%s\nk%s\n' %
              (str(fun), str(result), str(args), str(kwargs)))
        return result
    return tracer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timer(fun, *a, **k): """ define a timer for a rule function for log and statistic purposes """
def timer(fun, *a, **k):
    """ define a timer for a rule function for log and statistic purposes """
    @wraps(fun)
    def timed(*args, **kwargs):
        start = arrow.now()
        result = fun(*args, **kwargs)
        end = arrow.now()
        print('timer:fun: %s\n start:%s,end:%s, took [%s]' % (
            str(fun), str(start), str(end), str(end - start)))
        return result
    return timed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_function(self, fun=None): """get function as RuleFunction or return a NoRuleFunction function"""
def get_function(self, fun=None):
    """get function as RuleFunction or return a NoRuleFunction function"""
    sfun = str(fun)
    self.say('get_function:' + sfun, verbosity=100)
    if not fun:
        return NoRuleFunction()  # dummy to execute via no_fun
    if sfun in self._rule_functions:
        return self._rule_functions[sfun]

    # Unknown function: register a dummy stand-in so later lookups succeed.
    self.add_function(name=sfun, fun=self.rule_function_not_found(fun))
    self.cry('fun(%s) not found, returning dummy' % (sfun), verbosity=10)
    if sfun in self._rule_functions:
        return self._rule_functions[sfun]
    # BUG FIX: this fallback previously built the dummy but never returned
    # it, making the method yield None; return it so callers always get a
    # callable.
    return self.rule_function_not_found(fun)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_function(self, fun=None, name=None, fun_type=FUN_TYPE): """actually replace function"""
def add_function(self, fun=None, name=None, fun_type=FUN_TYPE):
    """actually replace function"""
    if not name:
        # func_name only exists on Python 2 function objects.
        name = fun.func_name if six.PY2 else fun.__name__

    self.say('adding fun(%s)' % name, verbosity=50)
    self.say('adding fun_type:%s' % fun_type, verbosity=50)
    if self.function_exists(name):
        self.cry('overwriting :fun(%s)' % name, verbosity=10)
    self.say('added :' + name, verbosity=10)
    self._rule_functions[name] = RuleFunction(name, fun, fun_type)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def function_exists(self, fun): """ get function's existense """
def function_exists(self, fun):
    """Return whether *fun* is a registered rule function."""
    present = fun in self._rule_functions
    self.say('function exists:' + str(fun) + ':' + str(present), verbosity=10)
    return present
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rule_function_not_found(self, fun=None): """ any function that does not exist will be added as a dummy function that will gather inputs for easing into the possible future implementation """
def rule_function_not_found(self, fun=None):
    """
    Build a dummy implementation for a missing rule function.

    Any function that does not exist is replaced with a stand-in that
    gathers its inputs, easing a possible future implementation.
    """
    sfun = str(fun)
    self.cry('rule_function_not_found:' + sfun)

    def not_found(*a, **k):
        return (sfun + ':rule_function_not_found', k.keys())

    return not_found
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_value(val): """ Parse values from html """
val = val.replace("%", " ")\ .replace(" ","")\ .replace(",", ".")\ .replace("st","").strip() missing = ["Ejdeltagit", "N/A"] if val in missing: return val elif val == "": return None return float(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_html(self, url): """ Get html from url """
self.log.info(u"/GET {}".format(url)) r = requests.get(url) if hasattr(r, 'from_cache'): if r.from_cache: self.log.info("(from cache)") if r.status_code != 200: throw_request_err(r) return r.content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_json(self, url): """ Get json from url """
self.log.info(u"/GET " + url) r = requests.get(url) if hasattr(r, 'from_cache'): if r.from_cache: self.log.info("(from cache)") if r.status_code != 200: throw_request_err(r) return r.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regions(self): """ Get a list of all regions """
def regions(self):
    """ Get a list of all regions """
    elem = self.dimensions["region"].elem
    return [option.text.strip() for option in elem.find_all("option")]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_region_slug(self, id_or_label): """ Get the regional slug to be used in url "Norrbotten" => "Norrbottens" :param id_or_label: Id or label of region """
#region = self.dimensions["region"].get(id_or_label) region = id_or_label slug = region\ .replace(u" ","-")\ .replace(u"ö","o")\ .replace(u"Ö","O")\ .replace(u"ä","a")\ .replace(u"å","a") + "s" EXCEPTIONS = { "Jamtland-Harjedalens": "Jamtlands", "Rikets": "Sveriges", } if slug in EXCEPTIONS: slug = EXCEPTIONS[slug] return slug
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_value(self): """ The default category when making a query """
if not hasattr(self, "_default_value"): if self.elem_type == "select": try: # Get option marked "selected" def_value = get_option_value(self.elem.select_one("[selected]")) except AttributeError: # ...or if that one doesen't exist get the first option def_value = get_option_value(self.elem.select_one("option")) elif self.elem_type == "checkbox": def_value = self.elem.get("value") elif self.elem_type == "radio": def_value = [x for x in self.elem if x.has_attr("checked")][0].get("value") self._default_value = def_value assert def_value is not None return self._default_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_horizontal_scroll_table(self, table_html): """ Get list of dicts from horizontally scrollable table """
def _parse_horizontal_scroll_table(self, table_html):
    """ Get list of dicts from horizontally scrollable table """
    row_labels = [parse_text(x.text)
                  for x in table_html.select(".DTFC_LeftBodyWrapper tbody tr")]
    row_label_ids = [None] * len(row_labels)
    cols = [parse_text(x.text)
            for x in table_html.select(".dataTables_scrollHead th")]

    values = [
        [parse_value(cell.text) for cell in row.select("td")]
        for row in table_html.select(".dataTables_scrollBody tbody tr")
    ]

    sheet = Sheet(zip(row_label_ids, row_labels), cols, values)
    return sheet.long_format
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_json_file(filename, show_warnings = False): """Check configuration file type is JSON Return a boolean indicating whether the file is JSON format or not """
def is_json_file(filename, show_warnings = False):
    """Check configuration file type is JSON

    Return a boolean indicating whether the file is JSON format or not
    """
    # A bare ``except:`` also swallows SystemExit/KeyboardInterrupt; catching
    # Exception is sufficient to detect a parse failure.
    try:
        load_config(filename, file_type="json")
        is_json = True
    except Exception:
        is_json = False
    return is_json
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_yaml_file(filename, show_warnings = False): """Check configuration file type is yaml Return a boolean indicating whether the file is yaml format or not """
def is_yaml_file(filename, show_warnings = False):
    """Check configuration file type is yaml

    Return a boolean indicating whether the file is yaml format or not
    """
    # JSON is a subset of YAML, so rule that out first.
    if is_json_file(filename):
        return False
    # A bare ``except:`` also swallows SystemExit/KeyboardInterrupt; catching
    # Exception is sufficient to detect a parse failure.
    try:
        config_dict = load_config(filename, file_type="yaml")
        # A YAML parse that yields a plain string is not a real config.
        is_yaml = not isinstance(config_dict, str)
    except Exception:
        is_yaml = False
    return is_yaml
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_ini_file(filename, show_warnings = False): """Check configuration file type is INI Return a boolean indicating whether the file is INI format or not """
def is_ini_file(filename, show_warnings = False):
    """Check configuration file type is INI

    Return a boolean indicating whether the file is INI format or not
    """
    # A bare ``except:`` also swallows SystemExit/KeyboardInterrupt; catching
    # Exception is sufficient to detect a parse failure.
    try:
        config_dict = load_config(filename, file_type="ini")
        # An empty parse result means no INI sections were found.
        is_ini = config_dict != {}
    except Exception:
        is_ini = False
    return is_ini
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_toml_file(filename, show_warnings = False): """Check configuration file type is TOML Return a boolean indicating whether the file is TOML format or not """
def is_toml_file(filename, show_warnings = False):
    """Check configuration file type is TOML

    Return a boolean indicating whether the file is TOML format or not
    """
    # YAML parses many TOML-ish documents, so rule that out first.
    if is_yaml_file(filename):
        return False
    # A bare ``except:`` also swallows SystemExit/KeyboardInterrupt; catching
    # Exception is sufficient to detect a parse failure.
    try:
        load_config(filename, file_type="toml")
        is_toml = True
    except Exception:
        is_toml = False
    return is_toml
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _collect_settings(self, apps): """ Iterate over given apps or INSTALLED_APPS and collect the content of each's settings file, which is expected to be in JSON format. """
def _collect_settings(self, apps):
    """
    Iterate over given apps or INSTALLED_APPS and collect the content of
    each's settings file, which is expected to be in JSON format.
    """
    contents = {}
    if apps:
        # Validate the explicit app list against INSTALLED_APPS.
        for app in apps:
            if app not in settings.INSTALLED_APPS:
                raise CommandError("Application '{0}' not in settings.INSTALLED_APPS".format(app))
    else:
        apps = settings.INSTALLED_APPS

    for app in apps:
        module = import_module(app)
        for module_dir in module.__path__:
            json_file = os.path.abspath(os.path.join(module_dir, self.json_file))
            if os.path.isfile(json_file):
                with open(json_file, 'r') as fp:
                    contents[app] = json.load(fp)

    return contents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def required_unique(objects, key): """ A pyrsistent invariant which requires all objects in the given iterable to have a unique key. :param objects: The objects to check. :param key: A one-argument callable to compute the key of an object. :return: An invariant failure if any two or more objects have the same key computed. An invariant success otherwise. """
def required_unique(objects, key):
    """
    A pyrsistent invariant which requires all objects in the given iterable
    to have a unique key.

    :param objects: The objects to check.
    :param key: A one-argument callable to compute the key of an object.

    :return: An invariant failure if any two or more objects have the same
        key computed.  An invariant success otherwise.
    """
    seen = {}
    duplicate = set()
    for computed in map(key, objects):
        count = seen.get(computed, 0) + 1
        seen[computed] = count
        if count > 1:
            duplicate.add(computed)
    if duplicate:
        return (False, u"Duplicate object keys: {}".format(duplicate))
    return (True, u"")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def item_by_name(self, name): """ Find an item in this collection by its name metadata. :param unicode name: The name of the object for which to search. :raise KeyError: If no object matching the given name is found. :return IObject: The object with the matching name. """
def item_by_name(self, name):
    """
    Find an item in this collection by its name metadata.

    :param unicode name: The name of the object for which to search.

    :raise KeyError: If no object matching the given name is found.
    :return IObject: The object with the matching name.
    """
    for candidate in self.items:
        if candidate.metadata.name == name:
            return candidate
    raise KeyError(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_name_core(self, name: str): """Runs whenever a new instance is initialized or `sep` is set."""
self.__regex = re.compile(rf'^{self._pattern}$') self.name = name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_name(self, **values) -> str: """Get a new name string from this object's name values. :param values: Variable keyword arguments where the **key** should refer to a field on this object that will use the provided **value** to build the new name. """
def get_name(self, **values) -> str:
    """
    Get a new name string from this object's name values.

    :param values: Variable keyword arguments where the **key** should
        refer to a field on this object that will use the provided
        **value** to build the new name.
    """
    if not values and self.name:
        return self.name
    if values:
        # With values provided, solve compounds they may affect.
        for comp_name, members in _sorted_items(self.compounds):
            if comp_name in members and comp_name in values:
                # redefined compound name to outer scope e.g. fifth = (fifth, sixth)
                continue
            member_values = [values.pop(m, getattr(self, m)) for m in members]
            if None not in member_values:
                values[comp_name] = ''.join(rf'{v}' for v in member_values)
    return self._get_nice_name(**values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cast_config(cls, config: typing.Mapping[str, str]) -> typing.Dict[str, str]: """Cast `config` to grouped regular expressions."""
def cast_config(cls, config: typing.Mapping[str, str]) -> typing.Dict[str, str]:
    """Cast `config` to grouped regular expressions."""
    result = {}
    for key, value in config.items():
        result[key] = cls.cast(value, key)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execute_primitives(self, commands): """Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises. Args: commands: A list of Executable Primitives to be run in order. """
for p in commands: if self._scanchain and self._scanchain._debug: print(" Executing", p)#pragma: no cover p.execute(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty_version_text(): """Return pretty version text listing all plugins."""
version_lines = ["dtool, version {}".format(dtool_version)] version_lines.append("\nBase:") version_lines.append("dtoolcore, version {}".format(dtoolcore.__version__)) version_lines.append("dtool-cli, version {}".format(__version__)) # List the storage broker packages. version_lines.append("\nStorage brokers:") for ep in iter_entry_points("dtool.storage_brokers"): package = ep.module_name.split(".")[0] dyn_load_p = __import__(package) version = dyn_load_p.__version__ storage_broker = ep.load() version_lines.append( "{}, {}, version {}".format( storage_broker.key, package.replace("_", "-"), version)) # List the plugin packages. modules = [ep.module_name for ep in iter_entry_points("dtool.cli")] packages = set([m.split(".")[0] for m in modules]) version_lines.append("\nPlugins:") for p in packages: dyn_load_p = __import__(p) version_lines.append( "{}, version {}".format( p.replace("_", "-"), dyn_load_p.__version__)) return "\n".join(version_lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dtool(debug): """Tool to work with datasets."""
def dtool(debug):
    """Tool to work with datasets."""
    level = logging.DEBUG if debug else logging.WARNING
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_nic(self, instance_id, net_id): """Add a Network Interface Controller"""
#TODO: upgrade with port_id and fixed_ip in future self.client.servers.interface_attach( instance_id, None, net_id, None) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_nic(self, instance_id, port_id): """Delete a Network Interface Controller"""
self.client.servers.interface_detach(instance_id, port_id) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disassociate_public_ip(self, public_ip_id): """Disassociate a external IP"""
floating_ip = self.client.floating_ips.get(public_ip_id) floating_ip = floating_ip.to_dict() instance_id = floating_ip.get('instance_id') address = floating_ip.get('ip') self.client.servers.remove_floating_ip(instance_id, address) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split(self, bitindex): """Split a promise into two promises at the provided index. A common operation in JTAG is reading/writing to a register. During the operation, the TMS pin must be low, but during the writing of the last bit, the TMS pin must be high. Requiring all reads or writes to have full arbitrary control over the TMS pin is unrealistic. Splitting a promise into two sub promises is a way to mitigate this issue. The final read bit is its own subpromise that can be associated with a different primitive than the 'rest' of the subpromise. Returns: Two TDOPromise instances: the 'Rest' and the 'Tail'. The 'Rest' is the first chunk of the original promise. The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned """
# Validate the split point before doing any work.
if bitindex < 0:
    raise ValueError("bitindex must be larger or equal to 0.")
if bitindex > len(self):
    raise ValueError(
        "bitindex larger than the array's size. "
        "Len: %s; bitindex: %s"%(len(self), bitindex))
# Degenerate splits: everything falls on one side.
if bitindex == 0:
    return None, self
if bitindex == len(self):
    return self, None
left = TDOPromise(self._chain, self._bitstart, bitindex,
                  _parent=self)
#Starts at 0 because offset is for incoming data from
#associated primitive, not location in parent.
right = TDOPromise(self._chain, 0, len(self)-bitindex,
                   _parent=self)
# Replace any previous composition with exactly these two children.
self._components = []
self._addsub(left, 0)
self._addsub(right, bitindex)
return left, right
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fulfill(self, bits, ignore_nonpromised_bits=False): """Supply the promise with the bits from its associated primitive's execution. The fulfillment process must walk the promise chain backwards until it reaches the original promise and can supply the final value. The data that comes in can either be all a bit read for every bit written by the associated primitive, or (if the primitive supports it), only the bits that are used by promises. The ignore_nonpromised_bits flag specifies which format the incoming data is in. Args: bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin. ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data). """
# Only resolve once every registered sub-promise has a value.
if self._allsubsfulfilled():
    if not self._components:
        # Leaf promise: slice the raw TDO bits directly.
        if ignore_nonpromised_bits:
            # Incoming data holds only promised bits, so index with the
            # selective start offset rather than the absolute one.
            self._value = bits[self._bitstartselective:
                               self._bitstartselective + self._bitlength]
        else:
            self._value = bits[self._bitstart:self._bitend]
    else:
        # Composite promise: concatenate child values in registration order.
        self._value = self._components[0][0]._value
        for sub, offset in self._components[1:]:
            self._value += sub._value
    # Propagate fulfillment up the promise chain (parent reads child
    # values, so no bits are passed).
    if self._parent is not None:
        self._parent._fulfill(None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def makesubatoffset(self, bitoffset, *, _offsetideal=None): """Create a copy of this promise with an offset, and use it as this promise's child. If this promise's primitive is being merged with another primitive, a new subpromise may be required to keep track of the new offset of data coming from the new primitive. Args: bitoffset: An integer offset of the data in the new primitive. _offsetideal: integer offset of the data if terms of bits actually used for promises. Used to calculate the start index to read if the associated primitive has arbitrary TDO control. Returns: A TDOPromise registered with this promise, and with the correct offset. """
if _offsetideal is None: _offsetideal = bitoffset if bitoffset is 0: return self newpromise = TDOPromise( self._chain, self._bitstart + bitoffset, self._bitlength, _parent=self, bitstartselective=self._bitstartselective+_offsetideal ) self._addsub(newpromise, 0) return newpromise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, promise, bitoffset, *, _offsetideal=None): """Add a promise to the promise collection at an optional offset. Args: promise: A TDOPromise to add to this collection. bitoffset: An integer offset for this new promise in the collection. _offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control. """
#This Assumes that things are added in order. #Sorting or checking should likely be added. if _offsetideal is None: _offsetideal = bitoffset if isinstance(promise, TDOPromise): newpromise = promise.makesubatoffset( bitoffset, _offsetideal=_offsetideal) self._promises.append(newpromise) elif isinstance(promise, TDOPromiseCollection): for p in promise._promises: self.add(p, bitoffset, _offsetideal=_offsetideal)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split(self, bitindex): """Split a promise into two promises. A tail bit, and the 'rest'. Same operation as the one on TDOPromise, except this works with a collection of promises and splits the appropriate one. Returns: The 'Rest' and the 'Tail'. The 'Rest' is TDOPromiseCollection containing the first chunk of the original TDOPromiseCollection. The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned """
if bitindex < 0:
    raise ValueError("bitindex must be larger or equal to 0.")
if bitindex == 0:
    # Nothing falls on the left side.
    return None, self
# Locate the promise containing the split point.  split_promise records
# whether the point falls strictly inside a promise (True) or on a
# boundary / in a gap between promises (False).
lastend = 0
split_promise = False
for splitindex, p in enumerate(self._promises):
    if bitindex in range(lastend, p._bitstart):
        # Split point lies in the gap before this promise.
        split_promise = False
        break
    if bitindex in range(p._bitstart, p._bitend):
        if bitindex-p._bitstart == 0:
            split_promise = False
        else:
            split_promise = True
        break
    lastend = p._bitend
else:
    raise Exception("Should be impossible")
processed_left = TDOPromiseCollection(self._chain)
processed_right = TDOPromiseCollection(self._chain)
if split_promise:
    # Split the straddling promise and distribute its halves; promises
    # right of the cut are rebased by -bitindex.
    left, right = p.split(bitindex-p._bitstart)
    for i in range(splitindex):
        processed_left.add(self._promises[i], 0)
    processed_left.add(left, 0)
    processed_right.add(right, 0)
    for tmpprim in self._promises[splitindex+1:]:
        processed_right.add(tmpprim, -bitindex)
    return processed_left, processed_right
else:
    # Clean cut: whole promises go to one side or the other.
    for i in range(splitindex):
        processed_left.add(self._promises[i], 0)
    for i in range(splitindex, len(self._promises)):
        processed_right.add(self._promises[i], -bitindex)
    return processed_left, processed_right
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def makesubatoffset(self, bitoffset, *, _offsetideal=None): """Create a copy of this PromiseCollection with an offset applied to each contained promise and register each with their parent. If this promise's primitive is being merged with another primitive, a new subpromise may be required to keep track of the new offset of data coming from the new primitive. Args: bitoffset: An integer offset of the data in the new primitive. _offsetideal: An integer offset to use if the associated primitive supports arbitrary TDO control. Returns: A new TDOPromiseCollection registered with this promise collection, and with the correct offset. """
if _offsetideal is None: _offsetideal = bitoffset if bitoffset is 0: return self newpromise = TDOPromiseCollection(self._chain) for promise in self._promises: newpromise.add(promise, bitoffset, _offsetideal=_offsetideal) return newpromise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cli(ctx, stage): """Show transformer rules"""
# Abort early when the working directory holds no bubble project.
if not ctx.bubble:
    ctx.say_yellow('There is no bubble present, ' +
                   'will not show any transformer rules')
    raise click.Abort()
path = ctx.home + '/'
RULES = None
ctx.say('Stage:'+stage, verbosity=10)
# Drill down: stage -> TRANSFORM section -> RULES entry.
if stage in STAGES:
    if stage in ctx.cfg.CFG:
        STAGE = ctx.cfg.CFG[stage]
        ctx.say('Stage found:', stuff=STAGE,verbosity=100)
        if 'TRANSFORM' in STAGE:
            TRANSFORM = STAGE.TRANSFORM
            ctx.say('Transform found:', stuff=TRANSFORM, verbosity=100)
            if 'RULES' in TRANSFORM:
                RULES = TRANSFORM.RULES
                ctx.say('Rules found:', stuff=RULES, verbosity=100)
if not RULES:
    ctx.say_red('There is no TRANSFORM.RULES in stage:' + stage)
    ctx.say_yellow('please check configuration in ' +
                   ctx.home + '/config/config.yaml')
    raise click.Abort()
# NOTE(review): type(RULES) == str rejects str subclasses that
# isinstance would accept — confirm whether that is intentional.
if type(RULES) == str and RULES.endswith('.bubble'):
    # Load the .bubble rules file and print every parsed rule.
    ctx.say('loading rules',verbosity=10)
    rules = get_bubble(ctx, path + RULES)
    rule_type = 'bubble'
    transformer = Transformer(rules=rules,
                              rule_type=rule_type,
                              bubble_path=path,
                              verbose=ctx.get_verbose())
    rules = transformer._rules.get_rules()
    ctx.say('current number of rules:' + str(len(rules)), verbosity=1)
    for r in rules:
        ctx.say('rule: ' + str(r), verbosity=1)
    ctx.gbc.say('rules: ', stuff=rules, verbosity=100)
else:
    ctx.say('no rules!')
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connectExec(connection, protocol, commandLine): """Connect a Protocol to a ssh exec session """
deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestExec(commandLine) return deferred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connectShell(connection, protocol): """Connect a Protocol to a ssh shell session """
deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestShell() return deferred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connectSubsystem(connection, protocol, subsystem): """Connect a Protocol to a ssh subsystem channel """
deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestSubsystem(subsystem) return deferred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connectSession(connection, protocol, sessionFactory=None, *args, **kwargs): """Open a SSHSession channel and connect a Protocol to it @param connection: the SSH Connection to open the session channel on @param protocol: the Protocol instance to connect to the session @param sessionFactory: factory method to generate a SSHSession instance @note: :args: and :kwargs: are passed to the sessionFactory """
# Use the supplied session factory, or fall back to the default one.
factory = sessionFactory or defaultSessionFactory
session = factory(*args, **kwargs)
# Wire the channel's data/close events straight into the protocol.
session.dataReceived = protocol.dataReceived
session.closed = lambda: protocol.connectionLost(connectionDone)
deferred = defer.Deferred()
@deferred.addCallback
def connectProtocolAndReturnSession(specificData):
    # Once the channel opens, attach the protocol and hand back the
    # session so chained callbacks can issue requests on it.
    protocol.makeConnection(session)
    return session
# Channel open success/failure resolves the deferred.
session.sessionOpen = deferred.callback
session.openFailed = deferred.errback
connection.openChannel(session)
return deferred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def requestSubsystem(self, subsystem): """Request a subsystem and return a deferred reply. """
data = common.NS(subsystem) return self.sendRequest('subsystem', data, wantReply=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def requestPty(self, term=None, rows=0, cols=0, xpixel=0, ypixel=0, modes=''): """Request allocation of a pseudo-terminal for a channel @param term: TERM environment variable value (e.g., vt100) @param columns: terminal width, characters (e.g., 80) @param rows: terminal height, rows (e.g., 24) @param width: terminal width, pixels (e.g., 640) @param height: terminal height, pixels (e.g., 480) @param modes: encoded terminal modes The dimension parameters are only informational. Zero dimension parameters are ignored. The columns/rows dimensions override the pixel dimensions (when nonzero). Pixel dimensions refer to the drawable area of the window. """
#TODO: Needs testing! term = term or os.environ.get('TERM', '') data = packRequest_pty_req(term, (rows, cols, xpixel, ypixel), modes) return self.sendRequest('pty-req', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def requestEnv(self, env={}): """Send requests to set the environment variables for the channel """
# Send one 'env' channel request per variable; no replies are awaited.
# NOTE(review): the signature's mutable default env={} is shared across
# calls; it is only read here, so this is safe today.
for name, val in env.items():
    self.sendRequest('env', common.NS(name) + common.NS(val))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def commandstr(command): """Convert command into string."""
if command == CMD_MESSAGE_ERROR: msg = "CMD_MESSAGE_ERROR" elif command == CMD_MESSAGE_LIST: msg = "CMD_MESSAGE_LIST" elif command == CMD_MESSAGE_PASSWORD: msg = "CMD_MESSAGE_PASSWORD" elif command == CMD_MESSAGE_MP3: msg = "CMD_MESSAGE_MP3" elif command == CMD_MESSAGE_DELETE: msg = "CMD_MESSAGE_DELETE" elif command == CMD_MESSAGE_VERSION: msg = "CMD_MESSAGE_VERSION" elif command == CMD_MESSAGE_CDR_AVAILABLE: msg = "CMD_MESSAGE_CDR_AVAILABLE" elif command == CMD_MESSAGE_CDR: msg = "CMD_MESSAGE_CDR" else: msg = "CMD_MESSAGE_UNKNOWN" return msg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(): """Command for reflection database objects"""
# Build and parse CLI options.
parser = OptionParser(
    version=__version__,
    description=__doc__,
)
parser.add_option(
    '-u', '--url', dest='url',
    help='Database URL (connection string)',
)
parser.add_option(
    '-r', '--render', dest='render', default='dot',
    choices=['plantuml', 'dot'],
    help='Output format - plantuml or dot',
)
parser.add_option(
    '-l', '--list', dest='list', action='store_true',
    help='Output database list of tables and exit',
)
parser.add_option(
    '-i', '--include', dest='include',
    help='List of tables to include through ","',
)
parser.add_option(
    '-e', '--exclude', dest='exclude',
    # Fixed typo in user-facing help text: "exlude" -> "exclude".
    help='List of tables to exclude through ","',
)
(options, args) = parser.parse_args()
if not options.url:
    print('-u/--url option required')
    exit(1)
# Reflect the full schema from the live database.
engine = create_engine(options.url)
meta = MetaData()
meta.reflect(bind=engine)
if options.list:
    # Print the table names in two columns and exit.
    print('Database tables:')
    tables = sorted(meta.tables.keys())
    def _g(l, i):
        # Safe positional lookup: empty string past the end.
        try:
            return tables[i]
        except IndexError:
            return ''
    for i in range(0, len(tables), 2):
        print(' {0}{1}{2}'.format(
            _g(tables, i),
            ' ' * (38 - len(_g(tables, i))),
            _g(tables, i + 1),
        ))
    exit(0)
tables = set(meta.tables.keys())
# str.strip (not the Python 2-only string.strip module function) keeps
# the include/exclude filters working on Python 3 as well.
if options.include:
    tables &= set(map(str.strip, options.include.split(',')))
if options.exclude:
    tables -= set(map(str.strip, options.exclude.split(',')))
desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
print(getattr(render, options.render)(desc))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh(self): """ Refresh all class attributes. """
# Re-fetch this poll from the API and overwrite every cached attribute.
strawpoll_response = requests.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=self.id))
raise_status(strawpoll_response)
self.status_code = strawpoll_response.status_code
self.response_json = strawpoll_response.json()
self.id = self.response_json['id']
self.title = self.response_json['title']
self.options = self.response_json['options']
self.votes = self.response_json['votes']
self.captcha = self.response_json['captcha']
self.dupcheck = self.response_json['dupcheck']
# Derived URLs for the poll page and its results page.
self.url = 'https://www.strawpoll.me/{id}'.format(id=self.id)
self.results_url = 'https://www.strawpoll.me/{id}/r'.format(id=self.id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_json_file(self, path): """ Serialize this VariantCollection to a JSON representation and write it out to a text file. """
# Serialize first, then write the JSON text out in one call.
serialized = self.to_json()
with open(path, "w") as output_file:
    output_file.write(serialized)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_json_file(cls, path): """ Construct a VariantCollection from a JSON file. """
with open(path, 'r') as f: json_string = f.read() return cls.from_json(json_string)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dumps(data, escape=False, **kwargs): """A wrapper around `json.dumps` that can handle objects that json module is not aware. This function is aware of a list of custom serializers that can be registered by the API user, making it possible to convert any kind of object to types that the json library can handle. """
# Default to deterministic key ordering unless the caller overrides it.
if 'sort_keys' not in kwargs:
    kwargs['sort_keys'] = True
converted = json.dumps(data, default=_converter, **kwargs)
if escape:
    # We're escaping the whole dumped string here cause there's no (easy)
    # way to hook into the native json library and change how they process
    # values like strings, None objects and some other "literal" stuff.
    #
    # Also, we're not escaping quotes here cause they're escaped by the
    # native json library already. So, we just escape basic html entities,
    # like <, > and &;
    # NOTE(review): cgi.escape is deprecated and removed in Python 3.13;
    # html.escape(converted, quote=False) is the drop-in replacement —
    # confirm before migrating.
    return cgi.escape(converted)
return converted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deserialize(klass, data): """Helper function to access a method that creates objects of a given `klass` with the received `data`. """
handler = DESERIALIZE_REGISTRY.get(klass) if handler: return handler(data) raise TypeError("There is no deserializer registered to handle " "instances of '{}'".format(klass.__name__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _convert_from(data): """Internal function that will be hooked to the native `json.loads` Find the right deserializer for a given value, taking into account the internal deserializer registry. """
try:
    # Resolve the dotted "__class__" path back into a class object.
    module, klass_name = data['__class__'].rsplit('.', 1)
    klass = getattr(import_module(module), klass_name)
except (ImportError, AttributeError, KeyError):
    # But I still haven't found what I'm looking for
    #
    # Waiting for three different exceptions here. KeyError will
    # raise if can't find the "__class__" entry in the json `data`
    # dictionary. ImportError happens when the module present in the
    # dotted name can't be resolved. Finally, the AttributeError
    # happens when we can find the module, but couldn't find the
    # class on it.
    return data
return deserialize(klass, data['__value__'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _converter(data): """Internal function that will be passed to the native `json.dumps`. This function uses the `REGISTRY` of serializers and try to convert a given instance to an object that json.dumps can understand. """
# Look up a serializer registered for this exact class (no MRO walk).
handler = REGISTRY.get(data.__class__)
if handler:
    full_name = '{}.{}'.format(
        data.__class__.__module__,
        data.__class__.__name__)
    # Tagged envelope understood by _convert_from on the way back in.
    return {
        '__class__': full_name,
        '__value__': handler(data),
    }
raise TypeError(repr(data) + " is not JSON serializable")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_error(self, error): """ Try to detect repetitive errors and sleep for a while to avoid being marked as spam """
logging.exception("try to sleep if there are repeating errors.")
error_desc = str(error)
now = datetime.datetime.now()
# First occurrence of this error: just record the timestamp.
if error_desc not in self.error_time_log:
    self.error_time_log[error_desc] = now
    return
time_of_last_encounter = self.error_time_log[str(error)]
time_since_last_encounter = now - time_of_last_encounter
# Enough time has passed since the last occurrence: reset the clock.
if time_since_last_encounter.total_seconds() > self.config.get('min_seconds_between_errors'):
    self.error_time_log[error_desc] = now
    return
# Rapidly repeating error: sleep once to avoid being marked as spam,
# then exit outright on the next consecutive repetition.
if error_desc not in self.error_sleep_log:
    time.sleep(self.config.get('sleep_seconds_on_consecutive_errors'))
    self.error_sleep_log[error_desc] = 1
else:
    sys.exit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_isodate(datestr): """Parse a string that loosely fits ISO 8601 formatted date-time string """
# isodate_rx is a module-level regex (defined elsewhere in this file).
m = isodate_rx.search(datestr)
assert m, 'unrecognized date format: ' + datestr
year, month, day = m.group('year', 'month', 'day')
hour, minute, second, fraction = m.group('hour', 'minute', 'second', 'fraction')
tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm')
dt = datetime.datetime(int(year), int(month), int(day), int(hour))
# A trailing fraction applies to the smallest omitted unit below.
if fraction is None:
    fraction = 0
else:
    fraction = float('0.' + fraction)
if minute is None:
    # e.g. "12.5" -> 12:30
    dt = dt.replace(minute=int(60 * fraction))
else:
    dt = dt.replace(minute=int(minute))
    if second is None:
        # e.g. "12:30.5" -> 12:30:30
        dt = dt.replace(second=int(60 * fraction))
    else:
        dt = dt.replace(second=int(second),
                        microsecond=int(1000000 * fraction))
# Attach a fixed-offset timezone when one was given ('Z' means UTC).
if tz is not None:
    if tz[0] == 'Z':
        offset = 0
    else:
        offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh))
        if tz[0] == '-':
            offset = -offset
    dt = dt.replace(tzinfo=UTCOffset(offset))
return dt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ls( self, rev, path, recursive=False, recursive_dirs=False, directory=False, report=() ): """List directory or file :param rev: The revision to use. :param path: The path to list. May start with a '/' or not. Directories may end with a '/' or not. :param recursive: Recursively list files in subdirectories. :param recursive_dirs: Used when recursive=True, also list directories. :param directory: If path is a directory, list path itself instead of its contents. :param report: A list or tuple of extra attributes to return that may require extra processing. Recognized values are 'size', 'target', 'executable', and 'commit'. Returns a list of dictionaries with the following keys: **type** The type of the file: 'f' for file, 'd' for directory, 'l' for symlink. **name** The name of the file. Not present if directory=True. **size** The size of the file. Only present for files when 'size' is in report. **target** The target of the symlink. Only present for symlinks when 'target' is in report. **executable** True if the file is executable, False otherwise. Only present for files when 'executable' is in report. Raises PathDoesNotExist if the path does not exist. """
# Abstract method: concrete VCS backends must override this.
raise NotImplementedError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log( self, revrange=None, limit=None, firstparent=False, merges=None, path=None, follow=False ): """Get commit logs :param revrange: Either a single revision or a range of revisions as a 2-element list or tuple. :param int limit: Limit the number of log entries. :param bool firstparent: Only follow the first parent of merges. :param bool merges: True means only merges, False means no merges, None means both merges and non-merges. :param str path: Only match commits containing changes on this path. :param bool follow: Follow file history across renames. :returns: log information :rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry` If revrange is None, return a list of all log entries in reverse chronological order. If revrange is a single revision, return a single log entry. If revrange is a 2 element list [A,B] or tuple (A,B), return a list of log entries starting at B and following that branch back to A or one of its ancestors (not inclusive. If A is None, follow branch B back to the beginning of history. If B is None, list all descendants in reverse chronological order. """
# Abstract method: concrete VCS backends must override this.
raise NotImplementedError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_create(self, cloudflare_email, cloudflare_pass, unique_id=None): """ Create new cloudflare user with selected email and id. Optionally also select unique_id which can be then used to get user information. :param cloudflare_email: new user cloudflare email :type cloudflare_email: str :param cloudflare_pass: new user cloudflare password :type cloudflare_pass: str :param unique_id: new user unique id :type unique_id: str (optional) :returns: :rtype: dict """
params = { 'act': 'user_create', 'cloudflare_email': cloudflare_email, 'cloudflare_pass': cloudflare_pass } if unique_id: params['unique_id'] = unique_id return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zone_set(self, user_key, zone_name, resolve_to, subdomains): """ Create new zone for user associated with this user_key. :param user_key: The unique 3auth string,identifying the user's CloudFlare Account. Generated from a user_create or user_auth :type user_key: str :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. "example.com". :type zone_name: str :param resolve_to: The CNAME that CloudFlare should ultimately resolve web connections to after they have been filtered :type resolve_to: str :param subdomains: A comma-separated string of subdomain(s) that CloudFlare should host, e.g. "www,blog,forums" :type subdomains: str :returns: :rtype: dict """
params = { 'act': 'zone_set', 'user_key': user_key, 'zone_name': zone_name, 'resolve_to': resolve_to, 'subdomains': subdomains, } return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def full_zone_set(self, user_key, zone_name): """ Create new zone and all subdomains for user associated with this user_key. :param user_key: The unique 3auth string,identifying the user's CloudFlare Account. Generated from a user_create or user_auth :type user_key: str :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. "example.com". :type zone_name: str :returns: :rtype: dict """
params = { 'act': 'full_zone_set', 'user_key': user_key, 'zone_name': zone_name, } return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_lookup(self, cloudflare_email=None, unique_id=None): """ Lookup user data based on either his cloudflare_email or his unique_id. :param cloudflare_email: email associated with user :type cloudflare_email: str :param unique_id: unique id associated with user :type unique_id: str :returns: :rtype: dict """
if not cloudflare_email and not unique_id: raise KeyError( 'Either cloudflare_email or unique_id must be present') params = {'act': 'user_lookup'} if cloudflare_email: params['cloudflare_email'] = cloudflare_email else: params['unique_id'] = unique_id return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_auth( self, cloudflare_email=None, cloudflare_pass=None, unique_id=None ): """ Get user_key based on either his email and password or unique_id. :param cloudflare_email: email associated with user :type cloudflare_email: str :param cloudflare_pass: pass associated with user :type cloudflare_pass: str :param unique_id: unique id associated with user :type unique_id: str :returns: :rtype: dict """
# At least one complete credential set is required: email+password, or
# a unique_id.
if not (cloudflare_email and cloudflare_pass) and not unique_id:
    raise KeyError(
        'Either cloudflare_email and cloudflare_pass or unique_id must be present')
params = {'act': 'user_auth'}
# Both credential sets may be sent together; the API decides precedence.
if cloudflare_email and cloudflare_pass:
    params['cloudflare_email'] = cloudflare_email
    params['cloudflare_pass'] = cloudflare_pass
if unique_id:
    params['unique_id'] = unique_id
return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zone_list( self, user_key, limit=100, offset=0, zone_name=None, sub_id=None, zone_status='ALL', sub_status='ALL', ): """ List zones for a user. :param user_key: key for authentication of user :type user_key: str :param limit: limit of zones shown :type limit: int :param offset: offset of zones to be shown :type offset: int :param zone_name: name of zone to lookup :type zone_name: str :param sub_id: subscription id of reseller (only for use by resellers) :type sub_id: str :param zone_status: status of zones to be shown :type zone_status: str (one of: V(active), D(deleted), ALL) :param sub_status: status of subscription of zones to be shown :type zone_name: str (one of: V(active), CNL(cancelled), ALL ) :returns: :rtype: dict """
# Validate the status filters before building the request.
if zone_status not in ['V', 'D', 'ALL']:
    raise ValueError('zone_status has to be V, D or ALL')
if sub_status not in ['V', 'CNL', 'ALL']:
    raise ValueError('sub_status has to be V, CNL or ALL')
params = {
    'act': 'zone_list',
    'user_key': user_key,
    'limit': limit,
    'offset': offset,
    'zone_status': zone_status,
    'sub_status': sub_status
}
# Optional filters are only sent when supplied.
if zone_name:
    params['zone_name'] = zone_name
if sub_id:
    params['sub_id'] = sub_id
return self._request(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attr_exists(self, attr): """Returns True if at least on instance of the attribute is found """
gen = self.attr_gen(attr) n_instances = len(list(gen)) if n_instances > 0: return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datasets(self): """Method returns a list of dataset paths. Examples -------- print(dataset) '/dataset1/data1/data' '/dataset1/data2/data' '/dataset2/data1/data' '/dataset2/data2/data' """
# Reset the class-level cache, let visititems collect every dataset
# path via the _is_dataset visitor, then return the accumulated list.
HiisiHDF._clear_cache()
self.visititems(HiisiHDF._is_dataset)
return HiisiHDF.CACHE['dataset_paths']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_from_filedict(self, filedict): """ Creates h5 file from dictionary containing the file structure. Filedict is a regular dictionary whose keys are hdf5 paths and whose values are dictionaries containing the metadata and datasets. Metadata is given as normal key-value -pairs and dataset arrays are given using 'DATASET' key. Datasets must be numpy arrays. Method can also be used to append to an existing hdf5 file. If the file is opened in read only mode, method does nothing. Examples -------- Create newfile.h5 and fill it with data and metadata '/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'} """
# Only write when the file is opened in a writable mode; in read-only
# mode the method silently does nothing, as documented.
# NOTE: dict.iteritems() and dict.has_key() are Python-2-only and were
# removed in Python 3; items() and the "in" operator work on both.
if self.mode in ['r+', 'w', 'w-', 'x', 'a']:
    for h5path, path_content in filedict.items():
        if 'DATASET' in path_content:
            # If the path already exists, update only its metadata.
            if h5path in self:
                for key, value in path_content.items():
                    if key != 'DATASET':
                        self[h5path].attrs[key] = value
            else:
                try:
                    group = self.create_group(os.path.dirname(h5path))
                except ValueError:
                    # Parent group already exists; reuse it.
                    group = self[os.path.dirname(h5path)]
                new_dataset = group.create_dataset(
                    os.path.basename(h5path),
                    data=path_content['DATASET'])
                # Attach every non-dataset key as dataset metadata.
                for key, value in path_content.items():
                    if key != 'DATASET':
                        new_dataset.attrs[key] = value
        else:
            # Pure metadata path: create (or reuse) the group and
            # attach the key/value pairs as attributes.
            try:
                group = self.create_group(h5path)
            except ValueError:
                group = self[h5path]
            for key, value in path_content.items():
                group.attrs[key] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search(self, attr, value, tolerance=0): """Find paths with a key value match Parameters attr : str name of the attribute value : str or numerical value value of the searched attribute Keywords -------- tolerance : float tolerance used when searching for matching numerical attributes. If the value of the attribute found from the file differs from the searched value less than the tolerance, attributes are considered to be the same. Returns ------- results : list a list of all matching paths Examples -------- print(result) '/dataset1/where' print(result) '/dataset1/data2/what' '/dataset2/data2/what' '/dataset3/data2/what' '/dataset4/data2/what' '/dataset5/data2/what' """
found_paths = [] gen = self.attr_gen(attr) for path_attr_pair in gen: # if attribute is numerical use numerical_value_tolerance in # value comparison. If attribute is string require exact match if isinstance(path_attr_pair.value, str): type_name = 'str' else: type_name = path_attr_pair.value.dtype.name if 'int' in type_name or 'float' in type_name: if abs(path_attr_pair.value - value) <= tolerance: found_paths.append(path_attr_pair.path) else: if path_attr_pair.value == value: found_paths.append(path_attr_pair.path) return found_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extractReporterIons(ionArrays, reporterMz, mzTolerance): """Find and a list of reporter ions and return mz and intensity values. Expected reporter mz values are searched in "ionArray['mz']" and reported if the observed relative deviation is less than specified by "mzTolerance". In the case of multiple matches, the one with the minimal deviation is picked. If no matching entries are found numpy.nan is returned for the mz value and an intensity of 0. The returned arrays are in the order of "reporterMz" values. :param ionArrays: a dictionary containing two numpy arrays of equal size, {"i": an array of ion intensities, "mz" an array of ion mz values} :param reporterMz: a list of reporter mz values :param mzTolerance: maximum allowed relative mz deviation :returns: {'mz': numpy.array(), 'i': numpy.array()} """
reporterIons = {'mz': [], 'i': []} for reporterMzValue in reporterMz: limHi = reporterMzValue * (1+mzTolerance) limLo = reporterMzValue * (1-mzTolerance) loPos = bisect.bisect_left(ionArrays['mz'], limLo) upPos = bisect.bisect_right(ionArrays['mz'], limHi) matchingValues = ionArrays['mz'][loPos:upPos] if matchingValues.size == 0: reporterIons['i'].append(0) reporterIons['mz'].append(numpy.nan) elif matchingValues.size == 1: reporterIons['i'].append(ionArrays['i'][loPos]) reporterIons['mz'].append(ionArrays['mz'][loPos]) else: mzDeviations = numpy.abs(matchingValues-reporterMzValue) minDeviationPos = numpy.argmin(mzDeviations) bestMatchArrayPos = range(loPos, upPos)[minDeviationPos] reporterIons['i'].append(ionArrays['i'][bestMatchArrayPos]) reporterIons['mz'].append(ionArrays['mz'][bestMatchArrayPos]) reporterIons['mz'] = numpy.array(reporterIons['mz'], dtype=ionArrays['mz'].dtype ) reporterIons['i'] = numpy.array(reporterIons['i'], dtype=ionArrays['i'].dtype ) return reporterIons
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _correctIsotopeImpurities(matrix, intensities): """Corrects observed reporter ion intensities for isotope impurities. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a COLUMN. Use maspy.isobar._transposeMatrix() if channels are written in rows. :param intensities: numpy array of observed reporter ion intensities. :returns: a numpy array of reporter ion intensities corrected for isotope impurities. """
correctedIntensities, _ = scipy.optimize.nnls(matrix, intensities) return correctedIntensities
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _normalizeImpurityMatrix(matrix): """Normalize each row of the matrix that the sum of the row equals 1. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a row. :returns: a matrix containing normalized values """
newMatrix = list() for line in matrix: total = sum(line) if total != 0: newMatrix.append([i / total for i in line]) else: newMatrix.append(line) return newMatrix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _padImpurityMatrix(matrix, preChannels, postChannels): """Align the values of an isotope impurity matrix and fill up with 0. NOTE: The length of the rows in the "matrix" must be the sum of "preChannels" and "postChannels" + 1. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a row. :params preChannels: number of matrix columns with a nominal mass shift < 0 (-1, -2,..) in respect to the reporter ion mz value. :params postChannels: number of matrix columns with a nominal mass shift > 0 (+1, +2,..) in respect to the reporter ion mz value. :returns: extended matrix, where the number of rows is unchanged but the length of each row is extended to the number of rows. """
extendedMatrix = list() lastMatrixI = len(matrix)-1 for i, line in enumerate(matrix): prePadding = itertools.repeat(0., i) postPadding = itertools.repeat(0., lastMatrixI-i) newLine = list(itertools.chain(prePadding, line, postPadding)) extendedMatrix.append(newLine[preChannels:-postChannels]) return extendedMatrix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _processImpurityMatrix(self): """Process the impurity matrix so that it can be used to correct observed reporter intensities. """
processedMatrix = _normalizeImpurityMatrix(self.impurityMatrix) processedMatrix = _padImpurityMatrix( processedMatrix, self.matrixPreChannels, self.matrixPostChannels ) processedMatrix = _transposeMatrix(processedMatrix) return processedMatrix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exception(message): """Exception method convenience wrapper."""
def decorator(method):
    """Decorator layer that lets `exception` accept a default message."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        """Assemble message/prefix/statsd kwargs and invoke `method`."""
        # When messages are enabled, take an explicit message from the
        # call site (positional first, then keyword) and fall back to
        # the decorator's default; otherwise force the message to None.
        if self.messages:
            kwargs['message'] = args[0] if args else kwargs.get('message', message)
        else:
            kwargs['message'] = None
        kwargs['prefix'] = self.prefix
        kwargs['statsd'] = self.statsd
        return method(self, **kwargs)
    return wrapper
return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """Convert Exception class to a Python dictionary."""
val = dict(self.payload or ()) if self.message: val['message'] = self.message return val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_app(self, app, config=None, statsd=None): """Init Flask Extension."""
# Configuration resolution order: explicit argument, previously stored
# config, then the Flask application's own config.
if config is not None:
    self.config = config
elif self.config is None:
    self.config = app.config
cfg = self.config
self.messages = cfg.get('EXCEPTION_MESSAGE', True)
self.prefix = cfg.get('EXCEPTION_PREFIX', DEFAULT_PREFIX)
self.statsd = statsd