text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Client(version=__version__, resource=None, provider=None, **kwargs): """Initialize client object based on given version. :params version: version of CAL, define at setup.cfg :params resource: resource type (network, compute, object_storage, block_storage) :params provider: provider object :params cloud_config: cloud auth config :params **kwargs: specific args for resource :return: class Client HOW-TO: The simplest way to create a client instance is initialization:: >> from calplus import client >> calplus = client.Client(version='1.0.0', resource='compute', provider=provider_object, some_needed_args_for_ComputeClient) """
def Client(version=__version__, resource=None, provider=None, **kwargs):
    """Initialize a client object based on the given version.

    :param version: version of CAL, defined at setup.cfg
    :param resource: resource type
                     (network, compute, object_storage, block_storage)
    :param provider: provider object
    :param kwargs: specific args for the resource client
    :return: a resource client instance
    :raises exceptions.UnsupportedVersion: unknown version
    :raises exceptions.ProviderNotDefined: provider is None
    :raises exceptions.ProviderTypeNotFound: unsupported provider type
    :raises exceptions.ResourceNotDefined: resource is None/empty
    :raises exceptions.ResourceNotFound: unknown resource type

    HOW-TO:
    The simplest way to create a client instance is initialization::

        >> from calplus import client
        >> calplus = client.Client(version='1.0.0',
                                   resource='compute',
                                   provider=provider_object,
                                   **kwargs)
    """
    if version not in _CLIENTS:
        raise exceptions.UnsupportedVersion(
            'Unknown client version or subject')
    if provider is None:
        raise exceptions.ProviderNotDefined(
            'Not define Provider for Client')
    if provider.type not in CONF.providers.driver_mapper:
        raise exceptions.ProviderTypeNotFound(
            'Unknown provider.')
    if not resource:
        raise exceptions.ResourceNotDefined(
            'Not define Resource, choose one: compute, network, '
            'object_storage, block_storage.')
    # Normalize once; the original checked resource.lower() but then
    # indexed with the raw value, which broke mixed-case input.
    resource_key = resource.lower()
    if resource_key not in _CLIENTS[version]:
        raise exceptions.ResourceNotFound(
            'Unknown resource: compute, network, '
            'object_storage, block_storage.')
    # Lazy %-style args so the message is only formatted when emitted.
    LOG.info('Instantiating %s client (%s)', resource, version)
    return _CLIENTS[version][resource_key](
        provider.type, provider.config, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def accession(self): """ Parse accession number from commonly supported formats. If the defline does not match one of the following formats, the entire description (sans leading caret) will be returned. * >gi|572257426|ref|XP_006607122.1| * >gnl|Tcas|XP_008191512.1 * >lcl|PdomMRNAr1.2-10981.1 """
def accession(self):
    """
    Parse the accession number from commonly supported defline formats.

    Supported formats:

    * >gi|572257426|ref|XP_006607122.1|
    * >gnl|Tcas|XP_008191512.1
    * >lcl|PdomMRNAr1.2-10981.1

    Returns None when the defline matches none of these formats.
    """
    # Raw strings: '\d'/'\|' in plain strings are invalid string escapes
    # (DeprecationWarning, W605); regexes should always be raw literals.
    patterns = (
        ('>gi|', r'>gi\|\d+\|[^|]+\|([^|\n ]+)'),
        ('>gnl|', r'>gnl\|[^|]+\|([^|\n ]+)'),
        ('>lcl|', r'>lcl\|([^|\n ]+)'),
    )
    for prefix, pattern in patterns:
        if self.defline.startswith(prefix):
            match = re.match(pattern, self.defline)
            if match:
                return match.group(1)
            break  # prefix recognized but malformed: no accession
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_seq(self, outstream=None, linewidth=70): """ Print a sequence in a readable format. :param outstream: if `None`, formatted sequence is returned as a string; otherwise, it is treated as a file-like object and the formatted sequence is printed to the outstream :param linewidth: width for wrapping sequences over multiple lines; set to 0 for no wrapping """
def format_seq(self, outstream=None, linewidth=70):
    """
    Print a sequence in a readable format.

    :param outstream: if `None`, the formatted sequence is returned as a
                      string; otherwise, it is treated as a file-like
                      object and the formatted sequence is printed to it
    :param linewidth: width for wrapping sequences over multiple lines;
                      set to 0 for no wrapping
    """
    # No wrapping needed: emit the sequence as-is.
    if linewidth == 0 or len(self.seq) <= linewidth:
        if outstream is None:
            return self.seq
        print(self.seq, file=outstream)
        return
    chunks = [self.seq[pos:pos + linewidth]
              for pos in range(0, len(self.seq), linewidth)]
    if outstream is None:
        # Trailing newline matches the per-line output form.
        return '\n'.join(chunks) + '\n'
    for chunk in chunks:
        print(chunk, file=outstream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_validator(filter_data): """ ask every matcher whether it can serve such filter data :param filter_data: :return: """
def get_validator(filter_data):
    """Ask every matcher whether it can serve such filter data.

    :param filter_data: the filter data to transform
    :return: the (possibly transformed) filter data
    """
    # Each capable matcher is applied in turn, threading the result through.
    for matcher in matchers.values():
        if hasattr(matcher, 'can_handle') and matcher.can_handle(filter_data):
            filter_data = matcher.handle(filter_data)
    return filter_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(): """Run the examples"""
def run():
    """Run the examples."""
    # NOTE(kiennt): Until now, this example isn't finished yet,
    # because we don't have any completed driver
    # Build a network client backed by the openstack driver.
    network_client = client.Client(version=_VERSION,
                                   resource=_RESOURCES[0],
                                   provider=_PROVIDER)
    # net = network_client.create('daikk', '10.0.0.0/24')
    # list_subnet = network_client.list()
    # network_client.show(list_subnet[0].get("id"))
    network_client.delete("4b983028-0f8c-4b63-b10c-6e8420bb7903")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort(self, attr): """Sort the ratings based on an attribute"""
def sort(self, attr):
    """Sort the ratings entries based on an attribute; returns self."""
    sorter = Sorter(self.entries, self.category, attr)
    self.entries = sorter.sort_entries()
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_title(self): """Title is either the chart header for a cable ratings page or above the opening description for a broadcast ratings page. """
def get_title(self):
    """Title is either the chart header for a cable ratings page or the
    text above the opening description for a broadcast ratings page.
    """
    tag = 'strong' if self.category == 'cable' else 'b'
    strings = get_strings(self.soup, tag)
    if not strings:
        # Broadcast pages occasionally use <strong> as well.
        strings = get_strings(self.soup, 'strong')
    if strings and (self.category == 'cable' or 'Fast' in strings[-1]):
        return strings[0]
    return ''.join(strings)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_json(self): """Serialize ratings object as JSON-formatted string"""
def get_json(self):
    """Serialize this ratings object as a JSON-formatted string."""
    return to_json({
        'category': self.category,
        'date': self.date,
        'day': self.weekday,
        'next week': self.next_week,
        'last week': self.last_week,
        'entries': self.entries,
        'url': self.url,
    })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_url_params(self, shorten=True): """Returns a list of each parameter to be used for the url format."""
def _get_url_params(self, shorten=True):
    """Return the ordered list of parameters used to format the url."""
    is_cable = self.category == 'cable'
    url_date = convert_month(self.date, shorten=shorten, cable=is_cable)
    return [
        BASE_URL,
        self.weekday.lower(),
        self.category + '-ratings',
        url_date.replace(' ', '-'),
    ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_page(self): """Verify the ratings page matches the correct date"""
def _verify_page(self):
    """Verify that the fetched ratings page matches the expected date."""
    title_date = self._get_date_in_title().lower()
    terms = self.date.lower().split()
    # Abbreviate the month so both long and short month names match.
    terms[0] = terms[0][:3]
    return all(term in title_date for term in terms)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_ratings_page(self): """Do a limited search for the correct url."""
def _get_ratings_page(self):
    """Do a limited search for the correct url."""
    # Try the url built from the posted date, first with the shortened
    # month name and then with the full month name.
    for shorten in (True, False):
        self._build_url(shorten=shorten)
        soup = get_soup(self.url)
        if soup:
            return soup
    # No page found directly; fall back to a search.
    return SearchDaily(self.category, date=self.date).fetch_result()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_url(self, shorten=True): """Build the url for a cable ratings page"""
def _build_url(self, shorten=True):
    """Build the url for a cable ratings page."""
    params = self._get_url_params(shorten=shorten)
    self.url = URL_FORMAT.format(*params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_entries(self): """Fetch data and parse it to build a list of cable entries."""
def fetch_entries(self):
    """Fetch data and parse it to build a list of cable entries."""
    entries = []
    for row in self.get_rows():
        # Stop fetching data once the limit has been met.
        if exceeded_limit(self.limit, len(entries)):
            break
        cells = row.find_all('td')
        show = cells[0].string
        net = cells[1].string
        if not self._match_query(show, net):
            continue
        # Viewer counts are either 'x,xxx' (millions, comma as the
        # decimal mark) or plain thousands that need a '0.' prefix.
        viewers = cells[3].string
        if ',' in viewers:
            viewers = viewers.replace(',', '.')
        else:
            viewers = '0.' + viewers
        entries.append(Entry(
            show=show,
            net=net,
            time=cells[2].string,
            viewers=viewers,
            rating=cells[4].string,
        ))
    return entries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_url(self, shorten=True): """Build the url for a broadcast ratings page"""
def _build_url(self, shorten=True):
    """Build the url for a broadcast ratings page."""
    params = self._get_url_params(shorten=shorten)
    # For fast ratings, weekday and category swap positions in the url.
    if self.category != 'final':
        params[1], params[2] = params[2], params[1]
    self.url = URL_FORMAT.format(*params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_rows(self): """Get the rows from a broadcast ratings chart"""
def get_rows(self):
    """Get the data rows from a broadcast ratings chart."""
    # Skip the header row and the trailing three footer rows; keep only
    # rows that actually carry a value in their fourth cell.
    rows = self.soup.find_all('tr')[1:-3]
    return list(filter(lambda row: row.contents[3].string, rows))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_entries(self): """Fetch data and parse it to build a list of broadcast entries."""
def fetch_entries(self):
    """Fetch data and parse it to build a list of broadcast entries."""
    current_time = ''
    entries = []
    for row in self.get_rows():
        # Stop fetching data once the limit has been met.
        if exceeded_limit(self.limit, len(entries)):
            break
        cells = row.find_all('td')
        # Blank time cells inherit the previous row's time slot.
        slot = cells[0].string
        if slot:
            current_time = slot
        else:
            slot = current_time
        show_string = cells[1].string.split('(')
        show = show_string[0][:-1]
        net = self._get_net(show_string)
        if not self._match_query(show, net):
            continue
        rating, share = self._get_rating(cells)
        entries.append(Entry(
            time=slot,
            show=show,
            net=net,
            viewers=cells[3].string.strip('*'),
            rating=rating,
            share=share,
        ))
    return entries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_averages(self): """Get the broadcast network averages for that day. Returns a dictionary: key: network name value: sub-dictionary with 'viewers', 'rating', and 'share' as keys """
def get_averages(self):
    """Get the broadcast network averages for that day.

    Returns a dictionary:
        key: network name
        value: sub-dictionary with 'viewer', 'rating', and 'share' keys
    """
    networks = [unescape_html(cell.string)
                for cell in self.soup.find_all('td', width='77')]
    cells = self.soup.find_all('td', style=re.compile('^font'))
    # The first five styled cells hold 'rating/share'; the rest viewers.
    rateshares = [cell.string.split('/') for cell in cells[:5] if cell.string]
    viewers = [cell.string for cell in cells[5:] if cell.string]
    averages = {}
    for index, network in enumerate(networks):
        averages[network] = {
            'viewer': convert_float(unescape_html(viewers[index])),
            'rating': convert_float(unescape_html(rateshares[index][0])),
            'share': convert_float(unescape_html(rateshares[index][1])),
        }
    return averages
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_net(self, entry): """Get the network for a specific row"""
def _get_net(self, entry):
    """Get the network for a specific row, or None when absent."""
    try:
        raw = entry[1]
    except IndexError:
        # No network segment present in this row.
        return None
    # Slice out the text between '(' and ')'; find() returning -1 for a
    # missing '(' conveniently starts the slice at index 0.
    return raw[raw.find('(') + 1:raw.find(')')]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_rating(self, entry): """Get the rating and share for a specific row"""
def _get_rating(self, entry):
    """Get the (rating, share) pair for a specific row."""
    # The cell's text may be split across several navigable strings.
    combined = ''.join(entry[2].strings)
    rating, share = combined.split('/')
    return (rating, share.strip('*'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _visit(self, L, marked, tempmarked): """ Sort features topologically. This recursive function uses depth-first search to find an ordering of the features in the feature graph that is sorted both topologically and with respect to genome coordinates. Implementation based on Wikipedia's description of the algorithm in Cormen's *Introduction to Algorithms*. http://en.wikipedia.org/wiki/Topological_sorting#Algorithms There are potentially many valid topological sorts of a feature graph, but only one that is also sorted with respect to genome coordinates (excluding different orderings of, for example, exons and CDS features with the same coordinates). Iterating through feature children in reversed order (in this functions' inner-most loop) seems to be the key to sorting with respect to genome coordinates. """
def _visit(self, L, marked, tempmarked):
    """
    Sort features topologically.

    Depth-first search producing an ordering of the feature graph that
    is sorted both topologically and with respect to genome coordinates.
    Based on the algorithm description in Cormen's *Introduction to
    Algorithms* (see Wikipedia: Topological_sorting#Algorithms).
    Visiting children in reversed order is what keeps the result sorted
    by genome coordinate as well.
    """
    assert not self.is_pseudo
    if self in tempmarked:
        raise Exception('feature graph is cyclic')
    if self in marked:
        return
    tempmarked[self] = True
    pending = []
    if self.siblings is not None and self.is_toplevel:
        pending.extend(reversed(self.siblings))
    if self.children is not None:
        pending.extend(reversed(self.children))
    for feature in pending:
        feature._visit(L, marked, tempmarked)
    marked[self] = True
    del tempmarked[self]
    L.insert(0, self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_child(self, child, rangecheck=False): """Add a child feature to this feature."""
def add_child(self, child, rangecheck=False):
    """Add a child feature to this feature."""
    assert self.seqid == child.seqid, \
        (
            'seqid mismatch for feature {} ({} vs {})'.format(
                self.fid, self.seqid, child.seqid
            )
        )
    if rangecheck is True:
        assert self._strand == child._strand, \
            ('child of feature {} has a different strand'.format(self.fid))
        assert self._range.contains(child._range), \
            (
                'child of feature {} is not contained within its span '
                '({}-{})'.format(self.fid, child.start, child.end)
            )
    # Lazily create the child list, then keep it sorted on every insert.
    if self.children is None:
        self.children = [child]
    else:
        self.children.append(child)
    self.children.sort()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pseudoify(self): """ Derive a pseudo-feature parent from the given multi-feature. The provided multi-feature does not need to be the representative. The newly created pseudo-feature has the same seqid as the provided multi- feature, and spans its entire range. Otherwise, the pseudo-feature is empty. It is used only for convenience in sorting. """
def pseudoify(self):
    """
    Derive a pseudo-feature parent from the given multi-feature.

    The provided multi-feature need not be the representative. The new
    pseudo-feature shares the multi-feature's seqid and spans its entire
    range, but is otherwise empty; it exists only as a sorting aid.
    """
    assert self.is_toplevel
    assert self.is_multi
    assert len(self.multi_rep.siblings) > 0
    rep = self.multi_rep
    group = rep.siblings + [rep]
    start = min(member.start for member in group)
    end = max(member.end for member in group)
    parent = Feature(None)
    parent._pseudo = True
    parent._seqid = self._seqid
    parent.set_coord(start, end)
    parent._strand = self._strand
    for member in group:
        parent.add_child(member, rangecheck=True)
    parent.children = sorted(parent.children)
    rep.siblings = sorted(rep.siblings)
    return parent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def slug(self): """ A concise slug for this feature. Unlike the internal representation, which is 0-based half-open, the slug is a 1-based closed interval (a la GFF3). """
def slug(self):
    """
    A concise slug for this feature.

    Unlike the internal representation, which is 0-based half-open, the
    slug is a 1-based closed interval (a la GFF3).
    """
    coords = '[{}, {}]'.format(self.start + 1, self.end)
    return '{}@{}{}'.format(self.type, self.seqid, coords)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_sibling(self, sibling): """ Designate this a multi-feature representative and add a co-feature. Some features exist discontinuously on the sequence, and therefore cannot be declared with a single GFF3 entry (which can encode only a single interval). The canonical encoding for these types of features is called a multi-feature, in which a single feature is declared on multiple lines with multiple entries all sharing the same feature type and ID attribute. This is commonly done with coding sequence (CDS) features. In this package, each multi-feature has a single "representative" feature object, and all other objects/entries associated with that multi-feature are attached to it as "siblings". Invoking this method will designate the calling feature as the multi-feature representative and add the argument as a sibling. """
def add_sibling(self, sibling):
    """
    Designate this a multi-feature representative and add a co-feature.

    Discontinuous features (commonly CDS) are declared across multiple
    GFF3 entries sharing one type and ID. One object serves as the
    "representative" of the multi-feature; all other entries attach to
    it as "siblings". Calling this method makes the calling feature the
    representative and registers the argument as a sibling.
    """
    assert self.is_pseudo is False
    # First sibling: promote self to representative.
    if self.siblings is None:
        self.siblings = []
        self.multi_rep = self
    sibling.multi_rep = self
    self.siblings.append(sibling)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def source(self, newsource): """When modifying source, also update children with matching source."""
def source(self, newsource):
    """When modifying source, also update children with matching source."""
    previous = self.source
    # Only descendants that shared the old source are rewritten.
    for feat in self:
        if feat.source == previous:
            feat._source = newsource
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def type(self, newtype): """If the feature is a multifeature, update all entries."""
def type(self, newtype):
    """If the feature is a multifeature, update all entries."""
    self._type = newtype
    if self.is_multi:
        # Every co-feature of a multi-feature must share the same type.
        for cofeature in self.multi_rep.siblings:
            cofeature._type = newtype
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(self, offset, newseqid=None): """Transform the feature's coordinates by the given offset."""
def transform(self, offset, newseqid=None):
    """Transform the feature's coordinates by the given offset."""
    for feat in self:
        feat._range.transform(offset)
        # Optionally relocate every feature to a new sequence.
        if newseqid is not None:
            feat.seqid = newseqid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None): """ Add an attribute to this feature. Feature attributes are stored as nested dictionaries. Each feature can only have one ID, so ID attribute mapping is 'string' to 'string'. All other attributes can have multiple values, so mapping is 'string' to 'dict of strings'. By default, adding an attribute that already exists will cause the old value to be overwritten. If the `append` option is true, the new attribute value will not overwrite the old value, but will be appended as a second value. (Note: ID attributes can have only 1 value.) If the `oldvalue` option is set, the new value will replace the old value. This is necessary for updating an attribute that has multiple values without completely overwriting all old values. (Note: The `append` option is ignored when `oldvalue` is set.) """
def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None):
    """
    Add an attribute to this feature.

    Attributes are stored as nested dictionaries: ID maps string to
    string (a feature has exactly one ID), every other key maps to a
    dict of values. By default a new value overwrites the old one; with
    `append=True` it is added as an additional value. Setting `oldvalue`
    replaces that specific value (and `append` is ignored).
    """
    if attrkey == 'ID':
        # Keep children's Parent attributes in sync with the new ID.
        if self.children is not None:
            previd = self.get_attribute('ID')
            for child in self.children:
                child.add_attribute('Parent', attrvalue, oldvalue=previd)
        self._attrs['ID'] = attrvalue
        # All entries of a multi-feature share one ID.
        if self.is_multi:
            rep = self.multi_rep
            rep._attrs['ID'] = attrvalue
            for sibling in rep.siblings:
                sibling._attrs['ID'] = attrvalue
        return
    # Non-ID attributes: optionally drop a specific old value first.
    if oldvalue is not None and attrkey in self._attrs:
        assert oldvalue in self._attrs[attrkey]
        del self._attrs[attrkey][oldvalue]
    if attrkey not in self._attrs or append is False:
        self._attrs[attrkey] = {}
    self._attrs[attrkey][attrvalue] = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_attribute(self, attrkey, as_string=False, as_list=False): """ Get the value of an attribute. By default, returns a string for ID and attributes with a single value, and a list of strings for attributes with multiple values. The `as_string` and `as_list` options can be used to force the function to return values as a string (comma-separated in case of multiple values) or a list. """
def get_attribute(self, attrkey, as_string=False, as_list=False):
    """
    Get the value of an attribute.

    Returns a string for ID and for single-valued attributes, and a
    sorted list of strings for multi-valued attributes. Use `as_string`
    to force a comma-joined string or `as_list` to force a list.
    """
    assert not (as_string and as_list)
    if attrkey not in self._attrs:
        return None
    if attrkey == 'ID':
        # ID is stored as a bare string, not a value dict.
        return self._attrs['ID']
    values = sorted(self._attrs[attrkey])
    if len(values) == 1 and not as_list:
        return values[0]
    if as_string:
        return ','.join(values)
    return values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_attributes(self, attrstring): """ Parse an attribute string. Given a string with semicolon-separated key-value pairs, populate a dictionary with the given attributes. """
def parse_attributes(self, attrstring):
    """
    Parse a GFF3 attribute string.

    Given a string of semicolon-separated key-value pairs, return a dict
    mapping each key to its value ('ID' maps to a plain string; every
    other key maps to a dict of values). Empty/placeholder strings
    (None, '', '.') yield an empty dict.
    """
    if attrstring in (None, '', '.'):
        return {}
    attributes = {}
    for kvp in attrstring.split(';'):
        if kvp == '':
            continue
        # split('=', 1) so values that themselves contain '=' survive;
        # a bare split('=') raised ValueError on such pairs.
        key, value = kvp.split('=', 1)
        if key == 'ID':
            assert ',' not in value
            attributes[key] = value
            continue
        attributes[key] = {val: True for val in value.split(',')}
    return attributes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attribute_crawl(self, key): """ Grab all attribute values associated with the given feature. Traverse the given feature (and all of its descendants) to find all values associated with the given attribute key. ['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1'] ['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1'] ['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1'] ['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1'] ['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1'] ['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1'] """
def attribute_crawl(self, key):
    """
    Grab all attribute values associated with the given feature.

    Traverse the feature and all of its descendants to collect every
    value stored under the given attribute key; returns them as a set.
    """
    collected = set()
    for feature in self:
        values = feature.get_attribute(key, as_list=True)
        if values is not None:
            collected.update(values)
    return collected
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ncbi_geneid(self): """ Retrieve this feature's NCBI GeneID if it's present. NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes (example: `Dbxref=GeneID:103504972`). This function locates and returns the GeneID if present, or returns `None` otherwise. """
def ncbi_geneid(self):
    """
    Retrieve this feature's NCBI GeneID if it's present.

    NCBI GFF3 files encode gene IDs in **Dbxref** attributes (example:
    `Dbxref=GeneID:103504972`). Return the GeneID if present, else None.
    """
    values = self.get_attribute('Dbxref', as_list=True)
    if values is None:
        return None
    for value in values:
        if value.startswith('GeneID:'):
            # partition instead of a 2-tuple split: a value containing
            # an extra ':' no longer raises ValueError.
            return value.partition(':')[2]
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cdslen(self): """ Translated length of this feature. Undefined for non-mRNA features. """
def cdslen(self):
    """
    Translated length of this feature.

    Undefined (None) for non-mRNA features.
    """
    if self.type != 'mRNA':
        return None
    # Sum the lengths of all CDS children.
    return sum(len(child) for child in self.children if child.type == 'CDS')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_querystring(msg):
    """Parse a querystring into (key, value) pairs.

    Yields one (key, value) tuple per '&'-separated component of
    msg.querystring, after stripping whitespace and a leading '?'.
    """
    qs = msg.querystring.strip().lstrip('?')
    for part in qs.split('&'):
        # split('=', 1) keeps values that themselves contain '=' intact;
        # a bare split('=') raised ValueError on such components.
        key, value = part.split('=', 1)
        yield key, value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddClusterTags(r, tags, dry_run=False): """ Adds tags to the cluster. @type tags: list of str @param tags: tags to add to the cluster @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
def AddClusterTags(r, tags, dry_run=False):
    """
    Adds tags to the cluster.

    @type tags: list of str
    @param tags: tags to add to the cluster
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    return r.request("put", "/2/tags",
                     query={"tag": tags, "dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteClusterTags(r, tags, dry_run=False): """ Deletes tags from the cluster. @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run """
def DeleteClusterTags(r, tags, dry_run=False):
    """
    Deletes tags from the cluster.

    @type tags: list of str
    @param tags: tags to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    """
    return r.request("delete", "/2/tags",
                     query={"tag": tags, "dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetInstances(r, bulk=False): """ Gets information about instances on the cluster. @type bulk: bool @param bulk: whether to return all information about all instances @rtype: list of dict or list of str @return: if bulk is True, info about the instances, else a list of instances """
def GetInstances(r, bulk=False):
    """
    Gets information about instances on the cluster.

    @type bulk: bool
    @param bulk: whether to return all information about all instances

    @rtype: list of dict or list of str
    @return: if bulk is True, info about the instances, else a list of
        instance names
    """
    if bulk:
        return r.request("get", "/2/instances", query={"bulk": 1})
    # Non-bulk: reduce the returned records to their names.
    instances = r.request("get", "/2/instances")
    return r.applier(itemgetters("id"), instances)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetInstanceInfo(r, instance, static=None): """ Gets information about an instance. @type instance: string @param instance: Instance name @rtype: string @return: Job ID """
def GetInstanceInfo(r, instance, static=None):
    """
    Gets information about an instance.

    @type instance: string
    @param instance: Instance name

    @rtype: string
    @return: Job ID
    """
    path = "/2/instances/%s/info" % instance
    if static is None:
        return r.request("get", path)
    return r.request("get", path, query={"static": static})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteInstance(r, instance, dry_run=False): """ Deletes an instance. @type instance: str @param instance: the instance to delete @rtype: int @return: job id """
def DeleteInstance(r, instance, dry_run=False):
    """
    Deletes an instance.

    @type instance: str
    @param instance: the instance to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    path = "/2/instances/%s" % instance
    return r.request("delete", path, query={"dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ActivateInstanceDisks(r, instance, ignore_size=False): """ Activates an instance's disks. @type instance: string @param instance: Instance name @type ignore_size: bool @param ignore_size: Whether to ignore recorded size @return: job id """
def ActivateInstanceDisks(r, instance, ignore_size=False):
    """
    Activates an instance's disks.

    @type instance: string
    @param instance: Instance name
    @type ignore_size: bool
    @param ignore_size: Whether to ignore recorded size

    @return: job id
    """
    path = "/2/instances/%s/activate-disks" % instance
    return r.request("put", path, query={"ignore_size": ignore_size})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RecreateInstanceDisks(r, instance, disks=None, nodes=None): """Recreate an instance's disks. @type instance: string @param instance: Instance name @type disks: list of int @param disks: List of disk indexes @type nodes: list of string @param nodes: New instance nodes, if relocation is desired @rtype: string @return: job id """
def RecreateInstanceDisks(r, instance, disks=None, nodes=None):
    """Recreate an instance's disks.

    @type instance: string
    @param instance: Instance name
    @type disks: list of int
    @param disks: List of disk indexes
    @type nodes: list of string
    @param nodes: New instance nodes, if relocation is desired

    @rtype: string
    @return: job id
    """
    # Only include the optional fields that were actually supplied.
    body = {key: value
            for key, value in (("disks", disks), ("nodes", nodes))
            if value is not None}
    return r.request("post", "/2/instances/%s/recreate-disks" % instance,
                     content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GrowInstanceDisk(r, instance, disk, amount, wait_for_sync=False): """ Grows a disk of an instance. More details for parameters can be found in the RAPI documentation. @type instance: string @param instance: Instance name @type disk: integer @param disk: Disk index @type amount: integer @param amount: Grow disk by this amount (MiB) @type wait_for_sync: bool @param wait_for_sync: Wait for disk to synchronize @rtype: int @return: job id """
def GrowInstanceDisk(r, instance, disk, amount, wait_for_sync=False):
    """
    Grows a disk of an instance.

    @type instance: string
    @param instance: Instance name
    @type disk: integer
    @param disk: Disk index
    @type amount: integer
    @param amount: Grow disk by this amount (MiB)
    @type wait_for_sync: bool
    @param wait_for_sync: Wait for disk to synchronize

    @rtype: int
    @return: job id
    """
    path = "/2/instances/%s/disk/%s/grow" % (instance, disk)
    return r.request("post", path,
                     content={"amount": amount,
                              "wait_for_sync": wait_for_sync})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddInstanceTags(r, instance, tags, dry_run=False): """ Adds tags to an instance. @type instance: str @param instance: instance to add tags to @type tags: list of str @param tags: tags to add to the instance @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
def AddInstanceTags(r, instance, tags, dry_run=False):
    """
    Adds tags to an instance.

    @type instance: str
    @param instance: instance to add tags to
    @type tags: list of str
    @param tags: tags to add to the instance
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    path = "/2/instances/%s/tags" % instance
    return r.request("put", path, query={"tag": tags, "dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteInstanceTags(r, instance, tags, dry_run=False): """ Deletes tags from an instance. @type instance: str @param instance: instance to delete tags from @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run """
def DeleteInstanceTags(r, instance, tags, dry_run=False):
    """
    Deletes tags from an instance.

    @type instance: str
    @param instance: instance to delete tags from
    @type tags: list of str
    @param tags: tags to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    """
    path = "/2/instances/%s/tags" % instance
    return r.request("delete", path, query={"tag": tags, "dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RebootInstance(r, instance, reboot_type=None, ignore_secondaries=False, dry_run=False): """ Reboots an instance. @type instance: str @param instance: instance to rebot @type reboot_type: str @param reboot_type: one of: hard, soft, full @type ignore_secondaries: bool @param ignore_secondaries: if True, ignores errors for the secondary node while re-assembling disks (in hard-reboot mode only) @type dry_run: bool @param dry_run: whether to perform a dry run """
def RebootInstance(r, instance, reboot_type=None, ignore_secondaries=False,
                   dry_run=False):
    """
    Reboots an instance.

    @type instance: str
    @param instance: instance to reboot
    @type reboot_type: str
    @param reboot_type: one of: hard, soft, full
    @type ignore_secondaries: bool
    @param ignore_secondaries: if True, ignores errors for the secondary
        node while re-assembling disks (in hard-reboot mode only)
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    """
    query = {
        "ignore_secondaries": ignore_secondaries,
        "dry-run": dry_run,
    }
    if reboot_type:
        # Validate before sending anything to the server.
        if reboot_type not in ("hard", "soft", "full"):
            raise GanetiApiError("reboot_type must be one of 'hard',"
                                 " 'soft', or 'full'")
        query["type"] = reboot_type
    return r.request("post", "/2/instances/%s/reboot" % instance, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ShutdownInstance(r, instance, dry_run=False, no_remember=False, timeout=120): """ Shuts down an instance. @type instance: str @param instance: the instance to shut down @type dry_run: bool @param dry_run: whether to perform a dry run @type no_remember: bool @param no_remember: if true, will not record the state change @rtype: string @return: job id """
def ShutdownInstance(r, instance, dry_run=False, no_remember=False,
                     timeout=120):
    """
    Shuts down an instance.

    @type instance: str
    @param instance: the instance to shut down
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @type no_remember: bool
    @param no_remember: if true, will not record the state change

    @rtype: string
    @return: job id
    """
    return r.request("put", "/2/instances/%s/shutdown" % instance,
                     query={"dry-run": dry_run, "no-remember": no_remember},
                     content={"timeout": timeout})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def StartupInstance(r, instance, dry_run=False, no_remember=False): """ Starts up an instance. @type instance: str @param instance: the instance to start up @type dry_run: bool @param dry_run: whether to perform a dry run @type no_remember: bool @param no_remember: if true, will not record the state change @rtype: string @return: job id """
def StartupInstance(r, instance, dry_run=False, no_remember=False):
    """
    Starts up an instance.

    @type instance: str
    @param instance: the instance to start up
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @type no_remember: bool
    @param no_remember: if true, will not record the state change

    @rtype: string
    @return: job id
    """
    return r.request("put", "/2/instances/%s/startup" % instance,
                     query={"dry-run": dry_run, "no-remember": no_remember})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ReinstallInstance(r, instance, os=None, no_startup=False, osparams=None): """ Reinstalls an instance. @type instance: str @param instance: The instance to reinstall @type os: str or None @param os: The operating system to reinstall. If None, the instance's current operating system will be installed again @type no_startup: bool @param no_startup: Whether to start the instance automatically """
def ReinstallInstance(r, instance, os=None, no_startup=False, osparams=None):
    """
    Reinstalls an instance.

    @type instance: str
    @param instance: The instance to reinstall
    @type os: str or None
    @param os: The operating system to reinstall. If None, the instance's
        current operating system will be installed again
    @type no_startup: bool
    @param no_startup: Whether to start the instance automatically
    """
    # Newer servers accept a request body with full options.
    if INST_REINSTALL_REQV1 in r.features:
        body = {"start": not no_startup}
        if os is not None:
            body["os"] = os
        if osparams is not None:
            body["osparams"] = osparams
        return r.request("post", "/2/instances/%s/reinstall" % instance,
                         content=body)
    # Fall back to the legacy query-parameter request format.
    if osparams:
        raise GanetiApiError("Server does not support specifying OS"
                             " parameters for instance reinstallation")
    query = {"nostartup": no_startup}
    if os:
        query["os"] = os
    return r.request("post", "/2/instances/%s/reinstall" % instance,
                     query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ReplaceInstanceDisks(r, instance, disks=None, mode=REPLACE_DISK_AUTO, remote_node=None, iallocator=None, dry_run=False): """ Replaces disks on an instance. @type instance: str @param instance: instance whose disks to replace @type disks: list of ints @param disks: Indexes of disks to replace @type mode: str @param mode: replacement mode to use (defaults to replace_auto) @type remote_node: str or None @param remote_node: new secondary node to use (for use with replace_new_secondary mode) @type iallocator: str or None @param iallocator: instance allocator plugin to use (for use with replace_auto mode) @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
if mode not in REPLACE_DISK: raise GanetiApiError("Invalid mode %r not one of %r" % (mode, REPLACE_DISK)) query = { "mode": mode, "dry-run": dry_run, } if disks: query["disks"] = ",".join(str(idx) for idx in disks) if remote_node: query["remote_node"] = remote_node if iallocator: query["iallocator"] = iallocator return r.request("post", "/2/instances/%s/replace-disks" % instance, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ExportInstance(r, instance, mode, destination, shutdown=None, remove_instance=None, x509_key_name=None, destination_x509_ca=None): """ Exports an instance. @type instance: string @param instance: Instance name @type mode: string @param mode: Export mode @rtype: string @return: Job ID """
body = { "destination": destination, "mode": mode, } if shutdown is not None: body["shutdown"] = shutdown if remove_instance is not None: body["remove_instance"] = remove_instance if x509_key_name is not None: body["x509_key_name"] = x509_key_name if destination_x509_ca is not None: body["destination_x509_ca"] = destination_x509_ca return r.request("put", "/2/instances/%s/export" % instance, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MigrateInstance(r, instance, mode=None, cleanup=None): """ Migrates an instance. @type instance: string @param instance: Instance name @type mode: string @param mode: Migration mode @type cleanup: bool @param cleanup: Whether to clean up a previously failed migration """
body = {} if mode is not None: body["mode"] = mode if cleanup is not None: body["cleanup"] = cleanup return r.request("put", "/2/instances/%s/migrate" % instance, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False, target_node=None): """Does a failover of an instance. @type instance: string @param instance: Instance name @type iallocator: string @param iallocator: Iallocator for deciding the target node for shared-storage instances @type ignore_consistency: bool @param ignore_consistency: Whether to ignore disk consistency @type target_node: string @param target_node: Target node for shared-storage instances @rtype: string @return: job id """
body = { "ignore_consistency": ignore_consistency, } if iallocator is not None: body["iallocator"] = iallocator if target_node is not None: body["target_node"] = target_node return r.request("put", "/2/instances/%s/failover" % instance, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RenameInstance(r, instance, new_name, ip_check, name_check=None): """ Changes the name of an instance. @type instance: string @param instance: Instance name @type new_name: string @param new_name: New instance name @type ip_check: bool @param ip_check: Whether to ensure instance's IP address is inactive @type name_check: bool @param name_check: Whether to ensure instance's name is resolvable """
body = { "ip_check": ip_check, "new_name": new_name, } if name_check is not None: body["name_check"] = name_check return r.request("put", "/2/instances/%s/rename" % instance, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial): """ Waits for job changes. @type job_id: int @param job_id: Job ID for which to wait """
body = { "fields": fields, "previous_job_info": prev_job_info, "previous_log_serial": prev_log_serial, } return r.request("get", "/2/jobs/%s/wait" % job_id, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def CancelJob(r, job_id, dry_run=False): """ Cancels a job. @type job_id: int @param job_id: id of the job to delete @type dry_run: bool @param dry_run: whether to perform a dry run """
return r.request("delete", "/2/jobs/%s" % job_id, query={"dry-run": dry_run})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetNodes(r, bulk=False): """ Gets all nodes in the cluster. @type bulk: bool @param bulk: whether to return all information about all nodes @rtype: list of dict or str @return: if bulk is true, info about nodes in the cluster, else list of nodes in the cluster """
if bulk: return r.request("get", "/2/nodes", query={"bulk": 1}) else: nodes = r.request("get", "/2/nodes") return r.applier(itemgetters("id"), nodes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def EvacuateNode(r, node, iallocator=None, remote_node=None, dry_run=False, early_release=False, mode=None, accept_old=False): """ Evacuates instances from a Ganeti node. @type node: str @param node: node to evacuate @type iallocator: str or None @param iallocator: instance allocator to use @type remote_node: str @param remote_node: node to evacuate to @type dry_run: bool @param dry_run: whether to perform a dry run @type early_release: bool @param early_release: whether to enable parallelization @type accept_old: bool @param accept_old: Whether caller is ready to accept old-style (pre-2.5) results @rtype: string, or a list for pre-2.5 results @return: Job ID or, if C{accept_old} is set and server is pre-2.5, list of (job ID, instance name, new secondary node); if dry_run was specified, then the actual move jobs were not submitted and the job IDs will be C{None} @raises GanetiApiError: if an iallocator and remote_node are both specified """
# iallocator and remote_node are mutually exclusive evacuation targets.
if iallocator and remote_node:
    raise GanetiApiError("Only one of iallocator or remote_node can"
                         " be used")

query = {
    "dry-run": dry_run,
}

if iallocator:
    query["iallocator"] = iallocator
if remote_node:
    query["remote_node"] = remote_node

if NODE_EVAC_RES1 in r.features:
    # Server supports body parameters
    body = {
        "early_release": early_release,
    }

    if iallocator is not None:
        body["iallocator"] = iallocator
    if remote_node is not None:
        body["remote_node"] = remote_node
    if mode is not None:
        body["mode"] = mode
else:
    # Pre-2.5 request format
    body = None

    if not accept_old:
        raise GanetiApiError("Server is version 2.4 or earlier and"
                             " caller does not accept old-style"
                             " results (parameter accept_old)")

    # Pre-2.5 servers can only evacuate secondaries
    if mode is not None and mode != NODE_EVAC_SEC:
        raise GanetiApiError("Server can only evacuate secondary instances")

    if iallocator is not None:
        query["iallocator"] = iallocator
    if remote_node is not None:
        query["remote_node"] = remote_node

    # NOTE(review): `if query:` is always true here ("dry-run" is always
    # present), so early_release is unconditionally forced to 1 on old
    # servers -- this probably should be `if early_release:`; confirm
    # against the Ganeti RAPI before changing.
    if query:
        query["early_release"] = 1

return r.request("post", "/2/nodes/%s/evacuate" % node, query=query,
                 content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MigrateNode(r, node, mode=None, dry_run=False, iallocator=None, target_node=None): """ Migrates all primary instances from a node. @type node: str @param node: node to migrate @type mode: string @param mode: if passed, it will overwrite the live migration type, otherwise the hypervisor default will be used @type dry_run: bool @param dry_run: whether to perform a dry run @type iallocator: string @param iallocator: instance allocator to use @type target_node: string @param target_node: Target node for shared-storage instances @rtype: int @return: job id """
# The dry-run flag always travels in the query string.
query = {
    "dry-run": dry_run,
}

if NODE_MIGRATE_REQV1 in r.features:
    # Modern servers take all migration options in the request body.
    body = {}

    if mode is not None:
        body["mode"] = mode
    if iallocator is not None:
        body["iallocator"] = iallocator
    if target_node is not None:
        body["target_node"] = target_node

else:
    # Use old request format
    # Legacy servers only understand "mode", via the query string; a
    # target node cannot be expressed at all.
    if target_node is not None:
        raise GanetiApiError("Server does not support specifying"
                             " target node for node migration")

    body = None

    if mode is not None:
        query["mode"] = mode

return r.request("post", "/2/nodes/%s/migrate" % node, query=query,
                 content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def SetNodeRole(r, node, role, force=False, auto_promote=False): """ Sets the role for a node. @type node: str @param node: the node whose role to set @type role: str @param role: the role to set for the node @type force: bool @param force: whether to force the role change @type auto_promote: bool @param auto_promote: Whether node(s) should be promoted to master candidate if necessary @rtype: int @return: job id """
query = { "force": force, "auto_promote": auto_promote, } return r.request("put", "/2/nodes/%s/role" % node, query=query, content=role)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def PowercycleNode(r, node, force=False): """ Powercycles a node. @type node: string @param node: Node name @type force: bool @param force: Whether to force the operation @rtype: string @return: job id """
query = { "force": force, } return r.request("post", "/2/nodes/%s/powercycle" % node, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetNodeStorageUnits(r, node, storage_type, output_fields): """ Gets the storage units for a node. @type node: str @param node: the node whose storage units to return @type storage_type: str @param storage_type: storage type whose units to return @type output_fields: str @param output_fields: storage type fields to return @rtype: int @return: job id where results can be retrieved """
query = { "storage_type": storage_type, "output_fields": output_fields, } return r.request("get", "/2/nodes/%s/storage" % node, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ModifyNodeStorageUnits(r, node, storage_type, name, allocatable=None): """ Modifies parameters of storage units on the node. @type node: str @param node: node whose storage units to modify @type storage_type: str @param storage_type: storage type whose units to modify @type name: str @param name: name of the storage unit @type allocatable: bool or None @param allocatable: Whether to set the "allocatable" flag on the storage unit (None=no modification, True=set, False=unset) @rtype: int @return: job id """
query = { "storage_type": storage_type, "name": name, } if allocatable is not None: query["allocatable"] = allocatable return r.request("put", "/2/nodes/%s/storage/modify" % node, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RepairNodeStorageUnits(r, node, storage_type, name): """ Repairs a storage unit on the node. @type node: str @param node: node whose storage units to repair @type storage_type: str @param storage_type: storage type to repair @type name: str @param name: name of the storage unit to repair @rtype: int @return: job id """
query = { "storage_type": storage_type, "name": name, } return r.request("put", "/2/nodes/%s/storage/repair" % node, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddNodeTags(r, node, tags, dry_run=False): """ Adds tags to a node. @type node: str @param node: node to add tags to @type tags: list of str @param tags: tags to add to the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
query = { "tag": tags, "dry-run": dry_run, } return r.request("put", "/2/nodes/%s/tags" % node, query=query, content=tags)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteNodeTags(r, node, tags, dry_run=False): """ Delete tags from a node. @type node: str @param node: node to remove tags from @type tags: list of str @param tags: tags to remove from the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
query = { "tag": tags, "dry-run": dry_run, } return r.request("delete", "/2/nodes/%s/tags" % node, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetGroups(r, bulk=False): """ Gets all node groups in the cluster. @type bulk: bool @param bulk: whether to return all information about the groups @rtype: list of dict or str @return: if bulk is true, a list of dictionaries with info about all node groups in the cluster, else a list of names of those node groups """
if bulk: return r.request("get", "/2/groups", query={"bulk": 1}) else: groups = r.request("get", "/2/groups") return r.applier(itemgetters("name"), groups)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def CreateGroup(r, name, alloc_policy=None, dry_run=False): """ Creates a new node group. @type name: str @param name: the name of node group to create @type alloc_policy: str @param alloc_policy: the desired allocation policy for the group, if any @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
query = { "dry-run": dry_run, } body = { "name": name, "alloc_policy": alloc_policy } return r.request("post", "/2/groups", query=query, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteGroup(r, group, dry_run=False): """ Deletes a node group. @type group: str @param group: the node group to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """
query = { "dry-run": dry_run, } return r.request("delete", "/2/groups/%s" % group, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RenameGroup(r, group, new_name): """ Changes the name of a node group. @type group: string @param group: Node group name @type new_name: string @param new_name: New node group name @rtype: int @return: job id """
body = { "new_name": new_name, } return r.request("put", "/2/groups/%s/rename" % group, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AssignGroupNodes(r, group, nodes, force=False, dry_run=False): """ Assigns nodes to a group. @type group: string @param group: Node group name @type nodes: list of strings @param nodes: List of nodes to assign to the group @rtype: int @return: job id """
query = { "force": force, "dry-run": dry_run, } body = { "nodes": nodes, } return r.request("put", "/2/groups/%s/assign-nodes" % group, query=query, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddGroupTags(r, group, tags, dry_run=False): """ Adds tags to a node group. @type group: str @param group: group to add tags to @type tags: list of string @param tags: tags to add to the group @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: string @return: job id """
query = { "dry-run": dry_run, "tag": tags, } return r.request("put", "/2/groups/%s/tags" % group, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DeleteGroupTags(r, group, tags, dry_run=False): """ Deletes tags from a node group. @type group: str @param group: group to delete tags from @type tags: list of string @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: string @return: job id """
query = { "dry-run": dry_run, "tag": tags, } return r.request("delete", "/2/groups/%s/tags" % group, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Query(r, what, fields, qfilter=None): """ Retrieves information about resources. @type what: string @param what: Resource name, one of L{constants.QR_VIA_RAPI} @type fields: list of string @param fields: Requested fields @type qfilter: None or list @param qfilter: Query filter @rtype: string @return: job id """
body = { "fields": fields, } if qfilter is not None: body["qfilter"] = body["filter"] = qfilter return r.request("put", "/2/query/%s" % what, content=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def QueryFields(r, what, fields=None): """ Retrieves available fields for a resource. @type what: string @param what: Resource name, one of L{constants.QR_VIA_RAPI} @type fields: list of string @param fields: Requested fields @rtype: string @return: job id """
query = {} if fields is not None: query["fields"] = ",".join(fields) return r.request("get", "/2/query/%s/fields" % what, query=query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def createalphabet(alphabetinput=None): """ Creates a sample alphabet containing printable ASCII characters """
if alphabetinput and os.path.isfile(alphabetinput): return _load_alphabet(alphabetinput) elif alphabetinput: alpha = [] setlist = alphabetinput.split(',') for alphaset in setlist: a = int(alphaset.split('-')[0]) b = int(alphaset.split('-')[1]) for i in range(a, b): alpha.append(str(unichr(i))) return alpha alpha = [] for i in range(32, 127): alpha.append(str(unichr(i))) return alpha
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _instant_search(self): """Determine possible keys after a push or pop """
# Collect the keys whose searchable text contains the current input
# string and stash them as the newest candidate set.
matching_keys = [key for key, text in self.searchables.iteritems()
                 if self.string in text]
self.candidates.append(matching_keys)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def best_guess(self): """Return the gnomekeyring position of the closest matching """
best_guess_ever = (0, 0) # (key, string) points = defaultdict(float) points[0] = 0 if len(self.string) > 0: for key in self.candidate_keys: guess = self.searchables[key] if guess == self.string: points[key] += 100 break # skip, entry longer then guess if len(self.string) > len(guess): continue # begins with if guess.startswith(self.string): points[key] += 1 # contained in if self.string in guess: points[key] += 1 # percentage of user search string in best guess if points[key] > 0: points[key] += float(len(self.string))/len(guess) for k,v in points.iteritems(): if points[best_guess_ever[0]] < points[k]: best_guess_ever = (k, self.searchables[k]) return best_guess_ever
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_html_files(self, destination): """ Finds all html files in the given destination. """
for root, dirs, files in os.walk(destination): for f in files: if f.endswith('.html'): yield os.path.join(root, f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def minify_file(self, target): """ Minifies the target html file. """
# Read the raw bytes first so chardet can sniff the file's encoding.
# Using a context manager fixes the original's leaked file handle
# (bare `open(...).read()` never closed the file).
with open(target, 'rb') as raw:
    html = raw.read()
enc = chardet.detect(html)['encoding']
# Re-open with the detected encoding and rewrite the file in place
# with the minified markup.
with codecs.open(target, 'r+', enc) as f:
    result = htmlmin.minify(f.read(), **self.options)
    f.seek(0)
    f.write(result)
    f.truncate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def on_after_build_all(self, builder, **extra): """ after-build-all lektor event """
# NOTE(vesuvium): compatibility for lektor 2.X and 3.X try: is_enabled = self.is_enabled(builder.build_flags) except AttributeError: is_enabled = self.is_enabled(builder.extra_flags) if not is_enabled: return reporter.report_generic('Starting HTML minification') for htmlfile in self.find_html_files(builder.destination_path): self.minify_file(htmlfile) reporter.report_generic('HTML minification finished')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def features(entrystream, type=None, traverse=False): """ Pull features out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only features of the specified type; set to :code:`None` to retrieve all features :param traverse: by default, only top-level features are selected; set to :code:`True` to search each feature graph for the specified feature type """
for feature in entry_type_filter(entrystream, tag.Feature):
    if traverse:
        # Traversal only makes sense when filtering by a concrete type.
        if type is None:
            message = 'cannot traverse without a specific feature type'
            raise ValueError(message)
        if type == feature.type:
            # Top-level feature already matches: yield it and do not
            # descend into its children.
            yield feature
        else:
            # Iterating a feature presumably walks its subfeature
            # graph -- TODO confirm against tag.Feature.__iter__.
            for subfeature in feature:
                if type == subfeature.type:
                    yield subfeature
    else:
        # No traversal: yield top-level features, optionally filtered
        # by type (a falsy type selects everything).
        if not type or type == feature.type:
            yield feature
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def window(featurestream, seqid, start=None, end=None, strict=True): """ Pull features out of the designated genomic interval. This function uses 0-based half-open intervals, not the 1-based closed intervals used by GFF3. :param featurestream: a stream of feature entries :param seqid: ID of the sequence from which to select features :param start: start of the genomic interval :param end: end of the genomic interval :param strict: when set to :code:`True`, only features completely contained within the interval are selected; when set to :code:`False`, any feature overlapping the interval is selected """
region = None if start and end: region = tag.Range(start, end) for feature in featurestream: if feature.seqid != seqid: continue if region: if strict: if region.contains(feature._range): yield feature else: if region.overlap(feature._range): yield feature else: yield feature
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def directives(entrystream, type=None): """ Pull directives out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only directives of the specified type; set to :code:`None` to retrieve all directives """
for directive in entry_type_filter(entrystream, tag.Directive): if not type or type == directive.type: yield directive
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_driver(f): """Check driver on"""
# Decorator body: ensure at least one driver matches the request
# before invoking the wrapped handler with (request, drivers).
def check_driver(request):
    drivers = get_all_driver()
    # NOTE(review): `filter` is called as (drivers, request), which
    # does not match the builtin's (function, iterable) order -- it is
    # presumably a project-local helper or shadowed name; confirm.
    # Under Python 3 the builtin would return an always-truthy lazy
    # object, defeating the emptiness check below.
    drivers = filter(drivers, request)
    if drivers:
        return f(request, drivers)
    else:
        raise Exception('Driver is not found')
return check_driver
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def strings_to_integers(strings: Iterable[str]) -> Iterable[int]: """ Convert a list of strings to a list of integers. :param strings: a list of string :return: a list of converted integers .. doctest:: [1, 1, 0] """
return strings_to_(strings, lambda x: int(float(x)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def string_to_double_precision_float(s: str) -> float: """ Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float by Python ``float`` function, so I wrote this function to help conversion. For example, :param s: a string denoting a double precision number :return: a Python floating point number .. doctest:: 1e-82 1e-82 8e+233 8e+233 """
first, second, exponential = re.match( "(-?\d*)\.?(-?\d*)d(-?\d+)", s, re.IGNORECASE).groups() return float(first + '.' + second + 'e' + exponential)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def string_to_general_float(s: str) -> float: """ Convert a string to corresponding single or double precision scientific number. :param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number :return: a float or raise an error .. doctest:: 1e-05 Traceback (most recent call last): ValueError: The string '1Dx' does not corresponds to a double precision number! 8e+233 0.1 """
if 'D' in s.upper(): # Possible double precision number try: return string_to_double_precision_float(s) except ValueError: raise ValueError( "The string '{0}' does not corresponds to a double precision number!".format(s)) else: return float(s)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_one_string(pattern: str, s: str, *args): """ Make sure you know only none or one string will be matched! If you are not sure, use `match_one_pattern` instead. :param pattern: :param s: :param args: :return: .. doctest:: 123 Pattern "\d+" not found, or more than one found in string abc! None Pattern "\d+" not found, or more than one found in string abc 123 def 456! None """
try:
    # `match` is either an empty list or a list of string.
    # Tuple-unpacking enforces "exactly one match": zero or several
    # findall results raise ValueError, handled below.
    match, = re.findall(pattern, s)
    if len(args) == 0:
        # If no wrapper argument is given, return directly the matched string
        return match
    elif len(args) == 1:
        # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
        wrapper, = args
        return wrapper(match)
    else:
        raise TypeError(
            'Multiple wrappers are given! Only one should be given!')
except ValueError:
    # NOTE(review): this handler also swallows a ValueError raised by
    # the wrapper itself -- consider narrowing the try block. Falls
    # through to an implicit `return None` after printing.
    print("Pattern \"{0}\" not found, or more than one found in string {1}!".format(
        pattern, s))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_one_pattern(pattern: str, s: str, *args: Optional[Callable], **flags): """ Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no wrapper is given, return the pure matched string. If no match is found, return None. :param pattern: a pattern, can be a string or a regular expression :param s: a string :param args: at most 1 argument can be given :param flags: the same flags as ``re.findall``'s :return: .. doctest:: ['123', '456'] [123, 456] ['123'] Pattern "s" not found in string abc! None ['S', 's'] """
match: Optional[List[str]] = re.findall(pattern, s, **flags) # `match` is either an empty list or a list of strings. if match: if len(args) == 0: # If no wrapper argument is given, return directly the matched string return match elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match wrapper, = args return [wrapper(m) for m in match] else: raise TypeError( 'Multiple wrappers are given! Only one should be given!') else: # If no match is found print("Pattern \"{0}\" not found in string {1}!".format(pattern, s)) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_string_like(iterable: Iterable[object]) -> bool: """ If any element of an iterable is not a string, return `True`. :param iterable: Can be a set, a tuple, a list, etc. :return: Whether any element of an iterable is not a string. .. doctest:: False True """
return all(is_string_like(_) for _ in iterable)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def source_filename(self, docname: str, srcdir: str): """ Get the full filename to referenced image """
docpath = Path(srcdir, docname) parent = docpath.parent imgpath = parent.joinpath(self.filename) # Does this exist? if not imgpath.exists(): msg = f'Image does not exist at "{imgpath}"' raise SphinxError(msg) return imgpath
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def env_updated(self, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, resource ): """ Make images and enter them in Sphinx's output writer """
# Locate the source image next to the document it belongs to.
srcdir = sphinx_app.env.srcdir
source_imgpath = self.source_filename(resource.docname, srcdir)
# Mirror the document's directory layout under the Sphinx build dir;
# mkdir with exist_ok is a no-op when the directory already exists.
target_dir = Path(sphinx_app.outdir, Path(resource.docname).parent)
target_dir.mkdir(parents=True, exist_ok=True)
# Copy the image into place for the output writer.
shutil.copy(source_imgpath, str(target_dir / self.filename))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def catalog(self, table='', column=''): """Lookup the values available for querying."""
lookup_table = self.lookup_table if lookup_table is not None: if table: if column: column = column.upper() return lookup_table[table][column] return lookup_table[table] # Show what methods are available. return self.lookup_methods return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_call(self, table, column='', value='', **kwargs): """Internal method to resolve the API wrapper call."""
if not column: return self.catalog(table) elif not value: return self.catalog(table, column) # We have all the table, column, and value, and now need to # ensure they're all strings and uppercase. column = column.upper() value = str(value).upper() data = self.call_api(table, column, value, **kwargs) if isinstance(data, dict): # Data is actually the first value. data = data.values()[0] return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call_api(self, table, column, value, **kwargs): """Exposed method to connect and query the EPA's API."""
def call_api(self, table, column, value, **kwargs):
    """Exposed method to connect and query the EPA's API.

    Builds the request URL from the base URL, the query triple, and the
    row-range segment, fetches the raw XML, and hands it to
    ``self._format_data`` for conversion into ``output_format``.
    """
    # An explicit output_format kwarg overrides the instance default;
    # it must be popped either way so _number_of_rows never sees it.
    if 'output_format' in kwargs:
        output_format = kwargs.pop('output_format')
    else:
        output_format = self.output_format
    row_range = self._number_of_rows(**kwargs)
    url_string = '/'.join(
        [self.base_url, table, column, quote(value), 'rows', row_range])
    raw_xml = urlopen(url_string).read()
    return self._format_data(output_format, raw_xml)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _number_of_rows(self, start=0, count=100, **kwargs): """Internal method to format the number of rows the EPA API returns."""
first = str(start) last = str(start + count) string_format = ':'.join([first, last]) return string_format
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resource_references(self, resource) -> Mapping[str, List[Any]]: """ Resolve and return reference resources pointed to by object Fields in resource.props can flag that they are references by using the references type. This method scans the model, finds any fields that are references, and returns the reference resources pointed to by those references. Note that we shouldn't get to the point of dangling references. Our custom Sphinx event should raise a references error during the build process (though maybe it is just a warning?) """
def resource_references(self, resource) -> Mapping[str, List[Any]]:
    """Resolve the reference resources pointed to by this resource.

    For every reference field on ``resource.props.references`` each
    listed target label is resolved through ``self.get_reference`` and
    the resulting objects are collected per field name.
    """
    resolved = dict()
    for field_name in resource.props.references:
        # Resolve every target label on this field, e.g.
        # tags: tag1, tag2, tag3
        targets = []
        for target_label in resource.props.references.get(field_name):
            targets.append(self.get_reference(field_name, target_label))
        resolved[field_name] = targets
    return resolved
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self, retry_limit=None): """ Try to connect to Twitter's streaming API. :param retry_limit: The maximum number of retries in case of failures. Default is None (unlimited) :raises :class:`~tweepy.error.TweepyError`: If there's some critical API error """
# Run tweepy stream wrapper_listener = TweepyWrapperListener(listener=self.listener) stream = tweepy.Stream(auth=self.client.tweepy_api.auth, listener=wrapper_listener) retry_counter = 0 while retry_limit is None or retry_counter <= retry_limit: try: retry_counter += 1 if not self.client.config.get('user_stream'): logging.info('Listening to public stream') stream.filter(follow=self.filter.follow, track=self.filter.track) else: if self.filter.follow: logging.warning('Follow filters won\'t be used in user stream') logging.info('Listening to user stream') stream.userstream(track=self.filter.track) except AttributeError as e: # Known Tweepy's issue https://github.com/tweepy/tweepy/issues/576 if "'NoneType' object has no attribute 'strip'" in str(e): pass else: raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _findProteinClusters(protToPeps, pepToProts): """Find protein clusters in the specified protein to peptide mappings. A protein cluster is a group of proteins that are somehow directly or indirectly connected by shared peptides. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value) :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value) :returns: a list of protein clusters, each cluster is a set of proteins """
clusters = list() resolvingProteins = set(protToPeps) while resolvingProteins: protein = resolvingProteins.pop() proteinCluster = set([protein]) peptides = set(protToPeps[protein]) parsedPeptides = set() while len(peptides) != len(parsedPeptides): for peptide in peptides: proteinCluster.update(pepToProts[peptide]) parsedPeptides.update(peptides) for protein in proteinCluster: peptides.update(protToPeps[protein]) clusters.append(proteinCluster) resolvingProteins = resolvingProteins.difference(proteinCluster) return clusters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _findSamesetProteins(protToPeps, proteins=None): """Find proteins that are mapped to an identical set of peptides. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value) :param proteins: iterable, proteins that are tested for having equal evidence. If not specified all proteins are tested :returns: a list of sorted protein tuples that share equal peptide evidence """
def _findSamesetProteins(protToPeps, proteins=None):
    """Find proteins that are mapped to an identical set of peptides.

    :param protToPeps: dict, maps each protein to a set of its peptides
    :param proteins: iterable, proteins that are tested for having equal
        evidence. If not specified all proteins are tested
    :returns: a list of sorted protein tuples that share equal peptide
        evidence
    """
    pool = protToPeps if proteins is None else proteins
    # Group proteins by their (sorted, hashable) peptide evidence.
    byEvidence = {}
    for protein in pool:
        evidenceKey = tuple(sorted(protToPeps[protein]))
        byEvidence.setdefault(evidenceKey, set()).add(protein)
    # Only groups with more than one protein share equal evidence.
    return [tuple(sorted(group)) for group in byEvidence.values()
            if len(group) > 1]