text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_keys(row, line_num): """ Perform some sanity checks on they keys Each key in the row should not be named None cause (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int """
def _parse_keys(row, line_num):
    """Sanity-check the keys of one parsed CSV row.

    A key of ``None`` means the row had more fields than headers
    (an overrun); a missing/empty ``type`` value is also rejected.

    :param row: dict
    :param line_num: int
    """
    link = 'tools.ietf.org/html/rfc4180#section-2'
    has_overrun = any(key is None for key in row)
    if has_overrun:
        fail('You have more fields defined on row number {} '
             'than field headers in your CSV data. Please fix '
             'your request body.'.format(line_num), link)
    elif not row.get('type'):
        fail('Row number {} does not have a type value defined. '
             'Please fix your request body.'.format(line_num), link)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_field_headers(reader): """ Perform some validations on the CSV headers A `type` field header must be present & all field headers must be strings. :param reader: csv reader object """
def _validate_field_headers(reader):
    """Validate the CSV header row.

    Every header must be a string and a ``type`` header must exist.

    :param reader: csv reader object
    """
    link = 'tools.ietf.org/html/rfc4180#section-2'
    names = reader.fieldnames
    for name in names:
        if not isinstance(name, str):
            fail('All headers in your CSV payload must be '
                 'strings.', link)
    if 'type' not in names:
        fail('A type header must be present in your CSV '
             'payload.', link)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __create_xml_request(self, text): """ make xml content from given text """
# create base stucture soap_root = ET.Element('soap:Envelope', { 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema', 'xmlns:soap': 'http://schemas.xmlsoap.org/soap/envelope/', }) body = ET.SubElement(soap_root, 'soap:Body') process_text = ET.SubElement(body, 'ProcessText', { 'xmlns': 'http://typograf.artlebedev.ru/webservices/', }) # add contents ET.SubElement(process_text, 'text').text = text ET.SubElement(process_text, 'entityType').text = str(self._entityType) ET.SubElement(process_text, 'useBr').text = str(self._useBr) ET.SubElement(process_text, 'useP').text = str(self._useP) ET.SubElement(process_text, 'maxNobr').text = str(self._maxNobr) # create tree and write it string = Container() soap = ET.ElementTree(soap_root) soap.write(string, encoding=self._encoding, xml_declaration=True) if PY3: return string.getvalue().decode(self._encoding) return string.getvalue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __parse_xml_response(self, response): """ parse response and get text result """
def __parse_xml_response(self, response):
    """Extract the processed text from a SOAP response string."""
    # Drop everything before the XML declaration (HTTP headers etc.) and
    # strip an empty encoding attribute that minidom cannot parse.
    start = response.find('<?xml')
    cleaned = response[start:].replace(' encoding=""', '')
    dom = xml.dom.minidom.parseString(cleaned)
    result_node = dom.getElementsByTagName('ProcessTextResult')[0]
    return result_node.firstChild.nodeValue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_text(self, text): """ send request with given text and get result """
# escape base char text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') # make xml request body soap_body = self.__create_xml_request(text) # make total request length = len(soap_body.encode('UTF-8')) if PY3 else len(soap_body) soap_request = self.SOAP_REQUEST.format( length=length, host=self.HOST, content=soap_body) if PY3: # convert to bytes soap_request = soap_request.encode(self._encoding) # send request use soket connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connector.settimeout(self._timeout) connector.connect((self.HOST, 80)) connector.sendall(soap_request) # call for response response = b'' buf = '0' while len(buf): buf = connector.recv(8192) response += buf connector.close() if PY3: # convert to str response = response.decode() # parse response text_result = self.__parse_xml_response(response) # back replace and return return text_result.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def try_process_text(self, text): """ safe process text if error - return not modified text """
def try_process_text(self, text):
    """Best-effort wrapper: process ``text``, returning it unmodified on
    network or XML parse failures (and for empty input)."""
    if not text:
        return text
    recoverable = (socket.gaierror, socket.timeout, xml.parsers.expat.ExpatError)
    try:
        return self.process_text(text)
    except recoverable:
        return text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _set_align(self, orientation, value):
    '''Validating setter for alignment attributes.

    Diagnosing a bad value here is far easier than debugging odd
    alignment later because we silently accepted garbage.
    '''
    axis = orientation[0]
    permitted = getattr(self, '_possible_{}aligns'.format(axis))
    known = getattr(self, '_all_{}aligns'.format(axis))
    if value not in permitted:
        kind = 'non-permitted' if value in known else 'non-existant'
        raise ValueError(
            "Can't set {} {} alignment {!r} on element {!r}".format(
                kind, orientation, value, self))
    setattr(self, '_{}align'.format(axis), value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _populate_lines(self, block, terminal, styles, default_esc_seq):
    '''Yield lines ready for the terminal.

    Lines carrying a ``populate`` method are formatting placeholders and
    are resolved against the terminal/styles; plain lines just get the
    default escape sequence prepended.
    '''
    for raw_line in block:
        if hasattr(raw_line, 'populate'):
            yield raw_line.populate(terminal, styles, default_esc_seq)
        else:
            yield default_esc_seq + raw_line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ascii_find_urls(bytes, mimetype, extra_tokens=True): """ This function finds URLs inside of ASCII bytes. """
def _ascii_find_urls(bytes, mimetype, extra_tokens=True):
    """Find URLs inside ASCII bytes by delegating to the tokenizer."""
    return _tokenize(bytes, mimetype, extra_tokens=extra_tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pdf_find_urls(bytes, mimetype): """ This function finds URLs inside of PDF bytes. """
def _pdf_find_urls(bytes, mimetype):
    """Find URLs inside PDF bytes.

    :param bytes: raw PDF content (``bytes``)
    :param mimetype: unused, kept for signature parity with sibling finders
    :returns: list of URL byte strings (always a list, possibly empty)
    """
    # Start with only the ASCII bytes. Limit it to 12+ character strings.
    # Narrowed from a bare ``except:`` -- only swallow real runtime errors,
    # not KeyboardInterrupt/SystemExit.
    try:
        ascii_bytes = b' '.join(
            re.compile(rb'[\x00\x09\x0A\x0D\x20-\x7E]{12,}').findall(bytes))
        ascii_bytes = ascii_bytes.replace(b'\x00', b'')
    except Exception:
        return []
    urls = []
    # Find the embedded text sandwiched between [ ].
    embedded_text = set(re.compile(
        rb'(\[(\([\x20-\x27\x2A-\x7E]{1,3}\)[\-\d]*){5,}\])').findall(ascii_bytes))
    # Get the text inside the parentheses. This catches URLs embedded in the
    # text of the PDF that don't use the normal "URI/URI()>>" method.
    for match in embedded_text:
        text = match[0]
        parentheses_text = b''.join(re.compile(rb'\((.*?)\)').findall(text))
        urls.append(parentheses_text)
    # Find any URLs that use the "URI/URI()>>" method.
    urls += re.compile(rb'\/URI\s*\((.*?)\)\s*>>').findall(ascii_bytes)
    if urls:
        # PDF URLs escape certain characters. Remove the escapes (backslashes)
        # so we get the original URL back.
        urls = [u.replace(b'\\', b'') for u in urls]
    # BUGFIX: previously fell off the end and returned None when no URLs
    # were found; always return a list.
    return urls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid(url, fix=True): """ Returns True if this is what we consider to be a valid URL. A valid URL has: * http OR https scheme * a valid TLD If there is no scheme, it will check the URL assuming the scheme is http. Returns False if the URL is not valid. """
def is_valid(url, fix=True):
    """Return True if ``url`` looks like a valid http/https/ftp URL.

    A valid URL has an http/https (or ftp) scheme and a valid TLD; when
    there is no scheme and ``fix`` is true, http is assumed. Any internal
    error makes the function return False.
    """
    try:
        # Convert the url to a string if we were given it as bytes.
        if isinstance(url, bytes):
            url = url.decode('ascii', errors='replace')
        # Hacky way to deal with URLs that have a username:password notation.
        user_pass_url = ''
        # Check for no scheme and assume http.
        split_url = urlsplit(url)
        # If there is no scheme, there is a higher chance that this might not
        # actually be a URL (e.g. something URL-ish pulled out of random
        # bytes), so exclude candidates with unusual characters.
        if not split_url.scheme:
            invalid_chars = ['\'']
            if any(c in url for c in invalid_chars):
                return False
        # Append the http scheme to the URL if it doesn't have any scheme.
        if fix and not split_url.scheme:
            split_url = urlsplit('http://{}'.format(url))
        # Edge case from find_urls: things like http://google.com#default#userData.
        if split_url.netloc and not split_url.path and split_url.fragment:
            return False
        # Strip a port number from the netloc so IP-address checks work.
        if ':' in split_url.netloc:
            netloc = split_url.netloc.split(':')[0]
        else:
            netloc = split_url.netloc
        # Make sure the URL doesn't have a \ character in it.
        if '\\' in url:
            return False
        # Domain names may only contain a-z, 0-9, -, . (plus : and @ for
        # user/pass or port, given how urlsplit works).
        if re.compile(r'([^a-zA-Z0-9\-\.\:\@]+)').findall(split_url.netloc):
            return False
        # Check if the valid URL conditions are now met.
        if split_url.scheme == 'http' or split_url.scheme == 'https' or split_url.scheme == 'ftp':
            # Edge case: username:password notation -- strip it and re-split.
            if ':' in split_url.netloc and '@' in split_url.netloc:
                user_pass = re.compile(r'(.*?:.*?@)').findall(split_url.netloc)[0]
                user_pass_url = url.replace(user_pass, '')
                split_url = urlsplit(user_pass_url)
                netloc = split_url.netloc
            # Check the netloc. Check if it is an IP address.
            try:
                ipaddress.ip_address(netloc)
                return True
            # If we got an exception, it must be a domain name.
            except:
                # Hacky way to pick which version of the URL to check.
                if user_pass_url:
                    url_to_check = user_pass_url
                else:
                    url_to_check = url
                # The tld package cannot handle FTP URLs; pretend it's http.
                if split_url.scheme == 'ftp':
                    url_to_check = url_to_check.replace('ftp', 'http')
                # Check the URL for a valid TLD.
                res = get_tld(url_to_check, fix_protocol=True, as_object=True)
                # tld considers single words (like "is") valid domains, so
                # require an actual suffix; also reject netlocs with chars
                # outside - . 0-9 A-Z a-z (weeds out e-mail-ish garbage).
                if res.fld and res.tld and res.domain:
                    if all(ord(c) == 45 or ord(c) == 46 or (48 <= ord(c) <= 57)
                           or (65 <= ord(c) <= 90) or (97 <= ord(c) <= 122)
                           for c in netloc):
                        # Finally, require the whole URL to be printable ASCII.
                        if all(32 <= ord(c) <= 126 for c in url):
                            return True
        # Return False by default.
        return False
    except:
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_to_one(self, value): """ Check if the to_one should exist & casts properly """
def validate_to_one(self, value):
    """Validate a to-one relationship: type-cast the rid and, unless
    existence checks are skipped, ensure the target actually loads."""
    if value.rid:
        if self.typeness is int:
            validators.validate_int(value)
        if not self.skip_exists and not value.load():
            raise ValidationError(self.messages['exists'])
    return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_body(self): """ Return a HTTPStatus compliant body attribute Be sure to purge any unallowed properties from the object. TIP: At the risk of being a bit slow we copy the errors instead of mutating them since they may have key/vals like headers that are useful elsewhere. """
def get_body(self):
    """Return an HTTPStatus compliant body attribute.

    Purges any non-allowed properties from each error object. The errors
    are deep-copied rather than mutated because they may carry key/vals
    (like headers) that are useful elsewhere.

    :returns: JSON string of the form ``{"errors": [...]}``
    """
    body = copy.deepcopy(self.errors)
    for error in body:
        # BUGFIX: snapshot the keys -- deleting from a dict while iterating
        # its live keys() view raises RuntimeError on Python 3.
        for key in list(error.keys()):
            if key not in self.ERROR_OBJECT_FIELDS:
                del error[key]
    return json.dumps({'errors': body})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_headers(self): """ Return a HTTPStatus compliant headers attribute FIX: duplicate headers will collide terribly! """
def get_headers(self):
    """Return an HTTPStatus compliant headers dict.

    FIX: duplicate headers will collide terribly!
    """
    headers = {'Content-Type': goldman.JSON_MIMETYPE}
    per_error = (err['headers'] for err in self.errors if 'headers' in err)
    for extra in per_error:
        headers.update(extra)
    return headers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_status(self): """ Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code. """
def get_status(self):
    """Return an HTTPStatus compliant status attribute.

    Per the JSON API spec, when the errors carry different status codes a
    generic one must be chosen for the actual HTTP response: 500 if any
    error is a server error, otherwise 400.

    :returns: falcon-style status string
    """
    codes = [error['status'] for error in self.errors]
    if all(code == codes[0] for code in codes):
        return codes[0]
    # BUGFIX: the generic code used to depend on whichever error happened
    # to be first, so a mix like ['400', '500'] hid the server error.
    if any(code.startswith('5') for code in codes):
        return falcon.HTTP_500
    return falcon.HTTP_400
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize(self, parts): """ Invoke the RFC 2388 spec compliant normalizer :param parts: the already vetted & parsed FieldStorage objects :return: normalized dict """
def normalize(self, parts):
    """RFC 2388 compliant normalizer.

    :param parts: the already vetted & parsed FieldStorage objects
    :return: normalized dict for the single uploaded part
    """
    first = parts.list[0]
    mimetype = first.type
    return {
        'content': first.file.read(),
        'content-type': mimetype,
        'file-ext': extensions.get(mimetype),
        'file-name': first.filename,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, mimetypes): """ Invoke the RFC 2388 spec compliant parser """
def parse(self, mimetypes):
    """Invoke the RFC 2388 spec compliant parser.

    Validates the top-level content type, parses the multipart body via
    ``cgi.FieldStorage``, rejects empty or multi-part uploads, and
    delegates per-part validation to ``_parse_part``.
    """
    self._parse_top_level_content_type()
    link = 'tools.ietf.org/html/rfc2388'
    # cgi.FieldStorage does the multipart/form-data heavy lifting.
    parts = cgi.FieldStorage(
        fp=self.req.stream,
        environ=self.req.env,
    )
    if not parts:
        self.fail('A payload in the body of your request is required '
                  '& must be encapsulated by the boundary with proper '
                  'headers according to RFC 2388', link)
    elif len(parts) > 1:
        # Only single-file uploads are supported for now.
        self.fail('Currently, only 1 upload at a time is allowed. Please '
                  'break up your request into %s individual requests & '
                  'retry' % len(parts), link)
    else:
        self._parse_part(parts.list[0], mimetypes)
    return parts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def covered_interval(bin): """ Given a bin number `bin`, return the interval covered by this bin. :arg int bin: Bin number. :return: Tuple of `start, stop` being the zero-based, open-ended interval covered by `bin`. :rtype: tuple(int) :raise OutOfRangeError: If bin number `bin` exceeds the maximum bin number. """
def covered_interval(bin):
    """Return the zero-based, open-ended ``(start, stop)`` interval covered
    by bin number ``bin``.

    :raise OutOfRangeError: if ``bin`` exceeds the maximum bin number.
    """
    if bin < 0 or bin > MAX_BIN:
        raise OutOfRangeError(
            'Invalid bin number %d (maximum bin number is %d)'
            % (bin, MAX_BIN))
    shift = SHIFT_FIRST
    for offset in BIN_OFFSETS:
        if offset <= bin:
            start = (bin - offset) << shift
            return start, start + (1 << shift)
        shift += SHIFT_NEXT
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stitchModules(module, fallbackModule): """ complete missing attributes with those in fallbackModule imagine you have 2 modules: a and b a is some kind of an individualised module of b - but will maybe not contain all attributes of b. in this case a should use the attributes from b # what we now want is to all all missing attributes from b to a: individual 1 standard 2 """
def stitchModules(module, fallbackModule):
    """Complete ``module`` with attributes from ``fallbackModule``.

    ``module`` is an individualised variant of ``fallbackModule`` and may
    be missing some attributes; copy over every attribute it lacks while
    leaving its own attributes untouched.
    """
    for name, attr in fallbackModule.__dict__.items():
        module.__dict__.setdefault(name, attr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getTarget(self, iid):
    '''Return a dict with the name and path of target ``iid``, or None
    when no such target exists.'''
    query = 'select name, path from {} where _id=?'.format(self.TABLE_ITEMS)
    row = self.db.execute(query, (iid,)).fetchone()
    if not row:
        return None
    name, path = row
    return {'name': name, 'path': path}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def insertTarget(self, name, path):
    '''Insert a new target into the vault database.

    Returns the id of the created target, or None when the insert
    violates an integrity constraint (e.g. duplicate name).
    '''
    query = 'insert into {}(name, path) values (?,?);'.format(self.TABLE_ITEMS)
    try:
        new_id = self.db.execute(query, (name, path)).lastrowid
        self.db.commit()
        return new_id
    except sqlite3.IntegrityError:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def removeTarget(self, iid):
    '''Remove target ``iid`` from the vault database.

    Returns True when a row was deleted, False otherwise.
    '''
    query = 'delete from {} where _id=?'.format(self.TABLE_ITEMS)
    deleted = self.db.execute(query, (iid,)).rowcount > 0
    if deleted:
        self.db.commit()
    return deleted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def listTargets(self):
    '''Return a list of ``(id, name, path)`` tuples for every item
    secured in the vault.'''
    query = 'select * from {}'.format(self.TABLE_ITEMS)
    rows = self.db.execute(query)
    return [(row_id, name, path) for row_id, name, path in rows]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pre_create(sender, model): """ Callback before creating any new model Identify the creator of the new model & set the created timestamp to now. """
def pre_create(sender, model):
    """Callback before creating any new model.

    Stamps the model with the creation time (UTC now) and the identity
    of the creator taken from the current session.
    """
    timestamp = dt.utcnow()
    model.created = timestamp
    model.creator = goldman.sess.login
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def x509_from_ecdsap256_key_pair(pub_key, priv_key, common_name): """ Creates a self-signed x509 certificate for a common name and ECDSAP256 key pair. :pub_key: an ECDSAP256PublicKey instance :priv_key: an ECDSAP256PrivateKey instance :common_name: an XTTIdentity instance :returns: the certificate as a byte string """
def x509_from_ecdsap256_key_pair(pub_key, priv_key, common_name):
    """Create a self-signed x509 certificate for a common name and an
    ECDSAP256 key pair.

    :pub_key: an ECDSAP256PublicKey instance
    :priv_key: an ECDSAP256PrivateKey instance
    :common_name: an XTTIdentity instance
    :returns: the certificate as a byte string
    """
    buf_len = _lib.xtt_x509_certificate_length()
    buf = _ffi.new('unsigned char[]', buf_len)
    rc = _lib.xtt_x509_from_ecdsap256_keypair(
        pub_key.native, priv_key.native, common_name.native, buf, len(buf))
    if rc != RC.SUCCESS:
        raise error_from_code(rc)
    return _ffi.buffer(buf)[:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def asn1_from_ecdsap256_private_key(priv_key, pub_key): """ Returns the ASN.1 encoding of an ECDSAP256 private key. :priv_key: an ECDSAP256PrivateKey instance :returns: the ASN.1 encoding as a byte string """
def asn1_from_ecdsap256_private_key(priv_key, pub_key):
    """Return the ASN.1 encoding of an ECDSAP256 private key.

    :priv_key: an ECDSAP256PrivateKey instance
    :pub_key: an ECDSAP256PublicKey instance
    :returns: the ASN.1 encoding as a byte string
    """
    buf_len = _lib.xtt_asn1_private_key_length()
    buf = _ffi.new('unsigned char[]', buf_len)
    rc = _lib.xtt_asn1_from_ecdsap256_private_key(
        priv_key.native, pub_key.native, buf, len(buf))
    if rc != RC.SUCCESS:
        raise error_from_code(rc)
    return _ffi.buffer(buf)[:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smart_fit(image, fit_to_width, fit_to_height): """ Proportionally fit the image into the specified width and height. Return the correct width and height. """
def smart_fit(image, fit_to_width, fit_to_height):
    """Proportionally fit ``image`` into the given width and height.

    Returns the scaled ``(width, height)``; degenerate (zero-sized)
    images simply get the target box back.
    """
    src_width, src_height = image.size
    if src_width == 0 or src_height == 0:
        return (fit_to_width, fit_to_height)
    width_ratio = float(fit_to_width) / float(src_width)
    height_ratio = float(fit_to_height) / float(src_height)
    # The smaller ratio is the binding constraint.
    if width_ratio < height_ratio:
        return fit_to_width, int(round(width_ratio * src_height))
    return int(round(height_ratio * src_width)), fit_to_height
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(image, size, *args, **kwargs): """ Automatically crop the image based on image gravity and face detection """
def process(image, size, *args, **kwargs):
    """Automatically crop the image based on image gravity and face
    detection."""
    from autodetect import smart_crop
    box_width, box_height = AutoCrop.parse_size(image, size)
    scaled_size, rect = smart_crop(box_width, box_height, image.filename)
    resized = image.resize(scaled_size, Image.ANTIALIAS)
    return resized.crop(tuple(rect))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def version_by_import(self, module_name): # type: (str) ->Dict[str,str] """ This is slow & if running against random code, dangerous Sometimes apps call exit() in import if conditions not met. :param module_name: :return: """
def version_by_import(self, module_name):
    # type: (str) ->Dict[str,str]
    """Import ``module_name`` and report its ``__version__``, if any.

    Slow and, against random code, dangerous: some modules run side
    effects (even ``exit()``) at import time.
    """
    if not module_name:
        return {}
    try:
        module = __import__(module_name)
    except (ModuleNotFoundError, FileNotFoundError):
        # Not importable from the python path / execution folder.
        return {}
    missing = object()
    version = getattr(module, "__version__", missing)
    if version is missing:
        return {}
    return {"module import": version}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initrepo(repopath, bare, shared): """ Initialize an activegit repo. Default makes base shared repo that should be cloned for users """
def initrepo(repopath, bare, shared):
    """Initialize an activegit repo.

    Default makes base shared repo that should be cloned for users.
    """
    activegit.ActiveGit(repopath, bare=bare, shared=shared)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clonerepo(barerepo, userrepo): """ Clone a bare base repo to a user """
def clonerepo(barerepo, userrepo):
    """Clone a bare base repo into a user's working repo."""
    git.clone(barerepo, userrepo)
    activegit.ActiveGit(userrepo)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def example(script, explain, contents, requirements, output, outputfmt, details): """Prints the example help for the script."""
def example(script, explain, contents, requirements, output, outputfmt, details):
    """Prints the example help for the script."""
    blank()
    # Banner: script name underlined with '='.
    cprint(script.upper(), "yellow")
    cprint(''.join(["=" for i in range(70)]) + '\n', "yellow")
    cprint("DETAILS", "blue")
    std(explain + '\n')
    cprint(requirements, "red")
    cprint(output, "green")
    blank()
    # Optional free-form details section.
    # NOTE(review): original indentation was lost; blank() placement inside
    # this branch is assumed -- confirm against upstream.
    if details != "":
        std(details)
        blank()
    cprint("OUTPUT FORMAT", "blue")
    std(outputfmt)
    blank()
    cprint("EXAMPLES", "blue")
    # Each example is a (preamble, code, postscript) triple.
    for i in range(len(contents)):
        pre, code, post = contents[i]
        std("{}) {}".format(i + 1, pre))
        cprint("  " + code, "cyan")
        if post != "":
            std('\n' + post)
    blank()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def EntryPoints(registry, **kwargs): """Returns an object to use as entry point when calling ``registry.solve_resource``. When calling ``registry.solve_resource`` on a "root" resource, we don't have any value. This function will create a object to use as a first value and is used to specify which entry points are allowed at the first level of a dataql query. Example ------- '2015-06-01' '2015-06-02' Notes ----- The name of this function is intentionally made to resemble a class, as it returns an instance of a class named ``EntryPoints``. """
def EntryPoints(registry, **kwargs):
    """Build and register an ``EntryPoints`` instance for ``registry``.

    Each keyword argument becomes an attribute of a dynamically created
    ``EntryPoints`` class; these are the only entry points allowed at the
    first level of a dataql query. Named like a class on purpose: it
    returns an instance of a class named ``EntryPoints``.
    """
    # Functions become staticmethods: the class will hold them and they
    # must not receive an implicit ``self``/``cls`` argument.
    attrs = {}
    for name, value in kwargs.items():
        attrs[name] = staticmethod(value) if isfunction(value) else value
    klass = type('EntryPoints', (BaseEntryPoints, ), attrs)
    registry.register(klass, kwargs.keys())
    return klass()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(self, source, attributes=None, allow_class=False, allow_subclasses=True, propagate_attributes=True, inherit_attributes=True): """Register a source class with its attributes. Arguments --------- source : type Must be a class for which we want to allow access to only the given attributes. attributes : iterable[str] / Attributes, optional A list (or other iterable) of string representing the names of the allowed attributes from the source. Can also be an ``Attributes`` instance. To allow all attributes, you must pass an ``Attributes`` instance with ``allow_all=True``. allow_class : boolean, default ``False`` If set to ``True``, the source apply not only to instances of the source class, but also to the class itself. allow_subclasses : boolean, default ``True`` When ``True``, if an instance of a subclass is used without defined source, this source will be used. propagate_attributes : boolean, default ``True`` When ``True``, all the attributes of this source will be propagated to subclasses of this source (except if the subclass has ``inherit_attributes`` set to ``False``. When ``False``, subclasses will have to declare their own attributes. inherit_attributes : boolean, default ``True`` When ``True``, if the source class has a parent class in the registry, it will inherits its attributes if it has ``propagate_attributes`` set to ``True``. When ``False``, it has to declare its own attributes Raises ------ dataql.solvers.exception.AlreadyRegistered If the source class is already registered. Example ------- Traceback (most recent call last): 1 Traceback (most recent call last): 1 True """
if source in self.sources: raise AlreadyRegistered(self, source) # Inherit attributes from parent classes parent_sources = set() if inherit_attributes: bases = source.__bases__ if isinstance(source, type) else source.__class__.__bases__ for klass in bases: if klass in self.sources and self.sources[klass].propagate_attributes: parent_sources.add(self.sources[klass]) self.sources[source] = self.Source( source, attributes, allow_class, allow_subclasses, propagate_attributes, inherit_attributes, parent_sources ) # Propagate attributes to existing subclasses if propagate_attributes: for src in self.sources.values(): if src.source != source and src.inherit_attributes\ and issubclass(src.source, source): src.parent_source.add(self.sources[source])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_resource_solvers(self, resource): """Returns the resource solvers that can solve the given resource. Arguments --------- resource : dataql.resources.Resource An instance of a subclass of ``Resource`` for which we want to get the solver classes that can solve it. Returns ------- list The list of resource solvers instances that can solve the given resource. Raises ------ dataql.solvers.exceptions.SolverNotFound When no solver is able to solve the given resource. Example ------- [<AttributeSolver>] [<ListSolver>] Traceback (most recent call last): """
def get_resource_solvers(self, resource):
    """Return the resource solver instances able to solve ``resource``.

    :raise dataql.solvers.exceptions.SolverNotFound: when no solver can
        solve the given resource.
    """
    matching = [cls for cls in self.resource_solver_classes if cls.can_solve(resource)]
    if not matching:
        raise SolverNotFound(self, resource)
    cache = self._resource_solvers_cache
    solvers = []
    for cls in matching:
        # Instantiate lazily, once per solver class.
        if cls not in cache:
            cache[cls] = cls(self)
        solvers.append(cache[cls])
    return solvers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_filter_solvers(self, filter_): """Returns the filter solvers that can solve the given filter. Arguments --------- filter : dataql.resources.BaseFilter An instance of the a subclass of ``BaseFilter`` for which we want to get the solver classes that can solve it. Returns ------- list The list of filter solvers instances that can solve the given resource. Raises ------ dataql.solvers.exceptions.SolverNotFound When no solver is able to solve the given filter. Example ------- [<FilterSolver>] Traceback (most recent call last): """
def get_filter_solvers(self, filter_):
    """Return the filter solver instances able to solve ``filter_``.

    :raise dataql.solvers.exceptions.SolverNotFound: when no solver can
        solve the given filter.
    """
    matching = [cls for cls in self.filter_solver_classes if cls.can_solve(filter_)]
    if not matching:
        raise SolverNotFound(self, filter_)
    cache = self._filter_solvers_cache
    solvers = []
    for cls in matching:
        # Instantiate lazily, once per solver class.
        if cls not in cache:
            cache[cls] = cls(self)
        solvers.append(cache[cls])
    return solvers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_resource(self, value, resource): """Solve the given resource for the given value. The solving is done by the first resource solver class that returns ``True`` when calling its ``can_solve`` method for the given resource, and that doesn't raise a ``CannotSolve`` exception. Arguments --------- value : ? A value to be solved with the given resource. resource : dataql.resources.Resource An instance of a subclass of ``Resource`` to be solved with the given value. Returns ------- The solved result. Raises ------ dataql.solvers.exceptions.SolveFailure If no solvers were able to solve the resource. This happen if a solver says that it can solve a resource (by returning ``True`` when calling its ``can_solve`` method, but raises a ``CannotSolve`` exception during solving). Example ------- 1 # Create an object from which we'll want an object (``date``) and a list (``dates``) {'day': 1, 'month': 6, 'year': 2015} [[1, 6, 2015], [2, 6, 2015], [3, 6, 2015]] '2015-06-02' # List of fields ['2015-06-02', '2015-06-03'] # List of objects [{'date': '2015-06-02'}, {'date': '2015-06-03'}] # List of list [['2015-06-01', {'day': 1, 'month': 6, 'year': 2015}], ['2015-06-03', {'day': 3, 'month': 6, 'year': 2015}]] # Test the dict-like approach 1 Traceback (most recent call last): 3 Traceback (most recent call last): # Example of ``SolveFailure`` exception. Traceback (most recent call last): dataql.solvers.exceptions.SolveFailure: Unable to solve `<Field[fromtimestamp]>`. """
def solve_resource(self, value, resource):
    """Solve ``resource`` for ``value`` using the first capable solver.

    :raise dataql.solvers.exceptions.SolveFailure: if every candidate
        solver raised ``CannotSolve``.
    """
    for candidate in self.get_resource_solvers(resource):
        try:
            return candidate.solve(value, resource)
        except CannotSolve:
            # This solver claimed capability but gave up; try the next.
            pass
    raise SolveFailure(self, resource, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_filter(self, value, filter_): """Solve the given filter for the given value. The solving is done by the first filter solver class that returns ``True`` when calling its ``can_solve`` method for the given filter, and that doesn't raise a ``CannotSolve`` exception. Arguments --------- value : ? A value to be solved with the given filter. filter_ : dataql.resources.BaseFilter An instance of a subclass of ``dataql.resources.BaseFilter`` to be solved with the given value. Returns ------- The solved result. Raises ------ dataql.solvers.exceptions.SolveFailure If no solvers were able to solve the filter. This happen if a solver says that it can solve a filter (by returning ``True`` when calling its ``can_solve`` method, but raises a ``CannotSolve`` exception during solving). Example ------- 1 '2015-06-01' # Example of ``SolveFailure`` exception. Traceback (most recent call last): dataql.solvers.exceptions.SolveFailure: Unable to solve `<Filter[fromtimestamp]>`. """
for solver in self.get_filter_solvers(filter_): try: return solver.solve(value, filter_) except CannotSolve: continue raise SolveFailure(self, filter_, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_or_update(data, item, value): """ Add or update value in configuration file format used by proftpd. Args: data (str): Configuration file as string. item (str): What option will be added/updated. value (str): Value of option. Returns: str: updated configuration """
data = data.splitlines() # to list of bytearrays (this is useful, because their reference passed to # other functions can be changed, and it will change objects in arrays # unlike strings) data = map(lambda x: bytearray(x), data) # search for the item in raw (ucommented) values conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data) if conf: conf[0][:] = conf[0].strip().split()[0] + " " + value else: # search for the item in commented values, if found, uncomment it comments = filter( lambda x: x.strip().startswith("#") and len(x.split("#")) >= 2 and x.split("#")[1].split() and x.split("#")[1].split()[0] == item, data ) if comments: comments[0][:] = comments[0].split("#")[1].split()[0] + " " + value else: # add item, if not found in raw/commented values data.append(item + " " + value + "\n") return "\n".join(map(lambda x: str(x), data))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def comment(data, what): """ Comments line containing `what` in string `data`. Args: data (str): Configuration file in string. what (str): Line which will be commented out. Returns: str: Configuration file with commented `what`. """
data = data.splitlines() data = map( lambda x: "#" + x if x.strip().split() == what.split() else x, data ) return "\n".join(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_conf_file(): """ Write configuration file as it is defined in settings. """
# Overwrite the configuration file with the packaged default content.
with open(CONF_FILE, "w") as conf_file:
    conf_file.write(DEFAULT_PROFTPD_CONF)

logger.debug("'%s' created.", CONF_FILE)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def integerize(self): """Convert co-ordinate values to integers."""
# Round each coordinate to the nearest whole number, then store as int.
self.x, self.y = int(round(self.x)), int(round(self.y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def floatize(self): """Convert co-ordinate values to floats."""
# Coerce both coordinates to floating point.
self.x, self.y = float(self.x), float(self.y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rotate(self, rad): """ Rotate counter-clockwise by rad radians. Positive y goes *up,* as in traditional mathematics. Interestingly, you can use this in y-down computer graphics, if you just remember that it turns clockwise, rather than counter-clockwise. The new position is returned as a new Point. """
s, c = [f(rad) for f in (math.sin, math.cos)] x, y = (c * self.x - s * self.y, s * self.x + c * self.y) return Point(x, y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rotate_about(self, p, theta): """ Rotate counter-clockwise around a point, by theta degrees. Positive y goes *up,* as in traditional mathematics. The new position is returned as a new Point. """
result = self.clone() result.translate(-p.x, -p.y) result.rotate(theta) result.translate(p.x, p.y) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_points(self, pt1, pt2): """Reset the rectangle coordinates."""
# Normalize the two corners so that left/top always hold the minima
# and right/bottom the maxima, whatever order the points came in.
x1, y1 = pt1.as_tuple()
x2, y2 = pt2.as_tuple()
self.left, self.right = min(x1, x2), max(x1, x2)
self.top, self.bottom = min(y1, y2), max(y1, y2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overlaps(self, other): """ Return true if a rectangle overlaps this rectangle. """
return ( self.right > other.left and self.left < other.right and self.top < other.bottom and self.bottom > other.top )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expanded_by(self, n): """Return a rectangle with extended borders. Create a new rectangle that is wider and taller than the immediate one. All sides are extended by "n" points. """
return Rect(self.left - n, self.top - n, self.right + n, self.bottom + n)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ Before saving, get publication's PubMed metadata if publication is not already in database or if 'redo_query' is True. """
# Records flagged ``no_query`` get a synthetic, non-positive pmid so they
# never collide with real PubMed ids (presumably negative sentinel ids --
# TODO confirm against the model's other users).
if self.no_query:
    if not self.pk or self.pmid > 0:
        try:
            # Next free sentinel: one below the current minimum pmid.
            pmid_min = Publication.objects.all().aggregate(
                models.Min('pmid'))['pmid__min'] - 1
        except:
            # NOTE(review): bare except also hides real DB errors; it is
            # meant for the empty-table case (aggregate returns None).
            self.pmid = 0
        else:
            self.pmid = min(0, pmid_min)
        self.pubmed_url = ''
        self.mini_citation = '{} - {} - {}'.format(
            self.first_author, self.year, self.journal)
elif self.redo_query or not self.pk:
    # New record or forced refresh: fetch metadata from PubMed, keyed by
    # pmid when available, otherwise by the stored PubMed URL.
    if self.pmid:
        query = self.pmid
    else:
        query = self.pubmed_url
    email = ""  # FIX THIS: Use logged-in user's email
    lookup = pubmed_lookup.PubMedLookup(query, email)
    publication = pubmed_lookup.Publication(lookup)
    # Copy every field of interest from the PubMed record onto the model.
    self.pmid = publication.pmid
    self.pubmed_url = publication.pubmed_url
    self.title = strip_tags(publication.title)
    self.authors = publication.authors
    self.first_author = publication.first_author
    self.last_author = publication.last_author
    self.journal = publication.journal
    self.year = publication.year
    self.month = publication.month
    self.day = publication.day
    self.url = publication.url
    self.citation = publication.cite()
    self.mini_citation = publication.cite_mini()
    self.abstract = strip_tags(publication.abstract)
    # One-shot flag: reset so the next save does not re-query.
    self.redo_query = False
super().save(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform_bulk_pubmed_query(self): """ If 'bulk_pubmed_query' contains any content, perform a bulk PubMed query, add the publications to the publication set, and save. """
if self.bulk_pubmed_query:
    failed_queries = []
    # Accept PMIDs separated by whitespace and/or commas.
    pmid_list = re.findall(r'(\d+)(?:[\s,]+|$)', self.bulk_pubmed_query)
    for pmid in pmid_list:
        try:
            p, created = Publication.objects.get_or_create(pmid=pmid)
        except:
            # NOTE(review): bare except treats any error (including
            # unrelated DB failures) as "this PMID failed"; consider
            # narrowing the exception types.
            failed_queries.append(pmid)
        else:
            self.publications.add(p.id)
    if failed_queries:
        # Leave the failed ids in the field so the user can see and
        # retry them; otherwise clear the scratch field.
        failed_queries.sort(key=int)
        self.bulk_pubmed_query = 'FAILED QUERIES: {}'.format(', '.join(failed_queries))
    else:
        self.bulk_pubmed_query = ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_backend(self, backend):
    """Add a RapidSMS backend to this tenant (no-op if already linked)."""
    if backend in self.get_backends():
        # Already attached to this tenant: nothing to do.
        return
    link, _created = BackendLink.all_tenants.get_or_create(backend=backend)
    self.backendlink_set.add(link)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_resources_to_registry(): """ Add resources to the deform registry """
# Clear deform's default JS resources for these widgets so that the
# fanstatic-managed resources below are used instead.
from deform.widget import default_resource_registry
default_resource_registry.set_js_resources("jqueryui", None, None)
default_resource_registry.set_js_resources("datetimepicker", None, None)
default_resource_registry.set_js_resources("custom_dates", None, None)
default_resource_registry.set_js_resources(
    "radio_choice_toggle", None, None
)
default_resource_registry.set_js_resources("checkbox_toggle", None, None)

# Map resource names to fanstatic resources consumed by js.deform.
from js.deform import resource_mapping

# fix missing resource
from js.select2 import select2
resource_mapping['select2'] = select2

# add the datetimepicker
from js.jquery_timepicker_addon import timepicker
resource_mapping['datetimepicker'] = timepicker

# Project-local resources (defined at module level).
resource_mapping['custom_dates'] = custom_dates
resource_mapping['radio_choice_toggle'] = radio_choice_toggle
resource_mapping['checkbox_toggle'] = checkbox_toggle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def childgroup(self, field): """ Return children grouped regarding the grid description """
cols = getattr(self, "cols", self.default_cols) width = self.num_cols / cols for child in field.children: child.width = width res = list(grouper(field.children, cols, fillvalue=None)) return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _childgroup(self, children, grid): """ Stores the children in a list following the grid's structure :param children: list of fields :param grid: a list of list corresponding of the layout to apply to the given children """
result = []
# Index of the next unplaced child.
index = 0
# Hidden inputs are collected and appended as a final extra row.
hidden_fields = []
for row in grid:
    child_row = []
    width_sum = 0
    for width, filled in row:
        width_sum += width
        # Warn (but keep going) when a row is wider than the layout.
        if width_sum > self.num_cols:
            warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
        if isinstance(filled, StaticWidget):
            # Static widgets are placed directly; they consume no child.
            child = filled
            child.width = width
        elif filled:
            try:
                child = children[index]
            except IndexError:
                warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
                break
            # Hidden widgets do not occupy a grid cell: stash them and
            # take the next child for this cell instead.
            if type(child.widget) == deform.widget.HiddenWidget:
                hidden_fields.append(child)
                index += 1
                try:
                    child = children[index]
                except IndexError:
                    warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
                    break
            child.width = width
            index += 1
        else:
            # Empty cell: filler widget keeps the column widths aligned.
            child = VoidWidget(width)
        child_row.append(child)
    if child_row != []:
        result.append(child_row)
# Children not covered by the grid are appended as a trailing row.
# NOTE(review): when index == len(children) this appends an empty list;
# presumably harmless downstream -- confirm with the template.
if index <= len(children):
    result.append(children[index:])
if hidden_fields != []:
    result.append(hidden_fields)
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _childgroup_by_name(self, children, grid): """ Group the children ordering them by name """
# Map child name -> child so the grid can pick them by name; entries are
# popped as they are placed, leaving the unconfigured ones behind.
children = self._dict_children(children)
result = []
for row in grid:
    child_row = []
    # Rows containing only void fillers are dropped entirely.
    row_is_void = True
    width_sum = 0
    for name, width in row:
        width_sum += width
        # Warn (but keep going) when a row is wider than the layout.
        if width_sum > self.num_cols:
            warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
        if isinstance(name, StaticWidget):
            # Static widgets are placed as-is.
            child = name
            child.width = width
            row_is_void = False
        elif name is not None:
            try:
                child = children.pop(name)
                row_is_void = False
            except KeyError:
                # Unknown name: warn and fill the cell with a spacer.
                warnings.warn(u"No node {0} found".format(name))
                child = VoidWidget(width)
            child.width = width
        else:
            # None means an intentionally empty cell.
            child = VoidWidget(width)
        child_row.append(child)
    if not row_is_void:
        result.append(child_row)
# Add the field not configured in the grid
for value in children.values():
    result.append([value])
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def childgroup(self, field): """ Return a list of fields stored by row regarding the configured grid :param field: The original field this widget is attached to """
grid = getattr(self, "grid", None) named_grid = getattr(self, "named_grid", None) if grid is not None: childgroup = self._childgroup(field.children, grid) elif named_grid is not None: childgroup = self._childgroup_by_name(field.children, named_grid) else: raise AttributeError(u"Missing the grid or named_grid argument") return childgroup
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def summary(self):
    '''Build a dict summarising every runned benchmark, keyed by bench.'''
    summary = {}
    for bench in self.runner.runned:
        runs = {}
        for method, results in bench.results.items():
            runs[method] = {
                'name': bench.label_for(method),
                'total': results.total,
                # Average duration of a single run.
                'mean': results.total / bench.times,
            }
        summary[self.key(bench)] = {
            'name': bench.label,
            'times': bench.times,
            'runs': runs,
        }
    return summary
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def end(self):
    '''
    Dump the report into the output file.

    The parent directory is created when missing. While the file is
    open it is exposed as ``self.out`` and handed to ``output()``;
    afterwards ``self.out`` is reset to None.
    '''
    target_dir = os.path.dirname(self.filename)
    if target_dir and not os.path.exists(target_dir):
        os.makedirs(target_dir)
    with open(self.filename, 'w') as out:
        self.out = out
        self.output(out)
        self.out = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def line(self, text=''):
    '''Write ``text`` to the report, followed by a newline character.'''
    self.out.write(text)
    self.out.write('\n')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def with_sizes(self, *headers):
    '''Compute the report summary and attach per-row column widths.

    ``headers`` must be the five column headers, in order:
    class, method, times, total, average. Each summary row gains a
    ``sizes`` list holding the widest string needed per column.
    '''
    if len(headers) != 5:
        raise ValueError('You need to provide this headers: class, method, times, total, average')
    summary = self.summary()
    for row in summary.values():
        # Start from the header widths; columns can only grow.
        sizes = [len(header) for header in headers]
        # Benchmark/Class column
        sizes[0] = max(sizes[0], len(row['name']))
        # Method column
        max_length = max(len(r['name']) for r in row['runs'].values())
        sizes[1] = max(sizes[1], max_length)
        # Times column
        sizes[2] = max(sizes[2], len(str(row['times'])))
        # Float columns
        for idx, field in [(3, 'total'), (4, 'mean')]:
            # Width of the formatted float (self.float renders it).
            float_len = lambda r: len(self.float(r[field]))
            max_length = max(float_len(r) for r in row['runs'].values())
            sizes[idx] = max(sizes[idx], max_length)
        row['sizes'] = sizes
    return summary
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __telnet_event_listener(self, ip, callback): """creates a telnet connection to the lightpad"""
# Open a persistent telnet connection to the lightpad's event port.
tn = telnetlib.Telnet(ip, 2708)
self._last_event = ""
self._telnet_running = True
while self._telnet_running:
    try:
        # Events are terminated by ".\n"; the 5s timeout lets the loop
        # notice when ``_telnet_running`` has been cleared.
        raw_string = tn.read_until(b'.\n', 5)
        if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
            # lightpad sends ".\n" at the end that we need to chop off
            json_string = raw_string.decode('ascii')[0:-2]
            # Only dispatch when the payload differs from the previous
            # event (suppresses duplicates).
            if json_string != self._last_event:
                callback(json.loads(json_string))
                self._last_event = json_string
    except:
        # NOTE(review): bare except silently swallows every error,
        # including JSON/decode failures and connection loss --
        # consider narrowing and logging.
        pass
tn.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_assign(data, varname): """Finds a substring that looks like an assignment. :param data: Source to search in. :param varname: Name of the variable for which an assignment should be found. """
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname)) if len(ASSIGN_RE.findall(data)) > 1: raise PluginError('Found multiple {}-strings.'.format(varname)) if len(ASSIGN_RE.findall(data)) < 1: raise PluginError('No version assignment ("{}") found.' .format(varname)) return ASSIGN_RE.search(data).group(2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_instance_id(self, instance):
    """Return the instance pk, even when a list/tuple of instances was
    passed to RichTextField.

    For sequences, a class-level counter walks through the instances on
    successive calls; it is reset via ``reset_instance_counter_listener``
    when the request finishes.
    """
    if type(instance) in [list, tuple]:
        # Reset the shared counter at the end of the request.
        core_signals.request_finished.connect(receiver=RichTextField.reset_instance_counter_listener)
        if RichTextField.__inst_counter >= len(instance):
            return None
        else:
            # NOTE(review): indexes ``self.instance`` rather than the
            # ``instance`` argument -- presumably the same sequence, but
            # confirm; also the counter is class-level, so concurrent
            # requests would interfere.
            obj_id = self.instance[ RichTextField.__inst_counter ].pk
            RichTextField.__inst_counter += 1
    else:
        obj_id = instance.pk
    return obj_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean(self, value): """ When cleaning field, store original value to SourceText model and return rendered field. @raise ValidationError when something went wrong with transformation. """
# NOTE(review): Python 2 only -- uses ``except X, e`` syntax.
super_value = super(RichTextField, self).clean(value)

if super_value in fields.EMPTY_VALUES:
    # Field was cleared: remove any stored source text for it.
    if self.instance:
        obj_id = self.get_instance_id(self.instance)
        if not obj_id:
            SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor).delete()
        else:
            SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name).delete()
    self.validate_rendered('')
    return ''
text = smart_unicode(value)
if self.instance:
    obj_id = self.get_instance_id(self.instance)
    try:
        # Reuse the existing SourceText when one exists for this object
        # and field, otherwise start a fresh one.
        if not obj_id:
            src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
        else:
            src_text = SourceText.objects.get(content_type=self.ct, object_id=obj_id, field=self.field_name)
            # The stored processor must match the field's configuration.
            assert src_text.processor == self.processor
    except SourceText.DoesNotExist:
        src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
    src_text.content = text
    try:
        rendered = src_text.render()
    except ProcessorError, e:
        raise ValidationError(self.error_messages['syntax_error'])
else:
    # in case of adding new model, instance is not set
    self.instance = src_text = SourceText(
        content_type=self.ct,
        field=self.field_name,
        content=text,
        processor=self.processor
    )
    try:
        rendered = src_text.render()
    except Exception, err:
        raise ValidationError(self.error_messages['syntax_error'])
self.validate_rendered(rendered)
# Remember which fields of the model are rich-text fields.
if not hasattr(self.model, RICH_FIELDS_SET):
    setattr(self.model, RICH_FIELDS_SET, set())
getattr(self.model, RICH_FIELDS_SET).add(self.field_name)
# register the listener that saves the SourceText
#listener = self.post_save_listener(src_text)
signals.post_save.connect(receiver=self.post_save_listener, sender=self.model)
# wrap the text so that we can store the src_text on it
rendered = UnicodeWrapper(rendered)
setattr(rendered, self.src_text_attr, src_text)
return rendered
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def value_from_datadict(self, data, files, name):
    '''Generate a single value from multi-part form data.

    Constructs a W3C date based on values that are set, leaving out
    day and month if they are not present.

    :param data: dictionary of data submitted by the form
    :param files: - unused
    :param name: base name of the form field
    :returns: string value
    '''
    # Default missing keys to '' -- previously ``data.get`` returned
    # None for an absent sub-field, and appending e.g. a month to a
    # missing year raised ``TypeError: unsupported operand`` (None + str).
    y = data.get(self.year_field % name, '')
    m = data.get(self.month_field % name, '')
    d = data.get(self.day_field % name, '')
    # The widget's placeholder text counts as "not set".
    if y == 'YYYY':
        y = ''
    if m == 'MM':
        m = ''
    if d == 'DD':
        d = ''
    date = y
    if m:
        date += '-%s' % m
    if d:
        date += '-%s' % d
    return date
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def render(self, name, value, attrs=None): '''Render the widget as HTML inputs for display on a form. :param name: form field base name :param value: date value :param attrs: - unused :returns: HTML text with three inputs for year/month/day ''' # expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None) year, month, day = 'YYYY', 'MM', 'DD' if value: # use the regular expression to pull out year, month, and day values # if regular expression does not match, inputs will be empty match = W3C_DATE_RE.match(value) if match: date_parts = match.groupdict() year = date_parts['year'] month = date_parts['month'] day = date_parts['day'] year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };') month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };') day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };') # display widget fields in YYYY-MM-DD order to match W3C date format, # and putting required field(s) on the left output = [year_html, month_html, day_html] return mark_safe(u' / \n'.join(output))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def describe(self):
    '''Provide a dictionary with information describing itself.'''
    # Base fields first; any extra parameters are merged on top.
    info = {
        'type': self.name,
        'description': self._description,
    }
    info.update(self.extra_params)
    return info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __build_raw_query(self, routine, parameters): """Return a query that uses raw string-replacement for parameters. The parameters will still be escaped before replaced-into the query (by sqlalchemy). """
parameter_names = [] replacements = {} for i, value in enumerate(parameters): name = 'arg' + str(i) parameter_names.append(name) replacements[name] = value parameter_phrase = ', '.join([('%(' + p + ')s') for p in parameter_names]) query = "CALL " + routine + "(" + parameter_phrase + ")" return (query, replacements)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call(self, routine, *args): """This is a newer, less-verbose interface that calls the old philistine one. This should be used. """
(query, replacements) = self.__build_query(routine, args) return self.__execute_text(query, **replacements)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_resultsets(self, routine, *args): """Return a list of lists of dictionaries, for when a query returns more than one resultset. """
(query, replacements) = self.__build_raw_query(routine, args) # Grab a raw connection from the connection-pool. connection = mm.db.ENGINE.raw_connection() sets = [] try: cursor = connection.cursor() cursor.execute(query, replacements) while 1: #(column_name, type_, ignore_, ignore_, ignore_, null_ok, column_flags) names = [c[0] for c in cursor.description] set_ = [] while 1: row_raw = cursor.fetchone() if row_raw is None: break row = dict(zip(names, row_raw)) set_.append(row) sets.append(list(set_)) if cursor.nextset() is None: break # TODO(dustin): nextset() doesn't seem to be sufficiant to tell the end. if cursor.description is None: break finally: # Return the connection to the pool (won't actually close). connection.close() return sets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, name, namespace=None): """ Find plugin object Parameters name : string A name of the object entry or full namespace namespace : string, optional A period separated namespace. E.g. `foo.bar.hogehoge` Returns ------- instance An instance found Raises ------ KeyError If the named instance have not registered Examples -------- True True True """
if "." in name: namespace, name = name.rsplit(".", 1) caret = self.raw if namespace: for term in namespace.split('.'): if term not in caret: caret[term] = Bunch() caret = caret[term] return caret[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sign(self, request, authheaders, secret): """Returns the signature appropriate for the request. The request is not changed by this function. Keyword arguments: request -- A request object which can be consumed by this API. authheaders -- A string-indexable object which contains the headers appropriate for this signature version. secret -- The base64-encoded secret key for the HMAC authorization. """
mac = hmac.HMAC(secret.encode('utf-8'), digestmod=self.digest) mac.update(self.signable(request, authheaders).encode('utf-8')) digest = mac.digest() return base64.b64encode(digest).decode('utf-8')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_auth_headers(self, authorization): """Parses the authorization headers from the authorization header taken from a request. Returns a dict that is accepted by all other API functions which expect authorization headers in a dict format. Keyword arguments: authorization -- The authorization header of any request. The header must be in a format understood by the signer. """
m = re.match(r'^(?i)Acquia\s+(.*?):(.+)$', authorization) if m is not None: return {"id": m.group(1), "signature": m.group(2)} return {}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self, request, secret): """Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature. This verifies every element of the signature, including headers other than Authorization. Keyword arguments: request -- A request object which can be consumed by this API. secret -- The base64-encoded secret key for the HMAC authorization. """
if request.get_header("Authorization") == "": return False ah = self.parse_auth_headers(request.get_header("Authorization")) if "id" not in ah: return False if "signature" not in ah: return False return ah["signature"] == self.sign(request, ah, secret)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sign_direct(self, request, authheaders, secret): """Signs a request directly with an appropriate signature. The request's Authorization header will change. Keyword arguments: request -- A request object which can be consumed by this API. authheaders -- A string-indexable object which contains the headers appropriate for this signature version. secret -- The base64-encoded secret key for the HMAC authorization. """
sig = self.sign(request, authheaders, secret) return request.with_header("Authorization", "Acquia {0}:{1}".format(authheaders["id"], sig))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def main(directories):
    '''Perform all checks on the API's contained in `directory`.

    Walks every package under ``directories``, imports the modules, then
    runs the Api and Resource checkers over the classes they define.
    Returns the accumulated list of error messages.
    '''
    msg = 'Checking module "{}" from directory "{}" for coding errors.'
    api_checker = ApiChecker()
    resource_checker = ResourceChecker()
    errors = []
    modules = []
    # Import every discovered module so that its Api/Resource subclasses
    # get registered.
    for loader, mname, _ in pkgutil.walk_packages(directories):
        sys.path.append(os.path.abspath(loader.path))
        log.info(msg.format(mname, loader.path))
        modules.append(mname)
        import_module(mname)
    for api in Api:
        # Only check classes that live in one of the imported modules.
        if api.__module__.split('.')[-1] not in modules:
            continue
        log.debug('Anlysing Api class: {}'.format(api.__name__))
        errors.extend(api_checker(api))
    for res in Resource:
        if res.__module__.split('.')[-1] not in modules:
            continue
        log.debug('Anlysing Resource class: {}'.format(res.__name__))
        errors.extend(resource_checker(res))
    else:
        # NOTE(review): ``for ... else`` without a ``break`` always runs,
        # so this logs even when errors were found -- likely unintended.
        log.info('All modules tested, no problem detected.')
    return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def checks(self):
    '''Return an iterator over all ``check_*`` methods of this checker.'''
    # Direct startswith test instead of a named lambda (PEP 8 / E731).
    return (getattr(self, attr) for attr in dir(self)
            if attr.startswith('check_'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_has_docstring(self, api):
    '''An API class must have a docstring.'''
    # Guard clause: documented classes pass silently (returns None).
    if api.__doc__:
        return None
    return ['The Api class "{}" lacks a docstring.'.format(api.__name__)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_has_version(self, api):
    '''An API class must have a `version` attribute.'''
    # Guard clause: classes carrying a version pass silently.
    if hasattr(api, 'version'):
        return None
    return ['The Api class "{}" lacks a `version` attribute.'.format(api.__name__)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_has_path(self, api):
    '''An API class must have a `path` attribute.'''
    # Guard clause: classes carrying a path pass silently.
    if hasattr(api, 'path'):
        return None
    return ['The Api class "{}" lacks a `path` attribute.'.format(api.__name__)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_docstring(self, method):
    '''All methods should have a docstring.'''
    # Documented methods pass silently (returns None).
    if method.__doc__ is not None:
        return None
    return ['Missing docstring for method "{}"'.format(method.__name__)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_return_types(self, method):
    '''Return types must be correct, their codes must match actual use.

    Validates the ``return`` annotation of *method*: it must be a
    list/tuple of 2- or 3-item tuples ``(code, msg[, type])``.  Declared
    status codes are cross-checked against the codes found in the
    method's source, and each payload type must be a Model subclass.

    :param method: handler to inspect; ``method.source`` is assumed to
        hold its source text (attached elsewhere — TODO confirm).
    :return: list of error strings (empty when everything is valid).
    '''
    mn = method.__name__
    retanno = method.__annotations__.get('return', None)
    # Take a look at the syntax
    if not retanno:
        return ['Missing return types for method "{}"'.format(mn)]
    if not isinstance(retanno, (list, tuple)):
        msg = 'Return annotation for method "{}" not tuple nor list'
        return [msg.format(mn)]
    # Every entry must itself be a sequence of 2 or 3 items.
    if (any(map(lambda t: not isinstance(t, (list, tuple)), retanno)) or
            any(map(lambda t: not (2 <= len(t) <= 3), retanno))):
        msg = ('Return values series for "{}" should be composed of '
               '2 or 3-items tuples (code, msg, type).')
        return [msg.format(mn)]
    errors = []
    # Take a look at the codes
    # `declared` comes from the annotation, `actual` is scraped from the
    # method's source text via HTTP_STATUSES_REGEX.
    declared = set([t[0] for t in retanno])
    actual = set(int(s) for s in HTTP_STATUSES_REGEX.findall(method.source))
    if declared != actual:
        if declared.issubset(actual):
            msg = 'Method {} returns undeclared codes: {}.'
            errors.append(msg.format(mn, actual - declared))
        elif actual.issubset(declared):
            msg = 'Method {} declares codes {} that are never used.'
            errors.append(msg.format(mn, declared - actual))
        else:
            # Neither set contains the other: report both wholesale.
            msg = 'Declared {} and Used {} codes mismatch.'
            errors.append(msg.format(declared, actual))
    # Take a look at the types
    # Only 3-tuples carry a payload type; it must be passed as a class
    # (not an instance) and directly subclass Model.
    ret_with_types = filter(lambda t: len(t) == 3, retanno)
    msg = 'Method {} return type for code {} must be class (not instance).'
    msg_mod = 'Method {} return type for code {} must subclass from Model.'
    for code, _, type_ in ret_with_types:
        try:
            if Model not in type_.__bases__:
                errors.append(msg_mod.format(mn, code))
        except AttributeError:
            # Instances have no __bases__ — flag as "not a class".
            errors.append(msg.format(mn, code))
    return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_params_types(self, method):
    '''Types in argument annotations must be instances, not classes.

    Each parameter annotation (other than ``return``) must be a 2-tuple
    ``(param_type, value_type)`` where *param_type* is a valid Ptype and
    *value_type* is a Model instance.  Additional rules: path params may
    not have defaults, and at most one body param is allowed.

    :param method: handler whose ``__annotations__``/``signature`` are
        inspected (``method.signature`` is assumed attached elsewhere —
        TODO confirm).
    :return: list of error strings (empty when everything is valid).
    '''
    mn = method.__name__
    annos = dict(method.__annotations__)
    errors = []
    # Take a look at the syntax
    msg_tuple = 'Parameter {} in method {} is not annotated with a tuple.'
    msg_ptype = 'Parameter {} in method {} is not a valid Ptype.'
    msg_mod = 'Type for param {} in method {} must descend from Model.'
    msg_cls = 'Type for param {} in method {} must be instance (not class)'
    bodies = []
    for pname, anno in annos.items():
        if pname == 'return':
            # Return annotation is handled by check_return_types.
            continue
        elif len(anno) != 2:
            errors.append(msg_tuple.format(pname, mn))
        else:
            param_type, value_type = anno
            if param_type not in Ptypes:
                errors.append(msg_ptype.format(pname, mn))
            elif param_type == 'body':
                # Remember body params: only one is allowed (checked below).
                bodies.append(pname)
            elif param_type == 'path':
                # Path params must be mandatory: a default would make the
                # URL segment optional, which is invalid.
                default = method.signature.parameters[pname].default
                if default is not inspect._empty:
                    # NOTE(review): "prameter" typo in the user-facing
                    # message below — left as-is (doc-only change).
                    msg = ('Path prameter {} in method {} has a default '
                           'value ({}) that would make it optional (which '
                           'is wrong!)')
                    errors.append(msg.format(pname, mn, default))
            # A class object has __bases__; instances do not.
            if hasattr(value_type, '__bases__'):
                errors.append(msg_cls.format(pname, mn))
            elif Model not in value_type.__class__.__bases__:
                errors.append(msg_mod.format(pname, mn))
    # Only one body parameter!
    if len(bodies) > 1:
        msg = 'Too many "Ptypes.body" params {} for method {} (max=1).'
        errors.append(msg.format(bodies, mn))
    return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_path_consistency(self, resource):
    '''Path arguments must be consistent for all methods.

    Compares the path variables declared in ``resource.subpath`` with
    the path-annotated parameters of every callback; any mismatch is
    reported.

    :param resource: Resource class providing ``subpath`` and
        ``callbacks``.
    :return: list of error strings (empty when everything matches).
    '''
    # BUGFIX: the original message read 'path variables {})' — the
    # opening parenthesis was missing.
    msg = ('Method "{}" path variables ({}) do not conform with the '
           'resource subpath declaration ({}).')
    errors = []
    # If subpath is not set, it will be detected by another checker
    if resource.subpath is None:
        return errors
    declared = sorted(self.path_params_regex.findall(resource.subpath))
    for callback in resource.callbacks:
        actual = sorted(utils.filter_annotations_by_ptype(
            callback, Ptypes.path))
        if declared == actual:
            continue
        errors.append(msg.format(
            '{}.{}'.format(resource.__name__, callback.__name__),
            actual,
            resource.subpath))
    return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_no_multiple_handlers(self, resource):
    '''The same verb cannot be repeated on several endpoints.

    :param resource: Resource class providing ``callbacks``, each of
        which carries a ``swagger_ops`` iterable of HTTP verbs.
    :return: list of error strings, one per duplicated verb occurrence.
    '''
    # Use a set for O(1) membership tests (the original used a list,
    # giving O(n) per lookup); iteration order of callbacks/ops is
    # preserved, so the emitted errors are identical.
    seen = set()
    errors = []
    msg = 'HTTP verb "{}" associated to more than one endpoint in "{}".'
    for method in resource.callbacks:
        for op in getattr(method, 'swagger_ops'):
            if op in seen:
                errors.append(msg.format(op, resource.__name__))
            else:
                seen.add(op)
    return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init(req, model): """ Return an array of fields to include. """
# Relationships declared on the model; each include must name one.
rels = model.relationships
# The `include` query param may be absent — normalise to an empty list.
params = req.get_param_as_list('include') or []
# Matching is case-insensitive, so lower-case everything up front.
params = [param.lower() for param in params]
for param in params:
    # Reject dotted (nested) includes and unknown relationships.
    _validate_no_nesting(param)
    _validate_rels(param, rels)
return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_endpoint(api_key, query, offset, type): """Return endpoint URL for the relevant search type. The base API endpoint only varies by type of search requested, of which there are two: 1) domain search and 2) email search. Each search type requires different parameters, though api_key is common between them. Note: if both a url and email address are provided the endpoint returned will default to the domain search as it is considered to be the primary function of the API and thus takes precedent. :param api_key: Secret client API key. :param query: URL or email address on which to search. :param offset: Specifies the number of emails to skip. :param type: Specifies email type (i.e. generic or personal). """
query_type = get_query_type(query)
# Only domain and email searches are supported by the API.
if query_type not in ('domain', 'email'):
    raise ex.InvalidQueryStringException('Invalid query string')
if query_type == 'domain':
    # Domain search takes precedence and uses every parameter.
    return DOMAIN_URL.format(query, api_key, offset, type)
else:
    # Email search only needs the query and the API key.
    return EMAIL_URL.format(query, api_key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def incrementName(nameList, name): """ Return a name that is unique within the given nameList by attaching a number to it. Example: after adding 'foo' three times, 'bar' twice and 'klaus' once, the list reads: ['bar', 'bar2', 'foo', 'foo2', 'foo3', 'klaus'] """
# Fast path: the name is already unique.
if name not in nameList:
    return name
newName = name + str(1)
# Try name1, name2, ... until a candidate is absent from nameList.
# The `len(nameList) + 2` bound guarantees a free suffix exists.
for n in range(1, len(nameList) + 2):
    found = False
    for b in nameList:
        newName = name + str(n)
        if b == newName:
            found = True
    if not found:
        break
return newName
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def provider_factory(factory=_sentinel, scope=NoneScope):
    '''
    Build a Provider from the given factory and scope.

    Usable as a plain call or as a decorator (with or without
    arguments).

    :param scope: Scope key, factory, or instance
    :type scope: object or callable
    :return: decorator
    :rtype: decorator
    '''
    if factory is _sentinel:
        # Invoked as ``@provider_factory(scope=...)`` — defer creation
        # until the decorated factory is supplied.
        return functools.partial(provider_factory, scope=scope)
    return Provider(factory, scope)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _id_for_pc(self, name): """ Given the name of the PC, return the database identifier. """
# Lazily insert unknown PCs, caching the new rowid in both lookup
# directions (name -> id and id -> name).
if not name in self.pc2id_lut:
    self.c.execute("INSERT INTO pcs (name) VALUES ( ? )", (name,))
    self.pc2id_lut[name] = self.c.lastrowid
    self.id2pc_lut[self.c.lastrowid] = name
return self.pc2id_lut[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _id_for_source(self, name): """ Given the name of the source, return the database identifier. """
# Lazily insert unknown sources, caching the new rowid in both lookup
# directions (name -> id and id -> name).
if not name in self.source2id_lut:
    self.c.execute("INSERT INTO sources (name) VALUES ( ? )", (name,))
    self.source2id_lut[name] = self.c.lastrowid
    self.id2source_lut[self.c.lastrowid] = name
return self.source2id_lut[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def record_occupation_updates(self, updates, source, version): """ Records an occupation update """
now = int(time.time()) # Put it on the recordQueue and notify the worker thread. with self.recordCond: self.recordQueue.append((now, updates, source)) self.recordCond.notify()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def a_urls(html):
    ''' return normalized urls found in the 'a' tag '''
    soup = BeautifulSoup(html, 'lxml')
    for anchor in soup.find_all('a'):
        # Anchors without an href attribute are skipped.
        href = anchor.get('href')
        if href is None:
            continue
        yield norm_url(href)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def host_names(urls):
    ''' Takes a StringCounter of normalized URL and parses their hostnames

    N.B. this assumes that absolute URLs will begin with

    http://

    in order to accurately resolve the host name.
    Relative URLs will not have host names.
    '''
    counts = StringCounter()
    for url, weight in urls.items():
        counts[urlparse(url).netloc] += weight
    return counts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def path_dirs(urls):
    ''' Takes a StringCounter of normalized URL and parses them into
    a list of path directories. The file name is included in the
    path directory list.
    '''
    counts = StringCounter()
    for url, weight in urls.items():
        for segment in urlparse(url).path.split('/'):
            if segment:
                counts[segment] += weight
    return counts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scan_path(executable="mongod"): """Scan the path for a binary. """
# Walk every directory on $PATH, returning the first match.
# NOTE(review): ':' as the PATH separator is POSIX-only — os.pathsep
# would be portable; confirm Windows is out of scope.
for path in os.environ.get("PATH", "").split(":"):
    path = os.path.abspath(path)
    executable_path = os.path.join(path, executable)
    if os.path.exists(executable_path):
        return executable_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_open_port(host="localhost"): """Get an open port on the machine. """
temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR lets the caller re-bind this port immediately after we
# release it.
temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Port 0 asks the kernel to pick any free port.
temp_sock.bind((host, 0))
port = temp_sock.getsockname()[1]
temp_sock.close()
del temp_sock
return port
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure(self, options, conf): """Parse the command line options and start an instance of mongodb """
# This option has to be specified on the command line, to enable the # plugin. if not options.mongoengine or options.mongodb_bin: return if not options.mongodb_bin: self.mongodb_param['mongodb_bin'] = scan_path() if self.mongodb_param['mongodb_bin'] is None: raise AssertionError( "Mongoengine plugin enabled, but no mongod on path, " "please specify path to binary\n" "ie. --mongoengine-mongodb=/path/to/mongod") else: self.mongodb_param['mongodb_bin'] = os.path.abspath( os.path.expanduser(os.path.expandvars(options.mongodb_bin))) if not os.path.exists(self.mongodb_param['mongodb_bin']): raise AssertionError( "Invalid mongodb binary %r" % \ self.mongodb_param['mongodb_bin']) # Its necessary to enable in nose self.enabled = True db_log_path = os.path.expandvars(os.path.expanduser( options.mongodb_logpath)) try: db_file = open(db_log_path, "w") db_file.close() except Exception as exc: raise AssertionError("Invalid log path %r" % exc) if not options.mongodb_port: self.mongodb_param['db_port'] = get_open_port() else: self.mongodb_param['db_port'] = options.mongodb_port db_prealloc = options.mongodb_prealloc db_scripting = options.mongodb_scripting self.clear_context['module'] = options.mongoengine_clear_after_module self.clear_context['class'] = options.mongoengine_clear_after_class # generate random database name self.database_name = str(uuid.uuid1()) ######################################### # Start a instance of mongo ######################################### # Stores data here self.mongodb_param['db_path'] = tempfile.mkdtemp() if not os.path.exists(self.mongodb_param['db_path']): os.mkdir(self.mongodb_param['db_path']) args = [ self.mongodb_param['mongodb_bin'], "--dbpath", self.mongodb_param['db_path'], "--port", str(self.mongodb_param['db_port']), # don't flood stdout, we're not reading it "--quiet", # save the port "--nohttpinterface", # disable unused. 
"--nounixsocket", # use a smaller default file size "--smallfiles", # journaling on by default in 2.0 and makes it to slow # for tests, can causes failures in jenkins "--nojournal", # Default is /dev/null "--logpath", db_log_path, "-vvvvv" ] if not db_prealloc: args.append("--noprealloc") if not db_scripting: args.append("--noscripting") self.process = Popen( args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) self._running = True os.environ["TEST_MONGODB"] = "localhost:%s" % \ self.mongodb_param['db_port'] os.environ["TEST_MONGODB_DATABASE"] = self.database_name # Give a moment for mongodb to finish coming up time.sleep(0.3) # Connecting using mongoengine self.connection = connect(self.database_name, host="localhost", port=self.mongodb_param['db_port'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stopContext(self, context): """Clear the database if so configured for this """
# Use pymongo directly to drop all collections of created db if ((self.clear_context['module'] and inspect.ismodule(context)) or (self.clear_context['class'] and inspect.isclass(context))): self.connection.drop_database(self.database_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def finalize(self, result): """Stop the mongodb instance. """
# Nothing to do if the mongod instance was never started.
if not self._running:
    return

# Clear out the env variable.
del os.environ["TEST_MONGODB"]
del os.environ["TEST_MONGODB_DATABASE"]

# Kill the mongod process
# NOTE(review): kill() on darwin vs terminate() elsewhere — presumably
# a workaround for mongod ignoring SIGTERM on macOS; confirm.
if sys.platform == 'darwin':
    self.process.kill()
else:
    self.process.terminate()
self.process.wait()

# Clean out the test data.
shutil.rmtree(self.mongodb_param['db_path'])

self._running = False