text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def pages_to_show(paginator, page, page_labels=None):
    """Generate a dictionary of pages to show around the current page.

    Show 3 numbers on either side of the specified page, or more if close
    to end or beginning of available pages.

    :param paginator: django :class:`~django.core.paginator.Paginator`,
        populated with objects
    :param page: number of the current page
    :param page_labels: optional dictionary of page labels, keyed on page number
    :rtype: dictionary; keys are page numbers, values are page labels
    """
    # Plain dict is fine: insertion order is preserved (Python 3.7+) and we
    # always insert page numbers in ascending order below.
    show_pages = {}
    if page_labels is None:
        page_labels = {}

    def get_page_label(index):
        # Prefer an explicit label; fall back to the page number itself.
        # str() (not the Python-2-only unicode()) is correct for int pages
        # on both Python 2 and 3.
        if index in page_labels:
            return page_labels[index]
        return str(index)

    if page != 1:
        before = 3          # default number of pages to show before the current page
        if page >= (paginator.num_pages - 3):
            # current page is within 3 of end; increase number to show
            # before current page based on distance to end
            before += (3 - (paginator.num_pages - page))
        for i in range(before, 0, -1):
            # add pages from `before` away up to the current page
            if (page - i) >= 1:
                show_pages[page - i] = get_page_label(page - i)

    # show up to 3 to 7 numbers after the current number, depending on
    # how many we already have
    for i in range(7 - len(show_pages)):
        if (page + i) <= paginator.num_pages:
            show_pages[page + i] = get_page_label(page + i)

    return show_pages
def Indentation( logical_line, previous_logical, indent_level, previous_indent_level ):
  """Use two spaces per indentation level.

  Yields ( position, code + message ) pairs; the code differs depending on
  whether the offending line is code or a comment."""
  is_code = bool( logical_line )
  comment = '' if is_code else ' (comment)'

  # Odd indents can never be a multiple of two spaces.
  if indent_level % 2 != 0:
    yield 0, ( ( 'YCM111' if is_code else 'YCM114' ) +
               ' indentation is not a multiple of two spaces' + comment )

  opens_block = previous_logical.endswith( ':' )
  if opens_block and indent_level - previous_indent_level != 2:
    yield 0, ( ( 'YCM112' if is_code else 'YCM115' ) +
               ' expected an indented block of {} spaces{}'.format(
                   previous_indent_level + 2, comment ) )
def SpacesInsideBrackets( logical_line, tokens ):
  """Require spaces inside parentheses, square brackets, and braces for
  non-empty content.

  Yields ( position, code + message ) pairs: YCM201 (not exactly one space
  after an opener), YCM202 (not exactly one space before a closer) and
  YCM204 (spaces inside an empty bracket pair)."""
  for index in range( len( tokens ) ):
    # Previous token, or all-None sentinels at the start.
    # Token 5-tuples are ( type, text, start, end, line ).
    _, prev_text, _, prev_end, _ = ( tokens[ index - 1 ]
                                     if index - 1 >= 0
                                     else ( None, None, None, None, None ) )
    token_type, text, start, end, _ = tokens[ index ]
    # Next token, or all-None sentinels at the end.
    next_token_type, next_text, next_start, _, _ = (
        tokens[ index + 1 ]
        if index + 1 < len( tokens )
        else ( None, None, None, None, None ) )

    if text in LEFT_BRACKETS:
      # Empty pair, e.g. "( )": no spaces allowed between the brackets.
      if ( next_text == CORRESPONDING_BRACKET[ text ] and
           next_start != end ):
        code = 'YCM204'
        message = ( ' no spaces between {} and {}'
                    ' for empty content'.format( text, next_text ) )
        yield end, code + message
      # Non-empty: exactly one space required after the opener; only
      # checked when the next token sits on the same physical line.
      if ( next_token_type not in [ tokenize.NL, tokenize.NEWLINE ] and
           next_text != CORRESPONDING_BRACKET[ text ] and
           next_start and
           next_start[ 0 ] == start[ 0 ] and
           next_start[ 1 ] - start[ 1 ] != 2 ):
        code = 'YCM201'
        message = ' exactly one space required after {}'.format( text )
        yield end, code + message

    if text in RIGHT_BRACKETS:
      # Exactly one space required before the closer (same line only).
      if ( prev_text != CORRESPONDING_BRACKET[ text ] and
           prev_end and
           prev_end[ 0 ] == end[ 0 ] and
           end[ 1 ] - prev_end[ 1 ] != 2 ):
        code = 'YCM202'
        message = ' exactly one space required before {}'.format( text )
        yield start, code + message
def buy(self, player, cost):
    """ indicate that the player was bought at the specified cost

    :param Player player: player to buy
    :param int cost: cost to pay
    :raises InsufficientFundsError: if owner doesn't have the money
    :raises NoValidRosterSlotError: if owner doesn't have a slot this player could fill
    :raises AlreadyPurchasedError: if owner already bought this player
    """
    # Validate the purchase up front; nothing is mutated until all three
    # checks pass.
    if cost > self.max_bid():
        raise InsufficientFundsError()
    has_open_slot = any(
        slot.accepts(player) and slot.occupant is None
        for slot in self.roster
    )
    if not has_open_slot:
        raise NoValidRosterSlotError()
    if self.owns(player):
        raise AlreadyPurchasedError()

    # Commit the purchase: pay, consume a pick, record ownership, fill slot.
    self.money -= cost
    self._remaining_picks -= 1
    self._owned_player_ids.add(player.player_id)
    self._slot_in(player, cost)
def run(self):
    """Keep running this thread until it's stopped"""
    stop_event = self._finished
    while True:
        if stop_event.isSet():
            break
        # Invoke the wrapped callable, then sleep for its configured
        # interval (milliseconds) or until the stop event fires.
        self._func(self._reference)
        stop_event.wait(self._func._interval / 1000.0)
def subscribe(self, clock_name: str = None, clock_slots: Iterable[str] = None,
              subscriptions: Dict[str, Any] = None):
    """Subscribes this Area to the given Areas and optionally given Slots.

    Must be called before the Area is run.

    Args:
        clock_name: The name of the Area that is used as synchronizing Clock.
        clock_slots: The slots of the Clock relevant to this Area.
        subscriptions: A dictionary containing the relevant Areas names as
            keys and optionally the Slots as values.
    """
    # A fresh dict per call: the previous mutable default ({}) was shared
    # between calls, and this method mutates the dict below -- entries from
    # one call leaked into every later call.
    if subscriptions is None:
        subscriptions = {}

    for area in subscriptions:  # type: str
        init_full(self, area, subscriptions[area])
        # Normalize the value to the {'slots': ...} shape setup() expects.
        subscriptions[area] = {'slots': subscriptions[area]}

    if clock_name is not None:
        self.clock_name = clock_name
        self.clock_slots = clock_slots
        # The clock only ever needs the latest tick, hence buffer-length 1.
        subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}

    self.setup(puller=True, subscriptions=subscriptions)
def logger(ref=0):
    """Finds a module logger.

    If the argument passed is a module, find the logger for that module
    using the modules' name; if it's a string, finds a logger of that name;
    if an integer, walks the stack to the module at that height.

    The logger is always extended with a ``.configure()`` method allowing
    its log levels for syslog and stderr to be adjusted or automatically
    initialized as per the documentation for `configure()` below.
    """
    if inspect.ismodule(ref):
        return extend(logging.getLogger(ref.__name__))
    # NOTE(review): ``basestring`` exists only on Python 2 -- this branch
    # would NameError under Python 3; confirm the target runtime.
    if isinstance(ref, basestring):
        return extend(logging.getLogger(ref))
    # Integer: resolve the caller's module by climbing the stack;
    # +1 skips this function's own frame.
    return extend(logging.getLogger(stackclimber(ref+1)))
def auto(cls, syslog=None, stderr=None, level=None, extended=None, server=None):
    """Tries to guess a sound logging configuration.

    If neither ``syslog`` nor ``stderr`` is requested explicitly, picks
    stderr when it is a TTY or no syslog path is available, otherwise
    syslog.
    """
    level = norm_level(level) or logging.INFO
    if syslog is None and stderr is None:
        if sys.stderr.isatty() or syslog_path() is None:
            log.info('Defaulting to STDERR logging.')
            syslog, stderr = None, level
            # Extended output defaults on only at DEBUG verbosity.
            if extended is None:
                extended = (stderr or 0) <= logging.DEBUG
        else:
            log.info('Defaulting to logging with Syslog.')
            syslog, stderr = level, None
    return cls(syslog=syslog, stderr=stderr, extended=extended, server=server)
def send_file(fd, filename=None, size=None, timestamp=None, ctype=None,
              charset=CHARSET, attachment=False, wrapper=DEFAULT_WRAPPER):
    """ Send a file represented by file object

    Constructs a HTTPResponse object that uses a file descriptor as the
    response body.  The descriptor is supplied as ``fd`` and must have a
    ``read()`` method; ``ValueError`` is raised otherwise.  Supports
    `byte serving`_ using the Range header, honors If-Modified-Since, and
    handles HEAD requests.

    Because we are dealing with file descriptors and not physical files,
    the caller must supply the file metadata:

    - ``filename``: arbitrary filename, used to guess the content type and
      to set the content disposition for attachments
    - ``size``: payload size in bytes; if omitted, Content-Length is not
      set and byte serving does not work
    - ``timestamp``: seconds since the Unix epoch when the file was created
      or last modified; if omitted, If-Modified-Since cannot be honored
    - ``ctype``: explicit MIME type of the payload
    - ``charset``: charset parameter for text Content-Type headers
      (default 'UTF-8')
    - ``attachment``: when ``True``, add a Content-Disposition header using
      ``filename``
    - ``wrapper``: callable ``(fd, offset, length)`` wrapping the
      descriptor for byte serving; must return an iterable or a file-like
      object implementing ``read()`` and ``close()``.  The default,
      ``fdsend.rangewrapper.RangeWrapper``, handles descriptors without a
      ``seek()`` method; ``bottle._file_iter_range`` is fine for seekable
      files

    The code is partly based on ``bottle.static_file``.

    .. _byte serving: https://tools.ietf.org/html/rfc2616#page-138
    """
    if not hasattr(fd, 'read'):
        raise ValueError("Object '{}' has no read() method".format(fd))

    headers = {}
    status = 200

    if not ctype and filename is not None:
        ctype, enc = mimetypes.guess_type(filename)
        if enc:
            headers['Content-Encoding'] = enc

    if ctype:
        if ctype.startswith('text/'):
            # We expect and assume all text files are encoded UTF-8. It's
            # broadcaster's job to ensure this is true.
            ctype += '; charset=%s' % charset
        headers['Content-Type'] = ctype

    if size:
        headers['Content-Length'] = size
        headers['Accept-Ranges'] = 'bytes'

    if timestamp:
        headers['Last-Modified'] = format_ts(timestamp)
        # Check if If-Modified-Since header is in request and respond early.
        # (A leftover debug print() of the raw header was removed here.)
        modsince = request.environ.get('HTTP_IF_MODIFIED_SINCE')
        modsince = modsince and parse_date(modsince.split(';')[0].strip())
        if modsince is not None and modsince >= timestamp:
            headers['Date'] = format_ts()
            return HTTPResponse(status=304, **headers)

    if attachment and filename:
        headers['Content-Disposition'] = 'attachment; filename="%s"' % filename

    if request.method == 'HEAD':
        # Request is a HEAD, so remove any fd body
        fd = ''

    ranges = request.environ.get('HTTP_RANGE')
    if size and ranges:
        ranges = list(parse_range_header(ranges, size))
        if not ranges:
            return HTTPError(416, 'Request Range Not Satisfiable')
        start, end = ranges[0]
        # `end` is exclusive internally; the Content-Range header is inclusive.
        headers['Content-Range'] = 'bytes %d-%d/%d' % (start, end - 1, size)
        length = end - start
        headers['Content-Length'] = str(length)
        fd = wrapper(fd, start, length)
        status = 206

    return HTTPResponse(fd, status=status, **headers)
def convert(self, element):
    """Convert an element to a chainlink"""
    # Already a chainlink: pass it through unchanged.
    if isinstance(element, self.base_link_type):
        return element
    # Ask each registered converter in order; the first one that does not
    # answer NotImplemented wins.
    for make_link in self.converters:
        converted = make_link(element)
        if converted is not NotImplemented:
            return converted
    raise TypeError('%r cannot be converted to a chainlink' % element)
def get_view_name(view_cls, suffix=None):
    """
    Given a view class, return a textual name to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.

    This function is the default for the `VIEW_NAME_FUNCTION` setting.
    """
    name = view_cls.__name__
    # Strip the conventional class-name suffixes before prettifying.
    for trailing in ('View', 'ViewSet'):
        name = formatting.remove_trailing_string(name, trailing)
    name = formatting.camelcase_to_spaces(name)
    return name + ' ' + suffix if suffix else name
def get_view_description(view_cls, html=False):
    """
    Given a view class, return a textual description to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.

    This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
    """
    raw_doc = view_cls.__doc__ or ''
    description = formatting.dedent(smart_text(raw_doc))
    if html:
        return formatting.markup_description(description)
    return description
def exception_handler(exc, context):
    """
    Returns the response that should be used for any given exception.

    By default we handle the REST framework `APIException`, and also
    Django's built-in `Http404` and `PermissionDenied` exceptions.

    Any unhandled exceptions may return `None`, which will cause a 500
    error to be raised.
    """
    if isinstance(exc, exceptions.APIException):
        # Propagate any auth / throttling hints the exception carries.
        headers = {}
        if getattr(exc, 'auth_header', None):
            headers['WWW-Authenticate'] = exc.auth_header
        if getattr(exc, 'wait', None):
            headers['Retry-After'] = '%d' % exc.wait

        # Structured details (e.g. validation errors) pass through as-is;
        # scalar details are wrapped under a 'message' key.
        if isinstance(exc.detail, (list, dict)):
            data = exc.detail
        else:
            data = {'message': exc.detail}

        set_rollback()
        return Response(data, status=exc.status_code, headers=headers)

    elif isinstance(exc, Http404):
        msg = _('Not found.')
        data = {'message': six.text_type(msg)}

        set_rollback()
        return Response(data, status=status.HTTP_404_NOT_FOUND)

    elif isinstance(exc, PermissionDenied):
        msg = _('Permission denied.')
        data = {'message': six.text_type(msg)}

        set_rollback()
        return Response(data, status=status.HTTP_403_FORBIDDEN)

    # Note: Unhandled exceptions will raise a 500 error.
    return None
def as_view(cls, **initkwargs):
    """
    Store the original class on the view function.

    This allows us to discover information about the view when we do URL
    reverse lookups.  Used for breadcrumb generation.
    """
    if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):
        def force_evaluation():
            raise RuntimeError(
                'Do not evaluate the `.queryset` attribute directly, '
                'as the result will be cached and reused between requests. '
                'Use `.all()` or call `.get_queryset()` instead.'
            )
        # Booby-trap evaluation of a class-level queryset so the mistake
        # fails loudly instead of silently caching results between requests.
        cls.queryset._fetch_all = force_evaluation
        cls.queryset._result_iter = force_evaluation  # Django <= 1.5

    view = super(RestView, cls).as_view(**initkwargs)
    view.cls = cls

    # Note: session based authentication is explicitly CSRF validated,
    # all other authentication is CSRF exempt.
    return csrf_exempt(view)
def permission_denied(self, request, message=None):
    """
    If request is not permitted, determine what kind of exception to raise.
    """
    # Authenticated requests get a 403; unauthenticated ones get a 401 so
    # the client knows credentials might help.
    if request.successful_authenticator:
        raise exceptions.PermissionDenied(detail=message)
    raise exceptions.NotAuthenticated()
def get_view_name(self):
    """
    Return the view name, as used in OPTIONS responses and in the
    browsable API.
    """
    # Delegate to the configurable naming function (VIEW_NAME_FUNCTION).
    name_func = self.settings.VIEW_NAME_FUNCTION
    suffix = getattr(self, 'suffix', None)
    return name_func(self.__class__, suffix)
def get_view_description(self, html=False):
    """
    Return some descriptive text for the view, as used in OPTIONS responses
    and in the browsable API.
    """
    # Delegate to the configurable function (VIEW_DESCRIPTION_FUNCTION).
    describe = self.settings.VIEW_DESCRIPTION_FUNCTION
    return describe(self.__class__, html)
def get_format_suffix(self, **kwargs):
    """
    Determine if the request includes a '.json' style format suffix
    """
    # Only look for a suffix when the feature is enabled in settings;
    # otherwise (and when the kwarg is absent) the answer is None.
    suffix_kwarg = self.settings.FORMAT_SUFFIX_KWARG
    return kwargs.get(suffix_kwarg) if suffix_kwarg else None
def get_content_negotiator(self):
    """
    Instantiate and return the content negotiation class to use.
    """
    # Lazily create the negotiator once and reuse it on later calls.
    negotiator = getattr(self, '_negotiator', None)
    if not negotiator:
        negotiator = self.content_negotiation_class()
        self._negotiator = negotiator
    return negotiator
def perform_content_negotiation(self, request, force=False):
    """
    Determine which renderer and media type to use render the response.
    """
    renderers = self.get_renderers()
    negotiator = self.get_content_negotiator()

    try:
        return negotiator.select_renderer(request, renderers, self.format_kwarg)
    except Exception:
        # With force=True negotiation must never fail: fall back to the
        # first configured renderer.
        if not force:
            raise
        default = renderers[0]
        return (default, default.media_type)
def check_throttles(self, request):
    """
    Check if request should be throttled.
    Raises an appropriate exception if the request is throttled.
    """
    for throttle in self.get_throttles():
        allowed = throttle.allow_request(request, self)
        if allowed:
            continue
        # Let the view raise its throttled exception with the wait hint.
        self.throttled(request, throttle.wait())
def initialize_request(self, request, *args, **kwargs):
    """
    Returns the initial request object.

    Wraps the raw request in a framework ``Request`` carrying this view's
    parsers, authenticators and content negotiator.
    """
    parser_context = self.get_parser_context(request)

    return Request(
        request,
        parsers=self.get_parsers(),
        authenticators=self.get_authenticators(),
        negotiator=self.get_content_negotiator(),
        parser_context=parser_context
    )
def initial(self, request, *args, **kwargs):
    """
    Runs anything that needs to occur prior to calling the method handler.
    """
    self.format_kwarg = self.get_format_suffix(**kwargs)

    # Ensure that the incoming request is permitted
    self.perform_authentication(request)
    self.check_permissions(request)
    self.check_throttles(request)

    # Perform content negotiation and store the accepted info on the request
    neg = self.perform_content_negotiation(request)
    request.accepted_renderer, request.accepted_media_type = neg

    # Determine the API version, if versioning is in use.
    version, scheme = self.determine_version(request, *args, **kwargs)
    request.version, request.versioning_scheme = version, scheme
def finalize_response(self, request, response, *args, **kwargs):
    """
    Returns the final response object.
    """
    # Make the error obvious if a proper response is not returned
    assert isinstance(response, HttpResponseBase), (
        'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
        'to be returned from the view, but received a `%s`'
        % type(response)
    )

    if isinstance(response, Response):
        # If negotiation never ran (e.g. an exception was raised before
        # `initial()` finished), force a fallback negotiation now.
        if not getattr(request, 'accepted_renderer', None):
            neg = self.perform_content_negotiation(request, force=True)
            request.accepted_renderer, request.accepted_media_type = neg

        response.accepted_renderer = request.accepted_renderer
        response.accepted_media_type = request.accepted_media_type
        response.renderer_context = self.get_renderer_context()

    # Copy any view-level headers onto the outgoing response.
    for key, value in self.headers.items():
        response[key] = value

    return response
def handle_exception(self, exc):
    """
    Handle any exception that occurs, by returning an appropriate response,
    or re-raising the error.
    """
    if isinstance(exc, (exceptions.NotAuthenticated,
                        exceptions.AuthenticationFailed)):
        # WWW-Authenticate header for 401 responses, else coerce to 403
        auth_header = self.get_authenticate_header(self.request)

        if auth_header:
            exc.auth_header = auth_header
        else:
            exc.status_code = status.HTTP_403_FORBIDDEN

    exception_handler = self.settings.EXCEPTION_HANDLER
    context = self.get_exception_handler_context()
    response = exception_handler(exc, context)

    if response is None:
        # The configured handler declined: re-raise so the framework
        # produces a 500.
        raise

    response.exception = True
    return response
def options(self, request, *args, **kwargs):
    """
    Handler method for HTTP 'OPTIONS' request.
    """
    # Without a metadata class, OPTIONS is simply not supported.
    if self.metadata_class is None:
        return self.http_method_not_allowed(request, *args, **kwargs)
    metadata = self.metadata_class().determine_metadata(request, self)
    return Response(metadata, status=status.HTTP_200_OK)
def get_subkey(self,name):
    """Retrieve the subkey with the specified name.

    If the named subkey is not found, AttributeError is raised; this is
    for consistency with the attribute-based access notation.
    """
    subkey = Key(name,self)
    try:
        # Touching .hkey forces the underlying registry key open, which is
        # what actually detects a missing subkey.
        hkey = subkey.hkey
    except WindowsError:
        raise AttributeError("subkey '%s' does not exist" % (name,))
    return subkey
def set_subkey(self,name,value=None):
    """Create the named subkey and set its value.

    There are several different ways to specify the new contents of
    the named subkey:

        * if 'value' is the Key class, a subclass thereof, or None, then
          the subkey is created but not populated with any data.
        * if 'value' is a key instance, the data from that key will be
          copied into the new subkey.
        * if 'value' is a dictionary, the dict's keys are interpreted as
          key or value names and the corresponding entries are created
          within the new subkey - nested dicts create further subkeys,
          while scalar values create values on the subkey.
        * any other value will be converted to a Value object and assigned
          to the default value for the new subkey.
    """
    # Make sure our handle allows creating child keys.
    self.sam |= KEY_CREATE_SUB_KEY
    subkey = Key(name,self)
    try:
        subkey = self.get_subkey(name)
    except AttributeError:
        # Subkey does not exist yet: create it, then re-open it.
        _winreg.CreateKey(self.hkey,name)
        subkey = self.get_subkey(name)
    if value is None:
        # Create-only: no data to populate.
        pass
    elif issubclass(type(value),type) and issubclass(value,Key):
        # The Key class (or a subclass) itself also means create-only.
        pass
    elif isinstance(value,Key):
        # Copy values and subkeys recursively from the given key.
        for v in value.values():
            subkey[v.name] = v
        for k in value.subkeys():
            subkey.set_subkey(k.name,k)
    elif isinstance(value,dict):
        # Dicts map names to nested subkeys (dict/Key/Key-class values)
        # or to plain values (anything else).
        for (nm,val) in value.items():
            if isinstance(val,dict):
                subkey.set_subkey(nm,val)
            elif isinstance(val,Key):
                subkey.set_subkey(nm,val)
            elif issubclass(type(val),type) and issubclass(val,Key):
                subkey.set_subkey(nm,val)
            else:
                subkey[nm] = val
    else:
        # Scalar: coerce to Value and assign as the subkey's default value.
        if not isinstance(value,Value):
            value = Value(value)
        subkey[value.name] = value
def del_subkey(self,name):
    """Delete the named subkey, and any values or keys it contains."""
    # Upgrade our access rights so the deletes are permitted.
    self.sam |= KEY_WRITE
    subkey = self.get_subkey(name)
    # Empty the key first (clear() removes values and child keys), then
    # delete the now-empty key itself.
    subkey.clear()
    _winreg.DeleteKey(subkey.parent.hkey,subkey.name)
def clear(self):
    """Remove all subkeys and values from this key."""
    self.sam |= KEY_WRITE
    # Iterate over snapshots, since deleting mutates the underlying key.
    for v in list(self.values()):
        del self[v.name]
    for k in list(self.subkeys()):
        self.del_subkey(k.name)
def publish(self):
    """
    Iterate over the scheduler collections and apply any actions found
    """
    try:
        for collection in self.settings.get("scheduler").get("collections"):
            yield self.publish_for_collection(collection)
    except Exception as ex:
        # Best effort: a failure must not kill the scheduler loop, so it
        # is logged and swallowed.
        self.logger.error(ex)
def set_all_revisions_to_in_process(self, ids):
    """
    Set all revisions found to in process, so that other threads will not
    pick them up.

    :param list ids: revision ids (hex strings) to claim
    """
    # Renamed locals: the originals shadowed the builtins `set` and `id`.
    predicate = {"_id": {"$in": [ObjectId(revision_id) for revision_id in ids]}}
    update_spec = {"$set": {"inProcess": True}}
    yield self.revisions.collection.update(predicate, update_spec, multi=True)
def __get_pending_revisions(self):
    """
    Get all the pending revisions after the current time

    :return: A list of revisions
    :rtype: list
    """
    dttime = time.mktime(datetime.datetime.now().timetuple())
    # Pending == due (toa in the past), not yet processed, and not claimed
    # by another worker (inProcess unset).
    changes = yield self.revisions.find({
        "toa": {
            "$lt": dttime,
        },
        "processed": False,
        "inProcess": None
    })

    if len(changes) > 0:
        # Claim the whole batch so concurrent schedulers don't double-apply.
        yield self.set_all_revisions_to_in_process(
            [change.get("id") for change in changes])

    raise Return(changes)
def publish_for_collection(self, collection_name):
    """
    Run the publishing operations for a given collection

    :param str collection_name:
    """
    # Each collection keeps its revisions in a sibling "<name>_revisions"
    # collection.
    self.revisions = BaseAsyncMotorDocument(
        "%s_revisions" % collection_name, self.settings)

    changes = yield self.__get_pending_revisions()

    if len(changes) > 0:
        self.logger.info("%s revisions will be actioned" % len(changes))
        for change in changes:
            try:
                self.logger.info(
                    "Applying %s action %s - %s to document: %s/%s" % (
                        change.get("action"),
                        change.get("id"),
                        change.get("meta",{}).get("comment", "No Comment"),
                        change.get("collection"),
                        change.get("master_id")))

                # A per-document stack applies the due revision via pop().
                stack = AsyncSchedulableDocumentRevisionStack(
                    change.get("collection"),
                    self.settings,
                    master_id=change.get("master_id")
                )
                revision = yield stack.pop()

                self.logger.debug(revision)
            except Exception as ex:
                # One bad revision shouldn't block the rest of the batch.
                self.logger.error(ex)
def __update_action(self, revision):
    """Update a master document and revision history document

    :param dict revision: The revision dictionary
    """
    patch = revision.get("patch")
    # Never try to patch the immutable _id field.
    if patch.get("_id"):
        del patch["_id"]

    update_response = yield self.collection.patch(
        revision.get("master_id"),
        self.__make_storeable_patch_patchable(patch))

    # "n" is the matched-document count; 0 means the master doc is missing.
    if update_response.get("n") == 0:
        raise RevisionNotFoundException()
def __insert_action(self, revision):
    """
    Handle the insert action type. Creates new document to be created in
    this collection. This allows you to stage a creation of an object

    :param dict revision: The revision dictionary
    """
    # The staged patch becomes the new document, keyed by the master id.
    revision["patch"]["_id"] = ObjectId(revision.get("master_id"))

    insert_response = yield self.collection.insert(revision.get("patch"))

    # insert() is expected to return the new id as a string on success;
    # anything else is treated as a failure.
    if not isinstance(insert_response, str):
        raise DocumentRevisionInsertFailed()
def __delete_action(self, revision):
    """
    Handle a delete action to a particular master id via the revision.

    :param dict revision:
    :return:
    """
    delete_response = yield self.collection.delete(revision.get("master_id"))

    # "n" is the removed-document count; 0 means nothing matched the id.
    if delete_response.get("n") == 0:
        raise DocumentRevisionDeleteFailed()
def pop(self):
    """Pop the top revision off the stack back onto the collection at the
    given id.  This method applies the action.

    Note: This assumes you don't have two revisions scheduled closer than
    a single scheduling cycle.
    """
    revisions = yield self.list()

    if len(revisions) > 0:
        # Only the oldest due revision is applied per pop().
        revision = revisions[0]

        # Update type action
        if revision.get("action") == self.UPDATE_ACTION:
            try:
                yield self.__update_action(revision)
            except Exception as ex:
                self.logger.error(ex)

        # Insert type update
        if revision.get("action") == self.INSERT_ACTION:
            try:
                yield self.__insert_action(revision)
            except Exception as ex:
                self.logger.error(ex)

        #Get the updated object for attachment to the snapshot
        snapshot_object = yield self.collection.find_one_by_id(
            revision.get("master_id"))

        #Handle delete action here
        if revision.get("action") == self.DELETE_ACTION:
            try:
                yield self.__delete_action(revision)
            except Exception as ex:
                self.logger.error(ex)
            # A deleted document has no snapshot to attach.
            snapshot_object = None

        #Update the revision to be in a post-process state including snapshot
        revision_update_response = yield self.revisions.patch(
            revision.get("id"),
            {
                "processed": True,
                "snapshot": snapshot_object,
                "inProcess": False
            }
        )

        if revision_update_response.get("n") == 0:
            raise RevisionUpdateFailed(msg="revision document update failed")

        # Re-read the revision so the caller gets its post-process state.
        revision = yield self.revisions.find_one_by_id(revision.get("id"))

        #TODO: Make this callback method something that can be passed in. This was used in
        #the original implementation to send back to the client via websocket
        #revision_success.send('revision_success', type="RevisionSuccess", data=revision)

        raise Return(revision)

    raise Return(None)
def __make_patch_storeable(self, patch):
    """Replace all dots with pipes in key names, mongo doesn't like to
    store keys with dots.

    :param dict patch: The patch that needs to be made storeable and
        applied in the future
    """
    # Same keys, pipe-escaped; values pass through untouched.
    return {key.replace(".", "|"): value for key, value in patch.items()}
def push(self, patch=None, toa=None, meta=None):
    """Push a change on to the revision stack for this ObjectId.

    Pushing onto the stack is how you get revisions to be staged and
    scheduled for some future time.

    :param dict patch: None Denotes Delete
    :param int toa: Time of action
    :param dict meta: The meta data for this action
    """
    if not meta:
        meta = {}

    if not toa:
        toa = time.mktime(datetime.datetime.now().timetuple())

    if not isinstance(toa, int):
        toa = int(toa)

    #Documents should be stored in bson formats
    if isinstance(patch, dict):
        patch = self.revisions._dictionary_to_cursor(patch)

    action = None
    if isinstance(patch, type(None)):
        # No patch means the whole document is scheduled for deletion.
        action = self.DELETE_ACTION
    elif self.master_id and isinstance(patch, dict):
        action = self.UPDATE_ACTION
        # Mongo can't store dotted key names; pipe-escape them.
        patch = self.__make_patch_storeable(patch)
        # Ensure a baseline revision exists before the first update.
        yield self._lazy_migration(meta=copy.deepcopy(meta), toa=toa-1)
    elif not self.master_id and isinstance(patch, dict):
        #Scheduled inserts will not have an object ID and one should be generated
        action = self.INSERT_ACTION
        patch["_id"] = ObjectId()
        self.master_id = patch["_id"].__str__()
    elif not action:
        raise RevisionActionNotValid()

    # We shall never store the _id to a patch dictionary
    if patch and patch.get("_id"):
        del patch["_id"]

    change = {
        "toa": toa,
        "processed": False,
        "collection": self.collection_name,
        "master_id": self.master_id,
        "action": action,
        "patch": None if action == self.DELETE_ACTION
                 else self.collection._dictionary_to_cursor(patch),
        "meta": meta
    }

    # Reject malformed changes before they reach the revisions collection.
    jsonschema.validate(change, self.SCHEMA)

    id = yield self.revisions.insert(change)

    raise Return(id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list(self, toa=None, show_history=False): """Return all revisions for this stack :param int toa: The time of action as a UTC timestamp :param bool show_history: Whether to show historical revisions """
# Default the cutoff to "now" (local-time POSIX timestamp).
if not toa:
    toa = time.mktime(datetime.datetime.now().timetuple())

# All revisions for this master document at or before the cutoff,
# oldest first; `processed` selects historical vs pending revisions.
query = {
    "$query": {
        "master_id": self.master_id,
        "processed": show_history,
        "toa" : {"$lte" : toa}
    },
    "$orderby": {
        "toa": 1
    }
}

revisions = yield self.revisions.find(query)

raise Return(revisions)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def _lazy_migration(self, patch=None, meta=None, toa=None):
        """ Handle when revision scheduling is turned on for a collection that was previously not schedulable. This method will create the first revision for each object before it is ever used in the context of scheduling. :param dict patch: The patch that should be used :param dict meta: Meta data for this action :param int toa: The time of action :return: A legacy revision for a document that predates scheduling :rtype: list """
objects = yield self.revisions.find({"master_id": self.master_id}, limit=1) if len(objects) > 0: raise Return(objects) if not patch: patch = yield self.collection.find_one_by_id(self.master_id) if not toa: toa = long(time.mktime(datetime.datetime.now().timetuple())) meta["comment"] = "This document was migrated automatically." if isinstance(patch, dict) and patch.get("id"): del patch["id"] if isinstance(patch, dict) and patch.get("_id"): del patch["_id"] #Here we separate patch and snapshot, and make sure that the snapshot looks like the master document snapshot = copy.deepcopy(patch) snapshot["id"] = self.master_id snapshot["published"] = self.settings.get("scheduler", {}).get("lazy_migrated_published_by_default", False) #If no objects are returned, this is some legacy object that needs a first revision #Create it here legacy_revision = { "toa": toa, "processed": True, "collection": self.collection_name, "master_id": self.master_id, "action": self.INSERT_ACTION, "patch": self.collection._dictionary_to_cursor(patch), "snapshot": snapshot, "meta": meta, } response = yield self.revisions.insert(legacy_revision) if isinstance(response, str): raise Return([legacy_revision]) raise Return(None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __create_preview_object_base(self, dct): """ The starting point for a preview of a future object. This is the object which will have future revisions iterated and applied to. :param dict dct: The starting object dictionary :return: The preview object id :rtype: str """
# Drop any existing Mongo id so the preview copy gets its own identity.
# NOTE(review): this mutates the caller's dict in place — confirm callers
# do not rely on the original "_id" afterwards.
if dct.get("_id"):
    del dct["_id"]

# Persist the starting state into the previews collection.
preview_object_id = yield self.previews.insert(dct)

raise Return(preview_object_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preview(self, revision_id): """Get an ephemeral preview of a revision with all revisions applied between it and the current state :param str revision_id: The ID of the revision state you want to preview the master id at. :return: A snapshot of a future state of the object :rtype: dict """
target_revision = yield self.revisions.find_one_by_id(revision_id) if isinstance(target_revision.get("snapshot"), dict): raise Return(target_revision) preview_object = None if not isinstance(target_revision, dict): raise RevisionNotFound() revision_collection_client = BaseAsyncMotorDocument(target_revision.get("collection"), self.settings) self.master_id = target_revision.get("master_id") action = target_revision.get("action") if action == self.DELETE_ACTION: raise Return(preview_object) if action in [self.INSERT_ACTION, self.UPDATE_ACTION]: revisions = yield self.list(toa=target_revision.get("toa")) if len(revisions) == 0: raise NoRevisionsAvailable() first_revision = revisions[0] current_document = None if first_revision.get("action") == self.UPDATE_ACTION: current_document = yield revision_collection_client.find_one_by_id(target_revision.get("master_id")) elif first_revision.get("action") == self.INSERT_ACTION: # If we are doing an insert, the first revision patch is the current state current_document = first_revision.get("patch") if not current_document: raise RevisionNotFound() preview_id = yield self.__create_preview_object_base(current_document) for revision in revisions: patch = revision.get("patch") if patch.get("_id"): del patch["_id"] yield self.previews.patch(preview_id, self.__make_storeable_patch_patchable(patch)) preview_object = yield self.previews.find_one_by_id(preview_id) preview_object["id"] = target_revision["id"] target_revision["snapshot"] = self.collection._obj_cursor_to_dictionary(preview_object) target_revision["snapshot"]["id"] = target_revision["master_id"] # Delete the last preview yield self.previews.delete(preview_id) raise Return(target_revision)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(self, dct, toa=None, comment=""): """Create a document :param dict dct: :param toa toa: Optional time of action, triggers this to be handled as a future insert action for a new document :param str comment: A comment :rtype str: :returns string bson id: """
# Validate against the collection's JSON schema when one is configured.
if self.schema:
    jsonschema.validate(dct, self.schema)

# NOTE(review): the documented `toa`/`comment` parameters are not
# referenced in this body — confirm scheduled inserts are handled elsewhere.
bson_obj = yield self.collection.insert(dct)

# Return the new ObjectId as its string representation.
raise Return(bson_obj.__str__())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upsert(self, _id, dct, attribute="_id"): """Update or Insert a new document :param str _id: The document id :param dict dct: The dictionary to set on the document :param str attribute: The attribute to query for to find the object to set this data on :returns: JSON Mongo client response including the "n" key to show number of objects effected """
# Delegate to update() with upsert=True so a missing document is created.
mongo_response = yield self.update(_id, dct, upsert=True, attribute=attribute)
raise Return(mongo_response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def update(self, predicate_value, dct, upsert=False, attribute="_id"):
        """Update an existing document :param predicate_value: The value of the predicate :param dict dct: The dictionary to update with :param bool upsert: Whether this is an upsert action :param str attribute: The attribute to query for to find the object to set this data on :returns: JSON Mongo client response including the "n" key to show number of objects affected """
# Optional JSON-schema validation before touching the database.
if self.schema:
    jsonschema.validate(dct, self.schema)

# When querying by _id, coerce a string id into a BSON ObjectId.
if attribute=="_id" and not isinstance(predicate_value, ObjectId):
    predicate_value = ObjectId(predicate_value)

predicate = {attribute: predicate_value}

# Convert plain dict values to their BSON/cursor representation.
dct = self._dictionary_to_cursor(dct)

mongo_response = yield self.collection.update(predicate, dct, upsert)

raise Return(self._obj_cursor_to_dictionary(mongo_response))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, _id): """Delete a document or create a DELETE revision :param str _id: The ID of the document to be deleted :returns: JSON Mongo client response including the "n" key to show number of objects effected """
# Remove by BSON ObjectId; the response includes "n" (documents affected).
mongo_response = yield self.collection.remove({"_id": ObjectId(_id)})
raise Return(mongo_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_one(self, query): """Find one wrapper with conversion to dictionary :param dict query: A Mongo query """
# Fetch a single matching document and normalise the cursor to a dict.
mongo_response = yield self.collection.find_one(query)
raise Return(self._obj_cursor_to_dictionary(mongo_response))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, query, orderby=None, order_by_direction=1, page=0, limit=0): """Find a document by any criteria :param dict query: The query to perform :param str orderby: The attribute to order results by :param int order_by_direction: 1 or -1 :param int page: The page to return :param int limit: Number of results per page :returns: A list of results :rtype: list """
cursor = self.collection.find(query)

# Optional sort; order_by_direction is 1 (asc) or -1 (desc).
if orderby:
    cursor.sort(orderby, order_by_direction)

# Paging: skip whole pages; limit=0 means "no limit" in Mongo.
cursor.skip(page*limit).limit(limit)

results = []

# Motor async iteration: fetch_next resolves True while documents remain.
while (yield cursor.fetch_next):
    results.append(self._obj_cursor_to_dictionary(cursor.next_object()))

raise Return(results)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def find_one_by_id(self, _id):
        """ Find a single document by id :param str _id: BSON string representation of the Id :return: a single object :rtype: dict """
# Look up by BSON ObjectId and normalise the result to a plain dict.
document = (yield self.collection.find_one({"_id": ObjectId(_id)}))

raise Return(self._obj_cursor_to_dictionary(document))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_index(self, index, index_type=GEO2D): """Create an index on a given attribute :param str index: Attribute to set index on :param str index_type: See PyMongo index types for further information, defaults to GEO2D index. """
self.logger.info("Adding %s index to stores on attribute: %s" % (index_type, index)) yield self.collection.create_index([(index, index_type)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def location_based_search(self, lng, lat, distance, unit="miles", attribute_map=None, page=0, limit=50): """Search based on location and other attribute filters :param long lng: Longitude parameter :param long lat: Latitude parameter :param int distance: The radius of the query :param str unit: The unit of measure for the query, defaults to miles :param dict attribute_map: Additional attributes to apply to the location bases query :param int page: The page to return :param int limit: Number of results per page :returns: List of objects :rtype: list """
#Determine what type of radian conversion you want base on a unit of measure if unit == "miles": distance = float(distance/69) else: distance = float(distance/111.045) #Start with geospatial query query = { "loc" : { "$within": { "$center" : [[lng, lat], distance]} } } #Allow querying additional attributes if attribute_map: query = dict(query.items() + attribute_map.items()) results = yield self.find(query, page=page, limit=limit) raise Return(self._list_cursor_to_json(results))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default(self, obj, **kwargs): """Handles the adapting of special types from mongo"""
# datetime -> POSIX timestamp (seconds since epoch via local-time mktime)
if isinstance(obj, datetime.datetime):
    return time.mktime(obj.timetuple())

# BSON Timestamp -> its integer time component
if isinstance(obj, Timestamp):
    return obj.time

# ObjectId -> hex string representation
if isinstance(obj, ObjectId):
    return obj.__str__()

# Anything else falls back to the stock encoder (raises TypeError).
return JSONEncoder.default(self, obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _tdec(code: str, unit: str = 'C') -> str: """ Translates a 4-digit decimal temperature representation Ex: 1045 -> -4.5°C 0237 -> 23.7°C """
ret = f"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}" if unit: ret += f'°{unit}' return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pressure_tendency(code: str, unit: str = 'mb') -> str: """ Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing """
width, precision = int(code[2:4]), code[4] return ('3-hour pressure difference: +/- ' f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(rmk: str) -> RemarksData: """ Finds temperature and dewpoint decimal values from the remarks """
rmkdata = {}
for item in rmk.split(' '):
    # T-group: 'T' + 4-digit temperature [+ 4-digit dewpoint], e.g. T02220183.
    if len(item) in [5, 9] and item[0] == 'T' and item[1:].isdigit():
        rmkdata['temperature_decimal'] = core.make_number(_tdec(item[1:5], None))  # type: ignore
        # NOTE(review): for the 5-char form item[5:] is '' — confirm that
        # core.make_number('') yields the intended "missing" value.
        rmkdata['dewpoint_decimal'] = core.make_number(_tdec(item[5:], None))  # type: ignore
return RemarksData(**rmkdata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def translate(remarks: str) -> typing.Dict[str, str]: # noqa """ Translates elements in the remarks string """
ret = {} # Add and replace static multi-word elements for key in REMARKS_GROUPS: if key in remarks: ret[key.strip()] = REMARKS_GROUPS[key] remarks.replace(key, ' ') # For each remaining element for rmk in remarks.split()[1:]: rlen = len(rmk) # Static single-word elements if rmk in REMARKS_ELEMENTS: ret[rmk] = REMARKS_ELEMENTS[rmk] # Digit-only encoded elements elif rmk.isdigit(): if rlen == 5 and rmk[0] in LEN5_DECODE: rmk_ = LEN5_DECODE[rmk[0]](rmk) # type: ignore ret[rmk] = rmk_ # 24-hour min/max temperature elif rlen == 9: ret[rmk] = f'24-hour temperature: max {_tdec(rmk[1:5])} min {_tdec(rmk[5:])}' # Sea level pressure: SLP218 elif rmk.startswith('SLP'): ret[rmk] = f'Sea level pressure: 10{rmk[3:5]}.{rmk[5]} hPa' # Temp/Dew with decimal: T02220183 elif rlen == 9 and rmk[0] == 'T' and rmk[1:].isdigit(): ret[rmk] = f'Temperature {_tdec(rmk[1:5])} and dewpoint {_tdec(rmk[5:])}' # Precipitation amount: P0123 elif rlen == 5 and rmk[0] == 'P' and rmk[1:].isdigit(): ret[rmk] = f'Hourly precipitation: {int(rmk[1:3])}.{rmk[3:]} in.' # Weather began/ended elif rlen == 5 and rmk[2] in ('B', 'E') and rmk[3:].isdigit() and rmk[:2] in WX_TRANSLATIONS: state = 'began' if rmk[2] == 'B' else 'ended' ret[rmk] = f'{WX_TRANSLATIONS[rmk[:2]]} {state} at :{rmk[3:]}' return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_row(self): """ Parses a row, cell-by-cell, returning a dict of field names to the cleaned field values. """
fields = self.mapping
# Walk the row positionally; mapping keys are *string* column indices
# mapped to (field_name, field_type) pairs. Extra trailing cells ignored.
for i, cell in enumerate(self.row[0:len(fields)]):
    field_name, field_type = fields[str(i)]
    parsed_cell = self.clean_cell(cell, field_type)
    self.parsed_row[field_name] = parsed_cell
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_mappings(self): """ Uses CSV files of field names and positions for different filing types to load mappings into memory, for use in parsing different types of rows. """
self.mappings = {}

# One mapping CSV per record type, stored three directories up in mappings/.
for record_type in ('sa', 'sb', 'F8872'):
    path = os.path.join(
        os.path.dirname(
            os.path.dirname(
                os.path.dirname(__file__))),
        'mappings',
        '{}.csv'.format(record_type))

    mapping = {}

    # Each CSV row maps a column position to (model_name, field_type).
    with open(path, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            mapping[row['position']] = (
                row['model_name'], row['field_type'])

    self.mappings[record_type] = mapping
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff(full, dataset_uri, reference_dataset_uri): """Report the difference between two datasets. 1. Checks that the identifiers are identicial 2. Checks that the sizes are identical 3. Checks that the hashes are identical, if the '--full' option is used If a differences is detected in step 1, steps 2 and 3 will not be carried out. Similarly if a difference is detected in step 2, step 3 will not be carried out. When checking that the hashes are identical the hashes for the first dataset are recalculated using the hashing algorithm of the reference dataset. """
def echo_header(desc, ds_name, ref_ds_name, prop): click.secho("Different {}".format(desc), fg="red") click.secho("ID, {} in '{}', {} in '{}'".format( prop, ds_name, prop, ref_ds_name)) def echo_diff(diff): for d in diff: line = "{}, {}, {}".format(d[0], d[1], d[2]) click.secho(line) ds = dtoolcore.DataSet.from_uri(dataset_uri) ref_ds = dtoolcore.DataSet.from_uri(reference_dataset_uri) num_items = len(list(ref_ds.identifiers)) ids_diff = diff_identifiers(ds, ref_ds) if len(ids_diff) > 0: echo_header("identifiers", ds.name, ref_ds.name, "present") echo_diff(ids_diff) sys.exit(1) with click.progressbar(length=num_items, label="Comparing sizes") as progressbar: sizes_diff = diff_sizes(ds, ref_ds, progressbar) if len(sizes_diff) > 0: echo_header("sizes", ds.name, ref_ds.name, "size") echo_diff(sizes_diff) sys.exit(2) if full: with click.progressbar(length=num_items, label="Comparing hashes") as progressbar: content_diff = diff_content(ds, ref_ds, progressbar) if len(content_diff) > 0: echo_header("content", ds.name, ref_ds.name, "hash") echo_diff(content_diff) sys.exit(3)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def identifiers(dataset_uri): """List the item identifiers in the dataset."""
# Load the dataset, then echo each item identifier on its own line.
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
for identifier in dataset.identifiers:
    click.secho(identifier)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summary(dataset_uri, format): """Report summary information about a dataset."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri) creator_username = dataset._admin_metadata["creator_username"] frozen_at = dataset._admin_metadata["frozen_at"] num_items = len(dataset.identifiers) tot_size = sum([dataset.item_properties(i)["size_in_bytes"] for i in dataset.identifiers]) if format == "json": json_lines = [ '{', ' "name": "{}",'.format(dataset.name), ' "uuid": "{}",'.format(dataset.uuid), ' "creator_username": "{}",'.format(creator_username), ' "number_of_items": {},'.format(num_items), ' "size_in_bytes": {},'.format(tot_size), ' "frozen_at": {}'.format(frozen_at), '}', ] formatted_json = "\n".join(json_lines) colorful_json = pygments.highlight( formatted_json, pygments.lexers.JsonLexer(), pygments.formatters.TerminalFormatter()) click.secho(colorful_json, nl=False) else: info = [ ("name", dataset.name), ("uuid", dataset.uuid), ("creator_username", creator_username), ("number_of_items", str(num_items)), ("size", sizeof_fmt(tot_size).strip()), ("frozen_at", date_fmt(frozen_at)), ] for key, value in info: click.secho("{}: ".format(key), nl=False) click.secho(value, fg="green")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def properties(dataset_uri, item_identifier): """Report item properties."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri) try: props = dataset.item_properties(item_identifier) except KeyError: click.secho( "No such item in dataset: {}".format(item_identifier), fg="red", err=True ) sys.exit(20) json_lines = [ '{', ' "relpath": "{}",'.format(props["relpath"]), ' "size_in_bytes": {},'.format(props["size_in_bytes"]), ' "utc_timestamp": {},'.format(props["utc_timestamp"]), ' "hash": "{}"'.format(props["hash"]), '}', ] formatted_json = "\n".join(json_lines) colorful_json = pygments.highlight( formatted_json, pygments.lexers.JsonLexer(), pygments.formatters.TerminalFormatter()) click.secho(colorful_json, nl=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def relpath(dataset_uri, item_identifier): """Return relpath associated with the item. """
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
try:
    props = dataset.item_properties(item_identifier)
except KeyError:
    # Unknown identifier: report on stderr and exit with a distinct code.
    click.secho(
        "No such item in dataset: {}".format(item_identifier),
        fg="red",
        err=True
    )
    sys.exit(21)
click.secho(props["relpath"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify(full, dataset_uri): """Verify the integrity of a dataset. """
dataset = dtoolcore.DataSet.from_uri(dataset_uri) all_okay = True generated_manifest = dataset.generate_manifest() generated_identifiers = set(generated_manifest["items"].keys()) manifest_identifiers = set(dataset.identifiers) for i in generated_identifiers.difference(manifest_identifiers): message = "Unknown item: {} {}".format( i, generated_manifest["items"][i]["relpath"] ) click.secho(message, fg="red") all_okay = False for i in manifest_identifiers.difference(generated_identifiers): message = "Missing item: {} {}".format( i, dataset.item_properties(i)["relpath"] ) click.secho(message, fg="red") all_okay = False for i in manifest_identifiers.intersection(generated_identifiers): generated_hash = generated_manifest["items"][i]["size_in_bytes"] manifest_hash = dataset.item_properties(i)["size_in_bytes"] if generated_hash != manifest_hash: message = "Altered item size: {} {}".format( i, dataset.item_properties(i)["relpath"] ) click.secho(message, fg="red") all_okay = False if full: for i in manifest_identifiers.intersection(generated_identifiers): generated_hash = generated_manifest["items"][i]["hash"] manifest_hash = dataset.item_properties(i)["hash"] if generated_hash != manifest_hash: message = "Altered item hash: {} {}".format( i, dataset.item_properties(i)["relpath"] ) click.secho(message, fg="red") all_okay = False if not all_okay: sys.exit(1) else: click.secho("All good :)", fg="green")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def uuid(dataset_uri): """Return the UUID of the dataset."""
# Load the dataset and print its UUID.
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
click.secho(dataset.uuid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_data(self): """Splits the list of SeqRecordExpanded objects into lists, which are kept into a bigger list. If the file_format is Nexus, then it is only partitioned by gene. If it is FASTA, then it needs partitioning by codon positions if required. Example: """
this_gene_code = None

# Start a new sub-list whenever the gene_code changes.
# NOTE(review): assumes seq_records arrive grouped by gene_code — confirm,
# otherwise one gene could be split across several blocks.
for seq_record in self.data.seq_records:
    if this_gene_code is None or this_gene_code != seq_record.gene_code:
        this_gene_code = seq_record.gene_code
        self._blocks.append([])
    # Append the record to the most recently opened block.
    list_length = len(self._blocks)
    self._blocks[list_length - 1].append(seq_record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_to_string(self, block): """Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code. Override this function if the dataset block needs to be different due to file format. This block will need to be split further if the dataset is FASTA or TNT and the partitioning scheme is 1st-2nd, 3rd. As the dataset is split into several blocks due to 1st-2nd, 3rd we cannot translate to aminoacids or degenerate the sequences. """
if self.partitioning != '1st-2nd, 3rd': return self.make_datablock_by_gene(block) else: if self.format == 'FASTA': return self.make_datablock_considering_codon_positions_as_fasta_format(block) else: return self.make_datablock_by_gene(block)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd): """Takes into account whether we need to output all codon positions."""
out = "" # We need 1st and 2nd positions if self.codon_positions in ['ALL', '1st-2nd']: for gene_code, seqs in block_1st2nd.items(): out += '>{0}_1st-2nd\n----\n'.format(gene_code) for seq in seqs: out += seq elif self.codon_positions == '1st': for gene_code, seqs in block_1st.items(): out += '>{0}_1st\n----\n'.format(gene_code) for seq in seqs: out += seq elif self.codon_positions == '2nd': for gene_code, seqs in block_2nd.items(): out += '>{0}_2nd\n----\n'.format(gene_code) for seq in seqs: out += seq # We also need 3rd positions if self.codon_positions in ['ALL', '3rd']: for gene_code, seqs in block_3rd.items(): out += '\n>{0}_3rd\n----\n'.format(gene_code) for seq in seqs: out += seq return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_charsets(self): """ Override this function for Phylip dataset as the content is different and goes into a separate file. """
count_start = 1
out = ''
# Genes are laid out consecutively; track a running 1-based position range
# (only the first recorded length per gene is used).
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
    count_end = lengths[0] + count_start - 1
    out += self.format_charset_line(gene_code, count_start, count_end)
    count_start = count_end + 1
return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_slash_number(self): """ Charset lines have \2 or \3 depending on type of partitioning and codon positions requested for our dataset. :return: """
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd': return '\\2' elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]: return '\\3' else: return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_suffixes_to_gene_codes(self): """Appends pos1, pos2, etc to the gene_code if needed."""
out = [] for gene_code in self.data.gene_codes: for sufix in self.make_gene_code_suffixes(): out.append('{0}{1}'.format(gene_code, sufix)) return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_outgroup(self): """Generates the outgroup line from the voucher code specified by the user. """
if self.outgroup is not None:
    outgroup_taxonomy = ''
    # Look up the taxonomy of the voucher chosen as outgroup; empty
    # taxonomy if the voucher code is not found among the records.
    for i in self.data.seq_records:
        if self.outgroup == i.voucher_code:
            outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'], i.taxonomy['species'])
            break
    outgroup = '\noutgroup {0}_{1};'.format(self.outgroup, outgroup_taxonomy)
else:
    outgroup = ''
return outgroup
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_string(input_str) -> 'MissionTime': # noinspection SpellCheckingInspection """ Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance """
match = RE_INPUT_STRING.match(input_str)

if not match:
    raise ValueError(f'badly formatted date/time: {input_str}')

# Named groups come straight from the YYYYMMDDHHMMSS pattern.
return MissionTime(
    datetime.datetime(
        int(match.group('year')),
        int(match.group('month')),
        int(match.group('day')),
        int(match.group('hour')),
        int(match.group('minute')),
        int(match.group('second')),
    )
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run(self, output): '''Generate the report to the given output. :param output: writable file-like object or file path ''' # Ensure folder exists. if self.folder_id not in self.folders.folders(self.user): print("E: folder not found: %s" % self.folder_name, file=sys.stderr) return # Create workbook. wb = self.workbook = xlsxwriter.Workbook(output) # Create the different styles used by this report generator. self.formats['title'] = wb.add_format({'font_size': '18', 'bold': True}) self.formats['default'] = wb.add_format({'align': 'top'}) self.formats['bold'] = wb.add_format({'bold': True}) self.formats['header'] = wb.add_format({ 'bold': True, 'align': 'center', 'valign': 'top', 'font_size': '14', 'font_color': '#506050', 'bg_color': '#f5f5f5', 'right': 1, 'border_color': 'white'}) self.formats['pre'] = wb.add_format({'font_name': 'Courier', 'valign': 'top'}) self.formats['link'] = wb.add_format({'valign': 'top', 'font_color': 'blue', 'underline': True}) self.formats['type_text'] = wb.add_format({ 'font_color': '#BF8645', 'valign': 'top', 'align': 'center'}) self.formats['type_image'] = wb.add_format({ 'font_color': '#84BF45', 'valign': 'top', 'align': 'center'}) # Generate report for a specific subfolder or *all* subfolders of # self.folder . if self.subfolder_id is None: self._generate_report_all() else: self._generate_report_single(self.subfolder_id) # done and outta here self.workbook.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _generate_report_all(self): '''Generate report for all subfolders contained by self.folder_id.''' assert self.workbook is not None count = 0 # Do all subfolders for sid in self.folders.subfolders(self.folder_id, self.user): count += 1 self._generate_for_subfolder(sid) if count == 0: print("I: empty workbook created: no subfolders found")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _generate_report_single(self, sid): '''Generate report for subfolder given by sid . The main purpose of this method is to make sure the subfolder given by sid does indeed exist. All real work is delegated to _generate_for_subfolder. :param sid: The subfolder id Private method. ''' assert self.workbook is not None assert sid is not None # Ensure subfolder exists if not sid in self.folders.subfolders(self.folder_id, self.user): subfolder = Folders.id_to_name(sid) print("E: subfolder not found: %s" % subfolder, file=sys.stderr) return self._generate_for_subfolder(sid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _generate_for_subfolder(self, sid): ''' Generate report for a subfolder. :param sid: The subfolder id; assumed valid ''' # TODO: the following assumes subfolder names can be constructed from a # subfolder id, which might not be the case in the future. name = self._sanitise_sheetname(uni(Folders.id_to_name(sid))) ws = self.workbook.add_worksheet(name) fmt = self.formats ws.write("A1", "Dossier report", fmt['title']) ws.write("A2", "%s | %s" % (uni(self.folder_name), name)) # Column dimensions ws.set_column('A:A', 37) ws.set_column('B:B', 37) ws.set_column('C:C', 37) ws.set_column('D:D', 8) ws.set_column('E:E', 30) ws.set_column('F:F', 37) # Header ws.write("A4", "Id", fmt['header']) ws.write("B4", "URL", fmt['header']) ws.write("C4", "Subtopic Id", fmt['header']) ws.write("D4", "Type", fmt['header']) ws.write("E4", "Content", fmt['header']) ws.write("F4", "Image URL", fmt['header']) # TODO: we probably want to wrap the following in a try-catch block, in # case the call to folders.subtopics fails. row = 4 for i in subtopics(self.store, self.folders, self.folder_id, sid, self.user): Item.construct(self, i).generate_to(ws, row) row += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def construct(generator, subtopic):
    '''Factory that builds the Item subclass matching a subtopic's type.

    The subtopic type, found in the tuple's 4th element, selects the
    concrete class (currently ItemText or ItemImage) from
    Item.constructors.

    :param generator: Reference to the owning ReportGenerator instance
    :param subtopic: Tuple containing content_id, meta_url, subtopic_id,
                     type and type-specific data.
    :returns: An instantiated Item-derived class.
    :raises LookupError: if no constructor is registered for the type
    '''
    # Renamed from `type` to avoid shadowing the builtin.
    subtopic_type = subtopic[3]
    if subtopic_type not in Item.constructors:
        raise LookupError(subtopic_type)  # perhaps customise this exception?
    return Item.constructors[subtopic_type](generator, subtopic)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def resize_image(self, data):
    '''Normalise an image to JPEG, shrinking it to at most 50px high.

    Given a BytesIO or StringIO data input, this method ensures that the
    image is not over 50 pixels high.  If it is, the image is resized to
    precisely 50 pixels in height with the width scaled to preserve the
    aspect ratio.  The image is always re-encoded as JPEG to minimise
    format issues while embedding the image in the Excel workbook.

    :param data: BytesIO or StringIO stream containing image data
    :returns: BytesIO instance containing the converted image data,
              rewound to offset 0.
    '''
    image = Image.open(data)
    stream_out = BytesIO()
    width, height = image.size[:]
    if height > 50:
        width = int(width * 50 / height)
        height = 50
        image = image.resize((width, 50))
    # BUG FIX: JPEG cannot encode alpha/palette modes (e.g. RGBA PNGs);
    # Image.save would raise for those, so force-convert to RGB first.
    if image.mode != "RGB":
        image = image.convert("RGB")
    image.save(stream_out, format="JPEG", quality=100)
    stream_out.seek(0)
    return stream_out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def desc(self): """ A textual description of this course """
if 'ects' in self: fmt = '%s (%s, S%d) [%s, %.2f ECTS]' fields = ('title', 'code', 'semester', 'status', 'ects') else: fmt = '%s' fields = ('title',) s = fmt % tuple([self[f] for f in fields]) if self['followed'] and self['session']: res = self['result'] if self.get('jury', 0) > 0: res = self['jury'] s += ' --> %.2f/20 (%s)' % (res, self['session']) return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _populate(self, soup): """ Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object. """
tables = soup.select('table[rules=all]') if not tables: return trs = tables[0].select('tr')[1:] if len(trs[0]) == 5: # M1 self._populate_small_table(trs) else: # M2 self._populate_large_table(trs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_host(self): """ Gets the host name or IP address. :return: the host name or IP address. """
host = self.get_as_nullable_string("host") host = host if host != None else self.get_as_nullable_string("ip") return host
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_icao(icao: str): """ Queries AWC for the METAR of a given station Args: icao: station ID as a four letters-digits ICAO code Returns: AWC result for the station """
params = { 'dataSource': 'metars', 'requestType': 'retrieve', 'format': 'csv', 'hoursBeforeNow': 24, } AWC._validate_icao(icao) params['stationString'] = icao try: return AWC._query(params) except RequestsConnectionError: raise AWCRequestFailed('failed to obtain requested data from AWC')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, asset_content, friendly_name, tags='', optimize=False): """ Create an asset on the server You must provide the asset with a friendly name for the server to generate a path from. """
return self._create_asset({ 'asset': b64encode(asset_content), 'friendly-name': friendly_name, 'tags': tags, 'optimize': optimize, 'type': 'base64' })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_at_path(self, asset_content, url_path, tags=''): """ Create asset at a specific URL path on the server """
return self._create_asset({ 'asset': b64encode(asset_content), 'url-path': url_path, 'tags': tags, 'type': 'base64' })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _matrix_integration(q, h, t): ''' Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties''' N = len(q) # correction if CDF has gone out of trapezium range if h[-1] < 0.9: h[-1] = 1.0 W = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) dp = (q - t).T.dot(W).dot(q - t) return dp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _matrix_grad(q, h, h_dx, t, t_prime): ''' Returns the gradient with respect to a single variable''' N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def evalMetric(self, x, method=None):
    '''Evaluate the horsetail matching metric at a design point.

    :param iterable x: values of the design variables, passed as the
        first argument to the function fqoi
    :param str method: evaluation method, 'empirical' or 'kernel'
        (defaults to self.method when None)

    :return: metric_value - value of the metric evaluated at x
    :rtype: float

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u1 = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> theHM.evalMetric([1, 2])
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()
    if self.verbose:
        print('----------')
        print('At design: ' + str(x))
    qoi_samples, gradient_samples = self.evalSamples(x)
    if self.verbose:
        print('Evaluating metric')
    return self.evalMetricFromSamples(qoi_samples, gradient_samples, method)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
    '''Evaluates the horsetail matching metric from pre-computed samples
    of the quantity of interest (and optionally its gradient) instead of
    evaluating them at a design.

    :param np.ndarray q_samples: samples of the quantity of interest,
        size (M_int, M_prob)
    :param np.ndarray grad_samples: samples of the gradient,
        size (M_int, M_prob, n_x)
    :param str method: 'empirical' or 'kernel'; defaults to self.method

    :return: metric_value - value of the metric
    :rtype: float
    :raises ValueError: if a sample array has the wrong shape or the
        method name is not recognised
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    q_samples = np.array(q_samples)
    if not (q_samples.shape[0] == self.samples_int and
            q_samples.shape[1] == self.samples_prob):
        raise ValueError('Shape of q_samples should be [M_int, M_prob]')

    if grad_samples is not None:
        grad_samples = np.array(grad_samples)
        if not (grad_samples.shape[0] == self.samples_int and
                grad_samples.shape[1] == self.samples_prob):
            # Fixed: message used to carry stray newline/indent bytes from
            # a triple-quoted literal.
            raise ValueError(
                'Shape of grad_samples should be [M_int, M_prob, n_dv]')

    if method is None:
        method = self.method

    if method.lower() == 'empirical':
        return self._evalMetricEmpirical(q_samples, grad_samples)
    elif method.lower() == 'kernel':
        return self._evalMetricKernel(q_samples, grad_samples)
    else:
        # Typo fix in the error message: "evalation" -> "evaluation".
        raise ValueError('Unsupported metric evaluation method')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getHorsetail(self):
    '''Return the vectors making up the horsetail plot at the last design
    evaluated.

    :return: upper_curve, lower_curve, CDFs - the first two are tuples of
        x/y/target vectors for the upper and lower bounds on the CDFs
        (the horsetail plot); the third is a list of x/y tuples, one per
        sampled value of the interval uncertainties.

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
        >>> matplotlib.pyplot(x1, y1, 'b')
        >>> matplotlib.pyplot(x2, y2, 'b')
        >>> for (x, y) in CDFs:
        ...     matplotlib.pyplot(x, y, 'k:')
        >>> matplotlib.pyplot.show()
    '''
    # Guard clause: the cached curve attributes only exist once evalMetric
    # has run at least once.
    if not hasattr(self, '_ql'):
        raise ValueError('''The metric has not been evaluated at any
            design point so the horsetail does not exist''')

    ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
    qh, hh = self._qh, self._hh
    if self._qis is not None:
        ql, hl = _appendPlotArrays(ql, hl, self._qis)
        qu, hu = _appendPlotArrays(qu, hu, self._qis)

    CDFs = [(qi, hi) for qi, hi in zip(qh, hh)]

    upper_curve = (qu, hu, [self._ftarg_u(h) for h in hu])
    lower_curve = (ql, hl, [self._ftarg_l(h) for h in hl])
    return upper_curve, lower_curve, CDFs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_queryset(self):
    '''Restrict the queryset to the current site when
    MultiTenantMiddleware has attached a site_id to the request.
    '''
    qs = super(PageList, self).get_queryset()
    if hasattr(self.request, 'site_id'):
        return qs.filter(site_id=self.request.site_id)
    return qs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]: """ Retrieves a TAF string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str """
url = _BASE_TAF_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain TAF for station {station_icao}\n' \ f'Got to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: """ Retrieves a METAR string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str """
url = _BASE_METAR_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain METAR for station {station_icao}\n' \ f'Got to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def value(self, units=None): """Return the pressure in the specified units."""
if units is None: return self._value if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") units = units.upper() if units == self._units: return self._value if self._units == "IN": mb_value = self._value * 33.86398 elif self._units == "MM": mb_value = self._value * 1.3332239 else: mb_value = self._value if units in ("MB", "HPA"): return mb_value if units == "IN": return mb_value / 33.86398 if units == "MM": return mb_value / 1.3332239 raise UnitsError("unrecognized pressure unit: '" + units + "'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def string(self, units: typing.Optional[str] = None) -> str: """Return a string representation of the pressure, using the given units."""
if not units: _units: str = self._units else: if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") _units = units.upper() val = self.value(units) if _units == "MB": return "%.0f mb" % val if _units == "HPA": return "%.0f hPa" % val if _units == "IN": return "%.2f inches" % val if _units == "MM": return "%.0f mmHg" % val raise ValueError(_units)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def determine_name(func): """ Given a function, returns the name of the function. Ex:: from random import choice determine_name(choice) # Returns 'choice' :param func: The callable :type func: function :returns: Name string """
if hasattr(func, '__name__'): return func.__name__ elif hasattr(func, '__class__'): return func.__class__.__name__ # This shouldn't be possible, but blow up if so. raise AttributeError("Provided callable '{}' has no name.".format( func ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_module(module_name): """ Given a dotted Python path, imports & returns the module. If not found, raises ``UnknownModuleError``. Ex:: mod = import_module('random') :param module_name: The dotted Python path :type module_name: string :returns: module """
try: return importlib.import_module(module_name) except ImportError as err: raise UnknownModuleError(str(err))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_attr(module_name, attr_name): """ Given a dotted Python path & an attribute name, imports the module & returns the attribute. If not found, raises ``UnknownCallableError``. Ex:: choice = import_attr('random', 'choice') :param module_name: The dotted Python path :type module_name: string :param attr_name: The attribute name :type attr_name: string :returns: attribute """
module = import_module(module_name) try: return getattr(module, attr_name) except AttributeError as err: raise UnknownCallableError(str(err))