text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_decorated_with_property_setter(node): """Check if the function is decorated as a property setter. :param node: The node to check. :type node: astroid.nodes.FunctionDef :returns: True if the function is a property setter, False otherwise. :rtype: bool """
if not node.decorators: return False for decorator in node.decorators.nodes: if ( isinstance(decorator, astroid.nodes.Attribute) and decorator.attrname == "setter" ): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_constructor(node): """Check if the function is a constructor. :param node: The node to check. :type node: astroid.nodes.FunctionDef :returns: True if the function is a contructor, False otherwise. :rtype: bool """
return ( node.parent and isinstance(node.parent.scope(), astroid.nodes.ClassDef) and node.name == "__init__" )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_exception(node): """Check if a class is an exception. :param node: The node to check. :type node: astroid.nodes.ClassDef :returns: True if the class is an exception, False otherwise. :rtype: bool """
if ( node.name in ("Exception", "BaseException") and node.root().name == _EXCEPTIONS_MODULE ): return True if not hasattr(node, "ancestors"): return False return any(is_exception(parent) for parent in node.ancestors(recurs=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_local_import_from(node, package_name): """Check if a node is an import from the local package. :param node: The node to check. :type node: astroid.node.NodeNG :param package_name: The name of the local package. :type package_name: str :returns: True if the node is an import from the local package, False otherwise. :rtype: bool """
if not isinstance(node, astroid.ImportFrom): return False return ( node.level or node.modname == package_name or node.modname.startswith(package_name + ".") )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_autoapi(app): """ Load AutoAPI data from the filesystem. """
if not app.config.autoapi_dirs: raise ExtensionError("You must configure an autoapi_dirs setting") # Make sure the paths are full normalized_dirs = [] autoapi_dirs = app.config.autoapi_dirs if isinstance(autoapi_dirs, str): autoapi_dirs = [autoapi_dirs] for path in autoapi_dirs: if os.path.isabs(path): normalized_dirs.append(path) else: normalized_dirs.append(os.path.normpath(os.path.join(app.confdir, path))) for _dir in normalized_dirs: if not os.path.exists(_dir): raise ExtensionError( "AutoAPI Directory `{dir}` not found. " "Please check your `autoapi_dirs` setting.".format(dir=_dir) ) normalized_root = os.path.normpath( os.path.join(app.confdir, app.config.autoapi_root) ) url_root = os.path.join("/", app.config.autoapi_root) sphinx_mapper = default_backend_mapping[app.config.autoapi_type] sphinx_mapper_obj = sphinx_mapper( app, template_dir=app.config.autoapi_template_dir, url_root=url_root ) app.env.autoapi_mapper = sphinx_mapper_obj if app.config.autoapi_file_patterns: file_patterns = app.config.autoapi_file_patterns else: file_patterns = default_file_mapping.get(app.config.autoapi_type, []) if app.config.autoapi_ignore: ignore_patterns = app.config.autoapi_ignore else: ignore_patterns = default_ignore_patterns.get(app.config.autoapi_type, []) if ".rst" in app.config.source_suffix: out_suffix = ".rst" elif ".txt" in app.config.source_suffix: out_suffix = ".txt" else: # Fallback to first suffix listed out_suffix = app.config.source_suffix[0] # Actual meat of the run. LOGGER.info(bold("[AutoAPI] ") + darkgreen("Loading Data")) sphinx_mapper_obj.load( patterns=file_patterns, dirs=normalized_dirs, ignore=ignore_patterns ) LOGGER.info(bold("[AutoAPI] ") + darkgreen("Mapping Data")) sphinx_mapper_obj.map(options=app.config.autoapi_options) if app.config.autoapi_generate_api_docs: LOGGER.info(bold("[AutoAPI] ") + darkgreen("Rendering Data")) sphinx_mapper_obj.output_rst(root=normalized_root, source_suffix=out_suffix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def doctree_read(app, doctree): """ Inject AutoAPI into the TOC Tree dynamically. """
if app.env.docname == "index": all_docs = set() insert = True nodes = doctree.traverse(toctree) toc_entry = "%s/index" % app.config.autoapi_root add_entry = ( nodes and app.config.autoapi_generate_api_docs and app.config.autoapi_add_toctree_entry ) if not add_entry: return # Capture all existing toctree entries for node in nodes: for entry in node["entries"]: all_docs.add(entry[1]) # Don't insert autoapi it's already present for doc in all_docs: if doc.find(app.config.autoapi_root) != -1: insert = False if insert and app.config.autoapi_add_toctree_entry: # Insert AutoAPI index nodes[-1]["entries"].append((None, u"%s/index" % app.config.autoapi_root)) nodes[-1]["includefiles"].append(u"%s/index" % app.config.autoapi_root) message_prefix = bold("[AutoAPI] ") message = darkgreen( "Adding AutoAPI TOCTree [{0}] to index.rst".format(toc_entry) ) LOGGER.info(message_prefix + message)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _expand_wildcard_placeholder(original_module, originals_map, placeholder): """Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict) """
originals = originals_map.values() if original_module["all"] is not None: originals = [] for name in original_module["all"]: if name == "__all__": continue if name not in originals_map: msg = "Invalid __all__ entry {0} in {1}".format( name, original_module["name"] ) LOGGER.warning(msg) continue originals.append(originals_map[name]) placeholders = [] for original in originals: new_full_name = placeholder["full_name"].replace("*", original["name"]) new_original_path = placeholder["original_path"].replace("*", original["name"]) if "original_path" in original: new_original_path = original["original_path"] new_placeholder = dict( placeholder, name=original["name"], full_name=new_full_name, original_path=new_original_path, ) placeholders.append(new_placeholder) return placeholders
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_module_placeholders(modules, module_name, visit_path, resolved): """Resolve all placeholder children under a module. :param modules: A mapping of module names to their data dictionary. Placeholders are resolved in place. :type modules: dict(str, dict) :param module_name: The name of the module to resolve. :type module_name: str :param visit_path: An ordered set of visited module names. :type visited: collections.OrderedDict :param resolved: A set of already resolved module names. :type resolved: set(str) """
if module_name in resolved: return visit_path[module_name] = True module, children = modules[module_name] for child in list(children.values()): if child["type"] != "placeholder": continue if child["original_path"] in modules: module["children"].remove(child) children.pop(child["name"]) continue imported_from, original_name = child["original_path"].rsplit(".", 1) if imported_from in visit_path: msg = "Cannot resolve cyclic import: {0}, {1}".format( ", ".join(visit_path), imported_from ) LOGGER.warning(msg) module["children"].remove(child) children.pop(child["name"]) continue if imported_from not in modules: msg = "Cannot resolve import of unknown module {0} in {1}".format( imported_from, module_name ) LOGGER.warning(msg) module["children"].remove(child) children.pop(child["name"]) continue _resolve_module_placeholders(modules, imported_from, visit_path, resolved) if original_name == "*": original_module, originals_map = modules[imported_from] # Replace the wildcard placeholder # with a list of named placeholders. new_placeholders = _expand_wildcard_placeholder( original_module, originals_map, child ) child_index = module["children"].index(child) module["children"][child_index : child_index + 1] = new_placeholders children.pop(child["name"]) for new_placeholder in new_placeholders: if new_placeholder["name"] not in children: children[new_placeholder["name"]] = new_placeholder original = originals_map[new_placeholder["name"]] _resolve_placeholder(new_placeholder, original) elif original_name not in modules[imported_from][1]: msg = "Cannot resolve import of {0} in {1}".format( child["original_path"], module_name ) LOGGER.warning(msg) module["children"].remove(child) children.pop(child["name"]) continue else: original = modules[imported_from][1][original_name] _resolve_placeholder(child, original) del visit_path[module_name] resolved.add(module_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_placeholder(placeholder, original): """Resolve a placeholder to the given original object. :param placeholder: The placeholder to resolve, in place. :type placeholder: dict :param original: The object that the placeholder represents. :type original: dict """
new = copy.deepcopy(original) # The name remains the same. new["name"] = placeholder["name"] new["full_name"] = placeholder["full_name"] # Record where the placeholder originally came from. new["original_path"] = original["full_name"] # The source lines for this placeholder do not exist in this file. # The keys might not exist if original is a resolved placeholder. new.pop("from_line_no", None) new.pop("to_line_no", None) # Resolve the children stack = list(new.get("children", ())) while stack: child = stack.pop() # Relocate the child to the new location assert child["full_name"].startswith(original["full_name"]) suffix = child["full_name"][len(original["full_name"]) :] child["full_name"] = new["full_name"] + suffix # The source lines for this placeholder do not exist in this file. # The keys might not exist if original is a resolved placeholder. child.pop("from_line_no", None) child.pop("to_line_no", None) # Resolve the remaining children stack.extend(child.get("children", ())) placeholder.clear() placeholder.update(new)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, patterns, dirs, ignore=None): """Load objects from the filesystem into the ``paths`` dictionary Also include an attribute on the object, ``relative_path`` which is the shortened, relative path the package/module """
for dir_ in dirs: dir_root = dir_ if os.path.exists(os.path.join(dir_, "__init__.py")): dir_root = os.path.abspath(os.path.join(dir_, os.pardir)) for path in self.find_files(patterns=patterns, dirs=[dir_], ignore=ignore): data = self.read_file(path=path) if data: data["relative_path"] = os.path.relpath(path, dir_root) self.paths[path] = data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_placeholders(self): """Resolve objects that have been imported from elsewhere."""
modules = {} for module in self.paths.values(): children = {child["name"]: child for child in module["children"]} modules[module["name"]] = (module, children) resolved = set() for module_name in modules: visit_path = collections.OrderedDict() _resolve_module_placeholders(modules, module_name, visit_path, resolved)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_class(self, data, options=None, **kwargs): """Create a class from the passed in data :param data: dictionary data of parser output """
obj_map = dict( (cls.type, cls) for cls in [ PythonClass, PythonFunction, PythonModule, PythonMethod, PythonPackage, PythonAttribute, PythonData, PythonException, ] ) try: cls = obj_map[data["type"]] except KeyError: LOGGER.warning("Unknown type: %s" % data["type"]) else: obj = cls( data, class_content=self.app.config.autoapi_python_class_content, options=self.app.config.autoapi_options, jinja_env=self.jinja_env, url_root=self.url_root, **kwargs ) lines = sphinx.util.docstrings.prepare_docstring(obj.docstring) if lines and "autodoc-process-docstring" in self.app.events.events: self.app.emit( "autodoc-process-docstring", cls.type, obj.name, None, # object None, # options lines, ) obj.docstring = "\n".join(lines) for child_data in data.get("children", []): for child_obj in self.create_class( child_data, options=options, **kwargs ): obj.children.append(child_obj) yield obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False): """ Create the node structure that Sphinx expects for TOC Tree entries. The ``bullet`` argument wraps it in a ``nodes.bullet_list``, which is how you nest TOC Tree entries. """
reference = nodes.reference( "", "", internal=True, refuri=docname, anchorname="#" + anchor, *[nodes.Text(text, text)] ) para = addnodes.compact_paragraph("", "", reference) ret_list = nodes.list_item("", para) return nodes.bullet_list("", ret_list) if bullet else ret_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _traverse_parent(node, objtypes): """ Traverse up the node's parents until you hit the ``objtypes`` referenced. Can either be a single type, or a tuple of types. """
curr_node = node.parent while curr_node is not None: if isinstance(curr_node, objtypes): return curr_node curr_node = curr_node.parent return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_toc_node(toc, ref_id, objtype): """ Find the actual TOC node for a ref_id. Depends on the object type: * Section - First section (refuri) or 2nd+ level section (anchorname) * Desc - Just use the anchor name """
for check_node in toc.traverse(nodes.reference): if objtype == nodes.section and ( check_node.attributes["refuri"] == ref_id or check_node.attributes["anchorname"] == "#" + ref_id ): return check_node if ( objtype == addnodes.desc and check_node.attributes["anchorname"] == "#" + ref_id ): return check_node return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_toc_reference(app, node, toc, docname): """ Logic that understands maps a specific node to it's part of the toctree. It takes a specific incoming ``node``, and returns the actual TOC Tree node that is said reference. """
if isinstance(node, nodes.section) and isinstance(node.parent, nodes.document): # Top Level Section header ref_id = docname toc_reference = _find_toc_node(toc, ref_id, nodes.section) elif isinstance(node, nodes.section): # Nested Section header ref_id = node.attributes["ids"][0] toc_reference = _find_toc_node(toc, ref_id, nodes.section) else: # Desc node try: ref_id = node.children[0].attributes["ids"][0] toc_reference = _find_toc_node(toc, ref_id, addnodes.desc) except (KeyError, IndexError) as e: LOGGER.warning("Invalid desc node: %s" % e) toc_reference = None return toc_reference
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_domain_to_toctree(app, doctree, docname): """ Add domain objects to the toctree dynamically. This should be attached to the ``doctree-resolved`` event. This works by: * Finding each domain node (addnodes.desc) * Figuring out it's parent that will be in the toctree (nodes.section, or a previously added addnodes.desc) * Finding that parent in the TOC Tree based on it's ID * Taking that element in the TOC Tree, and finding it's parent that is a TOC Listing (nodes.bullet_list) * Adding the new TOC element for our specific node as a child of that nodes.bullet_list * This checks that bullet_list's last child, and checks that it is also a nodes.bullet_list, effectively nesting it under that element """
toc = app.env.tocs[docname] for desc_node in doctree.traverse(addnodes.desc): try: ref_id = desc_node.children[0].attributes["ids"][0] except (KeyError, IndexError) as e: LOGGER.warning("Invalid desc node: %s" % e) continue try: # Python domain object ref_text = desc_node[0].attributes["fullname"].split(".")[-1].split("(")[0] except (KeyError, IndexError): # TODO[eric]: Support other Domains and ways of accessing this data # Use `astext` for other types of domain objects ref_text = desc_node[0].astext().split(".")[-1].split("(")[0] # This is the actual object that will exist in the TOC Tree # Sections by default, and other Desc nodes that we've previously placed. parent_node = _traverse_parent( node=desc_node, objtypes=(addnodes.desc, nodes.section) ) if parent_node: toc_reference = _get_toc_reference(app, parent_node, toc, docname) if toc_reference: # Get the last child of our parent's bullet list, this is where "we" live. toc_insertion_point = _traverse_parent( toc_reference, nodes.bullet_list )[-1] # Ensure we're added another bullet list so that we nest inside the parent, # not next to it if toc_insertion_point and isinstance( toc_insertion_point[0], nodes.bullet_list ): new_insert = toc_insertion_point[0] to_add = _build_toc_node(docname, anchor=ref_id, text=ref_text) new_insert.append(to_add) else: to_add = _build_toc_node( docname, anchor=ref_id, text=ref_text, bullet=True ) toc_insertion_point.append(to_add)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def warn(self, msg): """Add a warning message. :param msg: The warning message to add. :type msg: str """
self.warnings.append( self.state.document.reporter.warning(msg, line=self.lineno) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_names(self): """Get the names of the objects to include in the table. :returns: The names of the objects to include. :rtype: generator(str) """
for line in self.content: line = line.strip() if line and re.search("^[a-zA-Z0-9]", line): yield line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def humanize_duration(duration): """ Returns a humanized string representing time difference For example: 2 days 1 hour 25 minutes 10 seconds """
days = duration.days hours = int(duration.seconds / 3600) minutes = int(duration.seconds % 3600 / 60) seconds = int(duration.seconds % 3600 % 60) parts = [] if days > 0: parts.append(u'%s %s' % (days, pluralize(days, _('day,days')))) if hours > 0: parts.append(u'%s %s' % (hours, pluralize(hours, _('hour,hours')))) if minutes > 0: parts.append(u'%s %s' % (minutes, pluralize(minutes, _('minute,minutes')))) if seconds > 0: parts.append(u'%s %s' % (seconds, pluralize(seconds, _('second,seconds')))) return ', '.join(parts) if len(parts) != 0 else _('< 1 second')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_cron_with_cache_check(cron_class, force=False, silent=False): """ Checks the cache and runs the cron or not. @cron_class - cron class to run. @force - run job even if not scheduled @silent - suppress notifications """
with CronJobManager(cron_class, silent) as manager: manager.run(force)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_old_log_entries(): """ Removes older log entries, if the appropriate setting has been set """
if hasattr(settings, 'DJANGO_CRON_DELETE_LOGS_OLDER_THAN'): delta = timedelta(days=settings.DJANGO_CRON_DELETE_LOGS_OLDER_THAN) CronJobLog.objects.filter(end_time__lt=get_current_time() - delta).delete()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_run_now(self, force=False): from django_cron.models import CronJobLog cron_job = self.cron_job """ Returns a boolean determining whether this cron should run now or not! """
self.user_time = None self.previously_ran_successful_cron = None # If we pass --force options, we force cron run if force: return True if cron_job.schedule.run_every_mins is not None: # We check last job - success or not last_job = None try: last_job = CronJobLog.objects.filter(code=cron_job.code).latest('start_time') except CronJobLog.DoesNotExist: pass if last_job: if not last_job.is_success and cron_job.schedule.retry_after_failure_mins: if get_current_time() > last_job.start_time + timedelta(minutes=cron_job.schedule.retry_after_failure_mins): return True else: return False try: self.previously_ran_successful_cron = CronJobLog.objects.filter( code=cron_job.code, is_success=True, ran_at_time__isnull=True ).latest('start_time') except CronJobLog.DoesNotExist: pass if self.previously_ran_successful_cron: if get_current_time() > self.previously_ran_successful_cron.start_time + timedelta(minutes=cron_job.schedule.run_every_mins): return True else: return True if cron_job.schedule.run_at_times: for time_data in cron_job.schedule.run_at_times: user_time = time.strptime(time_data, "%H:%M") now = get_current_time() actual_time = time.strptime("%s:%s" % (now.hour, now.minute), "%H:%M") if actual_time >= user_time: qset = CronJobLog.objects.filter( code=cron_job.code, ran_at_time=time_data, is_success=True ).filter( Q(start_time__gt=now) | Q(end_time__gte=now.replace(hour=0, minute=0, second=0, microsecond=0)) ) if not qset: self.user_time = time_data return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lock(self): """ This method sets a cache variable to mark current job as "already running". """
if self.cache.get(self.lock_name): return False else: self.cache.set(self.lock_name, timezone.now(), self.timeout) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_invitation_model(): """ Returns the Invitation model that is active in this project. """
path = app_settings.INVITATION_MODEL try: return django_apps.get_model(path) except ValueError: raise ImproperlyConfigured( "path must be of the form 'app_label.model_name'" ) except LookupError: raise ImproperlyConfigured( "path refers to model '%s' that\ has not been installed" % app_settings.INVITATION_MODEL )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def list(self, **params) -> Mapping: """ List of images """
response = await self.docker._query_json("images/json", "GET", params=params) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def inspect(self, name: str) -> Mapping: """ Return low-level information about an image Args: name: name of the image """
response = await self.docker._query_json("images/{name}/json".format(name=name)) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def pull( self, from_image: str, *, auth: Optional[Union[MutableMapping, str, bytes]] = None, tag: str = None, repo: str = None, stream: bool = False ) -> Mapping: """ Similar to `docker pull`, pull an image locally Args: fromImage: name of the image to pull repo: repository name given to an image when it is imported tag: if empty when pulling an image all tags for the given image to be pulled auth: special {'auth': base64} pull private repo """
image = from_image # TODO: clean up params = {"fromImage": image} headers = {} if repo: params["repo"] = repo if tag: params["tag"] = tag if auth is not None: registry, has_registry_host, _ = image.partition("/") if not has_registry_host: raise ValueError( "Image should have registry host " "when auth information is provided" ) # TODO: assert registry == repo? headers["X-Registry-Auth"] = compose_auth_header(auth, registry) response = await self.docker._query( "images/create", "POST", params=params, headers=headers ) return await json_stream_result(response, stream=stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def tag(self, name: str, repo: str, *, tag: str = None) -> bool: """ Tag the given image so that it becomes part of a repository. Args: repo: the repository to tag in tag: the name for the new tag """
params = {"repo": repo} if tag: params["tag"] = tag await self.docker._query( "images/{name}/tag".format(name=name), "POST", params=params, headers={"content-type": "application/json"}, ) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delete( self, name: str, *, force: bool = False, noprune: bool = False ) -> List: """ Remove an image along with any untagged parent images that were referenced by that image Args: name: name/id of the image to delete force: remove the image even if it is being used by stopped containers or has other tags noprune: don't delete untagged parent images Returns: List of deleted images """
params = {"force": force, "noprune": noprune} response = await self.docker._query_json( "images/{name}".format(name=name), "DELETE", params=params ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def build( self, *, remote: str = None, fileobj: BinaryIO = None, path_dockerfile: str = None, tag: str = None, quiet: bool = False, nocache: bool = False, buildargs: Mapping = None, pull: bool = False, rm: bool = True, forcerm: bool = False, labels: Mapping = None, stream: bool = False, encoding: str = None ) -> Mapping: """ Build an image given a remote Dockerfile or a file object with a Dockerfile inside Args: path_dockerfile: path within the build context to the Dockerfile remote: a Git repository URI or HTTP/HTTPS context URI quiet: suppress verbose build output nocache: do not use the cache when building the image rm: remove intermediate containers after a successful build pull: downloads any updates to the FROM image in Dockerfiles encoding: set `Content-Encoding` for the file object your send forcerm: always remove intermediate containers, even upon failure labels: arbitrary key/value labels to set on the image fileobj: a tar archive compressed or not """
local_context = None headers = {} params = { "t": tag, "rm": rm, "q": quiet, "pull": pull, "remote": remote, "nocache": nocache, "forcerm": forcerm, "dockerfile": path_dockerfile, } if remote is None and fileobj is None: raise ValueError("You need to specify either remote or fileobj") if fileobj and remote: raise ValueError("You cannot specify both fileobj and remote") if fileobj and not encoding: raise ValueError("You need to specify an encoding") if remote is None and fileobj is None: raise ValueError("Either remote or fileobj needs to be provided.") if fileobj: local_context = fileobj.read() headers["content-type"] = "application/x-tar" if fileobj and encoding: headers["Content-Encoding"] = encoding if buildargs: params.update({"buildargs": json.dumps(buildargs)}) if labels: params.update({"labels": json.dumps(labels)}) response = await self.docker._query( "build", "POST", params=clean_map(params), headers=headers, data=local_context, ) return await json_stream_result(response, stream=stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def export_image(self, name: str): """ Get a tarball of an image by name or id. Args: name: name/id of the image to be exported Returns: Streamreader of tarball image """
response = await self.docker._query( "images/{name}/get".format(name=name), "GET" ) return response.content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def import_image(self, data, stream: bool = False): """ Import tarball of image to docker. Args: data: tarball data of image to be imported Returns: Tarball of the image """
headers = {"Content-Type": "application/x-tar"} response = await self.docker._query_chunked_post( "images/load", "POST", data=data, headers=headers ) return await json_stream_result(response, stream=stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def parse_result(response, response_type=None, *, encoding="utf-8"): """ Convert the response to native objects by the given response type or the auto-detected HTTP content-type. It also ensures release of the response object. """
if response_type is None: ct = response.headers.get("content-type") if ct is None: cl = response.headers.get("content-length") if cl is None or cl == "0": return "" raise TypeError( "Cannot auto-detect response type " "due to missing Content-Type header." ) main_type, sub_type, extras = parse_content_type(ct) if sub_type == "json": response_type = "json" elif sub_type == "x-tar": response_type = "tar" elif (main_type, sub_type) == ("text", "plain"): response_type = "text" encoding = extras.get("charset", encoding) else: raise TypeError("Unrecognized response type: {ct}".format(ct=ct)) if "tar" == response_type: what = await response.read() return tarfile.open(mode="r", fileobj=BytesIO(what)) if "json" == response_type: data = await response.json(encoding=encoding) elif "text" == response_type: data = await response.text(encoding=encoding) else: data = await response.read() return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_map(obj: Mapping[Any, Any]) -> Mapping[Any, Any]: """ Return a new copied dictionary without the keys with ``None`` values from the given Mapping object. """
return {k: v for k, v in obj.items() if v is not None}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_networks(networks: Iterable[str] = None) -> Optional[Iterable[str]]: """ Cleans the values inside `networks` Returns a new list """
if not networks: return networks if not isinstance(networks, list): raise TypeError("networks parameter must be a list.") result = [] for n in networks: if isinstance(n, str): n = {"Target": n} result.append(n) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def inspect(self, task_id: str) -> Mapping[str, Any]: """ Return info about a task Args: task_id: is ID of the task """
response = await self.docker._query_json( "tasks/{task_id}".format(task_id=task_id), method="GET" ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def run(self, config, *, name=None): """ Create and start a container. If container.start() will raise an error the exception will contain a `container_id` attribute with the id of the container. """
try: container = await self.create(config, name=name) except DockerError as err: # image not find, try pull it if err.status == 404 and "Image" in config: await self.docker.pull(config["Image"]) container = await self.create(config, name=name) else: raise err try: await container.start() except DockerError as err: raise DockerContainerError( err.status, {"message": err.message}, container["id"] ) return container
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subscribe(self, *, create_task=True, **params): """Subscribes to the Docker events channel. Use the keyword argument create_task=False to prevent automatically spawning the background tasks that listen to the events. This function returns a ChannelSubscriber object. """
if create_task and not self.task: self.task = asyncio.ensure_future(self.run(**params)) return self.channel.subscribe()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def run(self, **params): """ Query the events endpoint of the Docker daemon. Publish messages inside the asyncio queue. """
if self.json_stream: warnings.warn("already running", RuntimeWarning, stackelevel=2) return forced_params = {"stream": True} params = ChainMap(forced_params, params) try: # timeout has to be set to 0, None is not passed # Otherwise after 5 minutes the client # will close the connection # http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession.request response = await self.docker._query( "events", method="GET", params=params, timeout=0 ) self.json_stream = await json_stream_result( response, self._transform_event, human_bool(params["stream"]) ) try: async for data in self.json_stream: await self.channel.publish(data) finally: if self.json_stream is not None: await self.json_stream._close() self.json_stream = None finally: # signal termination to subscribers await self.channel.publish(None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def inspect(self, *, node_id: str) -> Mapping[str, Any]: """ Inspect a node Args: node_id: The ID or name of the node """
response = await self.docker._query_json( "nodes/{node_id}".format(node_id=node_id), method="GET" ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def update( self, *, node_id: str, version: int, spec: Mapping[str, Any] ) -> Mapping[str, Any]: """ Update the spec of a node. Args: node_id: The ID or name of the node version: version number of the node being updated spec: fields to be updated """
params = {"version": version} if "Role" in spec: assert spec["Role"] in {"worker", "manager"} if "Availability" in spec: assert spec["Availability"] in {"active", "pause", "drain"} response = await self.docker._query_json( "nodes/{node_id}/update".format(node_id=node_id), method="POST", params=params, data=spec, ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def remove(self, *, node_id: str, force: bool = False) -> Mapping[str, Any]: """ Remove a node from a swarm. Args: node_id: The ID or name of the node """
params = {"force": force} response = await self.docker._query_json( "nodes/{node_id}".format(node_id=node_id), method="DELETE", params=params ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def _query( self, path, method="GET", *, params=None, data=None, headers=None, timeout=None, chunked=None ): """ Get the response object by performing the HTTP request. The caller is responsible to finalize the response object. """
url = self._canonicalize_url(path) if headers and "content-type" not in headers: headers["content-type"] = "application/json" try: response = await self.session.request( method, url, params=httpize(params), headers=headers, data=data, timeout=timeout, chunked=chunked, ) except asyncio.TimeoutError: raise if (response.status // 100) in [4, 5]: what = await response.read() content_type = response.headers.get("content-type", "") response.close() if content_type == "application/json": raise DockerError(response.status, json.loads(what.decode("utf8"))) else: raise DockerError(response.status, {"message": what.decode("utf8")}) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def _query_chunked_post( self, path, method="POST", *, params=None, data=None, headers=None, timeout=None ): """ A shorthand for uploading data by chunks """
if headers is None: headers = {} if headers and "content-type" not in headers: headers["content-type"] = "application/octet-stream" response = await self._query( path, method, params=params, data=data, headers=headers, timeout=timeout, chunked=True, ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def init( self, *, advertise_addr: str = None, listen_addr: str = "0.0.0.0:2377", force_new_cluster: bool = False, swarm_spec: Mapping = None ) -> str: """ Initialize a new swarm. Args: ListenAddr: listen address used for inter-manager communication AdvertiseAddr: address advertised to other nodes. ForceNewCluster: Force creation of a new swarm. SwarmSpec: User modifiable swarm configuration. Returns: id of the swarm node """
data = { "AdvertiseAddr": advertise_addr, "ListenAddr": listen_addr, "ForceNewCluster": force_new_cluster, "Spec": swarm_spec, } response = await self.docker._query_json("swarm/init", method="POST", data=data) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def join( self, *, remote_addrs: Iterable[str], listen_addr: str = "0.0.0.0:2377", join_token: str, advertise_addr: str = None, data_path_addr: str = None ) -> bool: """ Join a swarm. Args: listen_addr Used for inter-manager communication advertise_addr Externally reachable address advertised to other nodes. data_path_addr Address or interface to use for data path traffic. remote_addrs Addresses of manager nodes already participating in the swarm. join_token Secret token for joining this swarm. """
data = { "RemoteAddrs": list(remote_addrs), "JoinToken": join_token, "ListenAddr": listen_addr, "AdvertiseAddr": advertise_addr, "DataPathAddr": data_path_addr, } await self.docker._query("swarm/join", method="POST", data=clean_map(data)) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def list(self, *, filters: Mapping = None) -> List[Mapping]: """ Return a list of services Args: filters: a dict with a list of filters Available filters: id=<service id> label=<service label> mode=["replicated"|"global"] name=<service name> """
params = {"filters": clean_filters(filters)} response = await self.docker._query_json( "services", method="GET", params=params ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def update( self, service_id: str, version: str, *, image: str = None, rollback: bool = False ) -> bool: """ Update a service. If rollback is True image will be ignored. Args: service_id: ID or name of the service. version: Version of the service that you want to update. rollback: Rollback the service to the previous service spec. Returns: True if successful. """
if image is None and rollback is False: raise ValueError("You need to specify an image.") inspect_service = await self.inspect(service_id) spec = inspect_service["Spec"] if image is not None: spec["TaskTemplate"]["ContainerSpec"]["Image"] = image params = {"version": version} if rollback is True: params["rollback"] = "previous" data = json.dumps(clean_map(spec)) await self.docker._query_json( "services/{service_id}/update".format(service_id=service_id), method="POST", data=data, params=params, ) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delete(self, service_id: str) -> bool: """ Remove a service Args: service_id: ID or name of the service Returns: True if successful """
await self.docker._query( "services/{service_id}".format(service_id=service_id), method="DELETE" ) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def inspect(self, service_id: str) -> Mapping[str, Any]: """ Inspect a service Args: service_id: ID or name of the service Returns: a dict with info about a service """
response = await self.docker._query_json( "services/{service_id}".format(service_id=service_id), method="GET" ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def logs( self, service_id: str, *, details: bool = False, follow: bool = False, stdout: bool = False, stderr: bool = False, since: int = 0, timestamps: bool = False, is_tty: bool = False, tail: str = "all" ) -> Union[str, AsyncIterator[str]]: """ Retrieve logs of the given service Args: details: show service context and extra details provided to logs follow: return the logs as a stream. stdout: return logs from stdout stderr: return logs from stderr since: return logs since this time, as a UNIX timestamp timestamps: add timestamps to every log line is_tty: the service has a pseudo-TTY allocated tail: only return this number of log lines from the end of the logs, specify as an integer or `all` to output all log lines. """
if stdout is False and stderr is False: raise TypeError("Need one of stdout or stderr") params = { "details": details, "follow": follow, "stdout": stdout, "stderr": stderr, "since": since, "timestamps": timestamps, "tail": tail, } response = await self.docker._query( "services/{service_id}/logs".format(service_id=service_id), method="GET", params=params, ) return await multiplexed_result(response, follow, is_tty=is_tty)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_many_vectors(self, hash_name, bucket_keys, vs, data): """ Store a batch of vectors. Stores vector and JSON-serializable data in bucket with specified key. """
if data is None: data = itertools.repeat(data) for v, k, d in zip(vs, bucket_keys, data): self.store_vector(hash_name, k, v, d)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collect_all_bucket_keys(self): """ Just collects all buckets keys from subtree """
if len(self.childs) == 0: # This is a leaf so just return the bucket key (we reached the bucket leaf) #print 'Returning (collect) leaf bucket key %s with %d vectors' % (self.bucket_key, self.vector_count) return [self.bucket_key] # Not leaf, return results of childs result = [] for child in self.childs.values(): result = result + child.collect_all_bucket_keys() return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bucket_keys_to_guarantee_result_set_size(self, bucket_key, N, tree_depth): """ Returns list of bucket keys based on the specified bucket key and minimum result size N. """
if tree_depth == len(bucket_key): #print 'Returning leaf bucket key %s with %d vectors' % (self.bucket_key, self.vector_count) # This is a leaf so just return the bucket key (we reached the bucket leaf) return [self.bucket_key] # If not leaf, this is a subtree node. hash_char = bucket_key[tree_depth] if hash_char == '0': other_hash_char = '1' else: other_hash_char = '0' # Check if child has enough results if hash_char in self.childs: if self.childs[hash_char].vector_count < N: # If not combine buckets of both child subtrees listA = self.childs[hash_char].collect_all_bucket_keys() listB = self.childs[other_hash_char].collect_all_bucket_keys() return listA + listB else: # Child subtree has enough results, so call method on child return self.childs[hash_char].bucket_keys_to_guarantee_result_set_size(bucket_key, N, tree_depth+1) else: # That subtree is not existing, so just follow the other side return self.childs[other_hash_char].bucket_keys_to_guarantee_result_set_size(bucket_key, N, tree_depth+1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_vector(self, hash_name, bucket_key, v, data): """ Stores vector and JSON-serializable data in MongoDB with specified key. """
mongo_key = self._format_mongo_key(hash_name, bucket_key) val_dict = {} val_dict['lsh'] = mongo_key # Depending on type (sparse or not) fill value dict if scipy.sparse.issparse(v): # Make sure that we are using COO format (easy to handle) if not scipy.sparse.isspmatrix_coo(v): v = scipy.sparse.coo_matrix(v) # Construct list of [index, value] items, # one for each non-zero element of the sparse vector encoded_values = [] for k in range(v.data.size): row_index = v.row[k] value = v.data[k] encoded_values.append([int(row_index), value]) val_dict['sparse'] = 1 val_dict['nonzeros'] = encoded_values val_dict['dim'] = v.shape[0] else: # Make sure it is a 1d vector v = numpy.reshape(v, v.shape[0]) val_dict['vector'] = v.tostring() val_dict['dtype'] = v.dtype.name # Add data if set if data is not None: val_dict['data'] = data # Push JSON representation of dict to end of bucket list self.mongo_object.insert_one(val_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def numpy_array_from_list_or_numpy_array(vectors): """ Returns numpy array representation of argument. Argument maybe numpy array (input is returned) or a list of numpy vectors. """
# If vectors is not a numpy matrix, create one if not isinstance(vectors, numpy.ndarray): V = numpy.zeros((vectors[0].shape[0], len(vectors))) for index in range(len(vectors)): vector = vectors[index] V[:, index] = vector return V return vectors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unitvec(vec): """ Scale a vector to unit length. The only exception is the zero vector, which is returned back unchanged. """
if scipy.sparse.issparse(vec): # convert scipy.sparse to standard numpy array vec = vec.tocsr() veclen = numpy.sqrt(numpy.sum(vec.data ** 2)) if veclen > 0.0: return vec / veclen else: return vec if isinstance(vec, numpy.ndarray): vec = numpy.asarray(vec, dtype=float) veclen = numpy.linalg.norm(vec) if veclen > 0.0: return vec / veclen else: return vec
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform_pca(A): """ Computes eigenvalues and eigenvectors of covariance matrix of A. The rows of a correspond to observations, the columns to variables. """
# First subtract the mean M = (A-numpy.mean(A.T, axis=1)).T # Get eigenvectors and values of covariance matrix return numpy.linalg.eig(numpy.cov(M))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def permute(self, ba): """ Permute the bitarray ba inplace. """
c = ba.copy() for i in xrange(len(self.mapping)): ba[i] = c[self.mapping[i]] return ba
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_vector(self, v, data=None): """ Hashes vector v and stores it in all matching buckets in the storage. The data argument must be JSON-serializable. It is stored with the vector and will be returned in search results. """
# We will store the normalized vector (used during retrieval) nv = unitvec(v) # Store vector in each bucket of all hashes for lshash in self.lshashes: for bucket_key in lshash.hash_vector(v): #print 'Storying in bucket %s one vector' % bucket_key self.storage.store_vector(lshash.hash_name, bucket_key, nv, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_many_vectors(self, vs, data=None): """ Store a batch of vectors. Hashes vector vs and stores them in all matching buckets in the storage. The data argument must be either None or a list of JSON-serializable object. It is stored with the vector and will be returned in search results. """
# We will store the normalized vector (used during retrieval) nvs = [unitvec(i) for i in vs] # Store vector in each bucket of all hashes for lshash in self.lshashes: bucket_keys = [lshash.hash_vector(i)[0] for i in vs] self.storage.store_many_vectors(lshash.hash_name, bucket_keys, nvs, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_candidates(self, v): """ Collect candidates from all buckets from all hashes """
candidates = [] for lshash in self.lshashes: for bucket_key in lshash.hash_vector(v, querying=True): bucket_content = self.storage.get_bucket( lshash.hash_name, bucket_key, ) #print 'Bucket %s size %d' % (bucket_key, len(bucket_content)) candidates.extend(bucket_content) return candidates
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _apply_filter(self, filters, candidates): """ Apply vector filters if specified and return filtered list """
if filters: filter_input = candidates for fetch_vector_filter in filters: filter_input = fetch_vector_filter.filter_vectors(filter_input) return filter_input else: return candidates
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _append_distances(self, v, distance, candidates): """ Apply distance implementation if specified """
if distance: # Normalize vector (stored vectors are normalized) nv = unitvec(v) candidates = [(x[0], x[1], self.distance.distance(x[0], nv)) for x in candidates] return candidates
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform_experiment(self, engine_list): """ Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """
# We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for endine_idx, engine in enumerate(engine_list): print('Engine %d / %d' % (endine_idx, len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average recall avg_recall = 0.0 # Use this to compute average precision avg_precision = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index, v in enumerate(self.vectors): engine.store_vector(v, 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # Get indices of the real nearest as set real_nearest = set(self.closest[index]) # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[index]) # Get search time search_time = time.time() - search_time_start # For comparance we need their indices (as set) nearest = set([self.__index_of_vector(x[0]) for x in nearest]) # Remove query index from search result to make sure that # recall and precision make sense in terms of "neighbours". # If ONLY the query vector is retrieved, we want recall to be # zero! nearest.remove(index) # If the result list is empty, recall and precision are 0.0 if len(nearest) == 0: recall = 0.0 precision = 0.0 else: # Get intersection count inter_count = float(len(real_nearest & nearest)) # Normalize recall for this vector recall = inter_count/float(len(real_nearest)) # Normalize precision for this vector precision = inter_count/float(len(nearest)) # Add to accumulator avg_recall += recall # Add to accumulator avg_precision += precision # Add to accumulator avg_search_time += search_time # Normalize recall over query set avg_recall /= float(len(self.query_indices)) # Normalize precision over query set avg_precision /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' recall=%f, precision=%f, time=%f' % (avg_recall, avg_precision, avg_search_time)) result.append((avg_recall, avg_precision, avg_search_time)) # Return (recall, precision, search_time) tuple return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __vector_to_string(self, vector): """ Returns string representation of vector. """
return numpy.array_str(numpy.round(unitvec(vector), decimals=3))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distance(self, x, y): """ Computes the Manhattan distance between vectors x and y. Returns float. """
if scipy.sparse.issparse(x): return numpy.sum(numpy.absolute((x-y).toarray().ravel())) else: return numpy.sum(numpy.absolute(x-y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_many_vectors(self, hash_name, bucket_keys, vs, data): """ Store a batch of vectors in Redis. Stores vector and JSON-serializable data in bucket with specified key. """
with self.redis_object.pipeline() as pipeline: if data is None: data = [None] * len(vs) for bucket_key, data, v in zip(bucket_keys, data, vs): self._add_vector(hash_name, bucket_key, v, data, pipeline) pipeline.execute()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _add_vector(self, hash_name, bucket_key, v, data, redis_object): ''' Store vector and JSON-serializable data in bucket with specified key. ''' redis_key = self._format_redis_key(hash_name, bucket_key) val_dict = {} # Depending on type (sparse or not) fill value dict if scipy.sparse.issparse(v): # Make sure that we are using COO format (easy to handle) if not scipy.sparse.isspmatrix_coo(v): v = scipy.sparse.coo_matrix(v) # Construct list of [index, value] items, # one for each non-zero element of the sparse vector encoded_values = [] for k in range(v.data.size): row_index = v.row[k] value = v.data[k] encoded_values.append([int(row_index), value]) val_dict['sparse'] = 1 val_dict['nonzeros'] = encoded_values val_dict['dim'] = v.shape[0] else: # Make sure it is a 1d vector v = numpy.reshape(v, v.shape[0]) val_dict['vector'] = v.tostring() val_dict['dtype'] = v.dtype.name # Add data if set if data is not None: val_dict['data'] = data # Push JSON representation of dict to end of bucket list self.redis_object.rpush(redis_key, pickle.dumps(val_dict, protocol=2))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_buckets(self, hash_name): """ Removes all buckets and their content for specified hash. """
bucket_keys = self._iter_bucket_keys(hash_name) self.redis_object.delete(*bucket_keys)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_all_buckets(self): """ Removes all buckets from all hashes and their content. """
bucket_keys = self.redis_object.keys(pattern='nearpy_*') if len(bucket_keys) > 0: self.redis_object.delete(*bucket_keys)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_child_hash(self, child_hash): """ Adds specified child hash. The hash must be one of the binary types. """
# Hash must generate binary keys if not (isinstance(child_hash,PCABinaryProjections) or isinstance(child_hash,RandomBinaryProjections) or isinstance(child_hash,RandomBinaryProjectionTree)): raise ValueError('Child hashes must generate binary keys') # Add both hash and config to array of child hashes. Also we are going to # accumulate used bucket keys for every hash in order to build the permuted index self.child_hashes.append(child_hash)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform_experiment(self, engine_list): """ Performs nearest neighbour experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (distance_ratio, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """
# We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for engine in engine_list: print('Engine %d / %d' % (engine_list.index(engine), len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average distance_ratio avg_distance_ratio = 0.0 # Use this to compute average result set size avg_result_size = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index in range(self.vectors.shape[1]): engine.store_vector(self.vectors[:, index], 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[:, index]) # Get search time search_time = time.time() - search_time_start # Get average distance ratio (with respect to radius # of real N closest neighbours) distance_ratio = 0.0 for n in nearest: # If the vector is outside the real neighbour radius if n[2] > self.nearest_radius[index]: # Compute distance to real neighbour radius d = (n[2] - self.nearest_radius[index]) # And normalize it. 1.0 means: distance to # real neighbour radius is identical to radius d /= self.nearest_radius[index] # If all neighbours are in the radius, the # distance ratio is 0.0 distance_ratio += d # Normalize distance ratio over all neighbours distance_ratio /= len(nearest) # Add to accumulator avg_distance_ratio += distance_ratio # Add to accumulator avg_result_size += len(nearest) # Add to accumulator avg_search_time += search_time # Normalize distance ratio over query set avg_distance_ratio /= float(len(self.query_indices)) # Normalize avg result size avg_result_size /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' distance_ratio=%f, result_size=%f, time=%f' % (avg_distance_ratio, avg_result_size, avg_search_time)) result.append((avg_distance_ratio, avg_result_size, avg_search_time)) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_neighbour_keys(self, hash_name, bucket_key): """ Return the neighbour buckets given hash_name and query bucket key. """
# get the permutedIndex given hash_name permutedIndex = self.permutedIndexs[hash_name] # return neighbour bucket keys of query bucket key return permutedIndex.get_neighbour_keys( bucket_key, permutedIndex.num_neighbour)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_rc_file(editor, rc_file): """ Run rc file. """
assert isinstance(editor, Editor) assert isinstance(rc_file, six.string_types) # Expand tildes. rc_file = os.path.expanduser(rc_file) # Check whether this file exists. if not os.path.exists(rc_file): print('Impossible to read %r' % rc_file) _press_enter_to_continue() return # Run the rc file in an empty namespace. try: namespace = {} with open(rc_file, 'r') as f: code = compile(f.read(), rc_file, 'exec') six.exec_(code, namespace, namespace) # Now we should have a 'configure' method in this namespace. We call this # method with editor as an argument. if 'configure' in namespace: namespace['configure'](editor) except Exception as e: # Handle possible exceptions in rc file. traceback.print_exc() _press_enter_to_continue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _try_char(character, backup, encoding=sys.stdout.encoding): """ Return `character` if it can be encoded using sys.stdout, else return the backup character. """
if character.encode(encoding, 'replace') == b'?': return backup else: return character
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self): """ Update layout to match the layout as described in the WindowArrangement. """
# Start with an empty frames list everytime, to avoid memory leaks. existing_frames = self._frames self._frames = {} def create_layout_from_node(node): if isinstance(node, window_arrangement.Window): # Create frame for Window, or reuse it, if we had one already. key = (node, node.editor_buffer) frame = existing_frames.get(key) if frame is None: frame, pt_window = self._create_window_frame(node.editor_buffer) # Link layout Window to arrangement. node.pt_window = pt_window self._frames[key] = frame return frame elif isinstance(node, window_arrangement.VSplit): return VSplit( [create_layout_from_node(n) for n in node], padding=1, padding_char=self.get_vertical_border_char(), padding_style='class:frameborder') if isinstance(node, window_arrangement.HSplit): return HSplit([create_layout_from_node(n) for n in node]) layout = create_layout_from_node(self.window_arrangement.active_tab.root) self._fc.content = layout
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_window_frame(self, editor_buffer): """ Create a Window for the buffer, with underneat a status bar. """
@Condition def wrap_lines(): return self.editor.wrap_lines window = Window( self._create_buffer_control(editor_buffer), allow_scroll_beyond_bottom=True, scroll_offsets=ScrollOffsets( left=0, right=0, top=(lambda: self.editor.scroll_offset), bottom=(lambda: self.editor.scroll_offset)), wrap_lines=wrap_lines, left_margins=[ConditionalMargin( margin=NumberedMargin( display_tildes=True, relative=Condition(lambda: self.editor.relative_number)), filter=Condition(lambda: self.editor.show_line_numbers))], cursorline=Condition(lambda: self.editor.cursorline), cursorcolumn=Condition(lambda: self.editor.cursorcolumn), colorcolumns=( lambda: [ColorColumn(pos) for pos in self.editor.colorcolumn]), ignore_content_width=True, ignore_content_height=True, get_line_prefix=partial(self._get_line_prefix, editor_buffer.buffer)) return HSplit([ window, VSplit([ WindowStatusBar(self.editor, editor_buffer), WindowStatusBarRuler(self.editor, window, editor_buffer.buffer), ], width=Dimension()), # Ignore actual status bar width. ]), window
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_buffer_control(self, editor_buffer): """ Create a new BufferControl for a given location. """
@Condition def preview_search(): return self.editor.incsearch input_processors = [ # Processor for visualising spaces. (should come before the # selection processor, otherwise, we won't see these spaces # selected.) ConditionalProcessor( ShowTrailingWhiteSpaceProcessor(), Condition(lambda: self.editor.display_unprintable_characters)), # Replace tabs by spaces. TabsProcessor( tabstop=(lambda: self.editor.tabstop), char1=(lambda: '|' if self.editor.display_unprintable_characters else ' '), char2=(lambda: _try_char('\u2508', '.', get_app().output.encoding()) if self.editor.display_unprintable_characters else ' '), ), # Reporting of errors, for Pyflakes. ReportingProcessor(editor_buffer), HighlightSelectionProcessor(), ConditionalProcessor( HighlightSearchProcessor(), Condition(lambda: self.editor.highlight_search)), ConditionalProcessor( HighlightIncrementalSearchProcessor(), Condition(lambda: self.editor.highlight_search) & preview_search), HighlightMatchingBracketProcessor(), DisplayMultipleCursors(), ] return BufferControl( lexer=DocumentLexer(editor_buffer), include_default_input_processors=False, input_processors=input_processors, buffer=editor_buffer.buffer, preview_search=preview_search, search_buffer_control=self.search_control, focus_on_click=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_command(editor, input_string): """ Handle commands entered on the Vi command line. """
# Match with grammar and extract variables. m = COMMAND_GRAMMAR.match(input_string) if m is None: return variables = m.variables() command = variables.get('command') go_to_line = variables.get('go_to_line') shell_command = variables.get('shell_command') # Call command handler. if go_to_line is not None: # Handle go-to-line. _go_to_line(editor, go_to_line) elif shell_command is not None: # Handle shell commands. editor.application.run_system_command(shell_command) elif has_command_handler(command): # Handle other 'normal' commands. call_command_handler(command, editor, variables) else: # For unknown commands, show error message. editor.show_message('Not an editor command: %s' % input_string) return # After execution of commands, make sure to update the layout and focus # stack. editor.sync_with_prompt_toolkit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _go_to_line(editor, line): """ Move cursor to this line in the current buffer. """
b = editor.application.current_buffer b.cursor_position = b.document.translate_row_col_to_index(max(0, int(line) - 1), 0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def report_pyflakes(document): """ Run pyflakes on document and return list of ReporterError instances. """
# Run pyflakes on input. reporter = _FlakesReporter() pyflakes.api.check(document.text, '', reporter=reporter) def format_flake_message(message): return [ ('class:flakemessage.prefix', 'pyflakes:'), ('', ' '), ('class:flakemessage', message.message % message.message_args) ] def message_to_reporter_error(message): """ Turn pyflakes message into ReporterError. """ start_index = document.translate_row_col_to_index(message.lineno - 1, message.col) end_index = start_index while end_index < len(document.text) and document.text[end_index] in WORD_CHARACTERS: end_index += 1 return ReporterError(lineno=message.lineno - 1, start_column=message.col, end_column=message.col + end_index - start_index, formatted_text=format_flake_message(message)) # Construct list of ReporterError instances. return [message_to_reporter_error(m) for m in reporter.messages]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_initial_files(self, locations, in_tab_pages=False, hsplit=False, vsplit=False): """ Load a list of files. """
assert in_tab_pages + hsplit + vsplit <= 1 # Max one of these options. # When no files were given, open at least one empty buffer. locations2 = locations or [None] # First file self.window_arrangement.open_buffer(locations2[0]) for f in locations2[1:]: if in_tab_pages: self.window_arrangement.create_tab(f) elif hsplit: self.window_arrangement.hsplit(location=f) elif vsplit: self.window_arrangement.vsplit(location=f) else: self.window_arrangement.open_buffer(f) self.window_arrangement.active_tab_index = 0 if locations and len(locations) > 1: self.show_message('%i files loaded.' % len(locations))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_application(self): """ Create CommandLineInterface instance. """
# Create Application. application = Application( input=self.input, output=self.output, editing_mode=EditingMode.VI, layout=self.editor_layout.layout, key_bindings=self.key_bindings, # get_title=lambda: get_terminal_title(self), style=DynamicStyle(lambda: self.current_style), paste_mode=Condition(lambda: self.paste_mode), # ignore_case=Condition(lambda: self.ignore_case), # TODO include_default_pygments_style=False, mouse_support=Condition(lambda: self.enable_mouse_support), full_screen=True, enable_page_navigation_bindings=True) # Handle command line previews. # (e.g. when typing ':colorscheme blue', it should already show the # preview before pressing enter.) def preview(_): if self.application.layout.has_focus(self.command_buffer): self.previewer.preview(self.command_buffer.text) self.command_buffer.on_text_changed += preview return application
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def current_editor_buffer(self): """ Return the `EditorBuffer` that is currently active. """
current_buffer = self.application.current_buffer # Find/return the EditorBuffer with this name. for b in self.window_arrangement.editor_buffers: if b.buffer == current_buffer: return b
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync_with_prompt_toolkit(self): """ Update the prompt-toolkit Layout and FocusStack. """
# After executing a command, make sure that the layout of # prompt-toolkit matches our WindowArrangement. self.editor_layout.update() # Make sure that the focus stack of prompt-toolkit has the current # page. window = self.window_arrangement.active_pt_window if window: self.application.layout.focus(window)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Run the event loop for the interface. This starts the interaction. """
# Make sure everything is in sync, before starting. self.sync_with_prompt_toolkit() def pre_run(): # Start in navigation mode. self.application.vi_state.input_mode = InputMode.NAVIGATION # Run eventloop of prompt_toolkit. self.application.run(pre_run=pre_run)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enter_command_mode(self): """ Go into command mode. """
self.application.layout.focus(self.command_buffer) self.application.vi_state.input_mode = InputMode.INSERT self.previewer.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def leave_command_mode(self, append_to_history=False): """ Leave command mode. Focus document window again. """
self.previewer.restore() self.application.layout.focus_last() self.application.vi_state.input_mode = InputMode.NAVIGATION self.command_buffer.reset(append_to_history=append_to_history)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_editor_style_by_name(name): """ Get Style class. This raises `pygments.util.ClassNotFound` when there is no style with this name. """
if name == 'vim': vim_style = Style.from_dict(default_vim_style) else: vim_style = style_from_pygments_cls(get_style_by_name(name)) return merge_styles([ vim_style, Style.from_dict(style_extensions), ])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lex_document(self, document): """ Call the lexer and return a get_tokens_for_line function. """
location = self.editor_buffer.location if location: if self.editor_buffer.in_file_explorer_mode: return PygmentsLexer(DirectoryListingLexer, sync_from_start=False).lex_document(document) return PygmentsLexer.from_filename(location, sync_from_start=False).lex_document(document) return SimpleLexer().lex_document(document)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self, location): """ Read file from disk. """
location = os.path.expanduser(location) # Try to open this file, using different encodings. for e in ENCODINGS: try: with codecs.open(location, 'r', e) as f: return f.read(), e except UnicodeDecodeError: pass # Try next codec. # Unable to open. raise Exception('Unable to open file: %r' % location)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reload(self): """ Reload file again from storage. """
text = self._read(self.location) cursor_position = min(self.buffer.cursor_position, len(text)) self.buffer.document = Document(text, cursor_position) self._file_content = text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_display_name(self, short=False): """ Return name as displayed. """
if self.location is None: return '[New file]' elif short: return os.path.basename(self.location) else: return self.location
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_reporter(self): " Buffer text changed. " if not self._reporter_is_running: self._reporter_is_running = True text = self.buffer.text self.report_errors = [] # Don't run reporter when we don't have a location. (We need to # know the filetype, actually.) if self.location is None: return # Better not to access the document in an executor. document = self.buffer.document def in_executor(): # Call reporter report_errors = report(self.location, document) def ready(): self._reporter_is_running = False # If the text has not been changed yet in the meantime, set # reporter errors. (We were running in another thread.) if text == self.buffer.text: self.report_errors = report_errors get_app().invalidate() else: # Restart reporter when the text was changed. self.run_reporter() call_from_executor(ready) run_in_executor(in_executor)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Back up current editor state. """
e = self.editor self._style = e.current_style self._show_line_numbers = e.show_line_numbers self._highlight_search = e.highlight_search self._show_ruler = e.show_ruler self._relative_number = e.relative_number self._cursorcolumn = e.cursorcolumn self._cursorline = e.cursorline self._colorcolumn = e.colorcolumn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _apply(self, input_string): """ Apply command. """
e = self.editor # Parse command. m = COMMAND_GRAMMAR.match(input_string) if m is None: return variables = m.variables() command = variables.get('command') set_option = variables.get('set_option') # Preview colorschemes. if command == 'colorscheme': colorscheme = variables.get('colorscheme') if colorscheme: e.use_colorscheme(colorscheme) # Preview some set commands. if command == 'set': if set_option in ('hlsearch', 'hls'): e.highlight_search = True elif set_option in ('nohlsearch', 'nohls'): e.highlight_search = False elif set_option in ('nu', 'number'): e.show_line_numbers = True elif set_option in ('nonu', 'nonumber'): e.show_line_numbers = False elif set_option in ('ruler', 'ru'): e.show_ruler = True elif set_option in ('noruler', 'noru'): e.show_ruler = False elif set_option in ('relativenumber', 'rnu'): e.relative_number = True elif set_option in ('norelativenumber', 'nornu'): e.relative_number = False elif set_option in ('cursorline', 'cul'): e.cursorline = True elif set_option in ('cursorcolumn', 'cuc'): e.cursorcolumn = True elif set_option in ('nocursorline', 'nocul'): e.cursorline = False elif set_option in ('nocursorcolumn', 'nocuc'): e.cursorcolumn = False elif set_option in ('colorcolumn', 'cc'): value = variables.get('set_value', '') if value: e.colorcolumn = [ int(v) for v in value.split(',') if v.isdigit()]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cmd(name, accepts_force=False): """ Decarator that registers a command that doesn't take any parameters. """
def decorator(func): @_cmd(name) def command_wrapper(editor, variables): force = bool(variables['force']) if force and not accepts_force: editor.show_message('No ! allowed') elif accepts_force: func(editor, force=force) else: func(editor) return func return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bn(editor, force=False): """ Go to next buffer. """
eb = editor.window_arrangement.active_editor_buffer if not force and eb.has_unsaved_changes: editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT) else: editor.window_arrangement.go_to_next_buffer()