text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bp(editor, force=False): """ Go to previous buffer. """
eb = editor.window_arrangement.active_editor_buffer if not force and eb.has_unsaved_changes: editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT) else: editor.window_arrangement.go_to_previous_buffer()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buffer_list(editor): """ List all buffers. """
def handler(): wa = editor.window_arrangement for info in wa.list_open_buffers(): char = '%' if info.is_active else '' eb = info.editor_buffer print(' %3i %-2s %-20s line %i' % ( info.index, char, eb.location, (eb.buffer.document.cursor_position_row + 1))) six.moves.input('\nPress ENTER to continue...') run_in_terminal(handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buffer_wipe(editor, force=False): """ Wipe buffer. """
eb = editor.window_arrangement.active_editor_buffer if not force and eb.has_unsaved_changes: editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT) else: editor.window_arrangement.close_buffer()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buffer_edit(editor, location, force=False): """ Edit new buffer. """
if location is None: # Edit/open without a location will reload the current file, if there are # no changes. eb = editor.window_arrangement.active_editor_buffer if eb.location is None: editor.show_message(_NO_FILE_NAME) elif not force and eb.has_unsaved_changes: editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT) else: eb.reload() else: editor.file_explorer = '' editor.window_arrangement.open_buffer(location, show_in_current_window=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def quit_all(editor, force=False): """ Quit all. """
quit(editor, all_=True, force=force)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(editor, location, force=False): """ Write file. """
if location and not force and os.path.exists(location): editor.show_message('File exists (add ! to overriwe)') else: eb = editor.window_arrangement.active_editor_buffer if location is None and eb.location is None: editor.show_message(_NO_FILE_NAME) else: eb.write(location)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_and_quit(editor, location, force=False): """ Write file and quit. """
write(editor, location, force=force) editor.application.exit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_and_quit_all(editor): """ Write current buffer and quit all. """
eb = editor.window_arrangement.active_editor_buffer if eb.location is None: editor.show_message(_NO_FILE_NAME) else: eb.write() quit(editor, all_=True, force=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tab_stop(editor, value): """ Set tabstop. """
if value is None: editor.show_message('tabstop=%i' % editor.tabstop) else: try: value = int(value) if value > 0: editor.tabstop = value else: editor.show_message('Argument must be positive') except ValueError: editor.show_message('Number required after =')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_scroll_offset(editor, value): """ Set scroll offset. """
if value is None: editor.show_message('scrolloff=%i' % editor.scroll_offset) else: try: value = int(value) if value >= 0: editor.scroll_offset = value else: editor.show_message('Argument must be positive') except ValueError: editor.show_message('Number required after =')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def whitespace_before_cursor_on_line(): """ Filter which evaluates to True when the characters before the cursor are whitespace, or we are at the start of te line. """
b = get_app().current_buffer before_cursor = b.document.current_line_before_cursor return bool(not before_cursor or before_cursor[-1].isspace())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_editor_buffer(self, editor_buffer): """ Open this `EditorBuffer` in the active window. """
assert isinstance(editor_buffer, EditorBuffer) self.active_window.editor_buffer = editor_buffer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close_editor_buffer(self, editor_buffer): """ Close all the windows that have this editor buffer open. """
for split, window in self._walk_through_windows(): if window.editor_buffer == editor_buffer: self._close_window(window)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _close_window(self, window): """ Close this window. """
if window == self.active_window: self.close_active_window() else: original_active_window = self.active_window self.close_active_window() self.active_window = original_active_window
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close_active_window(self): """ Close active window. """
active_split = self._get_active_split() # First remove the active window from its split. index = active_split.index(self.active_window) del active_split[index] # Move focus. if len(active_split): new_active_window = active_split[max(0, index - 1)] while isinstance(new_active_window, (HSplit, VSplit)): new_active_window = new_active_window[0] self.active_window = new_active_window else: self.active_window = None # No windows left. # When there is exactly on item left, move this back into the parent # split. (We don't want to keep a split with one item around -- exept # for the root.) if len(active_split) == 1 and active_split != self.root: parent = self._get_split_parent(active_split) index = parent.index(active_split) parent[index] = active_split[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cycle_focus(self): """ Cycle through all windows. """
windows = self.windows() new_index = (windows.index(self.active_window) + 1) % len(windows) self.active_window = windows[new_index]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_unsaved_changes(self): """ True when any of the visible buffers in this tab has unsaved changes. """
for w in self.windows(): if w.editor_buffer.has_unsaved_changes: return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def active_editor_buffer(self): """ The active EditorBuffer or None. """
if self.active_tab and self.active_tab.active_window: return self.active_tab.active_window.editor_buffer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def active_pt_window(self): " The active prompt_toolkit layout Window. " if self.active_tab: w = self.active_tab.active_window if w: return w.pt_window
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_editor_buffer_for_location(self, location): """ Return the `EditorBuffer` for this location. When this file was not yet loaded, return None """
for eb in self.editor_buffers: if eb.location == location: return eb
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_editor_buffer_for_buffer_name(self, buffer_name): """ Return the `EditorBuffer` for this buffer_name. When not found, return None """
for eb in self.editor_buffers: if eb.buffer_name == buffer_name: return eb
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close_tab(self): """ Close active tab. """
if len(self.tab_pages) > 1: # Cannot close last tab. del self.tab_pages[self.active_tab_index] self.active_tab_index = max(0, self.active_tab_index - 1) # Clean up buffers. self._auto_close_new_empty_buffers()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hsplit(self, location=None, new=False, text=None): """ Split horizontally. """
assert location is None or text is None or new is False # Don't pass two of them. if location or text or new: editor_buffer = self._get_or_create_editor_buffer(location=location, text=text) else: editor_buffer = None self.active_tab.hsplit(editor_buffer)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keep_only_current_window(self): """ Close all other windows, except the current one. """
self.tab_pages = [TabPage(self.active_tab.active_window)] self.active_tab_index = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def go_to_next_buffer(self, _previous=False): """ Open next buffer in active window. """
if self.active_editor_buffer: # Find the active opened buffer. index = self.editor_buffers.index(self.active_editor_buffer) # Get index of new buffer. if _previous: new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers) else: new_index = (index + 1) % len(self.editor_buffers) # Open new buffer in active tab. self.active_tab.show_editor_buffer(self.editor_buffers[new_index]) # Clean up buffers. self._auto_close_new_empty_buffers()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def go_to_previous_tab(self): """ Focus the previous tab. """
self.active_tab_index = (self.active_tab_index - 1 + len(self.tab_pages)) % len(self.tab_pages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_editor_buffer(self, editor_buffer, show_in_current_window=False): """ Insert this new buffer in the list of buffers, right after the active one. """
assert isinstance(editor_buffer, EditorBuffer) and editor_buffer not in self.editor_buffers # Add to list of EditorBuffers eb = self.active_editor_buffer if eb is None: self.editor_buffers.append(editor_buffer) else: # Append right after the currently active one. try: index = self.editor_buffers.index(self.active_editor_buffer) except ValueError: index = 0 self.editor_buffers.insert(index, editor_buffer) # When there are no tabs/windows yet, create one for this buffer. if self.tab_pages == []: self.tab_pages.append(TabPage(Window(editor_buffer))) self.active_tab_index = 0 # To be shown? if show_in_current_window and self.active_tab: self.active_tab.show_editor_buffer(editor_buffer) # Start reporter. editor_buffer.run_reporter()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_or_create_editor_buffer(self, location=None, text=None): """ Given a location, return the `EditorBuffer` instance that we have if the file is already open, or create a new one. When location is None, this creates a new buffer. """
assert location is None or text is None # Don't pass two of them. assert location is None or isinstance(location, string_types) if location is None: # Create and add an empty EditorBuffer eb = EditorBuffer(self.editor, text=text) self._add_editor_buffer(eb) return eb else: # When a location is given, first look whether the file was already # opened. eb = self.get_editor_buffer_for_location(location) # Not found? Create one. if eb is None: # Create and add EditorBuffer eb = EditorBuffer(self.editor, location) self._add_editor_buffer(eb) return eb else: # Found! Return it. return eb
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close_buffer(self): """ Close current buffer. When there are other windows showing the same buffer, they are closed as well. When no windows are left, the previous buffer or an empty buffer is shown. """
eb = self.active_editor_buffer # Remove this buffer. index = self.editor_buffers.index(eb) self.editor_buffers.remove(eb) # Close the active window. self.active_tab.close_active_window() # Close all the windows that still have this buffer open. for i, t in enumerate(self.tab_pages[:]): t.close_editor_buffer(eb) # Remove tab when there are no windows left. if t.window_count() == 0: self.tab_pages.remove(t) if i >= self.active_tab_index: self.active_tab_index = max(0, self.active_tab_index - 1) # When there are no windows/tabs left, create a new tab. if len(self.tab_pages) == 0: self.active_tab_index = None if len(self.editor_buffers) > 0: # Open the previous buffer. new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers) eb = self.editor_buffers[new_index] # Create a window for this buffer. self.tab_pages.append(TabPage(Window(eb))) self.active_tab_index = 0 else: # Create a new buffer. (This will also create the window # automatically.) eb = self._get_or_create_editor_buffer()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_tab(self, location=None): """ Create a new tab page. """
eb = self._get_or_create_editor_buffer(location) self.tab_pages.insert(self.active_tab_index + 1, TabPage(Window(eb))) self.active_tab_index += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_open_buffers(self): """ Return a `OpenBufferInfo` list that gives information about the open buffers. """
active_eb = self.active_editor_buffer visible_ebs = self.active_tab.visible_editor_buffers() def make_info(i, eb): return OpenBufferInfo( index=i, editor_buffer=eb, is_active=(eb == active_eb), is_visible=(eb in visible_ebs)) return [make_info(i, eb) for i, eb in enumerate(self.editor_buffers)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_code_directive(): """Register code directive."""
if not SPHINX_INSTALLED: docutils.parsers.rst.directives.register_directive('code', CodeBlockDirective) docutils.parsers.rst.directives.register_directive('code-block', CodeBlockDirective) docutils.parsers.rst.directives.register_directive('sourcecode', CodeBlockDirective)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_ignored_languages(source): """Yield ignored languages. Languages are ignored via comment. For example, to ignore C++, JSON, and Python: ['cpp', 'json', 'python'] """
for (index, line) in enumerate(source.splitlines()): match = RSTCHECK_COMMENT_RE.match(line) if match: key_and_value = line[match.end():].strip().split('=') if len(key_and_value) != 2: raise Error('Expected "key=value" syntax', line_number=index + 1) if key_and_value[0] == 'ignore-language': for language in key_and_value[1].split(','): yield language.strip()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_file(parameters): """Return list of errors."""
(filename, args) = parameters if filename == '-': contents = sys.stdin.read() else: with contextlib.closing( docutils.io.FileInput(source_path=filename) ) as input_file: contents = input_file.read() args = load_configuration_from_file( os.path.dirname(os.path.realpath(filename)), args) ignore_directives_and_roles(args.ignore_directives, args.ignore_roles) for substitution in args.ignore_substitutions: contents = contents.replace('|{}|'.format(substitution), 'None') ignore = { 'languages': args.ignore_language, 'messages': args.ignore_messages, } all_errors = [] for error in check(contents, filename=filename, report_level=args.report, ignore=ignore, debug=args.debug): all_errors.append(error) return (filename, all_errors)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_rst(code, ignore): """Yield errors in nested RST code."""
filename = '<string>' for result in check(code, filename=filename, ignore=ignore): yield result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_directives_and_roles_from_sphinx(): """Return a tuple of Sphinx directive and roles."""
if SPHINX_INSTALLED: sphinx_directives = list(sphinx.domains.std.StandardDomain.directives) sphinx_roles = list(sphinx.domains.std.StandardDomain.roles) for domain in [sphinx.domains.c.CDomain, sphinx.domains.cpp.CPPDomain, sphinx.domains.javascript.JavaScriptDomain, sphinx.domains.python.PythonDomain]: sphinx_directives += list(domain.directives) + [ '{}:{}'.format(domain.name, item) for item in list(domain.directives)] sphinx_roles += list(domain.roles) + [ '{}:{}'.format(domain.name, item) for item in list(domain.roles)] else: sphinx_roles = [ 'abbr', 'command', 'dfn', 'doc', 'download', 'envvar', 'file', 'guilabel', 'kbd', 'keyword', 'mailheader', 'makevar', 'manpage', 'menuselection', 'mimetype', 'newsgroup', 'option', 'program', 'py:func', 'ref', 'regexp', 'samp', 'term', 'token'] sphinx_directives = [ 'autosummary', 'currentmodule', 'centered', 'c:function', 'c:type', 'include', 'deprecated', 'envvar', 'glossary', 'index', 'no-code-block', 'literalinclude', 'hlist', 'option', 'productionlist', 'py:function', 'seealso', 'toctree', 'todo', 'versionadded', 'versionchanged'] return (sphinx_directives, sphinx_roles)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ignore_sphinx(): """Register Sphinx directives and roles to ignore."""
(directives, roles) = _get_directives_and_roles_from_sphinx() directives += [ 'centered', 'include', 'deprecated', 'index', 'no-code-block', 'literalinclude', 'hlist', 'seealso', 'toctree', 'todo', 'versionadded', 'versionchanged'] ext_autosummary = [ 'autosummary', 'currentmodule', ] ignore_directives_and_roles(directives + ext_autosummary, roles + ['ctype'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_config(directory_or_file, debug=False): """Return configuration filename. If `directory_or_file` is a file, return the real-path of that file. If it is a directory, find the configuration (any file name in CONFIG_FILES) in that directory or its ancestors. """
directory_or_file = os.path.realpath(directory_or_file) if os.path.isfile(directory_or_file): if debug: print('using config file {}'.format(directory_or_file), file=sys.stderr) return directory_or_file directory = directory_or_file while directory: for filename in CONFIG_FILES: candidate = os.path.join(directory, filename) if os.path.exists(candidate): if debug: print('using config file {}'.format(candidate), file=sys.stderr) return candidate parent_directory = os.path.dirname(directory) if parent_directory == directory: break else: directory = parent_directory
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_configuration_from_file(directory, args): """Return new ``args`` with configuration loaded from file."""
args = copy.copy(args) directory_or_file = directory if args.config is not None: directory_or_file = args.config options = _get_options(directory_or_file, debug=args.debug) args.report = options.get('report', args.report) threshold_dictionary = docutils.frontend.OptionParser.thresholds args.report = int(threshold_dictionary.get(args.report, args.report)) args.ignore_language = get_and_split( options, 'ignore_language', args.ignore_language) args.ignore_messages = options.get( 'ignore_messages', args.ignore_messages) args.ignore_directives = get_and_split( options, 'ignore_directives', args.ignore_directives) args.ignore_substitutions = get_and_split( options, 'ignore_substitutions', args.ignore_substitutions) args.ignore_roles = get_and_split( options, 'ignore_roles', args.ignore_roles) return args
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_gcc_style_error_message(message, filename, has_column=True): """Parse GCC-style error message. Return (line_number, message). Raise ValueError if message cannot be parsed. """
colons = 2 if has_column else 1 prefix = filename + ':' if not message.startswith(prefix): raise ValueError() message = message[len(prefix):] split_message = message.split(':', colons) line_number = int(split_message[0]) return (line_number, split_message[colons].strip())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_in_subprocess(code, filename_suffix, arguments, working_directory): """Return None on success."""
temporary_file = tempfile.NamedTemporaryFile(mode='wb', suffix=filename_suffix) temporary_file.write(code.encode('utf-8')) temporary_file.flush() process = subprocess.Popen(arguments + [temporary_file.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory) def run(): """Yield errors.""" raw_result = process.communicate() if process.returncode != 0: return (raw_result[1].decode(get_encoding()), temporary_file.name) return run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def beginning_of_code_block(node, line_number, full_contents, is_code_node): """Return line number of beginning of code block."""
if SPHINX_INSTALLED and not is_code_node: delta = len(node.non_default_attributes()) current_line_contents = full_contents.splitlines()[line_number:] blank_lines = next( (i for (i, x) in enumerate(current_line_contents) if x), 0) return ( line_number + delta - 1 + blank_lines - 1 + SPHINX_CODE_BLOCK_DELTA) else: lines = full_contents.splitlines() code_block_length = len(node.rawsource.splitlines()) try: # Case where there are no extra spaces. if lines[line_number - 1].strip(): return line_number - code_block_length + 1 except IndexError: pass # The offsets are wrong if the RST text has multiple blank lines after # the code block. This is a workaround. for line_number in range(line_number, 1, -1): if lines[line_number - 2].strip(): break return line_number - code_block_length
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_args(): """Return parsed command-line arguments."""
threshold_choices = docutils.frontend.OptionParser.threshold_choices parser = argparse.ArgumentParser( description=__doc__ + (' Sphinx is enabled.' if SPHINX_INSTALLED else ''), prog='rstcheck') parser.add_argument('files', nargs='+', type=decode_filename, help='files to check') parser.add_argument('--config', metavar='CONFIG', default=None, help='location of config file') parser.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories') parser.add_argument('--report', metavar='level', choices=threshold_choices, default='info', help='report system messages at or higher than ' 'level; ' + ', '.join(choice for choice in threshold_choices if not choice.isdigit()) + ' (default: %(default)s)') parser.add_argument('--ignore-language', '--ignore', metavar='language', default='', help='comma-separated list of languages to ignore') parser.add_argument('--ignore-messages', metavar='messages', default='', help='python regex that match the messages to ignore') parser.add_argument('--ignore-directives', metavar='directives', default='', help='comma-separated list of directives to ignore') parser.add_argument('--ignore-substitutions', metavar='substitutions', default='', help='comma-separated list of substitutions to ignore') parser.add_argument('--ignore-roles', metavar='roles', default='', help='comma-separated list of roles to ignore') parser.add_argument('--debug', action='store_true', help='show messages helpful for debugging') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) args = parser.parse_args() if '-' in args.files: if len(args.files) > 1: parser.error("'-' for standard in can only be checked alone") else: args.files = list(find_files(filenames=args.files, recursive=args.recursive)) return args
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def output_message(text, file=sys.stderr): """Output message to terminal."""
if file.encoding is None: # If the output file does not support Unicode, encode it to a byte # string. On some machines, this occurs when Python is redirecting to # file (or piping to something like Vim). text = text.encode('utf-8') print(text, file=file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_sphinx_if_possible(): """Register Sphinx directives and roles."""
if SPHINX_INSTALLED: srcdir = tempfile.mkdtemp() outdir = os.path.join(srcdir, '_build') try: sphinx.application.Sphinx(srcdir=srcdir, confdir=None, outdir=outdir, doctreedir=outdir, buildername='dummy', status=None) yield finally: shutil.rmtree(srcdir) else: yield
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """Return 0 on success."""
args = parse_args() if not args.files: return 0 with enable_sphinx_if_possible(): status = 0 pool = multiprocessing.Pool(multiprocessing.cpu_count()) try: if len(args.files) > 1: results = pool.map( _check_file, [(name, args) for name in args.files]) else: # This is for the case where we read from standard in. results = [_check_file((args.files[0], args))] for (filename, errors) in results: for error in errors: line_number = error[0] message = error[1] if not re.match(r'\([A-Z]+/[0-9]+\)', message): message = '(ERROR/3) ' + message output_message('{}:{}: {}'.format(filename, line_number, message)) status = 1 except (IOError, UnicodeError) as exception: output_message(exception) status = 1 return status
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Run directive."""
try: language = self.arguments[0] except IndexError: language = '' code = '\n'.join(self.content) literal = docutils.nodes.literal_block(code, code) literal['classes'].append('code-block') literal['language'] = language return [literal]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_literal_block(self, node): """Check syntax of code block."""
# For "..code-block:: language" language = node.get('language', None) is_code_node = False if not language: # For "..code:: language" is_code_node = True classes = node.get('classes') if 'code' in classes: language = classes[-1] else: return if language in self.ignore['languages']: return if language == 'doctest' or ( language == 'python' and node.rawsource.lstrip().startswith('>>> ')): self.visit_doctest_block(node) raise docutils.nodes.SkipNode checker = { 'bash': bash_checker, 'c': c_checker, 'cpp': cpp_checker, 'json': lambda source, _: lambda: check_json(source), 'xml': lambda source, _: lambda: check_xml(source), 'python': lambda source, _: lambda: check_python(source), 'rst': lambda source, _: lambda: check_rst(source, ignore=self.ignore) }.get(language) if checker: run = checker(node.rawsource, self.working_directory) self._add_check(node=node, run=run, language=language, is_code_node=is_code_node) raise docutils.nodes.SkipNode
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_paragraph(self, node): """Check syntax of reStructuredText."""
find = re.search(r'\[[^\]]+\]\([^\)]+\)', node.rawsource) if find is not None: self.document.reporter.warning( '(rst) Link is formatted in Markdown style.', base_node=node)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_check(self, node, run, language, is_code_node): """Add checker that will be run."""
def run_check(): """Yield errors.""" all_results = run() if all_results is not None: if all_results: for result in all_results: error_offset = result[0] - 1 line_number = getattr(node, 'line', None) if line_number is not None: yield ( beginning_of_code_block( node=node, line_number=line_number, full_contents=self.contents, is_code_node=is_code_node) + error_offset, '({}) {}'.format(language, result[1])) else: yield (self.filename, 0, 'unknown error') self.checkers.append(run_check)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def translate(self): """Run CheckTranslator."""
visitor = CheckTranslator(self.document, contents=self.contents, filename=self.filename, ignore=self.ignore) self.document.walkabout(visitor) self.checkers += visitor.checkers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_backend(config): """ Imports and initializes the Backend class. """
backend_name = config['backend'] path = backend_name.split('.') backend_mod_name, backend_class_name = '.'.join(path[:-1]), path[-1] backend_mod = importlib.import_module(backend_mod_name) backend_class = getattr(backend_mod, backend_class_name) return backend_class(config['settings'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def queue_once_key(name, kwargs, restrict_to=None): """ Turns the name of the task, the kwargs and the allowed keys into a redis key. """
keys = ['qo', force_string(name)] # Restrict to only the keys allowed in keys. if restrict_to is not None: restrict_kwargs = {key: kwargs[key] for key in restrict_to} keys += kwargs_to_list(restrict_kwargs) else: keys += kwargs_to_list(kwargs) key = "_".join(keys) return key
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def raise_or_lock(self, key, timeout): """ Check the lock file and create one if it does not exist. """
# Path of the on-disk lock file representing this key.
lock_path = self._get_lock_path(key)
try:
    # Create lock file, raise exception if it exists
    # O_CREAT | O_EXCL makes creation atomic: exactly one process can
    # win the race to create the file.
    fd = os.open(lock_path, os.O_CREAT | os.O_EXCL)
except OSError as error:
    if error.errno == errno.EEXIST:
        # File already exists, check its modification time
        # ttl > 0 means the existing lock has not yet expired.
        mtime = os.path.getmtime(lock_path)
        ttl = mtime + timeout - time.time()
        if ttl > 0:
            raise AlreadyQueued(ttl)
        else:
            # Update modification time if timeout happens
            # (the stale lock file is re-used instead of re-created,
            # so the lock is considered acquired here).
            os.utime(lock_path, None)
            return
    else:
        # Re-raise unexpected OSError
        raise
else:
    # Lock acquired: close the descriptor but leave the file in place;
    # the file's existence *is* the lock.
    os.close(fd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_lock(self, key): """ Remove the lock file. """
# Deleting the lock file is what releases the lock for this key.
os.remove(self._get_lock_path(key))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_async(self, args=None, kwargs=None, **options): """ Attempts to queue a task. Raises an AlreadyQueued exception if already queued. :param \*args: positional arguments passed on to the task. :param \*\*kwargs: keyword arguments passed on to the task. :keyword \*\*once: (optional) :param: graceful: (optional) If True, won't raise an exception if already queued. Instead will return None. :param: timeout: (optional) An `int` number of seconds after which the lock will expire. If not set, defaults to 1 hour. :param: keys: (optional) """
once_options = options.get('once', {}) once_graceful = once_options.get( 'graceful', self.once.get('graceful', False)) once_timeout = once_options.get( 'timeout', self.once.get('timeout', self.default_timeout)) if not options.get('retries'): key = self.get_key(args, kwargs) try: self.once_backend.raise_or_lock(key, timeout=once_timeout) except AlreadyQueued as e: if once_graceful: return EagerResult(None, None, states.REJECTED) raise e return super(QueueOnce, self).apply_async(args, kwargs, **options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cached_dataframe(self, csv_path, compute_fn): """ If a CSV path is in the _memory_cache, then return that cached value. If we've already saved the DataFrame as a CSV then load it. Otherwise run the provided `compute_fn`, and store its result in memory and and save it as a CSV. """
# Only CSV paths are supported; the extension doubles as a format check.
if not csv_path.endswith(".csv"):
    raise ValueError("Invalid path '%s', must be a CSV file" % csv_path)
# 1) In-memory cache hit: cheapest path, no disk access.
if csv_path in self._memory_cache:
    return self._memory_cache[csv_path]
# 2) On-disk cache hit: reload the previously written CSV.
if exists(csv_path) and not self.is_empty(csv_path):
    df = self._read_csv(csv_path)
else:
    # 3) Cache miss: compute, validate the result type, persist as CSV.
    df = compute_fn()
    if not isinstance(df, pd.DataFrame):
        raise TypeError(
            "Expected compute_fn to return DataFrame, got %s : %s" % (
                df, type(df)))
    self._write_csv(df, csv_path)
# Populate the in-memory cache on every path before returning.
self._memory_cache[csv_path] = df
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cached_object(self, path, compute_fn): """ If `cached_object` has already been called for a value of `path` in this running Python instance, then it should have a cached value in the _memory_cache; return that value. If this function was never called before with a particular value of `path`, then call compute_fn, and pickle it to `path`. If `path` already exists, unpickle it and store that value in _memory_cache. """
if path in self._memory_cache: return self._memory_cache[path] if exists(path) and not self.is_empty(path): obj = load_pickle(path) else: obj = compute_fn() dump_pickle(obj, path) self._memory_cache[path] = obj return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_header_id(line): """ Pull the transcript or protein identifier from the header line which starts with '>' """
if type(line) is not binary_type: raise TypeError("Expected header line to be of type %s but got %s" % ( binary_type, type(line))) if len(line) <= 1: raise ValueError("No identifier on FASTA line") # split line at first space to get the unique identifier for # this sequence space_index = line.find(b" ") if space_index >= 0: identifier = line[1:space_index] else: identifier = line[1:] # annoyingly Ensembl83 reformatted the transcript IDs of its # cDNA FASTA to include sequence version numbers # .e.g. # "ENST00000448914.1" instead of "ENST00000448914" # So now we have to parse out the identifier dot_index = identifier.find(b".") if dot_index >= 0: identifier = identifier[:dot_index] return identifier.decode("ascii")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_file(self, fasta_path): """ Read the contents of a FASTA file into a dictionary """
fasta_dictionary = {} for (identifier, sequence) in self.iterate_over_file(fasta_path): fasta_dictionary[identifier] = sequence return fasta_dictionary
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterate_over_file(self, fasta_path): """ Generator that yields identifiers paired with sequences. """
with self._open(fasta_path) as f: for line in f: line = line.rstrip() if len(line) == 0: continue # have to slice into a bytes object or else I get a single integer first_char = line[0:1] if first_char == b">": id_and_seq = self._read_header(line) if id_and_seq is not None: yield id_and_seq elif first_char == b";": # semicolon are comment characters continue else: self.current_lines.append(line) # the last sequence is still in the lines buffer after we're done with # the file so make sure to yield it id_and_seq = self._current_entry() if id_and_seq is not None: yield id_and_seq
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _open(self, fasta_path): """ Open either a text file or compressed gzip file as a stream of bytes. """
if fasta_path.endswith("gz") or fasta_path.endswith("gzip"): return GzipFile(fasta_path, 'rb') else: return open(fasta_path, 'rb')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_reference_name(name): """ Search the dictionary of species-specific references to find a reference name that matches aside from capitalization. If no matching reference is found, raise an exception. """
lower_name = name.strip().lower() for reference in Species._reference_names_to_species.keys(): if reference.lower() == lower_name: return reference raise ValueError("Reference genome '%s' not found" % name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def genome_for_reference_name( reference_name, allow_older_downloaded_release=True): """ Given a genome reference name, such as "GRCh38", returns the corresponding Ensembl Release object. If `allow_older_downloaded_release` is True, and some older releases have been downloaded, then return the most recent locally available release. Otherwise, return the newest release of Ensembl (even if its data hasn't already been downloaded). """
reference_name = normalize_reference_name(reference_name) species = find_species_by_reference(reference_name) (min_ensembl_release, max_ensembl_release) = \ species.reference_assemblies[reference_name] if allow_older_downloaded_release: # go through candidate releases in descending order for release in reversed(range(min_ensembl_release, max_ensembl_release + 1)): # check if release has been locally downloaded candidate = EnsemblRelease.cached(release=release, species=species) if candidate.required_local_files_exist(): return candidate # see if any of the releases between [max, min] are already locally # available return EnsemblRelease.cached(release=max_ensembl_release, species=species)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_species_name(name): """ If species name was "Homo sapiens" then replace spaces with underscores and return "homo_sapiens". Also replace common names like "human" with "homo_sapiens". """
lower_name = name.lower().strip() # if given a common name such as "human", look up its latin equivalent if lower_name in Species._common_names_to_species: return Species._common_names_to_species[lower_name].latin_name return lower_name.replace(" ", "_")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_species_object(species_name_or_object): """ Helper for validating user supplied species names or objects. """
if isinstance(species_name_or_object, Species): return species_name_or_object elif isinstance(species_name_or_object, str): return find_species_by_name(species_name_or_object) else: raise ValueError("Unexpected type for species: %s : %s" % ( species_name_or_object, type(species_name_or_object)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(cls, latin_name, synonyms, reference_assemblies): """ Create a Species object from the given arguments and enter into all the dicts used to look the species up by its fields. """
species = Species( latin_name=latin_name, synonyms=synonyms, reference_assemblies=reference_assemblies) cls._latin_names_to_species[species.latin_name] = species for synonym in synonyms: if synonym in cls._common_names_to_species: raise ValueError("Can't use synonym '%s' for both %s and %s" % ( synonym, species, cls._common_names_to_species[synonym])) cls._common_names_to_species[synonym] = species for reference_name in reference_assemblies: if reference_name in cls._reference_names_to_species: raise ValueError("Can't use reference '%s' for both %s and %s" % ( reference_name, species, cls._reference_names_to_species[reference_name])) cls._reference_names_to_species[reference_name] = species return species
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def offset(self, position): """Offset of given position from stranded start of this locus. For example, if a Locus goes from 10..20 and is on the negative strand, then the offset of position 13 is 7, whereas if the Locus is on the positive strand, then the offset is 3. """
if position > self.end or position < self.start: raise ValueError( "Position %d outside valid range %d..%d of %s" % ( position, self.start, self.end, self)) elif self.on_forward_strand: return position - self.start else: return self.end - position
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overlaps(self, contig, start, end, strand=None): """ Does this locus overlap with a given range of positions? Since locus position ranges are inclusive, we should make sure that e.g. chr1:10-10 overlaps with chr1:10-10 """
return ( self.can_overlap(contig, strand) and self.distance_to_interval(start, end) == 0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _memoize_cache_key(args, kwargs): """Turn args tuple and kwargs dictionary into a hashable key. Expects that all arguments to a memoized function are either hashable or can be uniquely identified from type(arg) and repr(arg). """
cache_key_list = [] # hack to get around the unhashability of lists, # add a special case to convert them to tuples for arg in args: if type(arg) is list: cache_key_list.append(tuple(arg)) else: cache_key_list.append(arg) for (k, v) in sorted(kwargs.items()): if type(v) is list: cache_key_list.append((k, tuple(v))) else: cache_key_list.append((k, v)) return tuple(cache_key_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def memoize(fn): """Simple reset-able memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared. """
# Shared cache dict, closed over by both wrapped_fn and clear_cache.
cache = {}
@wraps(fn)
def wrapped_fn(*args, **kwargs):
    cache_key = _memoize_cache_key(args, kwargs)
    # EAFP: hits are a single dict lookup; misses pay one KeyError.
    try:
        return cache[cache_key]
    except KeyError:
        value = fn(*args, **kwargs)
        cache[cache_key] = value
        return value
def clear_cache():
    cache.clear()
# Needed to ensure that EnsemblRelease.clear_cache
# is able to clear memoized values from each of its methods
wrapped_fn.clear_cache = clear_cache
# expose the cache so we can check if an item has already been computed
wrapped_fn.cache = cache
# if we want to check whether an item is in the cache, first need
# to construct the same cache key as used by wrapped_fn
wrapped_fn.make_cache_key = _memoize_cache_key
return wrapped_fn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_init_values(cls, release, species, server): """ Normalizes the arguments which uniquely specify an EnsemblRelease genome. """
release = check_release_number(release) species = check_species_object(species) return (release, species, server)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cached( cls, release=MAX_ENSEMBL_RELEASE, species=human, server=ENSEMBL_FTP_SERVER): """ Construct EnsemblRelease if it's never been made before, otherwise return an old instance. """
init_args_tuple = cls.normalize_init_values(release, species, server) if init_args_tuple in cls._genome_cache: genome = cls._genome_cache[init_args_tuple] else: genome = cls._genome_cache[init_args_tuple] = cls(*init_args_tuple) return genome
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cache_subdirectory( reference_name=None, annotation_name=None, annotation_version=None): """ Which cache subdirectory to use for a given annotation database over a particular reference. All arguments can be omitted to just get the base subdirectory for all pyensembl cached datasets. """
if reference_name is None: reference_name = "" if annotation_name is None: annotation_name = "" if annotation_version is None: annotation_version = "" reference_dir = join(CACHE_BASE_SUBDIR, reference_name) annotation_dir = "%s%s" % (annotation_name, annotation_version) return join(reference_dir, annotation_dir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fields(self): """ Fields used for hashing, string representation, equality comparison """
# NOTE(review): the trailing comma inside the first inner tuple
# ('reference_name', self.reference_name,) is harmless (still a
# 2-tuple) but looks like a typo; confirm before "fixing".
return (
    ('reference_name', self.reference_name,),
    ('annotation_name', self.annotation_name),
    ('annotation_version', self.annotation_version),
    ('cache_directory_path', self.cache_directory_path),
    ('decompress_on_download', self.decompress_on_download),
    ('copy_local_files_to_cache', self.copy_local_files_to_cache)
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cached_path(self, path_or_url): """ When downloading remote files, the default behavior is to name local files the same as their remote counterparts. """
assert path_or_url, "Expected non-empty string for path_or_url" remote_filename = split(path_or_url)[1] if self.is_url_format(path_or_url): # passing `decompress=False` since there is logic below # for stripping decompression extensions for both local # and remote files local_filename = datacache.build_local_filename( download_url=path_or_url, filename=remote_filename, decompress=False) else: local_filename = remote_filename # if we expect the download function to decompress this file then # we should use its name without the compression extension if self.decompress_on_download: local_filename = self._remove_compression_suffix_if_present( local_filename) if len(local_filename) == 0: raise ValueError("Can't determine local filename for %s" % ( path_or_url,)) return join(self.cache_directory_path, local_filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _download_if_necessary(self, url, download_if_missing, overwrite): """ Return local cached path to a remote file, download it if necessary. """
cached_path = self.cached_path(url) missing = not exists(cached_path) if (missing or overwrite) and download_if_missing: logger.info("Fetching %s from URL %s", cached_path, url) datacache.download._download_and_decompress_if_necessary( full_path=cached_path, download_url=url, timeout=3600) elif missing: raise MissingRemoteFile(url) return cached_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _copy_if_necessary(self, local_path, overwrite): """ Return cached path to local file, copying it to the cache if necessary. """
local_path = abspath(local_path) if not exists(local_path): raise MissingLocalFile(local_path) elif not self.copy_local_files_to_cache: return local_path else: cached_path = self.cached_path(local_path) if exists(cached_path) and not overwrite: return cached_path copy2(local_path, cached_path) return cached_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_or_copy_if_necessary( self, path_or_url, download_if_missing=False, overwrite=False): """ Download a remote file or copy Get the local path to a possibly remote file. Download if file is missing from the cache directory and `download_if_missing` is True. Download even if local file exists if both `download_if_missing` and `overwrite` are True. If the file is on the local file system then return its path, unless self.copy_local_to_cache is True, and then copy it to the cache first. Parameters path_or_url : str download_if_missing : bool, optional Download files if missing from local cache overwrite : bool, optional Overwrite existing copy if it exists """
assert path_or_url, "Expected non-empty string for path_or_url" if self.is_url_format(path_or_url): return self._download_if_necessary( path_or_url, download_if_missing, overwrite) else: return self._copy_if_necessary(path_or_url, overwrite)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_cached_files(self, prefixes=[], suffixes=[]): """ Deletes any cached files matching the prefixes or suffixes given """
for filename in listdir(self.cache_directory_path):
    # str.endswith/startswith accept a tuple of options; an empty
    # tuple matches nothing, same as the original any([]) behavior.
    matches_suffix = filename.endswith(tuple(suffixes))
    matches_prefix = filename.startswith(tuple(prefixes))
    if matches_suffix or matches_prefix:
        path = join(self.cache_directory_path, filename)
        logger.info("Deleting %s", path)
        remove(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Returns a dictionary of the essential fields of this Genome. """
# Keys presumably mirror the constructor arguments so the Genome can be
# rebuilt from this dict — confirm against __init__.
return dict(
    reference_name=self.reference_name,
    annotation_name=self.annotation_name,
    annotation_version=self.annotation_version,
    gtf_path_or_url=self._gtf_path_or_url,
    transcript_fasta_paths_or_urls=self._transcript_fasta_paths_or_urls,
    protein_fasta_paths_or_urls=self._protein_fasta_paths_or_urls,
    decompress_on_download=self.decompress_on_download,
    copy_local_files_to_cache=self.copy_local_files_to_cache,
    cache_directory_path=self.cache_directory_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_lazy_fields(self): """ Member data that gets loaded or constructed on demand """
self.gtf_path = None self._protein_sequences = None self._transcript_sequences = None self._db = None self.protein_fasta_paths = None self.transcript_fasta_paths = None # only memoizing the Gene, Transcript, and Exon objects self._genes = {} self._transcripts = {} self._exons = {}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_cached_path( self, field_name, path_or_url, download_if_missing=False, overwrite=False): """ Get the local path for a possibly remote file, invoking either a download or install error message if it's missing. """
if len(field_name) == 0: raise ValueError("Expected non-empty field name") if len(path_or_url) == 0: raise ValueError("Expected non-empty path_or_url") return self.download_cache.local_path_or_install_error( field_name=field_name, path_or_url=path_or_url, download_if_missing=download_if_missing, overwrite=overwrite)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(self, overwrite=False): """ Download data files needed by this Genome instance. Parameters overwrite : bool, optional Download files regardless whether local copy already exists. """
# Delegate to _set_local_paths with download_if_missing=True so missing
# remote files are fetched; `overwrite` forces re-download of existing
# local copies.
self._set_local_paths(download_if_missing=True, overwrite=overwrite)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(self, overwrite=False): """ Assuming that all necessary data for this Genome has been downloaded, generate the GTF database and save efficient representation of FASTA sequence files. """
if self.requires_gtf: self.db.connect_or_create(overwrite=overwrite) if self.requires_transcript_fasta: self.transcript_sequences.index(overwrite=overwrite) if self.requires_protein_fasta: self.protein_sequences.index(overwrite=overwrite)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_string(self): """ Add every missing file to the install string shown to the user in an error message. """
args = [ "--reference-name", self.reference_name, "--annotation-name", self.annotation_name] if self.annotation_version: args.extend(["--annotation-version", str(self.annotation_version)]) if self.requires_gtf: args.append("--gtf") args.append("\"%s\"" % self._gtf_path_or_url) if self.requires_protein_fasta: args += [ "--protein-fasta \"%s\"" % path for path in self._protein_fasta_paths_or_urls] if self.requires_transcript_fasta: args += [ "--transcript-fasta \"%s\"" % path for path in self._transcript_fasta_paths_or_urls] return "pyensembl install %s" % " ".join(args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_cache(self): """ Clear any in-memory cached values and short-lived on-disk materializations from MemoryCache """
for maybe_fn in self.__dict__.values(): # clear cache associated with all memoization decorators, # GTF and SequenceData objects if hasattr(maybe_fn, "clear_cache"): maybe_fn.clear_cache()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_index_files(self): """ Delete all data aside from source GTF and FASTA files """
self.clear_cache() db_path = self.db.local_db_path() if exists(db_path): remove(db_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _all_feature_values( self, column, feature, distinct=True, contig=None, strand=None): """ Cached lookup of all values for a particular feature property from the database, caches repeated queries in memory and stores them as a CSV. Parameters column : str Name of property (e.g. exon_id) feature : str Type of entry (e.g. exon) distinct : bool, optional Keep only unique values contig : str, optional Restrict query to particular contig strand : str, optional Restrict results to "+" or "-" strands Returns a list constructed from query results. """
# Thin pass-through: the database layer presumably implements the
# memory/CSV caching described in the docstring — confirm in
# Database.query_feature_values.
return self.db.query_feature_values(
    column=column,
    feature=feature,
    distinct=distinct,
    contig=contig,
    strand=strand)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gene_by_id(self, gene_id): """ Construct a Gene object for the given gene ID. """
if gene_id not in self._genes: field_names = [ "seqname", "start", "end", "strand", ] optional_field_names = [ "gene_name", "gene_biotype", ] # Do not look for gene_name and gene_biotype if they are # not in the database. field_names.extend([ name for name in optional_field_names if self.db.column_exists("gene", name) ]) result = self.db.query_one( field_names, filter_column="gene_id", filter_value=gene_id, feature="gene") if not result: raise ValueError("Gene not found: %s" % (gene_id,)) gene_name, gene_biotype = None, None assert len(result) >= 4 and len(result) <= 6, \ "Result is not the expected length: %d" % len(result) contig, start, end, strand = result[:4] if len(result) == 5: if "gene_name" in field_names: gene_name = result[4] else: gene_biotype = result[4] elif len(result) == 6: gene_name, gene_biotype = result[4:] self._genes[gene_id] = Gene( gene_id=gene_id, gene_name=gene_name, contig=contig, start=start, end=end, strand=strand, biotype=gene_biotype, genome=self) return self._genes[gene_id]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gene_by_protein_id(self, protein_id): """ Get the gene ID associated with the given protein ID, return its Gene object """
gene_id = self.gene_id_of_protein_id(protein_id) return self.gene_by_id(gene_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gene_id_of_protein_id(self, protein_id): """ What is the gene ID associated with a given protein ID? """
results = self._query_gene_ids( "protein_id", protein_id, feature="CDS") if len(results) == 0: raise ValueError("Protein ID not found: %s" % protein_id) assert len(results) == 1, \ ("Should have only one gene ID for a given protein ID, " "but found %d: %s" % (len(results), results)) return results[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transcripts(self, contig=None, strand=None): """ Construct Transcript object for every transcript entry in the database. Optionally restrict to a particular chromosome using the `contig` argument. """
transcript_ids = self.transcript_ids(contig=contig, strand=strand) return [ self.transcript_by_id(transcript_id) for transcript_id in transcript_ids ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transcript_by_id(self, transcript_id): """Construct Transcript object with given transcript ID"""
if transcript_id not in self._transcripts: optional_field_names = [ "transcript_name", "transcript_biotype", "transcript_support_level", ] field_names = [ "seqname", "start", "end", "strand", "gene_id", ] # Do not look for the optional fields if they are not in the database. field_names.extend([ name for name in optional_field_names if self.db.column_exists("transcript", name) ]) result = self.db.query_one( select_column_names=field_names, filter_column="transcript_id", filter_value=transcript_id, feature="transcript", distinct=True) if not result: raise ValueError("Transcript not found: %s" % (transcript_id,)) transcript_name, transcript_biotype, tsl = None, None, None assert 5 <= len(result) <= 5 + len(optional_field_names), \ "Result is not the expected length: %d" % len(result) contig, start, end, strand, gene_id = result[:5] if len(result) > 5: extra_field_names = [f for f in optional_field_names if f in field_names] extra_data = dict(zip(extra_field_names, result[5:])) transcript_name = extra_data.get("transcript_name") transcript_biotype = extra_data.get("transcript_biotype") tsl = extra_data.get("transcript_support_level") if not tsl or tsl == 'NA': tsl = None else: tsl = int(tsl) self._transcripts[transcript_id] = Transcript( transcript_id=transcript_id, transcript_name=transcript_name, contig=contig, start=start, end=end, strand=strand, biotype=transcript_biotype, gene_id=gene_id, genome=self, support_level=tsl) return self._transcripts[transcript_id]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transcript_id_of_protein_id(self, protein_id): """ What is the transcript ID associated with a given protein ID? """
results = self._query_transcript_ids( "protein_id", protein_id, feature="CDS") if len(results) == 0: raise ValueError("Protein ID not found: %s" % protein_id) assert len(results) == 1, \ ("Should have only one transcript ID for a given protein ID, " "but found %d: %s" % (len(results), results)) return results[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exons(self, contig=None, strand=None): """ Create exon object for all exons in the database, optionally restrict to a particular chromosome using the `contig` argument. """
# DataFrame with single column called "exon_id" exon_ids = self.exon_ids(contig=contig, strand=strand) return [ self.exon_by_id(exon_id) for exon_id in exon_ids ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exon_by_id(self, exon_id): """Construct an Exon object from its ID by looking up the exon's properties in the given Database. """
if exon_id not in self._exons: field_names = [ "seqname", "start", "end", "strand", "gene_name", "gene_id", ] contig, start, end, strand, gene_name, gene_id = self.db.query_one( select_column_names=field_names, filter_column="exon_id", filter_value=exon_id, feature="exon", distinct=True) self._exons[exon_id] = Exon( exon_id=exon_id, contig=contig, start=start, end=end, strand=strand, gene_name=gene_name, gene_id=gene_id) return self._exons[exon_id]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_release_number(release): """ Check to make sure a release is in the valid range of Ensembl releases. """
try: release = int(release) except: raise ValueError("Invalid Ensembl release: %s" % release) if release < MIN_ENSEMBL_RELEASE or release > MAX_ENSEMBL_RELEASE: raise ValueError( "Invalid Ensembl releases %d, must be between %d and %d" % ( release, MIN_ENSEMBL_RELEASE, MAX_ENSEMBL_RELEASE)) return release
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _species_subdir( ensembl_release, species="homo_sapiens", filetype="gtf", server=ENSEMBL_FTP_SERVER): """ Assume ensembl_release has already been normalized by the calling function, but species might be either a common name or latin name. """
return SPECIES_SUBDIR_TEMPLATE % { "release": ensembl_release, "filetype": filetype, "species": species, }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_release_properties(ensembl_release, species): """ Make sure a given release is valid, normalize it to be an integer, normalize the species name, and get its associated reference. """
ensembl_release = check_release_number(ensembl_release) if not isinstance(species, Species): species = find_species_by_name(species) reference_name = species.which_reference(ensembl_release) return ensembl_release, species.latin_name, reference_name