text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def join_selected_lines(self, separator=' '):
    """
    Join the selected lines.
    """
    assert self.selection_state

    # Sort the two selection endpoints so that `start <= end`.
    start, end = sorted([self.cursor_position,
                         self.selection_state.original_cursor_position])

    prefix = self.text[:start]
    suffix = self.text[end:]

    # Strip the leading spaces of every selected line and glue the
    # separator onto it.
    joined = [line.lstrip(' ') + separator
              for line in self.text[start:end].splitlines()]

    # Install the new document; the cursor lands just before the last
    # joined fragment.
    self.document = Document(
        text=prefix + ''.join(joined) + suffix,
        cursor_position=len(prefix + ''.join(joined[:-1])) - 1)
def swap_characters_before_cursor(self):
    """
    Swap the last two characters before the cursor.
    """
    pos = self.cursor_position

    # Nothing to swap when fewer than two characters precede the cursor.
    if pos >= 2:
        first, second = self.text[pos - 2], self.text[pos - 1]
        self.text = ''.join([self.text[:pos - 2], second, first, self.text[pos:]])
def go_to_history(self, index):
    """
    Go to this item in the history.
    """
    # Out-of-range indexes are silently ignored.
    if index >= len(self._working_lines):
        return

    self.working_index = index
    # Put the cursor at the very end of the (new) text.
    self.cursor_position = len(self.text)
def start_history_lines_completion(self):
    """
    Start a completion based on all the other lines in the document and the
    history.
    """
    seen = set()
    result = []

    # Text on the current line up to the cursor: every candidate line has
    # to start with this prefix.
    prefix = self.document.current_line_before_cursor.lstrip()

    for entry_index, entry in enumerate(self._working_lines):
        for line_index, line in enumerate(entry.split('\n')):
            line = line.strip()

            # Offer each distinct matching line only once.
            if line and line.startswith(prefix) and line not in seen:
                seen.add(line)

                meta = ("Current, line %s" % (line_index + 1)
                        if entry_index == self.working_index
                        else "History %s, line %s" % (entry_index + 1,
                                                      line_index + 1))

                result.append(Completion(
                    line,
                    start_position=-len(prefix),
                    display_meta=meta))

    self.set_completions(completions=result[::-1])
def go_to_completion(self, index):
    """
    Select a completion from the list of current completions.
    """
    assert index is None or isinstance(index, int)
    assert self.complete_state

    # Pick the requested completion state.
    new_state = self.complete_state.go_to_index(index)

    # Apply the resulting text and cursor position.
    text, position = new_state.new_text_and_position()
    self.document = Document(text, position)

    # Assigning to `document` resets `complete_state`, so restore it last.
    self.complete_state = new_state
def apply_completion(self, completion):
    """
    Insert a given completion.
    """
    assert isinstance(completion, Completion)

    # Cancel any completion that is still active.
    if self.complete_state:
        self.go_to_completion(None)
        self.complete_state = None

    # Remove the text the completion replaces, then insert its own text.
    self.delete_before_cursor(-completion.start_position)
    self.insert_text(completion.text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_history_search(self): """ Set `history_search_text`. """
if self.enable_history_search(): if self.history_search_text is None: self.history_search_text = self.document.text_before_cursor else: self.history_search_text = None
def history_forward(self, count=1):
    """
    Move forwards through the history.

    :param count: Amount of items to move forward.
    """
    self._set_history_search()

    # Walk forward until `count` matching entries have been visited.
    found = False
    for index in range(self.working_index + 1, len(self._working_lines)):
        if self._history_matches(index):
            self.working_index = index
            count -= 1
            found = True
            if count == 0:
                break

    # When we moved, put the cursor at the end of the first line.
    if found:
        self.cursor_position = 0
        self.cursor_position += self.document.get_end_of_line_position()
def history_backward(self, count=1):
    """
    Move backwards through history.
    """
    self._set_history_search()

    # Walk backward until `count` matching entries have been visited.
    found = False
    for index in range(self.working_index - 1, -1, -1):
        if self._history_matches(index):
            self.working_index = index
            count -= 1
            found = True
            if count == 0:
                break

    # When we moved, put the cursor at the very end of the text.
    if found:
        self.cursor_position = len(self.text)
def start_selection(self, selection_type=SelectionType.CHARACTERS):
    """
    Take the current cursor position as the start of this selection.
    """
    # Anchor a fresh selection at the present cursor offset.
    self.selection_state = SelectionState(self.cursor_position, selection_type)
def paste_clipboard_data(self, data, paste_mode=PasteMode.EMACS, count=1):
    """
    Insert the data from the clipboard.
    """
    assert isinstance(data, ClipboardData)
    assert paste_mode in (PasteMode.VI_BEFORE, PasteMode.VI_AFTER,
                          PasteMode.EMACS)

    before_paste = self.document
    self.document = self.document.paste_clipboard_data(
        data, paste_mode=paste_mode, count=count)

    # Remember the original document. This assignment has to come last,
    # because assigning to 'document' erases it.
    self.document_before_paste = before_paste
def newline(self, copy_margin=True):
    """
    Insert a line ending at the current position.
    """
    # Optionally repeat the current line's leading whitespace on the new
    # line.
    margin = self.document.leading_whitespace_in_current_line if copy_margin else ''
    self.insert_text('\n' + margin)
def insert_line_above(self, copy_margin=True):
    """
    Insert a new line above the current one.
    """
    # Compute the margin before moving the cursor.
    margin = self.document.leading_whitespace_in_current_line if copy_margin else ''

    # Move to the start of the line, insert margin + newline, then step
    # back onto the freshly inserted line.
    self.cursor_position += self.document.get_start_of_line_position()
    self.insert_text(margin + '\n')
    self.cursor_position -= 1
def insert_line_below(self, copy_margin=True):
    """
    Insert a new line below the current one.
    """
    # Compute the margin before moving the cursor.
    margin = self.document.leading_whitespace_in_current_line if copy_margin else ''

    # Jump to the end of the current line and append the newline there.
    self.cursor_position += self.document.get_end_of_line_position()
    self.insert_text('\n' + margin)
def insert_text(self, data, overwrite=False, move_cursor=True, fire_event=True):
    """
    Insert characters at cursor position.

    :param fire_event: Fire `on_text_insert` event. This is mainly used to
         trigger autocompletion while typing.
    """
    text = self.text
    pos = self.cursor_position

    if overwrite:
        # Overwrite mode replaces the characters under the cursor, but a
        # newline is never overwritten: just before a line ending this
        # behaves like insert mode.
        replaced = text[pos:pos + len(data)]
        if '\n' in replaced:
            replaced = replaced[:replaced.find('\n')]

        self.text = text[:pos] + data + text[pos + len(replaced):]
    else:
        self.text = text[:pos] + data + text[pos:]

    if move_cursor:
        self.cursor_position += len(data)

    if fire_event:
        # Notify listeners (e.g. autocompletion while typing).
        self.on_text_insert.fire()
def validate(self):
    """
    Returns `True` if valid.
    """
    # The validator runs at most once per input; reuse the cached outcome
    # afterwards.
    if self.validation_state != ValidationState.UNKNOWN:
        return self.validation_state == ValidationState.VALID

    if self.validator:
        try:
            self.validator.validate(self.document)
        except ValidationError as e:
            # Move the cursor to the offending position (clamped to the
            # text bounds) and record the failure.
            self.cursor_position = min(max(0, e.cursor_position), len(self.text))
            self.validation_state = ValidationState.INVALID
            self.validation_error = e
            return False

    self.validation_state = ValidationState.VALID
    self.validation_error = None
    return True
def open_in_editor(self, cli):
    """
    Open code in editor.

    :param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`
                instance.
    """
    if self.read_only():
        raise EditReadOnlyBuffer()

    # Dump the current text into a temporary file.
    fd, tmp_path = tempfile.mkstemp(self.tempfile_suffix)
    os.write(fd, self.text.encode('utf-8'))
    os.close(fd)

    # Open in editor through `cli.run_in_terminal`, because not all editors
    # go to the alternate screen buffer, and some could influence the
    # cursor position.
    succeeded = cli.run_in_terminal(lambda: self._open_file_in_editor(tmp_path))

    # On success, read the edited content back.
    if succeeded:
        with open(tmp_path, 'rb') as f:
            text = f.read().decode('utf-8')

            # Drop trailing newline. (Editors are supposed to add it at the
            # end, but we don't need it.)
            if text.endswith('\n'):
                text = text[:-1]

            self.document = Document(
                text=text,
                cursor_position=len(text))

    # Clean up temp file.
    os.remove(tmp_path)
def _open_file_in_editor(self, filename):
    """
    Call editor executable.

    Return True when we received a zero return code.
    """
    # Prefer $VISUAL, then $EDITOR, then a list of well-known fallbacks.
    candidates = [
        os.environ.get('VISUAL'),
        os.environ.get('EDITOR'),

        # Order of preference.
        '/usr/bin/editor',
        '/usr/bin/nano',
        '/usr/bin/pico',
        '/usr/bin/vi',
        '/usr/bin/emacs',
    ]

    for editor in candidates:
        if editor:
            try:
                # Use 'shlex.split()', because $VISUAL can contain spaces
                # and quotes.
                code = subprocess.call(shlex.split(editor) + [filename])
                return code == 0
            except OSError:
                # Executable does not exist, try the next one.
                pass

    return False
def lines(self):
    """
    Array of all the lines.
    """
    cache = self._cache

    # This property is hit very often, so the split result is cached.
    if cache.lines is None:
        cache.lines = _ImmutableLineList(self.text.split('\n'))

    return cache.lines
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _line_start_indexes(self): """ Array pointing to the start indexes of all the lines. """
# Cache, because this is often reused. (If it is used, it's often used # many times. And this has to be fast for editing big documents!) if self._cache.line_indexes is None: # Create list of line lengths. line_lengths = map(len, self.lines) # Calculate cumulative sums. indexes = [0] append = indexes.append pos = 0 for line_length in line_lengths: pos += line_length + 1 append(pos) # Remove the last item. (This is not a new line.) if len(indexes) > 1: indexes.pop() self._cache.line_indexes = indexes return self._cache.line_indexes
def leading_whitespace_in_current_line(self):
    """
    The leading whitespace in the left margin of the current line.
    """
    line = self.current_line
    # Everything that lstrip() would remove is, by definition, the margin.
    return line[:len(line) - len(line.lstrip())]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_line_start_index(self, index): """ For the index of a character at a certain line, calculate the index of the first character on that line. Return (row, index) tuple. """
indexes = self._line_start_indexes pos = bisect.bisect_right(indexes, index) - 1 return pos, indexes[pos]
def has_match_at_current_position(self, sub):
    """
    `True` when this substring is found at the cursor position.
    """
    pos = self.cursor_position
    # The first occurrence at-or-after the cursor must be exactly at it.
    return self.text.find(sub, pos) == pos
def find(self, sub, in_current_line=False, include_current_position=False,
         ignore_case=False, count=1):
    """
    Find `text` after the cursor, return position relative to the cursor
    position. Return `None` if nothing was found.

    :param count: Find the n-th occurance.
    """
    assert isinstance(ignore_case, bool)

    if in_current_line:
        text = self.current_line_after_cursor
    else:
        text = self.text_after_cursor

    if not include_current_position:
        if len(text) == 0:
            # (Otherwise, we always get a match for the empty string.)
            return
        # Skip the character under the cursor.
        text = text[1:]

    flags = re.IGNORECASE if ignore_case else 0

    # NOTE: the previous version wrapped this loop in a dead
    # `try/except StopIteration` — a for-loop over `re.finditer` can never
    # raise StopIteration, so it was removed.
    for i, match in enumerate(re.finditer(re.escape(sub), text, flags)):
        if i + 1 == count:
            if include_current_position:
                return match.start(0)
            # Compensate for the skipped first character.
            return match.start(0) + 1
def find_all(self, sub, ignore_case=False):
    """
    Find all occurances of the substring. Return a list of absolute
    positions in the document.
    """
    flags = re.IGNORECASE if ignore_case else 0
    pattern = re.escape(sub)
    return [m.start() for m in re.finditer(pattern, self.text, flags)]
def find_backwards(self, sub, in_current_line=False, ignore_case=False, count=1):
    """
    Find `text` before the cursor, return position relative to the cursor
    position. Return `None` if nothing was found.

    :param count: Find the n-th occurance.
    """
    # Search the reversed text so that the first hit is the closest one.
    if in_current_line:
        haystack = self.current_line_before_cursor[::-1]
    else:
        haystack = self.text_before_cursor[::-1]

    flags = re.IGNORECASE if ignore_case else 0

    # NOTE: the previous version wrapped this loop in a dead
    # `try/except StopIteration` — a for-loop over `re.finditer` can never
    # raise StopIteration, so it was removed.
    for i, match in enumerate(re.finditer(re.escape(sub[::-1]), haystack, flags)):
        if i + 1 == count:
            return -match.start(0) - len(sub)
def get_word_before_cursor(self, WORD=False):
    """
    Give the word before the cursor.
    If we have whitespace before the cursor this returns an empty string.
    """
    before = self.text_before_cursor

    # Whitespace directly left of the cursor means there is no word.
    if before[-1:].isspace():
        return ''

    return before[self.find_start_of_previous_word(WORD=WORD):]
def get_word_under_cursor(self, WORD=False):
    """
    Return the word, currently below the cursor.
    This returns an empty string when the cursor is on a whitespace region.
    """
    start, end = self.find_boundaries_of_current_word(WORD=WORD)
    pos = self.cursor_position

    # The boundaries are cursor-relative; translate them to absolute.
    return self.text[pos + start:pos + end]
def find_next_word_beginning(self, count=1, WORD=False):
    """
    Return an index relative to the cursor position pointing to the start
    of the next word. Return `None` if nothing was found.
    """
    # A negative count means searching backwards.
    if count < 0:
        return self.find_previous_word_beginning(count=-count, WORD=WORD)

    regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE

    # NOTE: the previous version wrapped this loop in a dead
    # `try/except StopIteration` — a for-loop over `finditer` can never
    # raise StopIteration, so it was removed.
    for i, match in enumerate(regex.finditer(self.text_after_cursor)):
        # Take first match, unless it's the word on which we're right now.
        if i == 0 and match.start(1) == 0:
            count += 1

        if i + 1 == count:
            return match.start(1)
def find_next_word_ending(self, include_current_position=False, count=1, WORD=False):
    """
    Return an index relative to the cursor position pointing to the end
    of the next word. Return `None` if nothing was found.
    """
    # A negative count means searching backwards.
    if count < 0:
        return self.find_previous_word_ending(count=-count, WORD=WORD)

    # Skip the character under the cursor unless it should be included.
    if include_current_position:
        text = self.text_after_cursor
    else:
        text = self.text_after_cursor[1:]

    regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE

    # NOTE: the previous version wrapped this loop in a dead
    # `try/except StopIteration` — a for-loop over `finditer` can never
    # raise StopIteration, so it was removed.
    for i, match in enumerate(regex.finditer(text)):
        if i + 1 == count:
            value = match.end(1)
            # Compensate for the skipped first character.
            return value if include_current_position else value + 1
def find_previous_word_ending(self, count=1, WORD=False):
    """
    Return an index relative to the cursor position pointing to the end
    of the previous word. Return `None` if nothing was found.
    """
    # A negative count means searching forwards.
    if count < 0:
        return self.find_next_word_ending(count=-count, WORD=WORD)

    # Search in the reversed text, including the character at the cursor.
    haystack = self.text_after_cursor[:1] + self.text_before_cursor[::-1]
    regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE

    # NOTE: the previous version wrapped this loop in a dead
    # `try/except StopIteration` — a for-loop over `finditer` can never
    # raise StopIteration, so it was removed.
    for i, match in enumerate(regex.finditer(haystack)):
        # Take first match, unless it's the word on which we're right now.
        if i == 0 and match.start(1) == 0:
            count += 1

        if i + 1 == count:
            return -match.start(1) + 1
def find_next_matching_line(self, match_func, count=1):
    """
    Look downwards for empty lines.
    Return the line index, relative to the current line.
    """
    result = None

    # Scan the lines below the current one, keeping the last match until
    # `count` matches have been seen.
    for offset, line in enumerate(self.lines[self.cursor_position_row + 1:], 1):
        if match_func(line):
            result = offset
            count -= 1
            if count == 0:
                break

    return result
def get_cursor_left_position(self, count=1):
    """
    Relative position for cursor left.
    """
    # A negative count means moving right instead.
    if count < 0:
        return self.get_cursor_right_position(-count)

    # Never move past the start of the line.
    return -min(self.cursor_position_col, count)
def get_cursor_right_position(self, count=1):
    """
    Relative position for cursor_right.
    """
    # A negative count means moving left instead.
    if count < 0:
        return self.get_cursor_left_position(-count)

    # Never move past the end of the line.
    return min(count, len(self.current_line_after_cursor))
def find_enclosing_bracket_right(self, left_ch, right_ch, end_pos=None):
    """
    Find the right bracket enclosing current position. Return the relative
    position to the cursor position.

    When `end_pos` is given, don't look past the position.
    """
    # Already on the closing bracket.
    if self.current_char == right_ch:
        return 0

    # Clamp the scan window to the end of the text.
    if end_pos is None:
        end_pos = len(self.text)
    else:
        end_pos = min(len(self.text), end_pos)

    # Scan forward, tracking nesting depth.
    depth = 1
    for i in range(self.cursor_position + 1, end_pos):
        ch = self.text[i]

        if ch == left_ch:
            depth += 1
        elif ch == right_ch:
            depth -= 1
            if depth == 0:
                return i - self.cursor_position
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
    """
    Find the left bracket enclosing current position. Return the relative
    position to the cursor position.

    When `start_pos` is given, don't look past the position.
    """
    # Already on the opening bracket.
    if self.current_char == left_ch:
        return 0

    # Clamp the scan window to the start of the text.
    start_pos = 0 if start_pos is None else max(0, start_pos)

    # Scan backward, tracking nesting depth.
    depth = 1
    for i in range(self.cursor_position - 1, start_pos - 1, -1):
        ch = self.text[i]

        if ch == right_ch:
            depth += 1
        elif ch == left_ch:
            depth -= 1
            if depth == 0:
                return i - self.cursor_position
def find_matching_bracket_position(self, start_pos=None, end_pos=None):
    """
    Return relative cursor position of matching [, (, { or < bracket.

    When `start_pos` or `end_pos` are given. Don't look past the positions.
    """
    # Check every supported bracket pair against the character under the
    # cursor.
    for opening, closing in ('()', '[]', '{}', '<>'):
        if self.current_char == opening:
            return self.find_enclosing_bracket_right(
                opening, closing, end_pos=end_pos) or 0
        elif self.current_char == closing:
            return self.find_enclosing_bracket_left(
                opening, closing, start_pos=start_pos) or 0

    # Not on a bracket at all.
    return 0
def get_start_of_line_position(self, after_whitespace=False):
    """
    Relative position for the start of this line.
    """
    if not after_whitespace:
        return -len(self.current_line_before_cursor)

    # Position of the first non-whitespace character, relative to the
    # cursor column.
    line = self.current_line
    indent = len(line) - len(line.lstrip())
    return indent - self.cursor_position_col
def empty_line_count_at_the_end(self):
    """
    Return number of empty lines at the end of the document.
    """
    count = 0

    # Walk upward from the last line until a non-blank line is found.
    for line in self.lines[::-1]:
        if line and not line.isspace():
            break
        count += 1

    return count
def insert_after(self, text):
    """
    Create a new document, with this text inserted after the buffer.
    It keeps selection ranges and cursor position in sync.
    """
    # Appending at the end never shifts the cursor or the selection.
    return Document(
        text=self.text + text,
        cursor_position=self.cursor_position,
        selection=self.selection)
def insert_before(self, text):
    """
    Create a new document, with this text inserted before the buffer.
    It keeps selection ranges and cursor position in sync.
    """
    shift = len(text)

    # The selection anchor has to shift along with the inserted prefix.
    selection = self.selection
    if selection:
        selection = SelectionState(
            original_cursor_position=selection.original_cursor_position + shift,
            type=selection.type)

    return Document(
        text=text + self.text,
        cursor_position=self.cursor_position + shift,
        selection=selection)
def load_key_bindings(
        get_search_state=None,
        enable_abort_and_exit_bindings=False,
        enable_system_bindings=False,
        enable_search=False,
        enable_open_in_editor=False,
        enable_extra_page_navigation=False,
        enable_auto_suggest_bindings=False):
    """
    Create a Registry object that contains the default key bindings.

    :param enable_abort_and_exit_bindings: Filter to enable Ctrl-C and Ctrl-D.
    :param enable_system_bindings: Filter to enable the system bindings
        (meta-! prompt and Control-Z suspension.)
    :param enable_search: Filter to enable the search bindings.
    :param enable_open_in_editor: Filter to enable open-in-editor.
    :param enable_extra_page_navigation: Filter for enabling extra page
        navigation. (Bindings for up/down scrolling through long pages,
        like in Emacs or Vi.)
    :param enable_auto_suggest_bindings: Filter to enable fish-style
        suggestions.
    """
    # (The docstring previously listed `enable_open_in_editor` twice; the
    # duplicate line was removed.)
    assert get_search_state is None or callable(get_search_state)

    # Accept both Filters and booleans as input.
    enable_abort_and_exit_bindings = to_cli_filter(enable_abort_and_exit_bindings)
    enable_system_bindings = to_cli_filter(enable_system_bindings)
    enable_search = to_cli_filter(enable_search)
    enable_open_in_editor = to_cli_filter(enable_open_in_editor)
    enable_extra_page_navigation = to_cli_filter(enable_extra_page_navigation)
    enable_auto_suggest_bindings = to_cli_filter(enable_auto_suggest_bindings)

    registry = MergedRegistry([
        # Load basic bindings.
        load_basic_bindings(),
        load_mouse_bindings(),

        ConditionalRegistry(load_abort_and_exit_bindings(),
                            enable_abort_and_exit_bindings),

        ConditionalRegistry(load_basic_system_bindings(),
                            enable_system_bindings),

        # Load emacs bindings.
        load_emacs_bindings(),

        ConditionalRegistry(load_emacs_open_in_editor_bindings(),
                            enable_open_in_editor),

        ConditionalRegistry(
            load_emacs_search_bindings(get_search_state=get_search_state),
            enable_search),

        ConditionalRegistry(load_emacs_system_bindings(),
                            enable_system_bindings),

        ConditionalRegistry(load_extra_emacs_page_navigation_bindings(),
                            enable_extra_page_navigation),

        # Load Vi bindings.
        load_vi_bindings(get_search_state=get_search_state),

        ConditionalRegistry(load_vi_open_in_editor_bindings(),
                            enable_open_in_editor),

        ConditionalRegistry(
            load_vi_search_bindings(get_search_state=get_search_state),
            enable_search),

        ConditionalRegistry(load_vi_system_bindings(),
                            enable_system_bindings),

        ConditionalRegistry(load_extra_vi_page_navigation_bindings(),
                            enable_extra_page_navigation),

        # Suggestion bindings.
        # (This has to come at the end, because the Vi bindings also have an
        # implementation for the "right arrow", but we really want the
        # suggestion binding when a suggestion is available.)
        ConditionalRegistry(load_auto_suggestion_bindings(),
                            enable_auto_suggest_bindings),
    ])

    return registry
def load_key_bindings_for_prompt(**kw):
    """
    Create a ``Registry`` object with the defaults key bindings for an input
    prompt. This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
    incremental search and auto suggestions.

    (Not for full screen applications.)
    """
    # Turn the prompt-relevant bindings on unless the caller said otherwise.
    for option in ('enable_abort_and_exit_bindings',
                   'enable_search',
                   'enable_auto_suggest_bindings'):
        kw.setdefault(option, True)

    return load_key_bindings(**kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _split_multiline_prompt(get_prompt_tokens): """ Take a `get_prompt_tokens` function and return three new functions instead. One that tells whether this prompt consists of multiple lines; one that returns the tokens to be shown on the lines above the input; and another one with the tokens to be shown at the first line of the input. """
def has_before_tokens(cli): for token, char in get_prompt_tokens(cli): if '\n' in char: return True return False def before(cli): result = [] found_nl = False for token, char in reversed(explode_tokens(get_prompt_tokens(cli))): if found_nl: result.insert(0, (token, char)) elif char == '\n': found_nl = True return result def first_input_line(cli): result = [] for token, char in reversed(explode_tokens(get_prompt_tokens(cli))): if char == '\n': break else: result.insert(0, (token, char)) return result return has_before_tokens, before, first_input_line
def prompt(message='', **kwargs):
    """
    Get input from the user and return it. This is a wrapper around a lot of
    ``prompt_toolkit`` functionality and can be a replacement for
    `raw_input`. (or GNU readline.) If you want to keep your history across
    several calls, create one :class:`~prompt_toolkit.history.History`
    instance and pass it every time. This function accepts many keyword
    arguments. Except for the following, they are a proxy to the arguments
    of :func:`.create_prompt_application`.

    :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
        print statements from other threads won't destroy the prompt. (They
        will be printed above the prompt instead.)
    :param return_asyncio_coroutine: When True, return a asyncio coroutine.
        (Python >3.3)
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param refresh_interval: (number; in seconds) When given, refresh the UI
        every so many seconds.
    """
    # Split off the arguments that belong to `run_application` rather than
    # to `create_prompt_application`.
    run_kwargs = {
        'patch_stdout': kwargs.pop('patch_stdout', False),
        'return_asyncio_coroutine': kwargs.pop('return_asyncio_coroutine', False),
        'true_color': kwargs.pop('true_color', False),
        'refresh_interval': kwargs.pop('refresh_interval', 0),
        'eventloop': kwargs.pop('eventloop', None),
    }

    return run_application(create_prompt_application(message, **kwargs),
                           **run_kwargs)
def run_application(
        application, patch_stdout=False, return_asyncio_coroutine=False,
        true_color=False, refresh_interval=0, eventloop=None):
    """
    Run a prompt toolkit application.

    :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
        print statements from other threads won't destroy the prompt. (They
        will be printed above the prompt instead.)
    :param return_asyncio_coroutine: When True, return a asyncio coroutine.
        (Python >3.3)
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param refresh_interval: (number; in seconds) When given, refresh the UI
        every so many seconds.
    """
    assert isinstance(application, Application)

    # Pick the event loop: an asyncio one when a coroutine was requested,
    # otherwise the given or default prompt_toolkit loop.
    if return_asyncio_coroutine:
        eventloop = create_asyncio_eventloop()
    else:
        eventloop = eventloop or create_eventloop()

    # Create CommandLineInterface.
    cli = CommandLineInterface(
        application=application,
        eventloop=eventloop,
        output=create_output(true_color=true_color))

    # Set up refresh interval.
    if refresh_interval:
        finished = [False]

        def start_refresh_loop(cli):
            # Background daemon thread that requests a redraw every
            # `refresh_interval` seconds until the CLI stops.
            def run():
                while not finished[0]:
                    time.sleep(refresh_interval)
                    cli.request_redraw()
            refresher = threading.Thread(target=run)
            refresher.daemon = True
            refresher.start()

        def stop_refresh_loop(cli):
            finished[0] = True

        cli.on_start += start_refresh_loop
        cli.on_stop += stop_refresh_loop

    # Replace stdout.
    patch_context = cli.patch_stdout_context(raw=True) if patch_stdout else DummyContext()

    # Read input and return it.
    if return_asyncio_coroutine:
        # Create an asyncio coroutine and call it.
        exec_context = {'patch_context': patch_context, 'cli': cli,
                        'Document': Document}
        exec_(textwrap.dedent('''
        def prompt_coro():
            # Inline import, because it slows down startup when asyncio is not
            # needed.
            import asyncio

            @asyncio.coroutine
            def run():
                with patch_context:
                    result = yield from cli.run_async()

                    if isinstance(result, Document):  # Backwards-compatibility.
                        return result.text
                    return result
            return run()
        '''), exec_context)

        return exec_context['prompt_coro']()
    else:
        try:
            with patch_context:
                result = cli.run()
                if isinstance(result, Document):  # Backwards-compatibility.
                    return result.text
                return result
        finally:
            eventloop.close()
def _create_ansi_color_dict(color_cls):
    """
    Build the mapping from the 16 named ANSI colors to their Windows console
    attribute codes, using the flag constants found on `color_cls`.
    """
    bright = color_cls.INTENSITY

    # Grayscale / default entries.
    mapping = {
        'ansidefault': color_cls.BLACK,
        'ansiblack': color_cls.BLACK,
        'ansidarkgray': color_cls.BLACK | bright,
        'ansilightgray': color_cls.GRAY,
        'ansiwhite': color_cls.GRAY | bright,
    }

    # Low intensity variants.
    mapping.update({
        'ansidarkred': color_cls.RED,
        'ansidarkgreen': color_cls.GREEN,
        'ansibrown': color_cls.YELLOW,
        'ansidarkblue': color_cls.BLUE,
        'ansipurple': color_cls.MAGENTA,
        'ansiteal': color_cls.CYAN,
    })

    # High intensity variants.
    mapping.update({
        'ansired': color_cls.RED | bright,
        'ansigreen': color_cls.GREEN | bright,
        'ansiyellow': color_cls.YELLOW | bright,
        'ansiblue': color_cls.BLUE | bright,
        'ansifuchsia': color_cls.MAGENTA | bright,
        'ansiturquoise': color_cls.CYAN | bright,
    })

    return mapping
def _winapi(self, func, *a, **kw):
    """
    Flush the output buffer, then call the given win32 API function.

    Buffered text must reach the console before cursor/attribute calls,
    otherwise the call would apply to stale screen state.

    :param func: ctypes function (from ``windll``) to invoke.
    :returns: Whatever `func` returns, or None when the call raised
        `ArgumentError` (best-effort: the error is only logged in debug mode).
    """
    self.flush()

    if _DEBUG_RENDER_OUTPUT:
        # Record the call name, arguments and argument types for debugging.
        self.LOG.write(('%r' % func.__name__).encode('utf-8') + b'\n')
        self.LOG.write(b' ' + ', '.join(['%r' % i for i in a]).encode('utf-8') + b'\n')
        self.LOG.write(b' ' + ', '.join(['%r' % type(i) for i in a]).encode('utf-8') + b'\n')
        self.LOG.flush()

    try:
        return func(*a, **kw)
    except ArgumentError as e:
        # NOTE(review): exceptions are swallowed deliberately here; only the
        # debug log records them. Callers therefore may receive None.
        if _DEBUG_RENDER_OUTPUT:
            self.LOG.write(('    Error in %r %r %s\n' % (func.__name__, e, e)).encode('utf-8'))
def get_win32_screen_buffer_info(self):
    """
    Return the screen buffer info for the console
    (a ``CONSOLE_SCREEN_BUFFER_INFO`` structure).

    :raises NoConsoleScreenBufferError: when stdout is not attached to a
        Windows console.
    """
    # NOTE: We don't call the `GetConsoleScreenBufferInfo` API through
    #       `self._winapi`. Doing so causes Python to crash on certain 64bit
    #       Python versions. (Reproduced with 64bit Python 2.7.6, on Windows
    #       10). It is not clear why. Possibly, it has to do with passing
    #       these objects as an argument, or through *args.

    # The Python documentation contains the following - possibly related - warning:
    #     ctypes does not support passing unions or structures with
    #     bit-fields to functions by value. While this may work on 32-bit
    #     x86, it's not guaranteed by the library to work in the general
    #     case. Unions and structures with bit-fields should always be
    #     passed to functions by pointer.

    # Also see:
    #    - https://github.com/ipython/ipython/issues/10070
    #    - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406
    #    - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86

    # Flush pending output first, so the reported cursor position is accurate.
    self.flush()
    sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
    success = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo))

    # (Kept for reference: the crashing `_winapi` variant.)
    # success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo,
    #                        self.hconsole, byref(sbinfo))

    if success:
        return sbinfo
    else:
        raise NoConsoleScreenBufferError
def enter_alternate_screen(self):
    """ Go to alternate screen buffer. """
    if self._in_alternate_screen:
        return

    GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000

    # Create a new console buffer and make it the active one.
    new_buffer = self._winapi(
        windll.kernel32.CreateConsoleScreenBuffer,
        GENERIC_READ | GENERIC_WRITE,
        DWORD(0), None, DWORD(1), None)
    self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, new_buffer)

    self.hconsole = new_buffer
    self._in_alternate_screen = True
def quit_alternate_screen(self):
    """ Make stdout again the active buffer. """
    if not self._in_alternate_screen:
        return

    # Re-activate the original stdout buffer and dispose of the alternate one.
    stdout_handle = self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE)
    self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout_handle)
    self._winapi(windll.kernel32.CloseHandle, self.hconsole)

    self.hconsole = stdout_handle
    self._in_alternate_screen = False
def win32_refresh_window(cls):
    """
    Call win32 API to refresh the whole Window.

    This is sometimes necessary when the application paints background
    for completion menus. When the menu disappears, it leaves traces due
    to a bug in the Windows Console. Sending a repaint request solves it.
    """
    RDW_INVALIDATE = 0x0001

    console_window = windll.kernel32.GetConsoleWindow()
    windll.user32.RedrawWindow(console_window, None, None, c_uint(RDW_INVALIDATE))
def _build_color_table():
    """
    Build the RGB-to-16-color conversion table: a list of
    (r, g, b, foreground_attr, background_attr) tuples.
    """
    # (r, g, b, attribute name, high-intensity flag) — same order and values
    # as the classic 16-color console palette.
    spec = [
        (0x00, 0x00, 0x00, 'BLACK', False),
        (0x00, 0x00, 0xaa, 'BLUE', False),
        (0x00, 0xaa, 0x00, 'GREEN', False),
        (0x00, 0xaa, 0xaa, 'CYAN', False),
        (0xaa, 0x00, 0x00, 'RED', False),
        (0xaa, 0x00, 0xaa, 'MAGENTA', False),
        (0xaa, 0xaa, 0x00, 'YELLOW', False),
        (0x88, 0x88, 0x88, 'GRAY', False),
        (0x44, 0x44, 0xff, 'BLUE', True),
        (0x44, 0xff, 0x44, 'GREEN', True),
        (0x44, 0xff, 0xff, 'CYAN', True),
        (0xff, 0x44, 0x44, 'RED', True),
        (0xff, 0x44, 0xff, 'MAGENTA', True),
        (0xff, 0xff, 0x44, 'YELLOW', True),
        (0x44, 0x44, 0x44, 'BLACK', True),
        (0xff, 0xff, 0xff, 'GRAY', True),
    ]

    table = []
    for r, g, b, attr_name, bright in spec:
        fg = getattr(FOREGROUND_COLOR, attr_name)
        bg = getattr(BACKROUND_COLOR, attr_name)
        if bright:
            fg |= FOREGROUND_COLOR.INTENSITY
            bg |= BACKROUND_COLOR.INTENSITY
        table.append((r, g, b, fg, bg))
    return table
def log_tensor_stats(self, tensor, name):
    """
    Add distribution statistics on a tensor's elements to the current
    History entry, as a `wandb.Histogram` stored under `name`.

    :param tensor: A torch Tensor, or a (possibly nested) list/tuple of
        tensors; nested structures are flattened and concatenated first.
    :param name: Key under which the histogram is logged.
    :raises TypeError: when `tensor` is not tensor-like (no `shape`).
    """
    # TODO Handle the case of duplicate names.

    # Flatten nested lists/tuples of tensors into a single 1-D tensor.
    if (isinstance(tensor, tuple) or isinstance(tensor, list)):
        while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (isinstance(tensor[0], tuple) or isinstance(tensor[0], list)):
            tensor = [item for sublist in tensor for item in sublist]
        tensor = torch.cat([t.view(-1) for t in tensor])

    # checking for inheritance from _TensorBase didn't work for some reason
    if not hasattr(tensor, 'shape'):
        cls = type(tensor)
        raise TypeError('Expected Tensor, not {}.{}'.format(
            cls.__module__, cls.__name__))

    # Skip entirely when there is no history, or when this step is not one
    # where statistics should be computed.
    history = self._history()
    if history is None or not history.compute:
        return

    # HalfTensors on cpu do not support view(), upconvert to 32bit
    if isinstance(tensor, torch.HalfTensor):
        tensor = tensor.clone().type(torch.FloatTensor).detach()

    flat = tensor.view(-1)

    # For pytorch 0.3 we use unoptimized numpy histograms (detach is new in 0.4)
    if not hasattr(flat, "detach"):
        tensor = flat.cpu().clone().numpy()
        history.row.update({
            name: wandb.Histogram(tensor)
        })
        return

    if flat.is_cuda:
        # TODO(jhr): see if pytorch will accept something upstream to check cuda support for ops
        # until then, we are going to have to catch a specific exception to check for histc support.
        if self._is_cuda_histc_supported is None:
            # Probe once and cache the result on the instance.
            self._is_cuda_histc_supported = True
            check = torch.cuda.FloatTensor(1).fill_(0)
            try:
                check = flat.histc(bins=self._num_bins)
            except RuntimeError as e:
                # Only work around missing support with specific exception
                if str(e).startswith("_th_histc is not implemented"):
                    self._is_cuda_histc_supported = False

        if not self._is_cuda_histc_supported:
            # Fall back to computing the histogram on the CPU.
            flat = flat.cpu().clone().detach()

        # As of torch 1.0.1.post2+nightly, float16 cuda summary ops are not supported (convert to float32)
        if isinstance(flat, torch.cuda.HalfTensor):
            flat = flat.clone().type(torch.cuda.FloatTensor).detach()

    if isinstance(flat, torch.HalfTensor):
        flat = flat.clone().type(torch.FloatTensor).detach()

    # Histogram over the full observed value range.
    tmin = flat.min().item()
    tmax = flat.max().item()
    tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
    tensor = tensor.cpu().clone().detach()
    bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)
    history.row.update({
        name: wandb.Histogram(np_histogram=(
            tensor.tolist(), bins.tolist()))
    })
def get_height_for_line(self, lineno, width):
    """
    Return the height that a given line would need if it is rendered in a
    space with the given width. Results are memoized per (lineno, width).
    """
    cache_key = (lineno, width)
    if cache_key not in self._line_heights:
        line_text = token_list_to_text(self.get_line(lineno))
        self._line_heights[cache_key] = self.get_height_for_text(line_text, width)
    return self._line_heights[cache_key]
def preferred_width(self, cli, max_available_width):
    """
    Return the preferred width for this control: the display width of the
    longest line in the (cached) token content.
    """
    text = token_list_to_text(self._get_tokens_cached(cli))
    return max(get_cwidth(line) for line in text.split('\n'))
def mouse_handler(self, cli, mouse_event):
    """
    Handle mouse events.

    When the token list contains mouse handlers and the user clicked on
    one of those tokens, the matching handler is called. (That handler can
    still return `NotImplemented` to let the `Window` handle the event.)
    """
    if not self._tokens:
        return NotImplemented

    # Split the token stream into lines and pick the clicked one.
    lines = list(split_lines(self._tokens))
    try:
        line_tokens = lines[mouse_event.position.y]
    except IndexError:
        return NotImplemented

    # Walk the tokens on this line until we pass the clicked column.
    xpos = mouse_event.position.x
    consumed = 0
    for token_tuple in line_tokens:
        consumed += len(token_tuple[1])
        if consumed >= xpos:
            if len(token_tuple) >= 3:
                # Handler attached to this token: call it and propagate its
                # result (which may itself be NotImplemented).
                return token_tuple[2](cli, mouse_event)
            break

    # No handler under the mouse; don't handle here.
    return NotImplemented
def _get_tokens_for_line_func(self, cli, document):
    """
    Create a function that returns the tokens for a given line.
    The lexer output is cached per `document.text`.
    """
    return self._token_cache.get(
        document.text, lambda: self.lexer.lex_document(cli, document))
def create_content(self, cli, width, height):
    """
    Create a `UIContent` instance for this buffer control.

    :param cli: The active `CommandLineInterface`.
    :param width: Available width. (Not used here; wrapping happens in the
        enclosing `Window`.)
    :param height: Available height. (Not used here.)
    """
    buffer = self._buffer(cli)

    # Get the document to be shown. If we are currently searching (the
    # search buffer has focus, and the preview_search filter is enabled),
    # then use the search document, which has possibly a different
    # text/cursor position.)
    def preview_now():
        """ True when we should preview a search. """
        return bool(self.preview_search(cli) and
                    cli.buffers[self.search_buffer_name].text)

    if preview_now():
        if self.get_search_state:
            ss = self.get_search_state(cli)
        else:
            ss = cli.search_state

        document = buffer.document_for_search(SearchState(
            text=cli.current_buffer.text,
            direction=ss.direction,
            ignore_case=ss.ignore_case))
    else:
        document = buffer.document

    # Processed-line accessor; also remembered for the mouse handler, which
    # needs to map display coordinates back to source coordinates.
    get_processed_line = self._create_get_processed_line_func(cli, document)
    self._last_get_processed_line = get_processed_line

    def translate_rowcol(row, col):
        " Return the content column for this coordinate. "
        return Point(y=row, x=get_processed_line(row).source_to_display(col))

    def get_line(i):
        " Return the tokens for a given line number. "
        tokens = get_processed_line(i).tokens

        # Add a space at the end, because that is a possible cursor
        # position. (When inserting after the input.) We should do this on
        # all the lines, not just the line containing the cursor. (Because
        # otherwise, line wrapping/scrolling could change when moving the
        # cursor around.)
        tokens = tokens + [(self.default_char.token, ' ')]
        return tokens

    content = UIContent(
        get_line=get_line,
        line_count=document.line_count,
        cursor_position=translate_rowcol(document.cursor_position_row,
                                         document.cursor_position_col),
        default_char=self.default_char)

    # If there is an auto completion going on, use that start point for a
    # pop-up menu position. (But only when this buffer has the focus --
    # there is only one place for a menu, determined by the focussed buffer.)
    if cli.current_buffer_name == self.buffer_name:
        menu_position = self.menu_position(cli) if self.menu_position else None
        if menu_position is not None:
            assert isinstance(menu_position, int)
            menu_row, menu_col = buffer.document.translate_index_to_position(menu_position)
            content.menu_position = translate_rowcol(menu_row, menu_col)
        elif buffer.complete_state:
            # Position for completion menu.
            # Note: We use 'min', because the original cursor position could be
            #       behind the input string when the actual completion is for
            #       some reason shorter than the text we had before. (A completion
            #       can change and shorten the input.)
            menu_row, menu_col = buffer.document.translate_index_to_position(
                min(buffer.cursor_position,
                    buffer.complete_state.original_document.cursor_position))
            content.menu_position = translate_rowcol(menu_row, menu_col)
        else:
            content.menu_position = None

    return content
def mouse_handler(self, cli, mouse_event):
    """
    Mouse handler for this control.

    When the buffer has the focus: places the cursor, starts a selection on
    drag, and selects the word under the cursor on double click. When it
    does not have the focus: focusses it on mouse-up (if `focus_on_click`).

    :returns: None when the event was handled here, `NotImplemented` to let
        the enclosing `Window` handle it (e.g. scroll events).
    """
    buffer = self._buffer(cli)
    position = mouse_event.position

    # Focus buffer when clicked.
    if self.has_focus(cli):
        if self._last_get_processed_line:
            processed_line = self._last_get_processed_line(position.y)

            # Translate coordinates back to the cursor position of the
            # original input.
            xpos = processed_line.display_to_source(position.x)
            index = buffer.document.translate_row_col_to_index(position.y, xpos)

            # Set the cursor position.
            if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
                buffer.exit_selection()
                buffer.cursor_position = index

            elif mouse_event.event_type == MouseEventType.MOUSE_UP:
                # When the cursor was moved to another place, select the text.
                # (The >1 is actually a small but acceptable workaround for
                # selecting text in Vi navigation mode. In navigation mode,
                # the cursor can never be after the text, so the cursor
                # will be repositioned automatically.)
                if abs(buffer.cursor_position - index) > 1:
                    buffer.start_selection(selection_type=SelectionType.CHARACTERS)
                    buffer.cursor_position = index

                # Select word around cursor on double click.
                # Two MOUSE_UP events in a short timespan are considered a double click.
                double_click = self._last_click_timestamp and time.time() - self._last_click_timestamp < .3
                self._last_click_timestamp = time.time()

                if double_click:
                    start, end = buffer.document.find_boundaries_of_current_word()
                    buffer.cursor_position += start
                    buffer.start_selection(selection_type=SelectionType.CHARACTERS)
                    buffer.cursor_position += end - start
            else:
                # Don't handle scroll events here.
                return NotImplemented

    # Not focussed, but focussing on click events.
    else:
        if self.focus_on_click(cli) and mouse_event.event_type == MouseEventType.MOUSE_UP:
            # Focus happens on mouseup. (If we did this on mousedown, the
            # up event will be received at the point where this widget is
            # focussed and be handled anyway.)
            cli.focus(self.buffer_name)
        else:
            return NotImplemented
def _get_arg_tokens(cli):
    """ Tokens for the arg-prompt (the repeat-count display). """
    arg_text = str(cli.input_processor.arg)
    return [
        (Token.Prompt.Arg, '(arg: '),
        (Token.Prompt.Arg.Text, arg_text),
        (Token.Prompt.Arg, ') '),
    ]
def from_message(cls, message='> '):
    """
    Create a default prompt with a static message text.
    """
    assert isinstance(message, text_type)
    # The message is captured in a closure; the callable returns one
    # single-token line for every invocation.
    return cls(lambda cli: [(Token.Prompt, message)])
def write_jsonl_file(fname, data):
    """
    Write a jsonl file: one stripped row per line, blank rows skipped.

    :param fname: Destination path.
    :param data: list of json-encoded strings; anything else is reported
        as malformed and nothing is written.
    """
    if not isinstance(data, list):
        print('warning: malformed json data for file', fname)
        return

    # TODO: other malformed cases?
    stripped_rows = [row.strip() for row in data]
    with open(fname, 'w') as handle:
        handle.writelines(row + '\n' for row in stripped_rows if row)
def received_winch(self):
    """
    Notify the event loop that SIGWINCH has been received
    """
    # Process signal asynchronously, because this handler can write to the
    # output, and doing this inside the signal handler causes easily
    # reentrant calls, giving runtime errors.
    # Further, this has to be thread safe: when the CommandLineInterface
    # runs not in the main thread, this function still has to be called
    # from the main thread. (The only place where we can install signal
    # handlers.)
    def notify_size_changed():
        callbacks = self._callbacks
        if callbacks:
            callbacks.terminal_size_changed()

    self.call_from_executor(notify_size_changed)
def add_reader(self, fd, callback):
    " Add read file descriptor to the event loop. "
    descriptor = fd_to_int(fd)
    # Remember the callback before registering, so a wakeup cannot observe
    # a registered-but-unmapped descriptor.
    self._read_fds[descriptor] = callback
    self.selector.register(descriptor)
def remove_reader(self, fd):
    " Remove read file descriptor from the event loop. "
    descriptor = fd_to_int(fd)
    try:
        del self._read_fds[descriptor]
    except KeyError:
        # Not registered; nothing to do.
        pass
    else:
        self.selector.unregister(descriptor)
def memoized(maxsize=1024):
    """
    Memoization decorator for immutable classes and pure functions.
    Results are kept in a bounded `SimpleCache`, keyed on the positional
    and keyword arguments.
    """
    cache = SimpleCache(maxsize=maxsize)

    def decorator(obj):
        @wraps(obj)
        def wrapper(*args, **kwargs):
            key = (args, tuple(kwargs.items()))
            return cache.get(key, lambda: obj(*args, **kwargs))
        return wrapper
    return decorator
def get(self, key, getter_func):
    """
    Get object from the cache.
    If not found, call `getter_func` to resolve it, store the result, and
    evict the oldest entry when the cache grows past `maxsize`.
    """
    if key in self._data:
        return self._data[key]

    # Not cached yet: resolve and remember.
    value = getter_func()
    self._data[key] = value
    self._keys.append(key)

    # Evict the oldest key when the size is exceeded.
    if len(self._data) > self.maxsize:
        oldest = self._keys.popleft()
        self._data.pop(oldest, None)

    return value
def add_binding(self, *keys, **kwargs):
    """
    Decorator for annotating key bindings.

    :param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
        when this key binding is active.
    :param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
        When True, ignore potential longer matches when this key binding is
        hit. E.g. when there is an active eager key binding for Ctrl-X,
        execute the handler immediately and ignore the key binding for
        Ctrl-X Ctrl-E of which it is a prefix.
    :param save_before: Callable that takes an `Event` and returns True if
        we should save the current buffer, before handling the event.
        (That's the default.)
    """
    filter = to_cli_filter(kwargs.pop('filter', True))
    eager = to_cli_filter(kwargs.pop('eager', False))
    save_before = kwargs.pop('save_before', lambda e: True)
    to_cli_filter(kwargs.pop('invalidate_ui', True))  # Deprecated! (ignored.)

    # Any remaining keyword argument is a typo on the caller's side.
    assert not kwargs
    assert keys
    assert all(isinstance(k, (Key, text_type)) for k in keys), \
        'Key bindings should consist of Key and string (unicode) instances.'
    assert callable(save_before)

    if isinstance(filter, Never):
        # When a filter is Never, it will always stay disabled, so in that case
        # don't bother putting it in the registry. It will slow down every key
        # press otherwise.
        def decorator(func):
            return func
    else:
        def decorator(func):
            self.key_bindings.append(
                _Binding(keys, func, filter=filter, eager=eager,
                         save_before=save_before))
            self._clear_cache()

            return func
    return decorator
def remove_binding(self, function):
    """
    Remove a key binding.

    This expects a function that was given to the `add_binding` method as
    parameter. Raises `ValueError` when the given function was not
    registered before.
    """
    assert callable(function)

    # Find the first binding whose handler is this function.
    match = next(
        (b for b in self.key_bindings if b.handler == function), None)

    if match is None:
        raise ValueError('Binding not found: %r' % (function, ))

    self.key_bindings.remove(match)
    self._clear_cache()
def _update_cache(self):
    """
    If the original registry was changed, update our copy version.

    Rebuilds `self._registry2` by copying every binding from both wrapped
    registries while AND-ing our own `self.filter` into each binding's
    filter, so the whole group can be enabled/disabled at once.
    """
    # Version tuple of the wrapped registries; cheap staleness check.
    expected_version = (self.registry._version, self._extra_registry._version)

    if self._last_version != expected_version:
        registry2 = Registry()

        # Copy all bindings from `self.registry`, adding our condition.
        for reg in (self.registry, self._extra_registry):
            for b in reg.key_bindings:
                registry2.key_bindings.append(
                    _Binding(
                        keys=b.keys,
                        handler=b.handler,
                        filter=self.filter & b.filter,
                        eager=b.eager,
                        save_before=b.save_before))

        self._registry2 = registry2
        self._last_version = expected_version
def _update_cache(self):
    """
    If one of the original registries was changed, rebuild our merged
    registry (`self._registry2`) from all wrapped registries plus the
    extra registry.
    """
    current_version = (
        tuple(reg._version for reg in self.registries) +
        (self._extra_registry._version, ))

    if self._last_version == current_version:
        return

    merged = Registry()
    for source in list(self.registries) + [self._extra_registry]:
        merged.key_bindings.extend(source.key_bindings)

    self._registry2 = merged
    self._last_version = current_version
def nest(thing):
    """
    Flatten with tensorflow's nest utility when tensorflow is importable;
    otherwise just wrap the object in a one-element list.
    """
    tf_util = util.get_module('tensorflow.python.util')
    if not tf_util:
        return [thing]
    return tf_util.nest.flatten(thing)
def val_to_json(key, val, mode="summary", step=None):
    """
    Convert a wandb datatype to its JSON representation.

    :param key: Name under which the value is logged (used in generated
        media file names).
    :param val: The value: matplotlib/plotly figure, (list of)
        `IterableMedia`, `Histogram`, `Graph`, `Table`, or any plain value
        (returned unchanged).
    :param mode: "summary" or "history"; Graphs are only allowed in summary.
    :param step: Step number used in media file names; defaults to the
        string "summary" when media is transformed without a step.
    :raises ValueError: for a mixed media list, or a Graph in history mode.
    """
    converted = val
    typename = util.get_full_typename(val)

    if util.is_matplotlib_typename(typename):
        # This handles plots with images in it because plotly doesn't support it
        # TODO: should we handle a list of plots?
        val = util.ensure_matplotlib_figure(val)
        if any(len(ax.images) > 0 for ax in val.axes):
            # Rasterize the whole figure to an Image when any axis embeds one.
            PILImage = util.get_module(
                "PIL.Image", required="Logging plots with images requires pil: pip install pillow")
            buf = six.BytesIO()
            val.savefig(buf)
            val = Image(PILImage.open(buf))
        else:
            converted = util.convert_plots(val)
    elif util.is_plotly_typename(typename):
        converted = util.convert_plots(val)

    # A single media object is treated as a one-element list below.
    if isinstance(val, IterableMedia):
        val = [val]

    if isinstance(val, collections.Sequence) and len(val) > 0:
        is_media = [isinstance(v, IterableMedia) for v in val]
        if all(is_media):
            cwd = wandb.run.dir if wandb.run else "."
            if step is None:
                step = "summary"
            # Dispatch on the first element's type; `all(is_media)` above
            # guarantees a homogeneous media list here.
            if isinstance(val[0], Image):
                converted = Image.transform(val, cwd,
                                            "{}_{}.jpg".format(key, step))
            elif isinstance(val[0], Audio):
                converted = Audio.transform(val, cwd, key, step)
            elif isinstance(val[0], Html):
                converted = Html.transform(val, cwd, key, step)
            elif isinstance(val[0], Object3D):
                converted = Object3D.transform(val, cwd, key, step)
        elif any(is_media):
            raise ValueError(
                "Mixed media types in the same list aren't supported")
    elif isinstance(val, Histogram):
        converted = Histogram.transform(val)
    elif isinstance(val, Graph):
        if mode == "history":
            raise ValueError("Graphs are only supported in summary")
        converted = Graph.transform(val)
    elif isinstance(val, Table):
        converted = Table.transform(val)

    return converted
def to_json(payload, mode="history"):
    """
    Convert every value in a (possibly nested) dict into its JSON
    representation, in place, and return the same dict.
    """
    for key in list(payload):
        value = payload[key]
        if isinstance(value, dict):
            payload[key] = to_json(value, mode)
        else:
            payload[key] = val_to_json(key, value, mode,
                                       step=payload.get("_step"))
    return payload
def guess_mode(self, data):
    """
    Guess what type of image the np.array is representing, based on its
    rank and last-axis size.
    """
    # TODO: do we want to support dimensions being at the beginning of the array?
    if data.ndim == 2:
        return "L"

    channels = data.shape[-1]
    if channels == 3:
        return "RGB"
    if channels == 4:
        return "RGBA"

    raise ValueError(
        "Un-supported shape for image conversion %s" % list(data.shape))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(images, out_dir, fname): """ Combines a list of images into a single sprite returning meta information """
from PIL import Image as PILImage
base = os.path.join(out_dir, "media", "images")
# All images are laid out as one horizontal strip; the size of the first
# image is used for every slot — assumes uniform sizes, TODO confirm callers
# guarantee this.
width, height = images[0].image.size
num_images_to_log = len(images)
if num_images_to_log > Image.MAX_IMAGES:
    # Cap the number of logged images per step.
    logging.warn(
        "The maximum number of images to store per step is %i." %
        Image.MAX_IMAGES)
    num_images_to_log = Image.MAX_IMAGES
if width * num_images_to_log > Image.MAX_DIMENSION:
    # The sprite would exceed the maximum allowed total width; keep only as
    # many images as fit.
    max_images_by_dimension = Image.MAX_DIMENSION // width
    logging.warn("The maximum total width for all images in a collection is 65500, or {} images, each with a width of {} pixels. Only logging the first {} images.".format(max_images_by_dimension, width, max_images_by_dimension))
    num_images_to_log = max_images_by_dimension
total_width = width * num_images_to_log
sprite = PILImage.new(
    mode='RGB',
    size=(total_width, height),
    color=(0, 0, 0))
# Paste each image side by side into the strip.
for i, image in enumerate(images[:num_images_to_log]):
    location = width * i
    sprite.paste(image.image, (location, 0))
util.mkdir_exists_ok(base)
sprite.save(os.path.join(base, fname), transparency=0)
# Meta information describing the sprite layout for the backend/UI.
meta = {"width": width, "height": height,
        "count": num_images_to_log, "_type": "images"}
# TODO: hacky way to enable image grouping for now
grouping = images[0].grouping
if grouping:
    meta["grouping"] = grouping
captions = Image.captions(images[:num_images_to_log])
if captions:
    meta["captions"] = captions
return meta
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_command(self, command): """ Handle command. This will run in a separate thread, in order not to block the event loop. """
logger.info('Handle command %r', command)

def in_executor():
    # Runs on a worker thread so a blocking callback cannot stall the
    # event loop.
    self.handling_command = True
    try:
        if self.callback is not None:
            self.callback(self, command)
    finally:
        # Hop back onto the event loop thread for the UI reset, even if
        # the callback raised.
        self.server.call_from_executor(done)

def done():
    # Runs back on the event loop thread once the callback finished.
    self.handling_command = False

    # Reset state and draw again. (If the connection is still open --
    # the application could have called TelnetConnection.close()
    if not self.closed:
        self.cli.reset()
        self.cli.buffers[DEFAULT_BUFFER].reset()
        self.cli.renderer.request_absolute_cursor_position()
        self.vt100_output.flush()
        self.cli._redraw()

self.server.run_in_executor(in_executor)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def erase_screen(self): """ Erase output screen. """
# Clear the remote screen, home the cursor, and push the escape
# sequences out immediately.
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send(self, data): """ Send text to the client. """
def send(self, data):
    """ Send text to the client. """
    assert isinstance(data, text_type)

    # No real pseudo terminal was allocated and the telnet connection is
    # raw, so we are responsible for translating bare '\n' into '\r\n'
    # before it goes over the wire.
    payload = data.replace('\n', '\r\n')
    self.stdout.write(payload)
    self.stdout.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _process_callbacks(self): """ Process callbacks from `call_from_executor` in eventloop. """
# Flush all the pipe content. os.read(self._schedule_pipe[0], 1024) # Process calls from executor. calls_from_executor, self._calls_from_executor = self._calls_from_executor, [] for c in calls_from_executor: c()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Run the eventloop for the telnet server. """
# Blocks forever, multiplexing the listen socket, the internal wake-up
# pipe, and every client socket with select().
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r',
            self.host, self.port)

try:
    while True:
        # Removed closed connections.
        self.connections = set([c for c in self.connections if not c.closed])

        # Ignore connections handling commands.
        connections = set([c for c in self.connections if not c.handling_command])

        # Wait for next event.
        read_list = (
            [listen_socket, self._schedule_pipe[0]] +
            [c.conn for c in connections])
        read, _, _ = select.select(read_list, [], [])

        for s in read:
            # When the socket itself is ready, accept a new connection.
            if s == listen_socket:
                self._accept(listen_socket)

            # If we receive something on our "call_from_executor" pipe, process
            # these callbacks in a thread safe way.
            elif s == self._schedule_pipe[0]:
                self._process_callbacks()

            # Handle incoming data on socket.
            else:
                self._handle_incoming_data(s)
finally:
    # Always release the listening socket, even on an unexpected error.
    listen_socket.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _accept(self, listen_socket): """ Accept new incoming connection. """
conn, addr = listen_socket.accept()
# Wrap the raw socket in a TelnetConnection and start tracking it.
connection = TelnetConnection(conn, addr, self.application, self,
                              encoding=self.encoding)
self.connections.add(connection)

logger.info('New connection %r %r', *addr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_incoming_data(self, conn): """ Handle incoming data on socket. """
connection = [c for c in self.connections if c.conn == conn][0] data = conn.recv(1024) if data: connection.feed(data) else: self.connections.remove(connection)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self, *args, **kwargs): """Wrapper around execute that logs in cases of failure."""
try:
    return self.client.execute(*args, **kwargs)
except requests.exceptions.HTTPError as err:
    res = err.response
    # Surface the server's response before re-raising so failures are
    # diagnosable from the logs.
    logger.error("%s response executing GraphQL." % res.status_code)
    logger.error(res.text)
    self.display_gorilla_error_if_found(res)
    # Re-raise preserving the original traceback (py2/py3 compatible).
    six.reraise(*sys.exc_info())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_pip(self, out_dir): """Saves the current working set of pip packages to requirements.txt"""
def save_pip(self, out_dir):
    """Save the currently-installed pip packages to <out_dir>/requirements.txt.

    Best-effort: any failure is logged (with its cause) rather than raised,
    so metadata capture never aborts a run.

    Args:
        out_dir (str): Directory in which to write requirements.txt.
    """
    try:
        import pkg_resources

        # One "name==version" line per installed distribution, sorted for a
        # stable, diff-friendly file.
        installed_packages_list = sorted(
            "%s==%s" % (d.key, d.version)
            for d in pkg_resources.working_set
        )
        with open(os.path.join(out_dir, 'requirements.txt'), 'w') as f:
            f.write("\n".join(installed_packages_list))
    except Exception as e:
        # Fix: the exception was previously captured but never logged, which
        # made failures impossible to diagnose.
        logger.error("Error saving pip packages: %s", e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_patches(self, out_dir): """Save the current state of this repository to one or more patches. Makes one patch against HEAD and another one against the most recent commit that occurs in an upstream branch. This way we can be robust to history editing as long as the user never does "push -f" to break history on an upstream branch. Writes the first patch to <out_dir>/diff.patch and the second to <out_dir>/upstream_diff_<commit_id>.patch. Args: out_dir (str): Directory to write the patch files. """
# Nothing to do when this process is not inside a git repository.
if not self.git.enabled:
    return False

try:
    root = self.git.root
    if self.git.dirty:
        # Patch of uncommitted changes, relative to HEAD.
        patch_path = os.path.join(out_dir, 'diff.patch')
        if self.git.has_submodule_diff:
            with open(patch_path, 'wb') as patch:
                # we diff against HEAD to ensure we get changes in the index
                subprocess.check_call(
                    ['git', 'diff', '--submodule=diff', 'HEAD'],
                    stdout=patch, cwd=root, timeout=5)
        else:
            with open(patch_path, 'wb') as patch:
                subprocess.check_call(
                    ['git', 'diff', 'HEAD'],
                    stdout=patch, cwd=root, timeout=5)

    # Second patch: diff against the most recent commit shared with an
    # upstream branch, so history edits after that point don't break replay.
    upstream_commit = self.git.get_upstream_fork_point()
    if upstream_commit and upstream_commit != self.git.repo.head.commit:
        sha = upstream_commit.hexsha
        upstream_patch_path = os.path.join(
            out_dir, 'upstream_diff_{}.patch'.format(sha))
        if self.git.has_submodule_diff:
            with open(upstream_patch_path, 'wb') as upstream_patch:
                subprocess.check_call(
                    ['git', 'diff', '--submodule=diff', sha],
                    stdout=upstream_patch, cwd=root, timeout=5)
        else:
            with open(upstream_patch_path, 'wb') as upstream_patch:
                subprocess.check_call(
                    ['git', 'diff', sha],
                    stdout=upstream_patch, cwd=root, timeout=5)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
    # Best-effort: a failed or slow git invocation must not abort the run.
    logger.error('Error generating diff')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_projects(self, entity=None): """Lists projects in W&B scoped by entity. Args: entity (str, optional): The entity to scope this project to. Returns: [{"id","name","description"}] """
query = gql('''
query Models($entity: String!) {
    models(first: 10, entityName: $entity) {
        edges {
            node {
                id
                name
                description
            }
        }
    }
}
''')
# Default to the configured entity; flatten the Relay-style edges into a
# plain list of node dicts.
return self._flatten_edges(self.gql(query, variable_values={
    'entity': entity or self.settings('entity')})['models'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_runs(self, project, entity=None): """Lists runs in W&B scoped by project. Args: project (str): The project to scope the runs to entity (str, optional): The entity to scope this project to. Defaults to public models Returns: [{"id", "name", "description"}] """
query = gql('''
query Buckets($model: String!, $entity: String!) {
    model(name: $model, entityName: $entity) {
        buckets(first: 10) {
            edges {
                node {
                    id
                    name
                    description
                }
            }
        }
    }
}
''')
# Fall back to the configured entity/project; flatten Relay edges into a
# plain list of node dicts.
return self._flatten_edges(self.gql(query, variable_values={
    'entity': entity or self.settings('entity'),
    'model': project or self.settings('project')})['model']['buckets'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def launch_run(self, command, project=None, entity=None, run_id=None): """Launch a run in the cloud. Args: command (str): The command to run program (str): The file to run project (str): The project to scope the runs to entity (str, optional): The entity to scope this project to. Defaults to public models run_id (str, optional): The run_id to scope to Returns: [{"podName","status"}] """
query = gql('''
mutation launchRun(
    $entity: String
    $model: String
    $runId: String
    $image: String
    $command: String
    $patch: String
    $cwd: String
    $datasets: [String]
) {
    launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,
        image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {
        podName
        status
        runId
    }
}
''')
patch = BytesIO()
if self.git.dirty:
    # Capture uncommitted local changes so the remote run can replay them.
    self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
    patch.seek(0)
cwd = "."
if self.git.enabled:
    # Express the current directory relative to the repository root.
    cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
return self.gql(query, variable_values={
    'entity': entity or self.settings('entity'),
    'model': project or self.settings('project'),
    'command': command,
    'runId': run_id,
    'patch': patch.read().decode("utf8"),
    'cwd': cwd
})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_config(self, project, run=None, entity=None): """Get the relevant configs for a run Args: project (str): The project to download, (can include bucket) run (str, optional): The run to download entity (str, optional): The entity to scope this project to. """
def run_config(self, project, run=None, entity=None):
    """Get the relevant configs for a run.

    Args:
        project (str): The project to download, (can include bucket)
        run (str, optional): The run to download
        entity (str, optional): The entity to scope this project to.

    Returns:
        tuple: (commit, config, patch, metadata) for the requested run.

    Raises:
        ValueError: If the run does not exist.
    """
    query = gql('''
    query Model($name: String!, $entity: String!, $run: String!) {
        model(name: $name, entityName: $entity) {
            bucket(name: $run) {
                config
                commit
                patch
                files(names: ["wandb-metadata.json"]) {
                    edges {
                        node {
                            url
                        }
                    }
                }
            }
        }
    }
    ''')

    response = self.gql(query, variable_values={
        'name': project, 'run': run, 'entity': entity
    })
    # Fix: identity check for None instead of `== None` (PEP 8; also avoids
    # invoking a custom __eq__ on the response type).
    if response['model'] is None:
        raise ValueError("Run {}/{}/{} not found".format(entity, project, run))
    run = response['model']['bucket']
    commit = run['commit']
    patch = run['patch']
    # A missing config serializes as null; treat it as an empty mapping.
    config = json.loads(run['config'] or '{}')
    if len(run['files']['edges']) > 0:
        # Fetch the stored wandb-metadata.json from its signed URL.
        url = run['files']['edges'][0]['node']['url']
        res = requests.get(url)
        res.raise_for_status()
        metadata = res.json()
    else:
        metadata = {}
    return (commit, config, patch, metadata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_resume_status(self, entity, project_name, name): """Check if a run exists and get resume information. Args: entity (str, optional): The entity to scope this project to. project_name (str): The project to download, (can include bucket) run (str, optional): The run to download """
query = gql('''
query Model($project: String!, $entity: String, $name: String!) {
    model(name: $project, entityName: $entity) {
        id
        name
        entity {
            id
            name
        }

        bucket(name: $name, missingOk: true) {
            id
            name
            logLineCount
            historyLineCount
            eventsLineCount
            historyTail
            eventsTail
        }
    }
}
''')

response = self.gql(query, variable_values={
    'entity': entity,
    'project': project_name,
    'name': name,
})

# No such project (or malformed response): nothing to resume.
if 'model' not in response or 'bucket' not in response['model']:
    return None

project = response['model']
# Remember the resolved project/entity for subsequent API calls.
self.set_setting('project', project_name)
if 'entity' in project:
    self.set_setting('entity', project['entity']['name'])

# None when the run itself does not exist (missingOk above).
return project['bucket']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upsert_run(self, id=None, name=None, project=None, host=None, group=None, tags=None, config=None, description=None, entity=None, state=None, repo=None, job_type=None, program_path=None, commit=None, sweep_name=None, summary_metrics=None, num_retries=None): """Update a run Args: id (str, optional): The existing run to update name (str, optional): The name of the run to create group (str, optional): Name of the group this run is a part of project (str, optional): The name of the project config (dict, optional): The latest config params description (str, optional): A description of this project entity (str, optional): The entity to scope this project to. repo (str, optional): Url of the program's repository. state (str, optional): State of the program. job_type (str, optional): Type of job, e.g 'train'. program_path (str, optional): Path to the program. commit (str, optional): The Git SHA to associate the run with summary_metrics (str, optional): The JSON summary metrics """
mutation = gql('''
mutation UpsertBucket(
    $id: String,
    $name: String,
    $project: String,
    $entity: String!,
    $groupName: String,
    $description: String,
    $commit: String,
    $config: JSONString,
    $host: String,
    $debug: Boolean,
    $program: String,
    $repo: String,
    $jobType: String,
    $state: String,
    $sweep: String,
    $tags: [String!],
    $summaryMetrics: JSONString,
) {
    upsertBucket(input: {
        id: $id,
        name: $name,
        groupName: $groupName,
        modelName: $project,
        entityName: $entity,
        description: $description,
        config: $config,
        commit: $commit,
        host: $host,
        debug: $debug,
        jobProgram: $program,
        jobRepo: $repo,
        jobType: $jobType,
        state: $state,
        sweep: $sweep,
        tags: $tags,
        summaryMetrics: $summaryMetrics,
    }) {
        bucket {
            id
            name
            description
            config
            project {
                id
                name
                entity {
                    id
                    name
                }
            }
        }
    }
}
''')
# The backend expects config as a JSON string, and empty descriptions as null.
if config is not None:
    config = json.dumps(config)
if not description:
    description = None

kwargs = {}
if num_retries is not None:
    kwargs['num_retries'] = num_retries

variable_values = {
    'id': id,
    'entity': entity or self.settings('entity'),
    'name': name,
    'project': project,
    'groupName': group,
    'tags': tags,
    'description': description,
    'config': config,
    'commit': commit,
    'host': host,
    'debug': env.is_debug(),
    'repo': repo,
    'program': program_path,
    'jobType': job_type,
    'state': state,
    'sweep': sweep_name,
    'summaryMetrics': summary_metrics
}

response = self.gql(
    mutation, variable_values=variable_values, **kwargs)

run = response['upsertBucket']['bucket']
# Remember the project/entity the server resolved for later calls.
project = run.get('project')
if project:
    self.set_setting('project', project['name'])
    entity = project.get('entity')
    if entity:
        self.set_setting('entity', entity['name'])

return response['upsertBucket']['bucket']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_urls(self, project, files, run=None, entity=None, description=None): """Generate temporary resumeable upload urls Args: project (str): The project to download files (list or dict): The filenames to upload run (str, optional): The run to upload to entity (str, optional): The entity to scope this project to. Defaults to wandb models Returns: (bucket_id, file_info) bucket_id: id of bucket we uploaded to file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files. { 'weights.h5': { "url": "https://weights.url" }, 'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }, } """
query = gql('''
query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
    model(name: $name, entityName: $entity) {
        bucket(name: $run, desc: $description) {
            id
            files(names: $files) {
                edges {
                    node {
                        name
                        url(upload: true)
                        updatedAt
                    }
                }
            }
        }
    }
}
''')
run_id = run or self.settings('run')
entity = entity or self.settings('entity')
query_result = self.gql(query, variable_values={
    'name': project,
    'run': run_id,
    'entity': entity,
    'description': description,
    'files': [file for file in files]
})

run = query_result['model']['bucket']
if run:
    # Map file name -> node ({"name", "url", "updatedAt"}) for easy lookup.
    result = {file['name']: file for file in self._flatten_edges(run['files'])}
    return run['id'], result
else:
    raise CommError("Run does not exist {}/{}/{}.".format(entity, project, run_id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_file(self, url): """Initiate a streaming download Args: url (str): The url to download Returns: A tuple of the content length and the streaming response """
def download_file(self, url):
    """Initiate a streaming download.

    Args:
        url (str): The url to download

    Returns:
        A tuple of the content length and the streaming response
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    # Servers may omit Content-Length; report 0 in that case.
    size = int(response.headers.get('content-length', 0))
    return (size, response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_file(self, url, file, callback=None, extra_headers={}): """Uploads a file to W&B with failure resumption Args: url (str): The url to upload to file (file): The opened file object to upload callback (:obj:`func`, optional): A callback which is passed the number of bytes uploaded since the last time it was called, used to report progress Returns: The requests library response object """
# Copy first: extra_headers is a shared mutable default argument.
extra_headers = extra_headers.copy()
response = None
if os.stat(file.name).st_size == 0:
    raise CommError("%s is an empty file" % file.name)
try:
    # Progress wraps the file object so `callback` sees byte counts as
    # the body streams out.
    progress = Progress(file, callback=callback)
    response = requests.put(
        url, data=progress, headers=extra_headers)
    response.raise_for_status()
except requests.exceptions.RequestException as e:
    total = progress.len
    # Ask the server how much it already received to decide retryability.
    status = self._status_request(url, total)
    # TODO(adrian): there's probably even more stuff we should add here
    # like if we're offline, we should retry then too
    if status.status_code in (308, 408, 500, 502, 503, 504):
        # Transient server-side statuses: report as retryable.
        util.sentry_reraise(retry.TransientException(exc=e))
    else:
        util.sentry_reraise(e)
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_agent(self, host, sweep_id=None, project_name=None): """Register a new agent Args: host (str): hostname persistent (bool): long running or oneoff sweep (str): sweep id project_name: (str): model that contains sweep """
mutation = gql('''
mutation CreateAgent(
    $host: String!
    $projectName: String!,
    $entityName: String!,
    $sweep: String!
) {
    createAgent(input: {
        host: $host,
        projectName: $projectName,
        entityName: $entityName,
        sweep: $sweep,
    }) {
        agent {
            id
        }
    }
}
''')
if project_name is None:
    project_name = self.settings('project')

# don't retry on validation errors
def no_retry_400(e):
    # Returning True means "retry"; a 400 carries a validation message,
    # which is surfaced to the user as a UsageError instead.
    if not isinstance(e, requests.HTTPError):
        return True
    if e.response.status_code != 400:
        return True
    body = json.loads(e.response.content)
    raise UsageError(body['errors'][0]['message'])

response = self.gql(mutation, variable_values={
    'host': host,
    'entityName': self.settings("entity"),
    'projectName': project_name,
    'sweep': sweep_id}, check_retry_fn=no_retry_400)
return response['createAgent']['agent']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def agent_heartbeat(self, agent_id, metrics, run_states): """Notify server about agent state, receive commands. Args: agent_id (str): agent_id metrics (dict): system metrics run_states (dict): run_id: state mapping Returns: List of commands to execute. """
mutation = gql('''
mutation Heartbeat(
    $id: ID!,
    $metrics: JSONString,
    $runState: JSONString
) {
    agentHeartbeat(input: {
        id: $id,
        metrics: $metrics,
        runState: $runState
    }) {
        agent {
            id
        }
        commands
    }
}
''')
try:
    response = self.gql(mutation, variable_values={
        'id': agent_id,
        'metrics': json.dumps(metrics),
        'runState': json.dumps(run_states)})
except Exception as e:
    # GQL raises exceptions with stringified python dictionaries :/
    message = ast.literal_eval(e.args[0])["message"]
    logger.error('Error communicating with W&B: %s', message)
    # Degrade gracefully: no commands this round.
    return []
else:
    # `commands` arrives as a JSON string; decode it for the caller.
    return json.loads(response['agentHeartbeat']['commands'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upsert_sweep(self, config): """Upsert a sweep object. Args: config (str): sweep config (will be converted to yaml) """
mutation = gql(''' mutation UpsertSweep( $config: String, $description: String, $entityName: String!, $projectName: String! ) { upsertSweep(input: { config: $config, description: $description, entityName: $entityName, projectName: $projectName }) { sweep { name } } } ''') # don't retry on validation errors # TODO(jhr): generalize error handling routines def no_retry_400_or_404(e): if not isinstance(e, requests.HTTPError): return True if e.response.status_code != 400 and e.response.status_code != 404: return True body = json.loads(e.response.content) raise UsageError(body['errors'][0]['message']) response = self.gql(mutation, variable_values={ 'config': yaml.dump(config), 'description': config.get("description"), 'entityName': self.settings("entity"), 'projectName': self.settings("project")}, check_retry_fn=no_retry_400_or_404) return response['upsertSweep']['sweep']['name']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_current(self, fname, md5): """Checksum a file and compare the md5 with the known md5 """
def file_current(self, fname, md5):
    """ Checksum a file and compare the md5 with the known md5 """
    # A missing file can never be current; avoids hashing a nonexistent path.
    if not os.path.isfile(fname):
        return False
    return util.md5_file(fname) == md5