INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Parse a string such as 'foo/bar/*.py'. Assumes is_pattern(s) has been called and returned True. Returns: 1. the directory to process, 2. the pattern to match.
def parse_pattern(s):
    """Split a pattern such as 'foo/bar/*.py' into (directory, pattern).

    Assumes is_pattern(s) has been called and returned True. Returns
    (None, None) for strings that fnmatch cannot handle.
    """
    if '{' in s:
        # Brace expansion is not supported by fnmatch.
        return None, None
    if s and s[0] == '~':
        s = os.path.expanduser(s)
    components = s.split('/')
    is_absolute = len(components) > 1 and not components[0]
    if components and components[-1] == '':
        # A trailing slash leaves an empty final component - drop it.
        components = components[:-1]
    if components:
        dir_part = '/'.join(components[:-1])
        pat_part = components[-1]
    else:
        dir_part = pat_part = ''
    # Wildcards in the directory portion (e.g. /abc/*/def) are rejected,
    # and the final component must itself be a pattern.
    if is_pattern(dir_part) or not is_pattern(pat_part):
        return None, None
    if not dir_part:
        dir_part = '/' if is_absolute else '.'
    return dir_part, pat_part
On success return an absolute path and a pattern. Otherwise print a message and return None, None
def validate_pattern(fn):
    """On success return an absolute path and a pattern.

    Otherwise print a message and return None, None.
    """
    directory, pattern = parse_pattern(fn)
    if directory is None:
        print_err("Invalid pattern {}.".format(fn))
        return None, None
    # Resolve to an absolute path and stat it on whichever device owns it.
    target = resolve_path(directory)
    mode = auto(get_mode, target)
    if not mode_exists(mode):
        print_err("cannot access '{}': No such file or directory".format(fn))
        return None, None
    if not mode_isdir(mode):
        # The directory portion of the pattern must name a directory.
        print_err("cannot access '{}': Not a directory".format(fn))
        return None, None
    return target, pattern
Return a list of paths matching a pattern (or None on error).
def process_pattern(fn):
    """Return a list of paths matching a pattern (or None on error)."""
    directory, pattern = validate_pattern(fn)
    if directory is None:
        return None
    matches = fnmatch.filter(auto(listdir, directory), pattern)
    if not matches:
        print_err("cannot access '{}': No such file or directory".format(fn))
        return None
    return [directory + '/' + name for name in matches]
Resolves path and converts it into an absolute path.
def resolve_path(path):
    """Resolves path and converts it into an absolute path.

    Expands ~, prepends the current directory for relative paths, and
    normalizes away '.', '..' and empty components.
    """
    if path[0] == '~':
        # ~ or ~user
        path = os.path.expanduser(path)
    if path[0] != '/':
        # Relative path - prefix with the shell's current directory.
        # cur_dir is presumably a module-level global tracking the shell
        # cwd - confirm against the rest of the file.
        if cur_dir[-1] == '/':
            path = cur_dir + path
        else:
            path = cur_dir + '/' + path
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        # We strip out xxx/./xxx and xxx//xxx, except that we want to keep the
        # leading / for absolute paths. This also removes the trailing slash
        # that autocompletion adds to a directory.
        if comp == '.' or (comp == '' and len(new_comps) > 0):
            continue
        if comp == '..':
            # Only pop past the leading '' marker so '/..' stays at '/'.
            if len(new_comps) > 1:
                new_comps.pop()
        else:
            new_comps.append(comp)
    if len(new_comps) == 1 and new_comps[0] == '':
        # Everything collapsed down to the root directory.
        return '/'
    return '/'.join(new_comps)
Determines if a given file is located locally or remotely. We assume that any directories from the pyboard take precedence over local directories of the same name. /flash and /sdcard are associated with the default device. /dev_name/path where dev_name is the name of a given device is also considered to be associated with the named device. If the file is associated with a remote device, then this function returns a tuple (dev, dev_filename) where dev is the device and dev_filename is the portion of the filename relative to the device. If the file is not associated with the remote device, then the dev portion of the returned tuple will be None.
def get_dev_and_path(filename):
    """Map *filename* to a (device, device-relative-path) tuple.

    Directories on the pyboard take precedence over local directories of
    the same name. /flash and /sdcard belong to the default device, and
    /dev_name/path belongs to the device named dev_name. For a file not
    associated with any remote device, the device element is None.
    """
    if DEFAULT_DEV and DEFAULT_DEV.is_root_path(filename):
        return (DEFAULT_DEV, filename)
    test_filename = filename + '/'
    with DEV_LOCK:
        for dev in DEVS:
            if not test_filename.startswith(dev.name_path):
                continue
            # Strip the /dev_name prefix, keeping the leading slash.
            dev_filename = filename[len(dev.name_path) - 1:] or '/'
            return (dev, dev_filename)
    return (None, filename)
Prints a string or converts bytes to a string and then prints.
def print_bytes(byte_str):
    """Prints a string, first decoding bytes input as utf-8 text."""
    text = byte_str if isinstance(byte_str, str) else str(byte_str, encoding='utf8')
    print(text)
Decorator which adds extra functions to be downloaded to the pyboard.
def extra_funcs(*funcs):
    """Decorator which records extra helper functions that must be
    downloaded to the pyboard together with the decorated function.
    """
    def decorator(target):
        def wrapper(*args, **kwargs):
            return target(*args, **kwargs)
        # Stash everything remote() needs to ship the code to the board.
        wrapper.extra_funcs = list(funcs)
        wrapper.source = inspect.getsource(target)
        wrapper.name = target.__name__
        return wrapper
    return decorator
If `filename` is a remote file, then this function calls func on the micropython board, otherwise it calls it locally.
def auto(func, filename, *args, **kwargs):
    """Run *func* on the micropython board when *filename* is remote,
    otherwise run it locally (expanding ~ first).
    """
    dev, dev_filename = get_dev_and_path(filename)
    if dev is not None:
        return dev.remote_eval(func, dev_filename, *args, **kwargs)
    if dev_filename.startswith('~'):
        dev_filename = os.path.expanduser(dev_filename)
    return func(dev_filename, *args, **kwargs)
Returns the board's name (if available).
def board_name(default):
    """Returns the board's name (if available).

    This runs on the pyboard: a board.py module may supply a name
    attribute; otherwise *default* is used. Always returns repr(name) so
    the value survives the textual REPL round-trip.
    """
    name = default
    try:
        import board
        try:
            name = board.name
        except AttributeError:
            # board.py exists but has no name attribute - not an error.
            pass
    except ImportError:
        # No board.py file on the pyboard - not an error.
        pass
    except BaseException as err:
        print('Error encountered executing board.py')
        import sys
        sys.print_exception(err)
    return repr(name)
Copies the contents of the indicated file to an already opened file.
def cat(src_filename, dst_file):
    """Copies the contents of the indicated file to an already opened file."""
    dev, dev_filename = get_dev_and_path(src_filename)
    if dev is not None:
        # Remote source: stream it over the REPL transfer protocol.
        filesize = dev.remote_eval(get_filesize, dev_filename)
        return dev.remote(send_file_to_host, dev_filename, dst_file, filesize,
                          xfer_func=recv_file_from_remote)
    with open(dev_filename, 'rb') as src:
        for chunk in src:
            dst_file.write(chunk)
Copies a file from one place to another. Both the source and destination files must exist on the same machine.
def copy_file(src_filename, dst_filename):
    """Copies a file from one place to another. Both the source and
    destination files must exist on the same machine.

    Returns True on success, False on any I/O failure. May also run on
    the board, where BUFFER_SIZE is substituted textually.
    """
    try:
        with open(src_filename, 'rb') as src_file:
            with open(dst_filename, 'wb') as dst_file:
                while True:
                    buf = src_file.read(BUFFER_SIZE)
                    if len(buf) > 0:
                        dst_file.write(buf)
                    # A short read means end-of-file.
                    if len(buf) < BUFFER_SIZE:
                        break
        return True
    except OSError:
        # Narrowed from a bare except: report failure only for I/O errors
        # instead of masking unrelated bugs (NameError, KeyboardInterrupt).
        return False
Copies one file to another. The source file may be local or remote and the destination file may be local or remote.
def cp(src_filename, dst_filename):
    """Copies one file to another. The source file may be local or remote and
    the destination file may be local or remote.
    """
    src_dev, src_dev_filename = get_dev_and_path(src_filename)
    dst_dev, dst_dev_filename = get_dev_and_path(dst_filename)
    if src_dev is dst_dev:
        # src and dst are either on the same remote, or both are on the host
        return auto(copy_file, src_filename, dst_dev_filename)
    filesize = auto(get_filesize, src_filename)
    if dst_dev is None:
        # Copying from remote to host
        with open(dst_dev_filename, 'wb') as dst_file:
            return src_dev.remote(send_file_to_host, src_dev_filename,
                                  dst_file, filesize,
                                  xfer_func=recv_file_from_remote)
    if src_dev is None:
        # Copying from host to remote
        with open(src_dev_filename, 'rb') as src_file:
            return dst_dev.remote(recv_file_from_host, src_file,
                                  dst_dev_filename, filesize,
                                  xfer_func=send_file_to_remote)
    # Copying from remote A to remote B. We first copy the file
    # from remote A to the host and then from the host to remote B
    host_temp_file = tempfile.TemporaryFile()
    if src_dev.remote(send_file_to_host, src_dev_filename, host_temp_file,
                      filesize, xfer_func=recv_file_from_remote):
        host_temp_file.seek(0)
        return dst_dev.remote(recv_file_from_host, host_temp_file,
                              dst_dev_filename, filesize,
                              xfer_func=send_file_to_remote)
    return False
Returns os.stat for a given file, adjusting the timestamps as appropriate.
def stat(filename):
    """Returns os.stat for a given file, adjusting the timestamps as
    appropriate.

    TIME_OFFSET is substituted textually before this code is shipped to
    the board, correcting for the host/board epoch difference.
    """
    import os
    try:
        # On the host, lstat won't try to follow symlinks.
        rstat = os.lstat(filename)
    except (OSError, AttributeError):
        # Narrowed from a bare except: MicroPython's os module may lack
        # lstat (AttributeError), or lstat itself may fail (OSError).
        rstat = os.stat(filename)
    return rstat[:7] + tuple(tim + TIME_OFFSET for tim in rstat[7:])
Returns a list of filenames contained in the named directory. Only filenames which start with `match` will be returned. Directories will have a trailing slash.
def listdir_matches(match):
    """Returns a list of filenames contained in the named directory.
    Only filenames which start with `match` will be returned.
    Directories will have a trailing slash.
    """
    import os
    last_slash = match.rfind('/')
    if last_slash == -1:
        dirname, match_prefix, result_prefix = '.', match, ''
    elif last_slash == 0:
        dirname, match_prefix, result_prefix = '/', match[1:], '/'
    else:
        dirname = match[0:last_slash]
        match_prefix = match[last_slash + 1:]
        result_prefix = dirname + '/'

    def add_suffix_if_dir(filename):
        # 0x4000 is the S_IFDIR bit of st_mode.
        try:
            if (os.stat(filename)[0] & 0x4000) != 0:
                return filename + '/'
        except FileNotFoundError:
            # A symlink may point at a non-existent file.
            pass
        return filename

    return [add_suffix_if_dir(result_prefix + name)
            for name in os.listdir(dirname) if name.startswith(match_prefix)]
Returns a list of tuples for each file contained in the named directory, or None if the directory does not exist. Each tuple contains the filename, followed by the tuple returned by calling os.stat on the filename.
def listdir_stat(dirname, show_hidden=True):
    """Returns a list of tuples for each file contained in the named
    directory, or None if the directory does not exist. Each tuple contains
    the filename, followed by the tuple returned by calling os.stat on the
    filename.
    """
    import os
    try:
        files = os.listdir(dirname)
    except OSError:
        return None
    # Avoid producing '//file' when listing the root directory.
    prefix = '/' if dirname == '/' else dirname + '/'
    return [(file, stat(prefix + file))
            for file in files if is_visible(file) or show_hidden]
Removes a file or directory.
def remove_file(filename, recursive=False, force=False):
    """Removes a file or directory.

    Returns True on success. With force=True, errors are ignored and True
    is returned; without it a failure (including trying to remove a
    non-empty directory non-recursively) returns False.
    """
    import os
    try:
        mode = os.stat(filename)[0]
        if mode & 0x4000 != 0:
            # It's a directory.
            if recursive:
                for file in os.listdir(filename):
                    success = remove_file(filename + '/' + file, recursive, force)
                    if not success and not force:
                        return False
                os.rmdir(filename)  # PGH Work like Unix: require recursive
            else:
                if not force:
                    return False
        else:
            os.remove(filename)
    except OSError:
        # Narrowed from a bare except: only filesystem errors are the
        # expected failure mode here.
        if not force:
            return False
    return True
Removes a file or directory tree.
def rm(filename, recursive=False, force=False):
    """Removes a file or directory tree.

    Delegates to remove_file, which auto() runs either locally or on
    whichever board owns *filename*.
    """
    return auto(remove_file, filename, recursive, force)
Creates a directory. Produces information in case of dry run. Issues error where necessary.
def make_dir(dst_dir, dry_run, print_func, recursed):
    """Creates a directory. Produces information in case of dry run.
    Issues error where necessary.
    """
    parent = os.path.split(dst_dir.rstrip('/'))[0]
    # A falsy parent means a relative dir - no existence check needed.
    parent_files = True if not parent else auto(listdir_stat, parent)
    if not dry_run:
        if mkdir(dst_dir):
            return True
        print_err("Unable to create {}".format(dst_dir))
        return False
    if recursed:
        # Assume success: the parent hasn't actually been created yet.
        print_func("Creating directory {}".format(dst_dir))
    elif parent_files is None:
        print_func("Unable to create {}".format(dst_dir))
    return True
Synchronizes 2 directory trees.
def rsync(src_dir, dst_dir, mirror, dry_run, print_func, recursed, sync_hidden):
    """Synchronizes 2 directory trees.

    With mirror=True, files present only in the destination are deleted.
    dry_run only reports what would happen; recursed marks recursive calls.
    """
    # This test is a hack to avoid errors when accessing /flash. When the
    # cache synchronisation issue is solved it should be removed
    if not isinstance(src_dir, str) or not len(src_dir):
        return
    sstat = auto(get_stat, src_dir)
    smode = stat_mode(sstat)
    if mode_isfile(smode):
        print_err('Source is a file not a directory.')
        return
    d_src = {}  # Look up stat tuple from name in current directory
    src_files = auto(listdir_stat, src_dir, show_hidden=sync_hidden)
    if src_files is None:
        print_err('Source directory {} does not exist.'.format(src_dir))
        return
    for name, stat in src_files:
        d_src[name] = stat
    d_dst = {}
    dst_files = auto(listdir_stat, dst_dir, show_hidden=sync_hidden)
    if dst_files is None:
        # Directory does not exist - create it (or report in dry run).
        if not make_dir(dst_dir, dry_run, print_func, recursed):
            return
    else:
        # dest exists
        for name, stat in dst_files:
            d_dst[name] = stat
    # Partition names into additions, deletions, and possible updates.
    set_dst = set(d_dst.keys())
    set_src = set(d_src.keys())
    to_add = set_src - set_dst  # Files to copy to dest
    to_del = set_dst - set_src  # To delete from dest
    to_upd = set_dst.intersection(set_src)  # In both: may need updating
    for src_basename in to_add:
        # Name in source but absent from destination.
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        print_func("Adding %s" % dst_filename)
        src_stat = d_src[src_basename]
        src_mode = stat_mode(src_stat)
        if not dry_run:
            if not mode_isdir(src_mode):
                cp(src_filename, dst_filename)
        if mode_isdir(src_mode):
            rsync(src_filename, dst_filename, mirror=mirror, dry_run=dry_run,
                  print_func=print_func, recursed=True,
                  sync_hidden=sync_hidden)
    if mirror:
        # May delete entries that only exist in the destination.
        for dst_basename in to_del:
            # In dest but not in source.
            dst_filename = dst_dir + '/' + dst_basename
            print_func("Removing %s" % dst_filename)
            if not dry_run:
                rm(dst_filename, recursive=True, force=True)
    for src_basename in to_upd:
        # Names are identical on both sides.
        src_stat = d_src[src_basename]
        dst_stat = d_dst[src_basename]
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        src_mode = stat_mode(src_stat)
        dst_mode = stat_mode(dst_stat)
        if mode_isdir(src_mode):
            if mode_isdir(dst_mode):
                # src and dst are both directories - recurse
                rsync(src_filename, dst_filename, mirror=mirror,
                      dry_run=dry_run, print_func=print_func, recursed=True,
                      sync_hidden=sync_hidden)
            else:
                msg = "Source '{}' is a directory and destination " \
                      "'{}' is a file. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
        else:
            if mode_isdir(dst_mode):
                msg = "Source '{}' is a file and destination " \
                      "'{}' is a directory. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
            else:
                # Both are files: copy only when the source is newer.
                if stat_mtime(src_stat) > stat_mtime(dst_stat):
                    msg = "{} is newer than {} - copying"
                    print_func(msg.format(src_filename, dst_filename))
                    if not dry_run:
                        cp(src_filename, dst_filename)
Function which runs on the pyboard. Matches up with send_file_to_remote.
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'):
    """Function which runs on the pyboard. Matches up with send_file_to_remote.

    Receives filesize bytes over the REPL stream (the src_file argument is
    unused here; it exists so the host-side xfer_func shares the same
    argument list) and writes them to dst_filename. Returns True on
    success, False on any error.
    """
    import sys
    import ubinascii
    if HAS_BUFFER:
        try:
            import pyb
            usb = pyb.USB_VCP()
        except:
            try:
                import machine
                usb = machine.USB_VCP()
            except:
                usb = None
        if usb and usb.isconnected():
            # We don't want 0x03 bytes in the data to be interpreted as a
            # Control-C. This gets reset each time the REPL runs a line, so
            # we don't need to worry about resetting it ourselves
            usb.setinterrupt(-1)
    try:
        with open(dst_filename, dst_mode) as dst_file:
            bytes_remaining = filesize
            if not HAS_BUFFER:
                bytes_remaining *= 2  # hexlify makes each byte into 2
            buf_size = BUFFER_SIZE
            write_buf = bytearray(buf_size)
            read_buf = bytearray(buf_size)
            while bytes_remaining > 0:
                # Send back an ack as a form of flow control
                sys.stdout.write('\x06')
                read_size = min(bytes_remaining, buf_size)
                buf_remaining = read_size
                buf_index = 0
                while buf_remaining > 0:
                    if HAS_BUFFER:
                        bytes_read = sys.stdin.buffer.readinto(read_buf, read_size)
                    else:
                        bytes_read = sys.stdin.readinto(read_buf, read_size)
                    if bytes_read > 0:
                        # BUGFIX: slice must end at buf_index + bytes_read.
                        # The original write_buf[buf_index:bytes_read] made
                        # the preallocated buffer grow on every partial
                        # read, leaking RAM on the board.
                        write_buf[buf_index:buf_index + bytes_read] = \
                            read_buf[0:bytes_read]
                        buf_index += bytes_read
                        buf_remaining -= bytes_read
                if HAS_BUFFER:
                    dst_file.write(write_buf[0:read_size])
                else:
                    dst_file.write(ubinascii.unhexlify(write_buf[0:read_size]))
                bytes_remaining -= read_size
        return True
    except:
        # Board-side protocol code: must never raise back into the REPL
        # stream, so errors are reported as a False return value.
        return False
Intended to be passed to the `remote` function as the xfer_func argument. Matches up with recv_file_from_host.
def send_file_to_remote(dev, src_file, dst_filename, filesize, dst_mode='wb'):
    """Intended to be passed to the `remote` function as the xfer_func
    argument. Matches up with recv_file_from_host.

    Streams filesize bytes of src_file to the board, one ack-gated chunk
    at a time. dst_filename/dst_mode are consumed by the board side.
    """
    bytes_remaining = filesize
    save_timeout = dev.timeout
    dev.timeout = 1
    while bytes_remaining > 0:
        # Wait for ack so we don't get too far ahead of the remote
        ack = dev.read(1)
        if ack is None or ack != b'\x06':
            sys.stderr.write("timed out or error in transfer to remote\n")
            sys.exit(2)
        if HAS_BUFFER:
            buf_size = BUFFER_SIZE
        else:
            # Half-size chunks: hexlify doubles each byte on the wire.
            buf_size = BUFFER_SIZE // 2
        read_size = min(bytes_remaining, buf_size)
        buf = src_file.read(read_size)
        #sys.stdout.write('\r%d/%d' % (filesize - bytes_remaining, filesize))
        #sys.stdout.flush()
        if HAS_BUFFER:
            dev.write(buf)
        else:
            dev.write(binascii.hexlify(buf))
        bytes_remaining -= read_size
    #sys.stdout.write('\r')
    dev.timeout = save_timeout
Intended to be passed to the `remote` function as the xfer_func argument. Matches up with send_file_to_host.
def recv_file_from_remote(dev, src_filename, dst_file, filesize):
    """Intended to be passed to the `remote` function as the xfer_func
    argument. Matches up with send_file_to_host.

    Reads filesize bytes from the device and writes them to dst_file,
    acking each chunk for flow control. src_filename is consumed by the
    board side of the protocol.
    """
    bytes_remaining = filesize
    if not HAS_BUFFER:
        bytes_remaining *= 2  # hexlify makes each byte into 2
    buf_size = BUFFER_SIZE
    write_buf = bytearray(buf_size)
    while bytes_remaining > 0:
        read_size = min(bytes_remaining, buf_size)
        buf_remaining = read_size
        buf_index = 0
        while buf_remaining > 0:
            read_buf = dev.read(buf_remaining)
            bytes_read = len(read_buf)
            if bytes_read:
                # BUGFIX: slice must end at buf_index + bytes_read. The
                # original write_buf[buf_index:bytes_read] grew the
                # preallocated buffer on every partial read.
                write_buf[buf_index:buf_index + bytes_read] = read_buf[0:bytes_read]
                buf_index += bytes_read
                buf_remaining -= bytes_read
        if HAS_BUFFER:
            dst_file.write(write_buf[0:read_size])
        else:
            dst_file.write(binascii.unhexlify(write_buf[0:read_size]))
        # Send an ack to the remote as a form of flow control
        dev.write(b'\x06')  # ASCII ACK is 0x06
        bytes_remaining -= read_size
Function which runs on the pyboard. Matches up with recv_file_from_remote.
def send_file_to_host(src_filename, dst_file, filesize):
    """Function which runs on the pyboard. Matches up with recv_file_from_remote.

    Streams src_filename over the REPL stream in ack-gated chunks. The
    dst_file argument is consumed by the host side. Returns True on
    success, False on any error.
    """
    import sys
    import ubinascii
    try:
        with open(src_filename, 'rb') as src_file:
            bytes_remaining = filesize
            if HAS_BUFFER:
                buf_size = BUFFER_SIZE
            else:
                # Half-size chunks: hexlify doubles each byte on the wire.
                buf_size = BUFFER_SIZE // 2
            while bytes_remaining > 0:
                read_size = min(bytes_remaining, buf_size)
                buf = src_file.read(read_size)
                if HAS_BUFFER:
                    sys.stdout.buffer.write(buf)
                else:
                    sys.stdout.write(ubinascii.hexlify(buf))
                bytes_remaining -= read_size
                # Wait for an ack so we don't get ahead of the remote
                while True:
                    char = sys.stdin.read(1)
                    if char:
                        if char == '\x06':
                            break
                        # This should only happen if an error occurs
                        sys.stdout.write(char)
        return True
    except:
        # Board-side protocol code: report errors as False rather than
        # raising into the REPL stream.
        return False
Takes a single column of words, and prints it as multiple columns that will fit in termwidth columns.
def print_cols(words, print_func, termwidth=79):
    """Takes a single column of words, and prints it as multiple columns that
    will fit in termwidth columns.
    """
    # word_len presumably returns the printable width, excluding ANSI
    # escape codes - confirm against its definition elsewhere in the file.
    width = max([word_len(word) for word in words])
    nwords = len(words)
    ncols = max(1, (termwidth + 1) // (width + 1))
    nrows = (nwords + ncols - 1) // ncols
    for row in range(nrows):
        # Words are laid out down the columns, so step by nrows.
        for i in range(row, nwords, nrows):
            word = words[i]
            if word[0] == '\x1b':
                # Colorized entry: the extra 11 presumably compensates for
                # the invisible ANSI escape bytes - TODO confirm.
                print_func('%-*s' % (width + 11, words[i]),
                           end='\n' if i + nrows >= nwords else ' ')
            else:
                print_func('%-*s' % (width, words[i]),
                           end='\n' if i + nrows >= nwords else ' ')
Takes a filename and the stat info and returns the decorated filename. The decoration takes the form of a single character which follows the filename. Currently, the only decoration is '/' for directories.
def decorated_filename(filename, stat):
    """Takes a filename and the stat info and returns the decorated filename.
    The decoration takes the form of a single character which follows the
    filename. Currently, the only decoration is '/' for directories.
    """
    # NOTE(review): the parameter name shadows the module-level stat()
    # helper; renaming would change the keyword interface, so it stays.
    mode = stat[0]
    if mode_isdir(mode):
        return DIR_COLOR + filename + END_COLOR + '/'
    if mode_issymlink(mode):
        return filename + '@'
    if filename.endswith('.py'):
        return PY_COLOR + filename + END_COLOR
    return filename
Prints detailed information about the file passed in.
def print_long(filename, stat, print_func):
    """Prints detailed information about the file passed in."""
    size = stat_size(stat)
    mtime = stat_mtime(stat)
    file_mtime = time.localtime(mtime)
    curr_time = time.time()
    # Mimic ls -l: show the year for timestamps more than six months from
    # now, otherwise show hour:minute.
    if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS):
        print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]],
                   file_mtime[2], file_mtime[0],
                   decorated_filename(filename, stat)))
    else:
        print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]],
                   file_mtime[2], file_mtime[3], file_mtime[4],
                   decorated_filename(filename, stat)))
Tries to connect automagically via network or serial.
def connect(port, baud=115200, user='micro', password='python', wait=0):
    """Tries to connect automagically via network or serial."""
    try:
        # If the port resolves as a hostname/IP, treat it as a network
        # device; otherwise fall back to a serial connection.
        resolved = socket.gethostbyname(port)
        connect_telnet(port, resolved, user=user, password=password)
    except socket.gaierror:
        connect_serial(port, baud=baud, wait=wait)
Connect to a MicroPython board via telnet.
def connect_telnet(name, ip_address=None, user='micro', password='python'):
    """Connect to a MicroPython board via telnet."""
    if ip_address is None:
        try:
            ip_address = socket.gethostbyname(name)
        except socket.gaierror:
            # Unresolvable: fall back to using the name verbatim.
            ip_address = name
    if not QUIET:
        if name == ip_address:
            print('Connecting to (%s) ...' % ip_address)
        else:
            print('Connecting to %s (%s) ...' % (name, ip_address))
    add_device(DeviceNet(name, ip_address, user, password))
Connect to a MicroPython board via a serial port.
def connect_serial(port, baud=115200, wait=0):
    """Connect to a MicroPython board via a serial port.

    Returns True on success, False when the device cannot be opened.
    """
    if not QUIET:
        print('Connecting to %s (buffer-size %d)...' % (port, BUFFER_SIZE))
    try:
        dev = DeviceSerial(port, baud, wait)
    except DeviceError as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return False
    add_device(dev)
    return True
The main program.
def real_main():
    """The main program.

    Reads defaults from RSHELL_* environment variables, parses the command
    line, connects to a board (explicit port, or autoscan), and then runs
    the interactive shell, a command file, or a one-shot command.
    """
    global RTS
    global DTR
    try:
        default_baud = int(os.getenv('RSHELL_BAUD'))
    except:
        # Unset or non-numeric RSHELL_BAUD - fall back to 115200.
        default_baud = 115200
    default_port = os.getenv('RSHELL_PORT')
    default_rts = os.getenv('RSHELL_RTS') or RTS
    default_dtr = os.getenv('RSHELL_DTR') or DTR
    default_user = os.getenv('RSHELL_USER') or 'micro'
    default_password = os.getenv('RSHELL_PASSWORD') or 'python'
    default_editor = os.getenv('RSHELL_EDITOR') or os.getenv('VISUAL') or os.getenv('EDITOR') or 'vi'
    global BUFFER_SIZE
    try:
        # NOTE(review): default_buffer_size is computed but never used
        # below; --buffer-size defaults to None and is detected later.
        default_buffer_size = int(os.getenv('RSHELL_BUFFER_SIZE'))
    except:
        default_buffer_size = BUFFER_SIZE
    parser = argparse.ArgumentParser(
        prog="rshell",
        usage="%(prog)s [options] [command]",
        description="Remote Shell for a MicroPython board.",
        epilog=("You can specify the default serial port using the " +
                "RSHELL_PORT environment variable.")
    )
    parser.add_argument(
        "-b", "--baud",
        dest="baud",
        action="store",
        type=int,
        help="Set the baudrate used (default = %d)" % default_baud,
        default=default_baud
    )
    parser.add_argument(
        "--buffer-size",
        dest="buffer_size",
        action="store",
        type=int,
        help="Set the buffer size used for transfers "
             "(default = %d for USB, %d for UART)" % (USB_BUFFER_SIZE, UART_BUFFER_SIZE),
    )
    parser.add_argument(
        "-p", "--port",
        dest="port",
        help="Set the serial port to use (default '%s')" % default_port,
        default=default_port
    )
    parser.add_argument(
        "--rts",
        dest="rts",
        help="Set the RTS state (default '%s')" % default_rts,
        default=default_rts
    )
    parser.add_argument(
        "--dtr",
        dest="dtr",
        help="Set the DTR state (default '%s')" % default_dtr,
        default=default_dtr
    )
    parser.add_argument(
        "-u", "--user",
        dest="user",
        help="Set username to use (default '%s')" % default_user,
        default=default_user
    )
    parser.add_argument(
        "-w", "--password",
        dest="password",
        help="Set password to use (default '%s')" % default_password,
        default=default_password
    )
    parser.add_argument(
        "-e", "--editor",
        dest="editor",
        help="Set the editor to use (default '%s')" % default_editor,
        default=default_editor
    )
    parser.add_argument(
        "-f", "--file",
        dest="filename",
        help="Specifies a file of commands to process."
    )
    parser.add_argument(
        "-d", "--debug",
        dest="debug",
        action="store_true",
        help="Enable debug features",
        default=False
    )
    parser.add_argument(
        "-n", "--nocolor",
        dest="nocolor",
        action="store_true",
        help="Turn off colorized output",
        default=False
    )
    parser.add_argument(
        "-l", "--list",
        dest="list",
        action="store_true",
        help="Display serial ports",
        default=False
    )
    parser.add_argument(
        "-a", "--ascii",
        dest="ascii_xfer",
        action="store_true",
        help="ASCII encode binary files for transfer",
        default=False
    )
    parser.add_argument(
        "--wait",
        dest="wait",
        type=int,
        action="store",
        help="Seconds to wait for serial port",
        default=0
    )
    parser.add_argument(
        "--timing",
        dest="timing",
        action="store_true",
        help="Print timing information about each command",
        default=False
    )
    parser.add_argument(
        '-V', '--version',
        dest='version',
        action='store_true',
        help='Reports the version and exits.',
        default=False
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Turns off some output (useful for testing)",
        default=False
    )
    parser.add_argument(
        "cmd",
        nargs=argparse.REMAINDER,
        help="Optional command to execute"
    )
    args = parser.parse_args(sys.argv[1:])
    if args.buffer_size is not None:
        BUFFER_SIZE = args.buffer_size
    if args.debug:
        print("Debug = %s" % args.debug)
        print("Port = %s" % args.port)
        print("Baud = %d" % args.baud)
        print("User = %s" % args.user)
        print("Password = %s" % args.password)
        print("Wait = %d" % args.wait)
        print("List = %d" % args.list)
        print("nocolor = %d" % args.nocolor)
        print("ascii = %d" % args.ascii_xfer)
        print("Timing = %d" % args.timing)
        print("Quiet = %d" % args.quiet)
        print("BUFFER_SIZE = %d" % BUFFER_SIZE)
        print("Cmd = [%s]" % ', '.join(args.cmd))
    if args.version:
        print(__version__)
        return
    # Publish parsed options into the module-level configuration globals.
    global DEBUG
    DEBUG = args.debug
    global QUIET
    QUIET = args.quiet
    global EDITOR
    EDITOR = args.editor
    if args.nocolor:
        global DIR_COLOR, PROMPT_COLOR, PY_COLOR, END_COLOR
        DIR_COLOR = ''
        PROMPT_COLOR = ''
        PY_COLOR = ''
        END_COLOR = ''
    else:
        if sys.platform == 'darwin':
            # The readline that comes with OSX screws up colors in the prompt
            global FAKE_INPUT_PROMPT
            FAKE_INPUT_PROMPT = True
    global ASCII_XFER
    ASCII_XFER = args.ascii_xfer
    RTS = args.rts
    DTR = args.dtr
    if args.list:
        listports()
        return
    if args.port:
        ASCII_XFER = True
        if args.buffer_size is None:
            # Pick a buffer size appropriate to the transport.
            if is_micropython_usb_port(args.port):
                BUFFER_SIZE = USB_BUFFER_SIZE
            else:
                BUFFER_SIZE = UART_BUFFER_SIZE
        QUIET or print('Using buffer-size of', BUFFER_SIZE)
        try:
            connect(args.port, baud=args.baud, wait=args.wait,
                    user=args.user, password=args.password)
        except DeviceError as err:
            print(err)
    else:
        autoscan()
    autoconnect()
    if args.filename:
        # Run commands from a file instead of interactively.
        with open(args.filename) as cmd_file:
            shell = Shell(stdin=cmd_file, filename=args.filename,
                          timing=args.timing)
            shell.cmdloop('')
    else:
        cmd_line = ' '.join(args.cmd)
        if cmd_line == '':
            print('Welcome to rshell.', EXIT_STR)
            if num_devices() == 0:
                print('')
                print('No MicroPython boards connected - use the connect command to add one')
                print('')
        shell = Shell(timing=args.timing)
        try:
            shell.cmdloop(cmd_line)
        except KeyboardInterrupt:
            print('')
This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns.
def main():
    """Saves the stdin termios settings, calls real_main, and restores
    the stdin termios settings when it returns.
    """
    saved_settings = None
    fd = -1
    try:
        import termios
        fd = sys.stdin.fileno()
        saved_settings = termios.tcgetattr(fd)
    except:
        # Best-effort: termios is unavailable (e.g. Windows) or stdin is
        # not a tty - nothing to save or restore.
        pass
    try:
        real_main()
    finally:
        if saved_settings:
            termios.tcsetattr(fd, termios.TCSANOW, saved_settings)
Closes the serial port.
def close(self):
    """Closes the serial port and forgets the pyboard handle."""
    pyb = self.pyb
    if pyb and pyb.serial:
        pyb.serial.close()
    self.pyb = None
Determines if 'filename' corresponds to a directory on this device.
def is_root_path(self, filename):
    """Determines if 'filename' corresponds to a directory on this device."""
    # The trailing slash makes '/flash' match the root dir '/flash/'
    # without also matching e.g. '/flashy'.
    candidate = filename + '/'
    return any(candidate.startswith(root_dir) for root_dir in self.root_dirs)
Reads data from the pyboard over the serial port.
def read(self, num_bytes):
    """Reads data from the pyboard over the serial port.

    Raises DeviceError when the connection appears to have dropped.
    """
    self.check_pyb()
    try:
        return self.pyb.serial.read(num_bytes)
    except (serial.serialutil.SerialException, TypeError):
        # Read failed - assume that we got disconnected
        self.close()
        raise DeviceError('serial port %s closed' % self.dev_name_short)
Calls func with the indicated args on the micropython board.
def remote(self, func, *args, xfer_func=None, **kwargs):
    """Calls func with the indicated args on the micropython board.

    The source of func (plus any extra_funcs helpers) is shipped to the
    board over the raw REPL, invoked there, and the printed output is
    returned as bytes. xfer_func, when given, runs on the host while the
    board-side code executes (used for file transfers).
    """
    global HAS_BUFFER
    HAS_BUFFER = self.has_buffer
    if hasattr(func, 'extra_funcs'):
        # Wrapped by the extra_funcs decorator: send the helper sources
        # first, then the function body with decorator lines stripped.
        func_name = func.name
        func_lines = []
        for extra_func in func.extra_funcs:
            func_lines += inspect.getsource(extra_func).split('\n')
            func_lines += ['']
        func_lines += filter(lambda line: line[:1] != '@', func.source.split('\n'))
        func_src = '\n'.join(func_lines)
    else:
        func_name = func.__name__
        func_src = inspect.getsource(func)
    # Append a call to the function and print its result so the host can
    # read it back from the REPL output.
    args_arr = [remote_repr(i) for i in args]
    kwargs_arr = ["{}={}".format(k, remote_repr(v)) for k, v in kwargs.items()]
    func_src += 'output = ' + func_name + '('
    func_src += ', '.join(args_arr + kwargs_arr)
    func_src += ')\n'
    func_src += 'if output is None:\n'
    func_src += ' print("None")\n'
    func_src += 'else:\n'
    func_src += ' print(output)\n'
    time_offset = self.time_offset
    if self.adjust_for_timezone:
        time_offset -= time.localtime().tm_gmtoff
    # Textually substitute host-side constants into the shipped source.
    func_src = func_src.replace('TIME_OFFSET', '{}'.format(time_offset))
    func_src = func_src.replace('HAS_BUFFER', '{}'.format(HAS_BUFFER))
    func_src = func_src.replace('BUFFER_SIZE', '{}'.format(BUFFER_SIZE))
    func_src = func_src.replace('IS_UPY', 'True')
    if DEBUG:
        print('----- About to send %d bytes of code to the pyboard -----' % len(func_src))
        print(func_src)
        print('-----')
    self.check_pyb()
    try:
        self.pyb.enter_raw_repl()
        self.check_pyb()
        output = self.pyb.exec_raw_no_follow(func_src)
        if xfer_func:
            # Host side of the transfer runs while the board executes.
            xfer_func(self, *args, **kwargs)
        self.check_pyb()
        output, _ = self.pyb.follow(timeout=20)
        self.check_pyb()
        self.pyb.exit_raw_repl()
    except (serial.serialutil.SerialException, TypeError):
        # Assume the board disconnected mid-operation.
        self.close()
        raise DeviceError('serial port %s closed' % self.dev_name_short)
    if DEBUG:
        print('-----Response-----')
        print(output)
        print('-----')
    return output
Calls func with the indicated args on the micropython board, and converts the response back into python by using eval.
def remote_eval(self, func, *args, **kwargs):
    """Calls func with the indicated args on the micropython board, and
    converts the response back into python by using eval.
    """
    # NOTE(review): eval() of the board's printed output is trusted by
    # design - the board is assumed to be under the user's control.
    return eval(self.remote(func, *args, **kwargs))
Calls func with the indicated args on the micropython board, and converts the response back into python by using eval.
def remote_eval_last(self, func, *args, **kwargs):
    """Calls func with the indicated args on the micropython board, and
    converts the response back into python by using eval.

    The last output line is eval'ed as the result; everything before it
    is returned as a decoded message string.
    """
    lines = self.remote(func, *args, **kwargs).split(b'\r\n')
    # lines[-1] is the empty string after the final CRLF; lines[-2] is
    # the actual result expression.
    messages = b'\n'.join(lines[0:-2]).decode('utf-8')
    return (eval(lines[-2]), messages)
Sets the time on the pyboard to match the time on the host.
def sync_time(self):
    """Sets the time on the pyboard to match the time on the host.

       Returns the host's local time tuple that was sent.
    """
    now = time.localtime(time.time())
    # tm_wday + 1: presumably the board-side set_time expects a 1-based
    # weekday — TODO confirm against the remote set_time helper.
    self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday,
                           now.tm_wday + 1, now.tm_hour, now.tm_min,
                           now.tm_sec, 0))
    return now
Writes data to the pyboard over the serial port.
def write(self, buf):
    """Writes data to the pyboard over the serial port.

       Returns the number of bytes written (pyserial's return value).
       Raises DeviceError if the port turns out to be gone.
    """
    self.check_pyb()
    try:
        return self.pyb.serial.write(buf)
    except (serial.serialutil.SerialException, BrokenPipeError, TypeError):
        # Write failed - assume that we got disconnected
        self.close()
        raise DeviceError('{} closed'.format(self.dev_name_short))
Sets the timeout associated with the serial port.
def timeout(self, value):
    """Sets the timeout associated with the serial port.

       Failures are deliberately ignored (best effort).
    """
    self.check_pyb()
    try:
        self.pyb.serial.timeout = value
    except:
        # timeout is a property so it calls code, and that can fail
        # if the serial port is closed.
        pass
Override onecmd. 1 - So we don't have to have a do_EOF method. 2 - So we can strip comments 3 - So we can track line numbers
def onecmd(self, line):
    """Override onecmd.

       1 - So we don't have to have a do_EOF method.
       2 - So we can strip comments
       3 - So we can track line numbers

       Returns True to tell the cmd loop to stop (on EOF/exit).
    """
    if DEBUG:
        print('Executing "%s"' % line)
    self.line_num += 1
    if line == "EOF" or line == 'exit':
        if cmd.Cmd.use_rawinput:
            # This means that we printed a prompt, and we'll want to
            # print a newline to pretty things up for the caller.
            self.print('')
        return True
    # Strip comments
    comment_idx = line.find("#")
    if comment_idx >= 0:
        line = line[0:comment_idx]
        line = line.strip()
    # search multiple commands on the same line
    # Split on ';' with shlex so semicolons inside quotes are preserved.
    lexer = shlex.shlex(line)
    lexer.whitespace = ''
    for issemicolon, group in itertools.groupby(lexer, lambda x: x == ";"):
        if not issemicolon:
            self.onecmd_exec("".join(group))
Convenience function so you don't need to remember to put the \n at the end of the line.
def print(self, *args, end='\n', file=None):
    """Convenience function so you don't need to remember to put the \n
       at the end of the line. Writes to the shell's stdout unless an
       explicit file is given.
    """
    target = self.stdout if file is None else file
    text = ' '.join(str(arg) for arg in args) + end
    target.write(text)
Wrapper for catching exceptions since cmd seems to silently absorb them.
def filename_complete(self, text, line, begidx, endidx):
    """Wrapper for catching exceptions since cmd seems to silently
       absorb them.
    """
    try:
        return self.real_filename_complete(text, line, begidx, endidx)
    except:
        # Bare except is intentional: any exception raised inside a
        # completer would otherwise vanish, so print it for debugging.
        traceback.print_exc()
Figure out what filenames match the completion.
def real_filename_complete(self, text, line, begidx, endidx):
    """Figure out what filenames match the completion."""
    # line contains the full command line that's been entered so far.
    # text contains the portion of the line that readline is trying to complete
    # text should correspond to line[begidx:endidx]
    #
    # The way the completer works text will start after one of the characters
    # in DELIMS. So if the filename entered so far was "embedded\ sp" then
    # text will point to the s in sp.
    #
    # The following bit of logic backs up to find the real beginning of the
    # filename.
    if begidx >= len(line):
        # This happens when you hit TAB on an empty filename
        before_match = begidx
    else:
        for before_match in range(begidx, 0, -1):
            if line[before_match] in DELIMS and before_match >= 1 and line[before_match - 1] != '\\':
                break
    # We set fixed to be the portion of the filename which is before text
    # and match is the full portion of the filename that's been entered so
    # far (that's the part we use for matching files).
    #
    # When we return a list of completions, the bit that we return should
    # just be the portion that we replace 'text' with.
    fixed = unescape(line[before_match+1:begidx])    # fixed portion of the match
    match = unescape(line[before_match+1:endidx])    # portion to match filenames against
    # We do the following to cover the case that the current directory
    # is / and the path being entered is relative.
    strip = ''
    if len(match) > 0 and match[0] == '/':
        abs_match = match
    elif cur_dir == '/':
        abs_match = cur_dir + match
        strip = cur_dir
    else:
        abs_match = cur_dir + '/' + match
        strip = cur_dir + '/'
    completions = []
    prepend = ''
    if abs_match.rfind('/') == 0:    # match is in the root directory
        # This means that we're looking for matches in the root directory
        # (i.e. abs_match is /foo and the user hit TAB).
        # So we'll supply the matching board names as possible completions.
        # Since they're all treated as directories we leave the trailing slash.
        with DEV_LOCK:
            if match[0] == '/':
                completions += [dev.name_path for dev in DEVS if dev.name_path.startswith(abs_match)]
            else:
                completions += [dev.name_path[1:] for dev in DEVS if dev.name_path.startswith(abs_match)]
        if DEFAULT_DEV:
            # Add root directories of the default device (i.e. /flash/ and /sd/)
            if match[0] == '/':
                completions += [root_dir for root_dir in DEFAULT_DEV.root_dirs if root_dir.startswith(match)]
            else:
                completions += [root_dir[1:] for root_dir in DEFAULT_DEV.root_dirs if root_dir[1:].startswith(match)]
    else:
        # This means that there are at least 2 slashes in abs_match. If one
        # of them matches a board name then we need to remove the board
        # name from fixed. Since the results from listdir_matches won't
        # contain the board name, we need to prepend each of the completions.
        with DEV_LOCK:
            for dev in DEVS:
                if abs_match.startswith(dev.name_path):
                    prepend = dev.name_path[:-1]
                    break
    paths = sorted(auto(listdir_matches, abs_match))
    for path in paths:
        path = prepend + path
        if path.startswith(strip):
            path = path[len(strip):]
        completions.append(escape(path.replace(fixed, '', 1)))
    return completions
Figure out what directories match the completion.
def directory_complete(self, text, line, begidx, endidx):
    """Figure out what directories match the completion.

       Filters the filename completions down to the ones that end with a
       slash (i.e. directories).
    """
    candidates = self.filename_complete(text, line, begidx, endidx)
    return [name for name in candidates if name[-1] == '/']
This will convert the line passed into the do_xxx functions into an array of arguments and handle the Output Redirection Operator.
def line_to_args(self, line):
    """This will convert the line passed into the do_xxx functions into
       an array of arguments and handle the Output Redirection Operator.

       Side effects: sets self.redirect_filename, self.redirect_dev,
       self.redirect_mode and may replace self.stdout with a redirected
       SmartFile. Raises ShellError on malformed redirection.
    """
    # Note: using shlex.split causes quoted substrings to stay together.
    args = shlex.split(line)
    self.redirect_filename = ''
    self.redirect_dev = None
    redirect_index = -1
    if '>' in args:
        redirect_index = args.index('>')
    elif '>>' in args:
        redirect_index = args.index('>>')
    if redirect_index >= 0:
        if redirect_index + 1 >= len(args):
            # Bug fix: report the operator actually used ('>' or '>>')
            # instead of always saying '>'.
            raise ShellError("{} requires a filename".format(args[redirect_index]))
        self.redirect_filename = resolve_path(args[redirect_index + 1])
        rmode = auto(get_mode, os.path.dirname(self.redirect_filename))
        if not mode_isdir(rmode):
            raise ShellError("Unable to redirect to '%s', directory doesn't exist" %
                             self.redirect_filename)
        if args[redirect_index] == '>':
            self.redirect_mode = 'w'
            if DEBUG:
                print('Redirecting (write) to', self.redirect_filename)
        else:
            self.redirect_mode = 'a'
            if DEBUG:
                print('Redirecting (append) to', self.redirect_filename)
        self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename)
        try:
            if self.redirect_dev is None:
                self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode))
            else:
                # Redirecting to a remote device. We collect the results locally
                # and copy them to the remote device at the end of the command.
                self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+'))
        except OSError as err:
            raise ShellError(err)
        # Remove the operator and filename from the argument list.
        del args[redirect_index + 1]
        del args[redirect_index]
    curr_cmd, _, _ = self.parseline(self.lastcmd)
    parser = self.create_argparser(curr_cmd)
    if parser:
        args = parser.parse_args(args)
    return args
args [arguments...] Debug function for verifying argument parsing. This function just prints out each argument that it receives.
def do_args(self, line):
    """args [arguments...]

       Debug function for verifying argument parsing. This function just
       prints out each argument that it receives.
    """
    parsed = self.line_to_args(line)
    for idx, arg in enumerate(parsed):
        self.print("arg[%d] = '%s'" % (idx, arg))
boards Lists the boards that rshell is currently connected to.
def do_boards(self, _):
    """boards

       Lists the boards that rshell is currently connected to.
    """
    rows = []
    with DEV_LOCK:
        for dev in DEVS:
            if dev is DEFAULT_DEV:
                # The default device's dirs are reachable without the
                # board-name prefix as well.
                dirs = [dir[:-1] for dir in dev.root_dirs]
            else:
                dirs = []
            dirs += ['/{}{}'.format(dev.name, dir)[:-1] for dir in dev.root_dirs]
            dirs = 'Dirs: ' + ' '.join(dirs)
            epoch = 'Epoch: {}'.format(time.gmtime(dev.time_offset)[0])
            rows.append((dev.name, '@ %s' % dev.dev_name_short, dev.status(), epoch, dirs))
    if rows:
        column_print('<<<< ', rows, self.print)
    else:
        # Bug fix: use self.print (like the rest of this class) so the
        # message honors output redirection, instead of the builtin print.
        self.print('No boards connected')
cat FILENAME... Concatenates files and sends to stdout.
def do_cat(self, line): """cat FILENAME... Concatenates files and sends to stdout. """ # note: when we get around to supporting cat from stdin, we'll need # to write stdin to a temp file, and then copy the file # since we need to know the filesize when copying to the pyboard. args = self.line_to_args(line) for filename in args: filename = resolve_path(filename) mode = auto(get_mode, filename) if not mode_exists(mode): print_err("Cannot access '%s': No such file" % filename) continue if not mode_isfile(mode): print_err("'%s': is not a file" % filename) continue cat(filename, self.stdout)
cd DIRECTORY Changes the current directory. ~ expansion is supported, and cd - goes to the previous directory.
def do_cd(self, line):
    """cd DIRECTORY

       Changes the current directory. ~ expansion is supported, and cd -
       goes to the previous directory.
    """
    args = self.line_to_args(line)
    if len(args) == 0:
        dirname = '~'
    else:
        if args[0] == '-':
            dirname = self.prev_dir
        else:
            dirname = args[0]
    dirname = resolve_path(dirname)
    mode = auto(get_mode, dirname)
    if mode_isdir(mode):
        # Remember where we came from so 'cd -' can return here.
        global cur_dir
        self.prev_dir = cur_dir
        cur_dir = dirname
        auto(chdir, dirname)
    else:
        print_err("Directory '%s' does not exist" % dirname)
connect TYPE TYPE_PARAMS connect serial port [baud] connect telnet ip-address-or-name Connects a pyboard to rshell.
def do_connect(self, line):
    """connect TYPE TYPE_PARAMS
       connect serial port [baud]
       connect telnet ip-address-or-name

       Connects a pyboard to rshell.
    """
    args = self.line_to_args(line)
    num_args = len(args)
    if num_args < 1:
        print_err('Missing connection TYPE')
        return
    connect_type = args[0]
    if connect_type == 'serial':
        if num_args < 2:
            print_err('Missing serial port')
            return
        port = args[1]
        if num_args < 3:
            # Default baud rate when none is given.
            baud = 115200
        else:
            try:
                baud = int(args[2])
            except ValueError:
                print_err("Expecting baud to be numeric. Found '{}'".format(args[2]))
                return
        connect_serial(port, baud)
    elif connect_type == 'telnet':
        if num_args < 2:
            print_err('Missing hostname or ip-address')
            return
        name = args[1]
        connect_telnet(name)
    else:
        print_err('Unrecognized connection TYPE: {}'.format(connect_type))
cp SOURCE DEST Copy a single SOURCE file to DEST file. cp SOURCE... DIRECTORY Copy multiple SOURCE files to a directory. cp [-r|--recursive] [SOURCE|SOURCE_DIR]... DIRECTORY cp [-r] PATTERN DIRECTORY Copy matching files to DIRECTORY. The destination must be a directory except in the case of copying a single file. To copy directories -r must be specified. This will cause directories and their contents to be recursively copied.
def do_cp(self, line):
    """cp SOURCE DEST               Copy a single SOURCE file to DEST file.
       cp SOURCE... DIRECTORY       Copy multiple SOURCE files to a directory.
       cp [-r|--recursive] [SOURCE|SOURCE_DIR]... DIRECTORY
       cp [-r] PATTERN DIRECTORY    Copy matching files to DIRECTORY.

       The destination must be a directory except in the case of
       copying a single file. To copy directories -r must be specified.
       This will cause directories and their contents to be recursively
       copied.
    """
    args = self.line_to_args(line)
    if len(args.filenames) < 2:
        print_err('Missing destination file')
        return
    dst_dirname = resolve_path(args.filenames[-1])
    dst_mode = auto(get_mode, dst_dirname)
    d_dst = {}   # Destination directory: lookup stat by basename
    if args.recursive:
        dst_files = auto(listdir_stat, dst_dirname)
        if dst_files is None:
            err = "cp: target {} is not a directory"
            print_err(err.format(dst_dirname))
            return
        for name, stat in dst_files:
            d_dst[name] = stat
    src_filenames = args.filenames[:-1]
    # Process PATTERN: only a single pattern argument is allowed.
    sfn = src_filenames[0]
    if is_pattern(sfn):
        if len(src_filenames) > 1:
            print_err("Usage: cp [-r] PATTERN DIRECTORY")
            return
        src_filenames = process_pattern(sfn)
        if src_filenames is None:
            return
    for src_filename in src_filenames:
        if is_pattern(src_filename):
            print_err("Only one pattern permitted.")
            return
        src_filename = resolve_path(src_filename)
        src_mode = auto(get_mode, src_filename)
        if not mode_exists(src_mode):
            print_err("File '{}' doesn't exist".format(src_filename))
            return
        if mode_isdir(src_mode):
            if args.recursive:   # Copying a directory
                src_basename = os.path.basename(src_filename)
                dst_filename = dst_dirname + '/' + src_basename
                if src_basename in d_dst:
                    # Destination entry exists; it must be a directory.
                    dst_stat = d_dst[src_basename]
                    dst_mode = stat_mode(dst_stat)
                    if not mode_isdir(dst_mode):
                        err = "Destination {} is not a directory"
                        print_err(err.format(dst_filename))
                        return
                else:
                    if not mkdir(dst_filename):
                        err = "Unable to create directory {}"
                        print_err(err.format(dst_filename))
                        return
                rsync(src_filename, dst_filename, mirror=False,
                      dry_run=False, print_func=lambda *args: None,
                      recursed=False, sync_hidden=args.all)
            else:
                print_err("Omitting directory {}".format(src_filename))
            continue
        if mode_isdir(dst_mode):
            dst_filename = dst_dirname + '/' + os.path.basename(src_filename)
        else:
            dst_filename = dst_dirname
        if not cp(src_filename, dst_filename):
            err = "Unable to copy '{}' to '{}'"
            print_err(err.format(src_filename, dst_filename))
            break
echo TEXT... Display a line of text.
def do_echo(self, line):
    """echo TEXT...

       Display a line of text.
    """
    self.print(*self.line_to_args(line))
edit FILE Copies the file locally, launches an editor to edit the file. When the editor exits, if the file was modified then its copied back. You can specify the editor used with the --editor command line option when you start rshell, or by using the VISUAL or EDITOR environment variable. if none of those are set, then vi will be used.
def do_edit(self, line):
    """edit FILE

       Copies the file locally, launches an editor to edit the file.
       When the editor exits, if the file was modified then it's copied
       back.

       You can specify the editor used with the --editor command line
       option when you start rshell, or by using the VISUAL or EDITOR
       environment variable. If none of those are set, then vi will be
       used.
    """
    if len(line) == 0:
        print_err("Must provide a filename")
        return
    filename = resolve_path(line)
    dev, dev_filename = get_dev_and_path(filename)
    mode = auto(get_mode, filename)
    if mode_exists(mode) and mode_isdir(mode):
        print_err("Unable to edit directory '{}'".format(filename))
        return
    if dev is None:
        # File is local
        os.system("{} '{}'".format(EDITOR, filename))
    else:
        # File is remote: pull it to a temp dir, edit, and push back
        # only if the stat changed (i.e. the editor saved something).
        with tempfile.TemporaryDirectory() as temp_dir:
            local_filename = os.path.join(temp_dir, os.path.basename(filename))
            if mode_exists(mode):
                print('Retrieving {} ...'.format(filename))
                cp(filename, local_filename)
            old_stat = get_stat(local_filename)
            os.system("{} '{}'".format(EDITOR, local_filename))
            new_stat = get_stat(local_filename)
            if old_stat != new_stat:
                self.print('Updating {} ...'.format(filename))
                cp(local_filename, filename)
filesize FILE Prints the size of the file, in bytes. This function is primarily for testing.
def do_filesize(self, line):
    """filesize FILE

       Prints the size of the file, in bytes. This function is primarily
       for testing.
    """
    if len(line) == 0:
        print_err("Must provide a filename")
        return
    filename = resolve_path(line)
    self.print(auto(get_filesize, filename))
filetype FILE Prints the type of file (dir or file). This function is primarily for testing.
def do_filetype(self, line):
    """filetype FILE

       Prints the type of file (dir or file). This function is primarily
       for testing.
    """
    if len(line) == 0:
        print_err("Must provide a filename")
        return
    filename = resolve_path(line)
    mode = auto(get_mode, filename)
    if mode_exists(mode):
        if mode_isdir(mode):
            self.print('dir')
        elif mode_isfile(mode):
            self.print('file')
        else:
            # Exists but is neither a regular file nor a directory
            # (e.g. a device node).
            self.print('unknown')
    else:
        self.print('missing')
help [COMMAND] List available commands with no arguments, or detailed help when a command is provided.
def do_help(self, line): """help [COMMAND] List available commands with no arguments, or detailed help when a command is provided. """ # We provide a help function so that we can trim the leading spaces # from the docstrings. The builtin help function doesn't do that. if not line: cmd.Cmd.do_help(self, line) self.print(EXIT_STR) return parser = self.create_argparser(line) if parser: parser.print_help() return try: doc = getattr(self, 'do_' + line).__doc__ if doc: self.print("%s" % trim(doc)) return except AttributeError: pass self.print(str(self.nohelp % (line,)))
ls [-a] [-l] [FILE|DIRECTORY|PATTERN]... PATTERN supports * ? [seq] [!seq] Unix filename matching List directory contents.
def do_ls(self, line):
    """ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...

       PATTERN supports * ? [seq] [!seq] Unix filename matching

       List directory contents.
    """
    args = self.line_to_args(line)
    if len(args.filenames) == 0:
        args.filenames = ['.']
    for idx, fn in enumerate(args.filenames):
        if not is_pattern(fn):
            filename = resolve_path(fn)
            stat = auto(get_stat, filename)
            mode = stat_mode(stat)
            if not mode_exists(mode):
                err = "Cannot access '{}': No such file or directory"
                print_err(err.format(filename))
                continue
            if not mode_isdir(mode):
                # Plain file: print it directly, no directory listing.
                if args.long:
                    print_long(fn, stat, self.print)
                else:
                    self.print(fn)
                continue
            if len(args.filenames) > 1:
                # Multiple arguments: print a header per directory.
                if idx > 0:
                    self.print('')
                self.print("%s:" % filename)
            pattern = '*'
        else:
            # A pattern was specified
            filename, pattern = validate_pattern(fn)
            if filename is None:   # An error was printed
                continue
        files = []
        ldir_stat = auto(listdir_stat, filename)
        if ldir_stat is None:
            err = "Cannot access '{}': No such file or directory"
            print_err(err.format(filename))
        else:
            for filename, stat in sorted(ldir_stat,
                                         key=lambda entry: entry[0]):
                # Hidden files are skipped unless -a was given.
                if is_visible(filename) or args.all:
                    if fnmatch.fnmatch(filename, pattern):
                        if args.long:
                            print_long(filename, stat, self.print)
                        else:
                            files.append(decorated_filename(filename, stat))
        if len(files) > 0:
            print_cols(sorted(files), self.print, self.columns)
mkdir DIRECTORY... Creates one or more directories.
def do_mkdir(self, line):
    """mkdir DIRECTORY...

       Creates one or more directories.
    """
    args = self.line_to_args(line)
    for filename in args:
        filename = resolve_path(filename)
        if not mkdir(filename):
            print_err('Unable to create %s' % filename)
Runs as a thread which has a sole purpose of reading bytes from the serial port and writing them to stdout. Used by do_repl.
def repl_serial_to_stdout(self, dev):
    """Runs as a thread which has a sole purpose of reading bytes from
       the serial port and writing them to stdout. Used by do_repl.
    """
    with self.serial_reader_running:
        try:
            save_timeout = dev.timeout
            # Set a timeout so that the read returns periodically with no data
            # and allows us to check whether the main thread wants us to quit.
            dev.timeout = 1
            while not self.quit_serial_reader:
                try:
                    char = dev.read(1)
                except serial.serialutil.SerialException:
                    # This happens if the pyboard reboots, or a USB port
                    # goes away.
                    return
                except TypeError:
                    # This is a bug in serialposix.py starting with python 3.3
                    # which causes a TypeError during the handling of the
                    # select.error. So we treat this the same as
                    # serial.serialutil.SerialException:
                    return
                except ConnectionResetError:
                    # This happens over a telnet session, if it resets
                    return
                if not char:
                    # This means that the read timed out. We'll check the quit
                    # flag and return if needed
                    if self.quit_when_no_output:
                        break
                    continue
                self.stdout.write(char)
                self.stdout.flush()
            dev.timeout = save_timeout
        except DeviceError:
            # The device is no longer present.
            return
repl [board-name] [~ line [~]] Enters into the regular REPL with the MicroPython board. Use Control-X to exit REPL mode and return the shell. It may take a second or two before the REPL exits. If you provide a line to the REPL command, then that will be executed. If you want the REPL to exit, end the line with the ~ character.
def do_repl(self, line):
    """repl [board-name] [~ line [~]]

       Enters into the regular REPL with the MicroPython board.
       Use Control-X to exit REPL mode and return the shell. It may take
       a second or two before the REPL exits.

       If you provide a line to the REPL command, then that will be
       executed. If you want the REPL to exit, end the line with the ~
       character.
    """
    args = self.line_to_args(line)
    if len(args) > 0 and line[0] != '~':
        # First word is a board name; the rest is the line to execute.
        name = args[0]
        line = ' '.join(args[1:])
    else:
        name = ''
    dev = find_device_by_name(name)
    if not dev:
        print_err("Unable to find board '%s'" % name)
        return
    if line[0:2] == '~ ':
        line = line[2:]
    self.print('Entering REPL. Use Control-%c to exit.' % QUIT_REPL_CHAR)
    self.quit_serial_reader = False
    self.quit_when_no_output = False
    self.serial_reader_running = AutoBool()
    # Background thread copies the board's output to our stdout.
    repl_thread = threading.Thread(target=self.repl_serial_to_stdout,
                                   args=(dev,), name='REPL_serial_to_stdout')
    repl_thread.daemon = True
    repl_thread.start()
    # Wait for reader to start
    while not self.serial_reader_running():
        pass
    try:
        # Wake up the prompt
        dev.write(b'\r')
        if line:
            if line[-1] == '~':
                # Trailing ~ means: run the line and exit the REPL when
                # output stops.
                line = line[:-1]
                self.quit_when_no_output = True
            line = ';'.join(line.split('~'))
            dev.write(bytes(line, encoding='utf-8'))
            dev.write(b'\r')
        if not self.quit_when_no_output:
            while self.serial_reader_running():
                char = getch()
                if not char:
                    continue
                if char == QUIT_REPL_BYTE:
                    self.quit_serial_reader = True
                    # When using telnet with the WiPy, it doesn't support
                    # an initial timeout. So for the meantime, we send a
                    # space which should cause the wipy to echo back a
                    # space which will wakeup our reader thread so it will
                    # notice the quit.
                    dev.write(b' ')
                    # Give the reader thread a chance to detect the quit
                    # then we don't have to call getch() above again which
                    # means we'd need to wait for another character.
                    time.sleep(0.5)
                    # Print a newline so that the rshell prompt looks good.
                    self.print('')
                    # We stay in the loop so that we can still enter
                    # characters until we detect the reader thread quitting
                    # (mostly to cover off weird states).
                    continue
                if char == b'\n':
                    dev.write(b'\r')
                else:
                    dev.write(char)
    except DeviceError as err:
        # The device is no longer present.
        self.print('')
        self.stdout.flush()
        print_err(err)
    repl_thread.join()
rm [-f|--force] FILE... Remove one or more files rm [-f|--force] PATTERN Remove multiple files rm -r [-f|--force] [FILE|DIRECTORY]... Files and/or directories rm -r [-f|--force] PATTERN Multiple files and/or directories Removes files or directories. To remove directories (and any contents) -r must be specified.
def do_rm(self, line):
    """rm [-f|--force] FILE...            Remove one or more files
       rm [-f|--force] PATTERN            Remove multiple files
       rm -r [-f|--force] [FILE|DIRECTORY]...   Files and/or directories
       rm -r [-f|--force] PATTERN         Multiple files and/or directories

       Removes files or directories. To remove directories (and
       any contents) -r must be specified.
    """
    args = self.line_to_args(line)
    filenames = args.filename
    # Process PATTERN: only one pattern argument is permitted.
    sfn = filenames[0]
    if is_pattern(sfn):
        if len(filenames) > 1:
            print_err("Usage: rm [-r] [-f] PATTERN")
            return
        filenames = process_pattern(sfn)
        if filenames is None:
            return
    for filename in filenames:
        filename = resolve_path(filename)
        if not rm(filename, recursive=args.recursive, force=args.force):
            # With --force, failures are silently ignored (like rm -f).
            if not args.force:
                print_err("Unable to remove '{}'".format(filename))
            break
rsync [-m|--mirror] [-n|--dry-run] [-q|--quiet] SRC_DIR DEST_DIR Synchronizes a destination directory tree with a source directory tree.
def do_rsync(self, line):
    """rsync [-m|--mirror] [-n|--dry-run] [-q|--quiet] SRC_DIR DEST_DIR

       Synchronizes a destination directory tree with a source directory
       tree.
    """
    args = self.line_to_args(line)
    src_dir = resolve_path(args.src_dir)
    dst_dir = resolve_path(args.dst_dir)
    verbose = not args.quiet
    # Progress printer: a no-op unless verbose or dry-run was requested.
    pf = print if args.dry_run or verbose else lambda *args : None
    rsync(src_dir, dst_dir, mirror=args.mirror, dry_run=args.dry_run,
          print_func=pf, recursed=False, sync_hidden=args.all)
Set the status of the motor to the specified value if not already set.
def set_status(self, value):
    """
    Set the status of the motor to the specified value if not already set.

    Logs the transition and invokes the _statusChanged hook with the old
    and new values.
    """
    # Idiom fix: use != rather than `not ... == ...`.
    if self._status != value:
        old = self._status
        self._status = value
        logger.info("{} changing status from {} to {}".format(self, old.name, value.name))
        self._statusChanged(old, value)
Set the status to Status.stopping and also call `onStopping` with the provided args and kwargs.
def stop(self, *args, **kwargs):
    """
    Set the status to Status.stopping and also call `onStopping`
    with the provided args and kwargs.

    A no-op if the motor is already stopping or stopped.
    """
    if self.status in (Status.stopping, Status.stopped):
        logger.debug("{} is already {}".format(self, self.status.name))
    else:
        self.status = Status.stopping
        self.onStopping(*args, **kwargs)
        self.status = Status.stopped
Returns name and corresponding instance name of the next node which is supposed to be a new Primary for backup instance in round-robin fashion starting from primary of master instance.
def next_primary_replica_name_for_backup(self, instance_id, master_primary_rank,
                                         primaries, node_reg, node_ids):
    """
    Returns name and corresponding instance name of the next node which
    is supposed to be a new Primary for backup instance in round-robin
    fashion starting from primary of master instance.

    NOTE(review): loops until a node not already in `primaries` is found;
    assumes len(primaries) < total_nodes — confirm callers guarantee this.
    """
    if node_reg is None:
        node_reg = self.node.nodeReg
    total_nodes = len(node_reg)
    rank = (master_primary_rank + 1) % total_nodes
    name = self.node.get_name_by_rank(rank, node_reg, node_ids)
    # Skip over nodes that are already primaries of other instances.
    while name in primaries:
        rank = (rank + 1) % total_nodes
        name = self.node.get_name_by_rank(rank, node_reg, node_ids)
    return name, Replica.generateName(nodeName=name, instId=instance_id)
Returns name and corresponding instance name of the next node which is supposed to be a new Primary. In fact it is not round-robin on this abstraction layer as currently the primary of master instance is pointed directly depending on view number, instance id and total number of nodes. But since the view number is incremented by 1 before primary selection then current approach may be treated as round robin.
def next_primary_replica_name_for_master(self, node_reg, node_ids):
    """
    Returns name and corresponding instance name of the next node which
    is supposed to be a new Primary. In fact it is not round-robin on
    this abstraction layer as currently the primary of master instance
    is pointed directly depending on view number, instance id and total
    number of nodes. But since the view number is incremented by 1
    before primary selection then current approach may be treated as
    round robin.
    """
    name = self._next_primary_node_name_for_master(node_reg, node_ids)
    # Master instance always has instance id 0.
    return name, Replica.generateName(nodeName=name, instId=0)
Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas.
def process_selection(self, instance_count, node_reg, node_ids):
    # Select primaries for current view_no
    # Returns the ordered list of primary node names, one per instance
    # (index 0 is the master instance).
    if instance_count == 0:
        return []

    '''
    Build a set of names of primaries, it is needed to avoid
    duplicates of primary nodes for different replicas.
    '''
    primaries = []
    primary_rank = None

    for i in range(instance_count):
        if i == 0:
            # Master primary is chosen first; backups rotate from its rank.
            primary_name = self._next_primary_node_name_for_master(node_reg, node_ids)
            primary_rank = self.node.get_rank_by_name(primary_name, node_reg, node_ids)
            if primary_rank is None:
                raise LogicError('primary_rank must not be None')
        else:
            primary_name, _ = self.next_primary_replica_name_for_backup(
                i, primary_rank, primaries, node_reg, node_ids)

        primaries.append(primary_name)
        logger.display("{} selected primary {} for instance {} (view {})"
                       .format(PRIMARY_SELECTION_PREFIX,
                               primary_name, i, self.viewNo),
                       extra={"cli": "ANNOUNCE",
                              "tags": ["node-election"]})
    # Sanity checks: one distinct primary per instance.
    if len(primaries) != instance_count:
        raise LogicError('instances inconsistency')
    if len(primaries) != len(set(primaries)):
        raise LogicError('repeating instances')
    return primaries
Count the number of stewards added to the pool transaction store Note: This is inefficient, a production use case of this function should require an efficient storage mechanism
def countStewards(self) -> int: """ Count the number of stewards added to the pool transaction store Note: This is inefficient, a production use case of this function should require an efficient storage mechanism """ # THIS SHOULD NOT BE DONE FOR PRODUCTION return sum(1 for _, txn in self.ledger.getAllTxn() if (get_type(txn) == NYM) and (get_payload_data(txn).get(ROLE) == STEWARD))
Get a value (and proof optionally)for the given path in state trie. Does not return the proof is there is no aggregate signature for it. :param path: the path generate a state proof for :param head_hash: the root to create the proof against :param get_value: whether to return the value :return: a state proof or None
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
    '''
    Get a value (and proof optionally) for the given path in state trie.
    Does not return the proof if there is no aggregate signature for it.

    :param path: the path to generate a state proof for
    :param head_hash: the root to create the proof against
        (defaults to the committed head)
    :param with_proof: whether to return a state proof with the value
    :param multi_sig: pre-fetched multi-signature; looked up from the
        BLS store when not supplied and a proof is requested
    :return: a state proof or None
    '''
    if not multi_sig and with_proof:
        root_hash = head_hash if head_hash else self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
        multi_sig = self.bls_store.get(encoded_root_hash)
    return super().get_value_from_state(path, head_hash, with_proof, multi_sig)
Takes all Ordered messages from outbox out of turn
def take_ordereds_out_of_turn(self) -> tuple:
    """
    Takes all Ordered messages from outbox out of turn.

    NOTE: despite the `tuple` annotation this is a generator yielding
    (instance id, removed-ordered-messages) pairs, one per replica.
    """
    for replica in self._replicas.values():
        yield replica.instId, replica._remove_ordered_from_queue()
Create a new replica with the specified parameters.
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica:
    """
    Create a new replica with the specified parameters.

    :param instance_id: protocol instance the replica belongs to
    :param is_master: whether this replica serves the master instance
    :param bls_bft: BLS machinery the replica will use
    :return: the newly constructed replica
    """
    return self._replica_class(self._node, instance_id, self._config,
                               is_master, bls_bft, self._metrics)
Restore removed replicas to requiredNumberOfInstances :return:
def restore_replicas(self) -> None:
    '''
    Restore removed replicas to requiredNumberOfInstances

    Clears the faulty-backup bookkeeping and re-adds any missing
    replica instance ids.
    :return:
    '''
    self.backup_instances_faulty.clear()
    for inst_id in range(self.node.requiredNumberOfInstances):
        if inst_id not in self.node.replicas.keys():
            self.node.replicas.add_replica(inst_id)
The method for sending BackupInstanceFaulty messages if backups instances performance were degraded :param degraded_backups: list of backup instances ids which performance were degraded :return:
def on_backup_degradation(self, degraded_backups) -> None:
    '''
    The method for sending BackupInstanceFaulty messages if backup
    instances' performance was degraded

    :param degraded_backups: list of backup instance ids whose
        performance was degraded
    :return:
    '''
    self.__remove_replicas(degraded_backups,
                           Suspicions.BACKUP_PRIMARY_DEGRADED,
                           self.node.config.REPLICAS_REMOVING_WITH_DEGRADATION)
The method for sending BackupInstanceFaulty messages if backup primary disconnected :param degraded_backups: list of backup instances ids which performance were degraded :return:
def on_backup_primary_disconnected(self, degraded_backups) -> None:
    '''
    The method for sending BackupInstanceFaulty messages if a backup
    primary disconnected

    :param degraded_backups: list of backup instance ids whose primary
        disconnected
    :return:
    '''
    self.__remove_replicas(degraded_backups,
                           Suspicions.BACKUP_PRIMARY_DISCONNECTED,
                           self.node.config.REPLICAS_REMOVING_WITH_PRIMARY_DISCONNECTED)
The method for processing BackupInstanceFaulty from nodes and removing replicas with performance were degraded :param backup_faulty: BackupInstanceFaulty message with instances for removing :param frm: :return:
def process_backup_instance_faulty_msg(self, backup_faulty: BackupInstanceFaulty, frm: str) -> None:
    '''
    Process a BackupInstanceFaulty message received from another node and
    remove backup replicas once enough removal votes have accumulated.

    :param backup_faulty: BackupInstanceFaulty message listing the instance
        ids proposed for removal
    :param frm: name of the node the message came from
    :return:
    '''
    logger.debug("{} receive BackupInstanceFaulty "
                 "from {}: {}".format(self.node, frm, backup_faulty))
    instances = getattr(backup_faulty, f.INSTANCES.nm)
    # Ignore messages from a different view, and never remove the master
    # instance.
    if getattr(backup_faulty, f.VIEW_NO.nm) != self.node.viewNo or \
            self.node.master_replica.instId in instances:
        return
    # Don't process BackupInstanceFaulty if the strategy for this reason
    # does not need a quorum
    reason = Suspicions.get_by_code(getattr(backup_faulty, f.REASON.nm))
    if (reason == Suspicions.BACKUP_PRIMARY_DISCONNECTED and
            not self.is_quorum_strategy(self.node.config.REPLICAS_REMOVING_WITH_PRIMARY_DISCONNECTED)) or \
            (reason == Suspicions.BACKUP_PRIMARY_DEGRADED and
             not self.is_quorum_strategy(self.node.config.REPLICAS_REMOVING_WITH_DEGRADATION)):
        return
    for inst_id in getattr(backup_faulty, f.INSTANCES.nm):
        # Skip instances this node no longer runs
        if inst_id not in self.node.replicas.keys():
            continue
        # Record one more vote from `frm` against this instance
        self.backup_instances_faulty.setdefault(inst_id, dict()).setdefault(frm, 0)
        self.backup_instances_faulty[inst_id][frm] += 1
        # Removal condition 1: a quorum of distinct nodes voted for removal
        all_nodes_condition = self.node.quorums.backup_instance_faulty.is_reached(
            len(self.backup_instances_faulty[inst_id].keys()))
        # Removal condition 2: this node's own vote count alone reaches the
        # quorum threshold
        this_node_condition = (self.node.name in self.backup_instances_faulty[inst_id] and
                               self.node.quorums.backup_instance_faulty.is_reached(
                                   self.backup_instances_faulty[inst_id][self.node.name]))
        if all_nodes_condition or this_node_condition:
            self.node.replicas.remove_replica(inst_id)
            # Reset bookkeeping for the removed instance
            self.backup_instances_faulty.pop(inst_id)
Return the value of the following geometric series sum. .. math:: a * (e^{b \cdot steps} - 1) / (e^b - 1) :param a: multiplier :param b: exponent multiplier :param steps: the number of steps
def _sumSeries(a: float, b: float, steps: int) -> float: """ Return value of the the following polynomial. .. math:: (a * e^(b*steps) - 1) / (e^b - 1) :param a: multiplier :param b: exponent multiplier :param steps: the number of steps """ return a * (exp(b * steps) - 1) / (exp(b) - 1)
Finds a b-value (common ratio) that satisfies a total duration within 1 millisecond. Not terribly efficient, so using lru_cache. Don't know a way to compute the common ratio when the sum of a finite geometric series is known. Found myself needing to factor polynomials with an arbitrarily high degree. :param start: a-value :param steps: how many steps :param total: total duration of the series of n-steps :return: b value
def goalDuration(start: float, steps: int, total: float) -> float:
    """
    Find a b-value (common ratio exponent) whose series sum matches the
    requested total duration to within 1 millisecond.

    Bisection-style search: widen the bracket until both bounds exist,
    then halve it. Not terribly efficient, so using lru_cache.

    :param start: a-value (first term)
    :param steps: how many steps
    :param total: total duration of the series of n-steps
    :return: b value
    """
    a = start
    upper = None
    lower = None
    b = 1.0
    while True:
        diff = Ratchet._sumSeries(a, b, steps) - total
        if abs(diff) < .001:
            return b
        if diff < 0:
            # Sum too small: raise the lower bound and move b up
            lower = b
            b = (upper + b) / 2 if upper else b + 1
        else:
            # Sum too large: lower the upper bound and move b down
            upper = b
            b = (lower + b) / 2 if lower else b / 2
Acquires lock for action. :return: True and 0.0 if lock successfully acquired or False and number of seconds to wait before the next try
def acquire(self):
    """
    Acquires lock for action.

    :return: True and 0.0 if lock successfully acquired or
    False and number of seconds to wait before the next try
    """
    now = self.get_current_time()
    logger.debug("now: {}, len(actionsLog): {}".format(
        now, len(self.actionsLog)))
    # Prune the attempt log first — presumably drops entries too old to
    # matter for the back-off; TODO confirm against _trimActionsLog
    self._trimActionsLog(now)
    logger.debug("after trim, len(actionsLog): {}".format(
        len(self.actionsLog)))
    if len(self.actionsLog) == 0:
        # No recorded attempts: acquire immediately and record this one
        self.actionsLog.append(now)
        logger.debug("len(actionsLog) was 0, after append, len(actionsLog):"
                     " {}".format(len(self.actionsLog)))
        return True, 0.0
    # Required back-off is a function of how many attempts are on record
    timeToWaitAfterPreviousTry = self.delayFunction(len(self.actionsLog))
    timePassed = now - self.actionsLog[-1]
    logger.debug("timeToWaitAfterPreviousTry: {}, timePassed: {}".
                 format(timeToWaitAfterPreviousTry, timePassed))
    if timeToWaitAfterPreviousTry < timePassed:
        # Enough time has elapsed since the last attempt: acquire and record
        self.actionsLog.append(now)
        logger.debug(
            "timeToWaitAfterPreviousTry < timePassed was true, after "
            "append, len(actionsLog): {}".format(len(self.actionsLog)))
        return True, 0.0
    else:
        # Still inside the back-off window: report the remaining wait
        logger.debug(
            "timeToWaitAfterPreviousTry < timePassed was false, "
            "len(actionsLog): {}".format(len(self.actionsLog)))
        return False, timeToWaitAfterPreviousTry - timePassed
Decorator which helps in creating lazy properties
def lazy_field(prop):
    """
    Decorator turning a zero-argument method into a lazily evaluated,
    cached property. The computed value is stored per instance in the
    module-level ``_lazy_value_cache`` and reused on later accesses.
    """
    @property
    def wrapper(self):
        per_instance = _lazy_value_cache.setdefault(self, {})
        if prop not in per_instance:
            per_instance[prop] = prop(self)
        return per_instance[prop]
    return wrapper
Transform a client request such that it can be stored in the ledger. Also this is what will be returned to the client in the reply :param req: :return:
def reqToTxn(req):
    """
    Convert a client request into the transaction form stored in the
    ledger (and returned to the client in the reply).

    :param req: a Request instance, a dict, or a JSON string of a request
    :return: the transaction produced by ``do_req_to_txn``
    :raises TypeError: if ``req`` is none of the accepted types
    """
    if isinstance(req, str):
        req = json.loads(req)
    if isinstance(req, dict):
        # Re-hydrate a full request object from the raw fields
        req = TxnUtilConfig.client_request_class(
            identifier=req.get(f.IDENTIFIER.nm, None),
            reqId=req.get(f.REQ_ID.nm, None),
            operation=req.get(OPERATION, None),
            signature=req.get(f.SIG.nm, None),
            signatures=req.get(f.SIGS.nm, None),
            protocolVersion=req.get(f.PROTOCOL_VERSION.nm, None))
    if not isinstance(req, Request):
        raise TypeError(
            "Expected dict or str as input, but got: {}".format(type(req)))
    req_data = req.as_dict
    req_data[f.DIGEST.nm] = req.digest
    req_data[f.PAYLOAD_DIGEST.nm] = req.payload_digest
    # Copy so the request's own dict is never mutated downstream
    req_data = deepcopy(req_data)
    return do_req_to_txn(req_data=req_data, req_op=req_data[OPERATION])
Return the names of the remote nodes this node is connected to. Not all of these nodes may be used for communication (as opposed to conns property)
def connecteds(self) -> Set[str]:
    """
    Names of the remote nodes this node is connected to. Not all of
    these nodes may be used for communication (as opposed to the conns
    property).
    """
    connected = set()
    for remote in self.remotes.values():
        if self.isRemoteConnected(remote):
            connected.add(remote.name)
    return connected
Find the remote by name or ha. :param name: the name of the remote to find :param ha: host address pair the remote to find :raises: RemoteNotFound
def getRemote(self, name: str = None, ha: HA = None):
    """
    Find a remote by name or by host-address pair. Name takes
    precedence when both are supplied.

    :param name: the name of the remote to find
    :param ha: host address pair of the remote to find
    :raises: RemoteNotFound
    """
    if name:
        return self.findInRemotesByName(name)
    return self.findInRemotesByHA(ha)
Find the remote by name. :param name: the name of the remote to find :raises: RemoteNotFound
def findInRemotesByName(self, name: str):
    """
    Find the remote with the given name.

    :param name: the name of the remote to find
    :raises RemoteNotFound: when no remote has that name
    :raises DuplicateRemotes: when more than one remote has that name
    """
    matches = [remote for remote in self.remotes.values()
               if remote.name == name]
    if not matches:
        raise RemoteNotFound(name)
    if len(matches) > 1:
        raise DuplicateRemotes(matches)
    return matches[0]
Remove the remote by name. :param name: the name of the remote to remove :raises: RemoteNotFound
def removeRemoteByName(self, name: str) -> int:
    """
    Remove the remote with the given name and return its uid.

    :param name: the name of the remote to remove
    :raises: RemoteNotFound
    """
    found = self.getRemote(name)
    uid = found.uid
    self.removeRemote(found)
    return uid
Check whether the two arguments correspond to the same address
def sameAddr(self, ha, ha2) -> bool:
    """
    True iff the two (host, port) pairs denote the same address.
    Two distinct local hosts on the same port are also considered equal.
    """
    if ha == ha2:
        return True
    host1, port1 = ha[0], ha[1]
    host2, port2 = ha2[0], ha2[1]
    if port1 != port2:
        return False
    local = self.localips
    return host1 in local and host2 in local
Partitions the remotes into connected and disconnected :return: tuple(connected remotes, disconnected remotes)
def remotesByConnected(self):
    """
    Partition the remotes into connected and disconnected.

    :return: tuple(connected remotes, disconnected remotes)
    """
    connected, disconnected = [], []
    for remote in self.remotes.values():
        if self.isRemoteConnected(remote):
            connected.append(remote)
        else:
            disconnected.append(remote)
    return connected, disconnected
Service at most `limit` messages from the inBox. :param limit: the maximum number of messages to service :return: the number of messages successfully processed
async def serviceQueues(self, limit=None) -> int:
    """
    Service at most `limit` messages from the inBox.

    :param limit: the maximum number of messages to service, or None
        for no limit
    :return: the number of messages successfully processed
    """
    processed = await self._inbox_router.handleAll(self._inbox, limit)
    return processed
Adds signer to the wallet. Requires complete signer, identifier or seed. :param identifier: signer identifier or None to use random one :param seed: signer key seed or None to use random one :param signer: signer to add :param alias: a friendly readable name for the signer :param didMethodName: name of DID Method if not the default :return:
def addIdentifier(self, identifier=None, seed=None, signer=None, alias=None, didMethodName=None):
    """
    Add a signer to the wallet. Requires a complete signer, or an
    identifier/seed from which one is created.

    :param identifier: signer identifier or None to use a random one
    :param seed: signer key seed or None to use a random one
    :param signer: signer to add
    :param alias: a friendly readable name for the signer
    :param didMethodName: name of DID Method if not the default
    :return: (identifier, signer) for the signer that was added
    """
    dm = self.didMethods.get(didMethodName)
    if not signer:
        signer = dm.newSigner(identifier=identifier, seed=seed)
    self.idsToSigners[signer.identifier] = signer
    if self.defaultId is None:
        # The first signer added becomes the wallet default, so that the
        # sign* methods work without an explicit signer
        self.defaultId = signer.identifier
    if alias:
        signer.alias = alias
    if signer.alias:
        self.aliasesToIds[signer.alias] = signer.identifier
    return signer.identifier, signer
Update signer for an already present identifier. The passed signer should have the same identifier as `identifier` or an error is raised. Also if the existing identifier has an alias in the wallet then the passed signer is given the same alias :param identifier: existing identifier in the wallet :param signer: new signer to update too :return:
def updateSigner(self, identifier, signer):
    """
    Replace the signer for an identifier already present in the wallet.
    The new signer must carry the same identifier; if the old signer had
    an alias registered in the wallet, the new signer inherits it.

    :param identifier: existing identifier in the wallet
    :param signer: new signer to store
    :raises ValueError: if the signer's identifier does not match
    :raises KeyError: if the identifier is not in the wallet
    """
    if identifier != signer.identifier:
        raise ValueError(
            "Passed signer has identifier {} but it should"
            " have been {}".format(signer.identifier, identifier))
    if identifier not in self.idsToSigners:
        raise KeyError(
            "Identifier {} not present in wallet".format(identifier))
    previous = self.idsToSigners[identifier]
    alias = previous.alias
    if alias and alias in self.aliasesToIds:
        logger.debug('Changing alias of passed signer to {}'.format(alias))
        signer.alias = alias
    self.idsToSigners[identifier] = signer
Checks whether signer identifier specified, or can it be inferred from alias or can be default used instead :param idr: :param alias: :param other: :return: signer identifier
def requiredIdr(self, idr: Identifier=None, alias: str=None):
    """
    Resolve the signer identifier: use the explicit one when given,
    else look up the alias, else fall back to the wallet default.

    :param idr: explicit identifier (a "prefix:id" form is split and the
        second segment used)
    :param alias: friendly name to resolve via aliasesToIds
    :return: signer identifier
    :raises EmptyIdentifier: when nothing resolves to a usable identifier
    """
    # TODO Need to create a new Identifier type that supports DIDs and CIDs
    chosen = idr
    if chosen:
        if ':' in chosen:
            chosen = chosen.split(':')[1]
    elif alias:
        chosen = self.aliasesToIds[alias]
    else:
        chosen = self.defaultId
    if not chosen:
        raise EmptyIdentifier
    return chosen
Creates signature for message using specified signer :param msg: message to sign :param identifier: signer identifier :param otherIdentifier: :return: signature that then can be assigned to request
def signMsg(self, msg: Dict, identifier: Identifier=None, otherIdentifier: Identifier=None):
    """
    Create a signature for a message using the resolved signer.

    :param msg: message to sign
    :param identifier: signer identifier
    :param otherIdentifier: fallback identifier when ``identifier`` is
        not given
    :return: signature that can then be assigned to a request
    """
    idr = self.requiredIdr(idr=identifier or otherIdentifier)
    chosen_signer = self._signerById(idr)
    return chosen_signer.sign(msg)
Signs request. Modifies reqId and signature. May modify identifier. :param req: request :param requestIdStore: request id generator :param identifier: signer identifier :return: signed request
def signRequest(self, req: Request, identifier: Identifier=None) -> Request:
    """
    Sign the given request in place: resolves the signing identifier,
    assigns a fresh reqId and attaches the signature.

    :param req: request to sign (mutated and returned)
    :param identifier: signer identifier; falls back to the request's own
    :return: the signed request
    """
    idr = self.requiredIdr(idr=identifier or req._identifier)
    req._identifier = idr
    req.reqId = req.gen_req_id()
    payload = req.signingPayloadState(identifier=idr)
    req.signature = self.signMsg(msg=payload,
                                 identifier=idr,
                                 otherIdentifier=req.identifier)
    return req
Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object
def signOp(self, op: Dict, identifier: Identifier=None) -> Request:
    """
    Wrap an operation in a Request and sign it.

    :param identifier: signing identifier; if not supplied the default
        for the wallet is used
    :param op: operation to be signed
    :return: a signed Request object
    """
    unsigned = Request(operation=op,
                       protocolVersion=CURRENT_PROTOCOL_VERSION)
    return self.signRequest(unsigned, identifier)
For each signer in this wallet, return its alias if present else return its identifier. :param exclude: :return: List of identifiers/aliases.
def listIds(self, exclude=()):
    """
    For each signer in this wallet, return its alias if present else
    return its identifier.

    :param exclude: iterable of aliases/identifiers to omit; every entry
        must be present in the result, otherwise ValueError is raised
    :return: list of identifiers/aliases
    """
    # NOTE: default changed from the mutable `list()` to the immutable `()`
    # (same behavior, avoids the shared-mutable-default pitfall).
    lst = list(self.aliasesToIds.keys())
    # Identifiers that have no alias are reported as-is
    others = set(self.idsToSigners.keys()) - set(self.aliasesToIds.values())
    lst.extend(others)
    for x in exclude:
        lst.remove(x)
    return lst
Save wallet into specified location. Returns the canonical path for the ``fpath`` where ``wallet`` has been stored. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - directory part of ``fpath`` exists and it's not a directory - NotADirectoryError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param wallet: wallet to save :param fpath: wallet file path, absolute or relative to keyrings base dir
def saveWallet(self, wallet, fpath):
    """Save wallet into the specified location.

    Returns the canonical path for the ``fpath`` where ``wallet``
    has been stored.

    Error cases:
        - ``fpath`` is not inside the keyrings base dir - ValueError raised
        - directory part of ``fpath`` exists and it's not a directory -
            NotADirectoryError raised
        - ``fpath`` exists and it's a directory - IsADirectoryError raised

    :param wallet: wallet to save
    :param fpath: wallet file path, absolute or relative to
        keyrings base dir
    """
    if not fpath:
        raise ValueError("empty path")

    _fpath = self._normalize(fpath)
    _dpath = _fpath.parent
    try:
        _dpath.relative_to(self._baseDir)
    except ValueError:
        # FIX: error message previously read "is not is not relative"
        raise ValueError(
            "path {} is not relative to the keyrings {}".format(
                fpath, self._baseDir))
    self._createDirIfNotExists(_dpath)
    # ensure permissions from the bottom of the directory hierarchy
    while _dpath != self._baseDir:
        self._ensurePermissions(_dpath, self.dmode)
        _dpath = _dpath.parent
    with _fpath.open("w") as wf:
        self._ensurePermissions(_fpath, self.fmode)
        encodedWallet = self.encode(wallet)
        wf.write(encodedWallet)
        logger.debug("stored wallet '{}' in {}".format(
            wallet.name, _fpath))
    return str(_fpath)
Load wallet from specified location. Returns loaded wallet. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param fpath: wallet file path, absolute or relative to keyrings base dir
def loadWallet(self, fpath):
    """Load wallet from the specified location.

    Returns the loaded wallet.

    Error cases:
        - ``fpath`` is not inside the keyrings base dir - ValueError raised
        - ``fpath`` exists and it's a directory - IsADirectoryError raised

    :param fpath: wallet file path, absolute or relative to
        keyrings base dir
    """
    if not fpath:
        raise ValueError("empty path")

    _fpath = self._normalize(fpath)
    _dpath = _fpath.parent
    try:
        _dpath.relative_to(self._baseDir)
    except ValueError:
        # FIX: error message previously read "is not is not relative"
        raise ValueError(
            "path {} is not relative to the wallets {}".format(
                fpath, self._baseDir))

    with _fpath.open() as wf:
        wallet = self.decode(wf.read())
        return wallet
Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE
def addVote(self, prepare: Prepare, voter: str) -> None:
    """
    Record the given PREPARE in this replica's collection of received
    PREPAREs, attributed to the sending node.

    :param prepare: the PREPARE to add to the list
    :param voter: the name of the node who sent the PREPARE
    """
    self._add_msg(prepare, voter)
Add the specified COMMIT to this replica's list of received COMMITs. :param commit: the COMMIT to add to the list :param voter: the name of the replica who sent the COMMIT
def addVote(self, commit: Commit, voter: str) -> None:
    """
    Record the given COMMIT in this replica's collection of received
    COMMITs, attributed to the sending replica.

    :param commit: the COMMIT to add to the list
    :param voter: the name of the replica who sent the COMMIT
    """
    super()._add_msg(commit, voter)
Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
    """
    Discard a message, logging the reason through ``logMethod``.

    :param msg: the message to discard
    :param reason: the reason why this message is being discarded
    :param logMethod: the logging function to be used
    :param cliOutput: if truthy, informs a CLI that the logged msg should
        be printed
    """
    suffix = " because {}".format(reason) if reason else ""
    logMethod("{} discarding message {}{}".format(self, msg, suffix),
              extra={"cli": cliOutput})