idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
228,000
def validate ( self , definition , version = None , strict = False ) : if not HAS_KUBERNETES_VALIDATE : raise KubernetesValidateMissing ( ) errors = list ( ) warnings = list ( ) try : if version is None : try : version = self . version [ 'kubernetes' ] [ 'gitVersion' ] except KeyError : version = kubernetes_validate . latest_version ( ) kubernetes_validate . validate ( definition , version , strict ) except kubernetes_validate . utils . ValidationError as e : errors . append ( "resource definition validation error at %s: %s" % ( '.' . join ( [ str ( item ) for item in e . path ] ) , e . message ) ) # noqa: B306 except VersionNotSupportedError as e : errors . append ( "Kubernetes version %s is not supported by kubernetes-validate" % version ) except kubernetes_validate . utils . SchemaNotFoundError as e : warnings . append ( "Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" % ( e . kind , e . api_version , e . version ) ) return warnings , errors
validate checks a kubernetes resource definition
288
10
228,001
def parse_api_groups ( self , request_resources = False , update = False ) : if not self . _cache . get ( 'resources' ) or update : self . _cache [ 'resources' ] = self . _cache . get ( 'resources' , { } ) groups_response = load_json ( self . client . request ( 'GET' , '/{}' . format ( DISCOVERY_PREFIX ) ) ) [ 'groups' ] groups = self . default_groups ( request_resources = request_resources ) for group in groups_response : new_group = { } for version_raw in group [ 'versions' ] : version = version_raw [ 'version' ] resource_group = self . _cache . get ( 'resources' , { } ) . get ( DISCOVERY_PREFIX , { } ) . get ( group [ 'name' ] , { } ) . get ( version ) preferred = version_raw == group [ 'preferredVersion' ] resources = resource_group . resources if resource_group else { } if request_resources : resources = self . get_resources_for_api_version ( DISCOVERY_PREFIX , group [ 'name' ] , version , preferred ) new_group [ version ] = ResourceGroup ( preferred , resources = resources ) groups [ DISCOVERY_PREFIX ] [ group [ 'name' ] ] = new_group self . _cache [ 'resources' ] . update ( groups ) self . _write_cache ( ) return self . _cache [ 'resources' ]
Discovers all API groups present in the cluster
341
10
228,002
def get ( self , * * kwargs ) : results = self . search ( * * kwargs ) # If there are multiple matches, prefer exact matches on api_version if len ( results ) > 1 and kwargs . get ( 'api_version' ) : results = [ result for result in results if result . group_version == kwargs [ 'api_version' ] ] # If there are multiple matches, prefer non-List kinds if len ( results ) > 1 and not all ( [ isinstance ( x , ResourceList ) for x in results ] ) : results = [ result for result in results if not isinstance ( result , ResourceList ) ] if len ( results ) == 1 : return results [ 0 ] elif not results : raise ResourceNotFoundError ( 'No matches found for {}' . format ( kwargs ) ) else : raise ResourceNotUniqueError ( 'Multiple matches found for {}: {}' . format ( kwargs , results ) )
Same as search but will throw an error if there are multiple or no results . If there are multiple results and only one is an exact match on api_version that resource will be returned .
209
38
228,003
def add_device ( dev ) : global DEV_IDX , DEFAULT_DEV with DEV_LOCK : for idx in range ( len ( DEVS ) ) : test_dev = DEVS [ idx ] if test_dev . dev_name_short == dev . dev_name_short : # This device is already in our list. Delete the old one if test_dev is DEFAULT_DEV : DEFAULT_DEV = None del DEVS [ idx ] break if find_device_by_name ( dev . name ) : # This name is taken - make it unique dev . name += '-%d' % DEV_IDX dev . name_path = '/' + dev . name + '/' DEVS . append ( dev ) DEV_IDX += 1 if DEFAULT_DEV is None : DEFAULT_DEV = dev
Adds a device to the list of devices we know about .
185
12
228,004
def find_device_by_name ( name ) : if not name : return DEFAULT_DEV with DEV_LOCK : for dev in DEVS : if dev . name == name : return dev return None
Tries to find a board by board name .
44
10
228,005
def is_micropython_usb_device ( port ) : if type ( port ) . __name__ == 'Device' : # Assume its a pyudev.device.Device if ( 'ID_BUS' not in port or port [ 'ID_BUS' ] != 'usb' or 'SUBSYSTEM' not in port or port [ 'SUBSYSTEM' ] != 'tty' ) : return False usb_id = 'usb vid:pid={}:{}' . format ( port [ 'ID_VENDOR_ID' ] , port [ 'ID_MODEL_ID' ] ) else : # Assume its a port from serial.tools.list_ports.comports() usb_id = port [ 2 ] . lower ( ) # We don't check the last digit of the PID since there are 3 possible # values. if usb_id . startswith ( 'usb vid:pid=f055:980' ) : return True # Check for Teensy VID:PID if usb_id . startswith ( 'usb vid:pid=16c0:0483' ) : return True return False
Checks a USB device to see if it looks like a MicroPython device .
249
16
228,006
def is_micropython_usb_port ( portName ) : for port in serial . tools . list_ports . comports ( ) : if port . device == portName : return is_micropython_usb_device ( port ) return False
Checks to see if the indicated portname is a MicroPython device or not .
54
17
228,007
def autoconnect ( ) : if not USE_AUTOCONNECT : return try : import pyudev except ImportError : return context = pyudev . Context ( ) monitor = pyudev . Monitor . from_netlink ( context ) connect_thread = threading . Thread ( target = autoconnect_thread , args = ( monitor , ) , name = 'AutoConnect' ) connect_thread . daemon = True connect_thread . start ( )
Sets up a thread to detect when USB devices are plugged and unplugged . If the device looks like a MicroPython board then it will automatically connect to it .
97
34
228,008
def autoconnect_thread ( monitor ) : monitor . start ( ) monitor . filter_by ( 'tty' ) epoll = select . epoll ( ) epoll . register ( monitor . fileno ( ) , select . POLLIN ) while True : try : events = epoll . poll ( ) except InterruptedError : continue for fileno , _ in events : if fileno == monitor . fileno ( ) : usb_dev = monitor . poll ( ) print ( 'autoconnect: {} action: {}' . format ( usb_dev . device_node , usb_dev . action ) ) dev = find_serial_device_by_port ( usb_dev . device_node ) if usb_dev . action == 'add' : # Try connecting a few times. Sometimes the serial port # reports itself as busy, which causes the connection to fail. for i in range ( 8 ) : if dev : connected = connect_serial ( dev . port , dev . baud , dev . wait ) elif is_micropython_usb_device ( usb_dev ) : connected = connect_serial ( usb_dev . device_node ) else : connected = False if connected : break time . sleep ( 0.25 ) elif usb_dev . action == 'remove' : print ( '' ) print ( "USB Serial device '%s' disconnected" % usb_dev . device_node ) if dev : dev . close ( ) break
Thread which detects USB Serial devices connecting and disconnecting .
308
11
228,009
def extra_info ( port ) : extra_items = [ ] if port . manufacturer : extra_items . append ( "vendor '{}'" . format ( port . manufacturer ) ) if port . serial_number : extra_items . append ( "serial '{}'" . format ( port . serial_number ) ) if port . interface : extra_items . append ( "intf '{}'" . format ( port . interface ) ) if extra_items : return ' with ' + ' ' . join ( extra_items ) return ''
Collects the serial nunber and manufacturer into a string if the fields are available .
116
17
228,010
def listports ( ) : detected = False for port in serial . tools . list_ports . comports ( ) : detected = True if port . vid : micropythonPort = '' if is_micropython_usb_device ( port ) : micropythonPort = ' *' print ( 'USB Serial Device {:04x}:{:04x}{} found @{}{}\r' . format ( port . vid , port . pid , extra_info ( port ) , port . device , micropythonPort ) ) else : print ( 'Serial Device:' , port . device ) if not detected : print ( 'No serial devices detected' )
listports will display a list of all of the serial ports .
140
13
228,011
def escape ( str ) : out = '' for char in str : if char in '\\ ' : out += '\\' out += char return out
Precede all special characters with a backslash .
31
12
228,012
def align_cell ( fmt , elem , width ) : if fmt == "<" : return elem + ' ' * ( width - len ( elem ) ) if fmt == ">" : return ' ' * ( width - len ( elem ) ) + elem return elem
Returns an aligned element .
60
5
228,013
def print_err ( * args , end = '\n' ) : print ( * args , end = end , file = sys . stderr ) sys . stderr . flush ( )
Similar to print but prints to stderr .
42
10
228,014
def validate_pattern ( fn ) : directory , pattern = parse_pattern ( fn ) if directory is None : print_err ( "Invalid pattern {}." . format ( fn ) ) return None , None target = resolve_path ( directory ) mode = auto ( get_mode , target ) if not mode_exists ( mode ) : print_err ( "cannot access '{}': No such file or directory" . format ( fn ) ) return None , None if not mode_isdir ( mode ) : print_err ( "cannot access '{}': Not a directory" . format ( fn ) ) return None , None return target , pattern
On success return an absolute path and a pattern . Otherwise print a message and return None None
138
18
228,015
def resolve_path ( path ) : if path [ 0 ] == '~' : # ~ or ~user path = os . path . expanduser ( path ) if path [ 0 ] != '/' : # Relative path if cur_dir [ - 1 ] == '/' : path = cur_dir + path else : path = cur_dir + '/' + path comps = path . split ( '/' ) new_comps = [ ] for comp in comps : # We strip out xxx/./xxx and xxx//xxx, except that we want to keep the # leading / for absolute paths. This also removes the trailing slash # that autocompletion adds to a directory. if comp == '.' or ( comp == '' and len ( new_comps ) > 0 ) : continue if comp == '..' : if len ( new_comps ) > 1 : new_comps . pop ( ) else : new_comps . append ( comp ) if len ( new_comps ) == 1 and new_comps [ 0 ] == '' : return '/' return '/' . join ( new_comps )
Resolves path and converts it into an absolute path .
240
11
228,016
def print_bytes ( byte_str ) : if isinstance ( byte_str , str ) : print ( byte_str ) else : print ( str ( byte_str , encoding = 'utf8' ) )
Prints a string or converts bytes to a string and then prints .
45
14
228,017
def extra_funcs ( * funcs ) : def extra_funcs_decorator ( real_func ) : def wrapper ( * args , * * kwargs ) : return real_func ( * args , * * kwargs ) wrapper . extra_funcs = list ( funcs ) wrapper . source = inspect . getsource ( real_func ) wrapper . name = real_func . __name__ return wrapper return extra_funcs_decorator
Decorator which adds extra functions to be downloaded to the pyboard .
100
15
228,018
def auto ( func , filename , * args , * * kwargs ) : dev , dev_filename = get_dev_and_path ( filename ) if dev is None : if len ( dev_filename ) > 0 and dev_filename [ 0 ] == '~' : dev_filename = os . path . expanduser ( dev_filename ) return func ( dev_filename , * args , * * kwargs ) return dev . remote_eval ( func , dev_filename , * args , * * kwargs )
If filename is a remote file then this function calls func on the micropython board otherwise it calls it locally .
112
23
228,019
def cat ( src_filename , dst_file ) : ( dev , dev_filename ) = get_dev_and_path ( src_filename ) if dev is None : with open ( dev_filename , 'rb' ) as txtfile : for line in txtfile : dst_file . write ( line ) else : filesize = dev . remote_eval ( get_filesize , dev_filename ) return dev . remote ( send_file_to_host , dev_filename , dst_file , filesize , xfer_func = recv_file_from_remote )
Copies the contents of the indicated file to an already opened file .
126
14
228,020
def copy_file ( src_filename , dst_filename ) : try : with open ( src_filename , 'rb' ) as src_file : with open ( dst_filename , 'wb' ) as dst_file : while True : buf = src_file . read ( BUFFER_SIZE ) if len ( buf ) > 0 : dst_file . write ( buf ) if len ( buf ) < BUFFER_SIZE : break return True except : return False
Copies a file from one place to another . Both the source and destination files must exist on the same machine .
98
23
228,021
def cp ( src_filename , dst_filename ) : src_dev , src_dev_filename = get_dev_and_path ( src_filename ) dst_dev , dst_dev_filename = get_dev_and_path ( dst_filename ) if src_dev is dst_dev : # src and dst are either on the same remote, or both are on the host return auto ( copy_file , src_filename , dst_dev_filename ) filesize = auto ( get_filesize , src_filename ) if dst_dev is None : # Copying from remote to host with open ( dst_dev_filename , 'wb' ) as dst_file : return src_dev . remote ( send_file_to_host , src_dev_filename , dst_file , filesize , xfer_func = recv_file_from_remote ) if src_dev is None : # Copying from host to remote with open ( src_dev_filename , 'rb' ) as src_file : return dst_dev . remote ( recv_file_from_host , src_file , dst_dev_filename , filesize , xfer_func = send_file_to_remote ) # Copying from remote A to remote B. We first copy the file # from remote A to the host and then from the host to remote B host_temp_file = tempfile . TemporaryFile ( ) if src_dev . remote ( send_file_to_host , src_dev_filename , host_temp_file , filesize , xfer_func = recv_file_from_remote ) : host_temp_file . seek ( 0 ) return dst_dev . remote ( recv_file_from_host , host_temp_file , dst_dev_filename , filesize , xfer_func = send_file_to_remote ) return False
Copies one file to another . The source file may be local or remote and the destination file may be local or remote .
405
25
228,022
def stat ( filename ) : import os try : # on the host, lstat won't try to follow symlinks rstat = os . lstat ( filename ) except : rstat = os . stat ( filename ) return rstat [ : 7 ] + tuple ( tim + TIME_OFFSET for tim in rstat [ 7 : ] )
Returns os . stat for a given file adjusting the timestamps as appropriate .
71
16
228,023
def listdir_matches ( match ) : import os last_slash = match . rfind ( '/' ) if last_slash == - 1 : dirname = '.' match_prefix = match result_prefix = '' else : match_prefix = match [ last_slash + 1 : ] if last_slash == 0 : dirname = '/' result_prefix = '/' else : dirname = match [ 0 : last_slash ] result_prefix = dirname + '/' def add_suffix_if_dir ( filename ) : try : if ( os . stat ( filename ) [ 0 ] & 0x4000 ) != 0 : return filename + '/' except FileNotFoundError : # This can happen when a symlink points to a non-existant file. pass return filename matches = [ add_suffix_if_dir ( result_prefix + filename ) for filename in os . listdir ( dirname ) if filename . startswith ( match_prefix ) ] return matches
Returns a list of filenames contained in the named directory . Only filenames which start with match will be returned . Directories will have a trailing slash .
216
33
228,024
def listdir_stat ( dirname , show_hidden = True ) : import os try : files = os . listdir ( dirname ) except OSError : return None if dirname == '/' : return list ( ( file , stat ( '/' + file ) ) for file in files if is_visible ( file ) or show_hidden ) return list ( ( file , stat ( dirname + '/' + file ) ) for file in files if is_visible ( file ) or show_hidden )
Returns a list of tuples for each file contained in the named directory or None if the directory does not exist . Each tuple contains the filename followed by the tuple returned by calling os . stat on the filename .
108
42
228,025
def remove_file ( filename , recursive = False , force = False ) : import os try : mode = os . stat ( filename ) [ 0 ] if mode & 0x4000 != 0 : # directory if recursive : for file in os . listdir ( filename ) : success = remove_file ( filename + '/' + file , recursive , force ) if not success and not force : return False os . rmdir ( filename ) # PGH Work like Unix: require recursive else : if not force : return False else : os . remove ( filename ) except : if not force : return False return True
Removes a file or directory .
125
7
228,026
def rm ( filename , recursive = False , force = False ) : return auto ( remove_file , filename , recursive , force )
Removes a file or directory tree .
27
8
228,027
def make_dir ( dst_dir , dry_run , print_func , recursed ) : parent = os . path . split ( dst_dir . rstrip ( '/' ) ) [ 0 ] # Check for nonexistent parent parent_files = auto ( listdir_stat , parent ) if parent else True # Relative dir if dry_run : if recursed : # Assume success: parent not actually created yet print_func ( "Creating directory {}" . format ( dst_dir ) ) elif parent_files is None : print_func ( "Unable to create {}" . format ( dst_dir ) ) return True if not mkdir ( dst_dir ) : print_err ( "Unable to create {}" . format ( dst_dir ) ) return False return True
Creates a directory . Produces information in case of dry run . Issues error where necessary .
166
19
228,028
def recv_file_from_host ( src_file , dst_filename , filesize , dst_mode = 'wb' ) : import sys import ubinascii if HAS_BUFFER : try : import pyb usb = pyb . USB_VCP ( ) except : try : import machine usb = machine . USB_VCP ( ) except : usb = None if usb and usb . isconnected ( ) : # We don't want 0x03 bytes in the data to be interpreted as a Control-C # This gets reset each time the REPL runs a line, so we don't need to # worry about resetting it ourselves usb . setinterrupt ( - 1 ) try : with open ( dst_filename , dst_mode ) as dst_file : bytes_remaining = filesize if not HAS_BUFFER : bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray ( buf_size ) read_buf = bytearray ( buf_size ) while bytes_remaining > 0 : # Send back an ack as a form of flow control sys . stdout . write ( '\x06' ) read_size = min ( bytes_remaining , buf_size ) buf_remaining = read_size buf_index = 0 while buf_remaining > 0 : if HAS_BUFFER : bytes_read = sys . stdin . buffer . readinto ( read_buf , read_size ) else : bytes_read = sys . stdin . readinto ( read_buf , read_size ) if bytes_read > 0 : write_buf [ buf_index : bytes_read ] = read_buf [ 0 : bytes_read ] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER : dst_file . write ( write_buf [ 0 : read_size ] ) else : dst_file . write ( ubinascii . unhexlify ( write_buf [ 0 : read_size ] ) ) bytes_remaining -= read_size return True except : return False
Function which runs on the pyboard . Matches up with send_file_to_remote .
459
20
228,029
def send_file_to_remote ( dev , src_file , dst_filename , filesize , dst_mode = 'wb' ) : bytes_remaining = filesize save_timeout = dev . timeout dev . timeout = 1 while bytes_remaining > 0 : # Wait for ack so we don't get too far ahead of the remote ack = dev . read ( 1 ) if ack is None or ack != b'\x06' : sys . stderr . write ( "timed out or error in transfer to remote\n" ) sys . exit ( 2 ) if HAS_BUFFER : buf_size = BUFFER_SIZE else : buf_size = BUFFER_SIZE // 2 read_size = min ( bytes_remaining , buf_size ) buf = src_file . read ( read_size ) #sys.stdout.write('\r%d/%d' % (filesize - bytes_remaining, filesize)) #sys.stdout.flush() if HAS_BUFFER : dev . write ( buf ) else : dev . write ( binascii . hexlify ( buf ) ) bytes_remaining -= read_size #sys.stdout.write('\r') dev . timeout = save_timeout
Intended to be passed to the remote function as the xfer_func argument . Matches up with recv_file_from_host .
273
30
228,030
def recv_file_from_remote ( dev , src_filename , dst_file , filesize ) : bytes_remaining = filesize if not HAS_BUFFER : bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray ( buf_size ) while bytes_remaining > 0 : read_size = min ( bytes_remaining , buf_size ) buf_remaining = read_size buf_index = 0 while buf_remaining > 0 : read_buf = dev . read ( buf_remaining ) bytes_read = len ( read_buf ) if bytes_read : write_buf [ buf_index : bytes_read ] = read_buf [ 0 : bytes_read ] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER : dst_file . write ( write_buf [ 0 : read_size ] ) else : dst_file . write ( binascii . unhexlify ( write_buf [ 0 : read_size ] ) ) # Send an ack to the remote as a form of flow control dev . write ( b'\x06' ) # ASCII ACK is 0x06 bytes_remaining -= read_size
Intended to be passed to the remote function as the xfer_func argument . Matches up with send_file_to_host .
279
29
228,031
def send_file_to_host ( src_filename , dst_file , filesize ) : import sys import ubinascii try : with open ( src_filename , 'rb' ) as src_file : bytes_remaining = filesize if HAS_BUFFER : buf_size = BUFFER_SIZE else : buf_size = BUFFER_SIZE // 2 while bytes_remaining > 0 : read_size = min ( bytes_remaining , buf_size ) buf = src_file . read ( read_size ) if HAS_BUFFER : sys . stdout . buffer . write ( buf ) else : sys . stdout . write ( ubinascii . hexlify ( buf ) ) bytes_remaining -= read_size # Wait for an ack so we don't get ahead of the remote while True : char = sys . stdin . read ( 1 ) if char : if char == '\x06' : break # This should only happen if an error occurs sys . stdout . write ( char ) return True except : return False
Function which runs on the pyboard . Matches up with recv_file_from_remote .
228
21
228,032
def print_cols ( words , print_func , termwidth = 79 ) : width = max ( [ word_len ( word ) for word in words ] ) nwords = len ( words ) ncols = max ( 1 , ( termwidth + 1 ) // ( width + 1 ) ) nrows = ( nwords + ncols - 1 ) // ncols for row in range ( nrows ) : for i in range ( row , nwords , nrows ) : word = words [ i ] if word [ 0 ] == '\x1b' : print_func ( '%-*s' % ( width + 11 , words [ i ] ) , end = '\n' if i + nrows >= nwords else ' ' ) else : print_func ( '%-*s' % ( width , words [ i ] ) , end = '\n' if i + nrows >= nwords else ' ' )
Takes a single column of words and prints it as multiple columns that will fit in termwidth columns .
200
21
228,033
def print_long ( filename , stat , print_func ) : size = stat_size ( stat ) mtime = stat_mtime ( stat ) file_mtime = time . localtime ( mtime ) curr_time = time . time ( ) if mtime > ( curr_time + SIX_MONTHS ) or mtime < ( curr_time - SIX_MONTHS ) : print_func ( '%6d %s %2d %04d %s' % ( size , MONTH [ file_mtime [ 1 ] ] , file_mtime [ 2 ] , file_mtime [ 0 ] , decorated_filename ( filename , stat ) ) ) else : print_func ( '%6d %s %2d %02d:%02d %s' % ( size , MONTH [ file_mtime [ 1 ] ] , file_mtime [ 2 ] , file_mtime [ 3 ] , file_mtime [ 4 ] , decorated_filename ( filename , stat ) ) )
Prints detailed information about the file passed in .
226
10
228,034
def connect ( port , baud = 115200 , user = 'micro' , password = 'python' , wait = 0 ) : try : ip_address = socket . gethostbyname ( port ) #print('Connecting to ip', ip_address) connect_telnet ( port , ip_address , user = user , password = password ) except socket . gaierror : # Doesn't look like a hostname or IP-address, assume its a serial port #print('connecting to serial', port) connect_serial ( port , baud = baud , wait = wait )
Tries to connect automagically via network or serial .
125
11
228,035
def connect_telnet ( name , ip_address = None , user = 'micro' , password = 'python' ) : if ip_address is None : try : ip_address = socket . gethostbyname ( name ) except socket . gaierror : ip_address = name if not QUIET : if name == ip_address : print ( 'Connecting to (%s) ...' % ip_address ) else : print ( 'Connecting to %s (%s) ...' % ( name , ip_address ) ) dev = DeviceNet ( name , ip_address , user , password ) add_device ( dev )
Connect to a MicroPython board via telnet .
135
10
228,036
def connect_serial ( port , baud = 115200 , wait = 0 ) : if not QUIET : print ( 'Connecting to %s (buffer-size %d)...' % ( port , BUFFER_SIZE ) ) try : dev = DeviceSerial ( port , baud , wait ) except DeviceError as err : sys . stderr . write ( str ( err ) ) sys . stderr . write ( '\n' ) return False add_device ( dev ) return True
Connect to a MicroPython board via a serial port .
106
11
228,037
def main ( ) : save_settings = None stdin_fd = - 1 try : import termios stdin_fd = sys . stdin . fileno ( ) save_settings = termios . tcgetattr ( stdin_fd ) except : pass try : real_main ( ) finally : if save_settings : termios . tcsetattr ( stdin_fd , termios . TCSANOW , save_settings )
This main function saves the stdin termios settings calls real_main and restores stdin termios settings when it returns .
93
25
228,038
def close ( self ) : if self . pyb and self . pyb . serial : self . pyb . serial . close ( ) self . pyb = None
Closes the serial port .
35
6
228,039
def is_root_path ( self , filename ) : test_filename = filename + '/' for root_dir in self . root_dirs : if test_filename . startswith ( root_dir ) : return True return False
Determines if filename corresponds to a directory on this device .
50
13
228,040
def read ( self , num_bytes ) : self . check_pyb ( ) try : return self . pyb . serial . read ( num_bytes ) except ( serial . serialutil . SerialException , TypeError ) : # Write failed - assume that we got disconnected self . close ( ) raise DeviceError ( 'serial port %s closed' % self . dev_name_short )
Reads data from the pyboard over the serial port .
82
12
228,041
def remote ( self , func , * args , xfer_func = None , * * kwargs ) : global HAS_BUFFER HAS_BUFFER = self . has_buffer if hasattr ( func , 'extra_funcs' ) : func_name = func . name func_lines = [ ] for extra_func in func . extra_funcs : func_lines += inspect . getsource ( extra_func ) . split ( '\n' ) func_lines += [ '' ] func_lines += filter ( lambda line : line [ : 1 ] != '@' , func . source . split ( '\n' ) ) func_src = '\n' . join ( func_lines ) else : func_name = func . __name__ func_src = inspect . getsource ( func ) args_arr = [ remote_repr ( i ) for i in args ] kwargs_arr = [ "{}={}" . format ( k , remote_repr ( v ) ) for k , v in kwargs . items ( ) ] func_src += 'output = ' + func_name + '(' func_src += ', ' . join ( args_arr + kwargs_arr ) func_src += ')\n' func_src += 'if output is None:\n' func_src += ' print("None")\n' func_src += 'else:\n' func_src += ' print(output)\n' time_offset = self . time_offset if self . adjust_for_timezone : time_offset -= time . localtime ( ) . tm_gmtoff func_src = func_src . replace ( 'TIME_OFFSET' , '{}' . format ( time_offset ) ) func_src = func_src . replace ( 'HAS_BUFFER' , '{}' . format ( HAS_BUFFER ) ) func_src = func_src . replace ( 'BUFFER_SIZE' , '{}' . format ( BUFFER_SIZE ) ) func_src = func_src . replace ( 'IS_UPY' , 'True' ) if DEBUG : print ( '----- About to send %d bytes of code to the pyboard -----' % len ( func_src ) ) print ( func_src ) print ( '-----' ) self . check_pyb ( ) try : self . pyb . enter_raw_repl ( ) self . check_pyb ( ) output = self . pyb . exec_raw_no_follow ( func_src ) if xfer_func : xfer_func ( self , * args , * * kwargs ) self . check_pyb ( ) output , _ = self . pyb . follow ( timeout = 20 ) self . check_pyb ( ) self . pyb . exit_raw_repl ( ) except ( serial . serialutil . SerialException , TypeError ) : self . close ( ) raise DeviceError ( 'serial port %s closed' % self . dev_name_short ) if DEBUG : print ( '-----Response-----' ) print ( output ) print ( '-----' ) return output
Calls func with the indicated args on the micropython board .
678
14
228,042
def sync_time ( self ) : now = time . localtime ( time . time ( ) ) self . remote ( set_time , ( now . tm_year , now . tm_mon , now . tm_mday , now . tm_wday + 1 , now . tm_hour , now . tm_min , now . tm_sec , 0 ) ) return now
Sets the time on the pyboard to match the time on the host .
88
16
228,043
def write ( self , buf ) : self . check_pyb ( ) try : return self . pyb . serial . write ( buf ) except ( serial . serialutil . SerialException , BrokenPipeError , TypeError ) : # Write failed - assume that we got disconnected self . close ( ) raise DeviceError ( '{} closed' . format ( self . dev_name_short ) )
Writes data to the pyboard over the serial port .
84
12
228,044
def timeout ( self , value ) : self . check_pyb ( ) try : self . pyb . serial . timeout = value except : # timeout is a property so it calls code, and that can fail # if the serial port is closed. pass
Sets the timeout associated with the serial port .
53
10
228,045
def onecmd ( self , line ) : if DEBUG : print ( 'Executing "%s"' % line ) self . line_num += 1 if line == "EOF" or line == 'exit' : if cmd . Cmd . use_rawinput : # This means that we printed a prompt, and we'll want to # print a newline to pretty things up for the caller. self . print ( '' ) return True # Strip comments comment_idx = line . find ( "#" ) if comment_idx >= 0 : line = line [ 0 : comment_idx ] line = line . strip ( ) # search multiple commands on the same line lexer = shlex . shlex ( line ) lexer . whitespace = '' for issemicolon , group in itertools . groupby ( lexer , lambda x : x == ";" ) : if not issemicolon : self . onecmd_exec ( "" . join ( group ) )
Override onecmd .
205
4
228,046
def print ( self , * args , end = '\n' , file = None ) : if file is None : file = self . stdout s = ' ' . join ( str ( arg ) for arg in args ) + end file . write ( s )
Convenience function so you don t need to remember to put the \ n at the end of the line .
55
23
228,047
def filename_complete ( self , text , line , begidx , endidx ) : try : return self . real_filename_complete ( text , line , begidx , endidx ) except : traceback . print_exc ( )
Wrapper for catching exceptions since cmd seems to silently absorb them .
53
13
228,048
def directory_complete ( self , text , line , begidx , endidx ) : return [ filename for filename in self . filename_complete ( text , line , begidx , endidx ) if filename [ - 1 ] == '/' ]
Figure out what directories match the completion .
54
8
228,049
def line_to_args ( self , line ) : # Note: using shlex.split causes quoted substrings to stay together. args = shlex . split ( line ) self . redirect_filename = '' self . redirect_dev = None redirect_index = - 1 if '>' in args : redirect_index = args . index ( '>' ) elif '>>' in args : redirect_index = args . index ( '>>' ) if redirect_index >= 0 : if redirect_index + 1 >= len ( args ) : raise ShellError ( "> requires a filename" ) self . redirect_filename = resolve_path ( args [ redirect_index + 1 ] ) rmode = auto ( get_mode , os . path . dirname ( self . redirect_filename ) ) if not mode_isdir ( rmode ) : raise ShellError ( "Unable to redirect to '%s', directory doesn't exist" % self . redirect_filename ) if args [ redirect_index ] == '>' : self . redirect_mode = 'w' if DEBUG : print ( 'Redirecting (write) to' , self . redirect_filename ) else : self . redirect_mode = 'a' if DEBUG : print ( 'Redirecting (append) to' , self . redirect_filename ) self . redirect_dev , self . redirect_filename = get_dev_and_path ( self . redirect_filename ) try : if self . redirect_dev is None : self . stdout = SmartFile ( open ( self . redirect_filename , self . redirect_mode ) ) else : # Redirecting to a remote device. We collect the results locally # and copy them to the remote device at the end of the command. self . stdout = SmartFile ( tempfile . TemporaryFile ( mode = 'w+' ) ) except OSError as err : raise ShellError ( err ) del args [ redirect_index + 1 ] del args [ redirect_index ] curr_cmd , _ , _ = self . parseline ( self . lastcmd ) parser = self . create_argparser ( curr_cmd ) if parser : args = parser . parse_args ( args ) return args
This will convert the line passed into the do_xxx functions into an array of arguments and handle the Output Redirection Operator .
471
25
228,050
def do_cat ( self , line ) : # note: when we get around to supporting cat from stdin, we'll need # to write stdin to a temp file, and then copy the file # since we need to know the filesize when copying to the pyboard. args = self . line_to_args ( line ) for filename in args : filename = resolve_path ( filename ) mode = auto ( get_mode , filename ) if not mode_exists ( mode ) : print_err ( "Cannot access '%s': No such file" % filename ) continue if not mode_isfile ( mode ) : print_err ( "'%s': is not a file" % filename ) continue cat ( filename , self . stdout )
cat FILENAME ...
159
5
228,051
def do_echo ( self , line ) : args = self . line_to_args ( line ) self . print ( * args )
echo TEXT ...
29
3
228,052
def do_mkdir ( self , line ) : args = self . line_to_args ( line ) for filename in args : filename = resolve_path ( filename ) if not mkdir ( filename ) : print_err ( 'Unable to create %s' % filename )
mkdir DIRECTORY ...
59
5
228,053
def repl_serial_to_stdout ( self , dev ) : with self . serial_reader_running : try : save_timeout = dev . timeout # Set a timeout so that the read returns periodically with no data # and allows us to check whether the main thread wants us to quit. dev . timeout = 1 while not self . quit_serial_reader : try : char = dev . read ( 1 ) except serial . serialutil . SerialException : # This happens if the pyboard reboots, or a USB port # goes away. return except TypeError : # This is a bug in serialposix.py starting with python 3.3 # which causes a TypeError during the handling of the # select.error. So we treat this the same as # serial.serialutil.SerialException: return except ConnectionResetError : # This happens over a telnet session, if it resets return if not char : # This means that the read timed out. We'll check the quit # flag and return if needed if self . quit_when_no_output : break continue self . stdout . write ( char ) self . stdout . flush ( ) dev . timeout = save_timeout except DeviceError : # The device is no longer present. return
Runs as a thread which has a sole purpose of readding bytes from the serial port and writing them to stdout . Used by do_repl .
262
31
228,054
def set_status ( self , value ) : if not self . _status == value : old = self . _status self . _status = value logger . info ( "{} changing status from {} to {}" . format ( self , old . name , value . name ) ) self . _statusChanged ( old , value )
Set the status of the motor to the specified value if not already set .
68
15
228,055
def stop ( self , * args , * * kwargs ) : if self . status in ( Status . stopping , Status . stopped ) : logger . debug ( "{} is already {}" . format ( self , self . status . name ) ) else : self . status = Status . stopping self . onStopping ( * args , * * kwargs ) self . status = Status . stopped
Set the status to Status . stopping and also call onStopping with the provided args and kwargs .
83
22
228,056
def next_primary_replica_name_for_master ( self , node_reg , node_ids ) : name = self . _next_primary_node_name_for_master ( node_reg , node_ids ) return name , Replica . generateName ( nodeName = name , instId = 0 )
Returns name and corresponding instance name of the next node which is supposed to be a new Primary . In fact it is not round - robin on this abstraction layer as currently the primary of master instance is pointed directly depending on view number instance id and total number of nodes . But since the view number is incremented by 1 before primary selection then current approach may be treated as round robin .
69
78
228,057
def next_primary_replica_name_for_backup ( self , instance_id , master_primary_rank , primaries , node_reg , node_ids ) : if node_reg is None : node_reg = self . node . nodeReg total_nodes = len ( node_reg ) rank = ( master_primary_rank + 1 ) % total_nodes name = self . node . get_name_by_rank ( rank , node_reg , node_ids ) while name in primaries : rank = ( rank + 1 ) % total_nodes name = self . node . get_name_by_rank ( rank , node_reg , node_ids ) return name , Replica . generateName ( nodeName = name , instId = instance_id )
Returns name and corresponding instance name of the next node which is supposed to be a new Primary for backup instance in round - robin fashion starting from primary of master instance .
168
34
228,058
def process_selection ( self , instance_count , node_reg , node_ids ) : # Select primaries for current view_no if instance_count == 0 : return [ ] primaries = [ ] primary_rank = None for i in range ( instance_count ) : if i == 0 : primary_name = self . _next_primary_node_name_for_master ( node_reg , node_ids ) primary_rank = self . node . get_rank_by_name ( primary_name , node_reg , node_ids ) if primary_rank is None : raise LogicError ( 'primary_rank must not be None' ) else : primary_name , _ = self . next_primary_replica_name_for_backup ( i , primary_rank , primaries , node_reg , node_ids ) primaries . append ( primary_name ) logger . display ( "{} selected primary {} for instance {} (view {})" . format ( PRIMARY_SELECTION_PREFIX , primary_name , i , self . viewNo ) , extra = { "cli" : "ANNOUNCE" , "tags" : [ "node-election" ] } ) if len ( primaries ) != instance_count : raise LogicError ( 'instances inconsistency' ) if len ( primaries ) != len ( set ( primaries ) ) : raise LogicError ( 'repeating instances' ) return primaries
Build a set of names of primaries it is needed to avoid duplicates of primary nodes for different replicas .
300
22
228,059
def take_ordereds_out_of_turn ( self ) -> tuple : for replica in self . _replicas . values ( ) : yield replica . instId , replica . _remove_ordered_from_queue ( )
Takes all Ordered messages from outbox out of turn
48
12
228,060
def _new_replica ( self , instance_id : int , is_master : bool , bls_bft : BlsBft ) -> Replica : return self . _replica_class ( self . _node , instance_id , self . _config , is_master , bls_bft , self . _metrics )
Create a new replica with the specified parameters .
75
9
228,061
def acquire ( self ) : now = self . get_current_time ( ) logger . debug ( "now: {}, len(actionsLog): {}" . format ( now , len ( self . actionsLog ) ) ) self . _trimActionsLog ( now ) logger . debug ( "after trim, len(actionsLog): {}" . format ( len ( self . actionsLog ) ) ) if len ( self . actionsLog ) == 0 : self . actionsLog . append ( now ) logger . debug ( "len(actionsLog) was 0, after append, len(actionsLog):" " {}" . format ( len ( self . actionsLog ) ) ) return True , 0.0 timeToWaitAfterPreviousTry = self . delayFunction ( len ( self . actionsLog ) ) timePassed = now - self . actionsLog [ - 1 ] logger . debug ( "timeToWaitAfterPreviousTry: {}, timePassed: {}" . format ( timeToWaitAfterPreviousTry , timePassed ) ) if timeToWaitAfterPreviousTry < timePassed : self . actionsLog . append ( now ) logger . debug ( "timeToWaitAfterPreviousTry < timePassed was true, after " "append, len(actionsLog): {}" . format ( len ( self . actionsLog ) ) ) return True , 0.0 else : logger . debug ( "timeToWaitAfterPreviousTry < timePassed was false, " "len(actionsLog): {}" . format ( len ( self . actionsLog ) ) ) return False , timeToWaitAfterPreviousTry - timePassed
Acquires lock for action .
340
7
228,062
def lazy_field ( prop ) : @ property def wrapper ( self ) : if self not in _lazy_value_cache : _lazy_value_cache [ self ] = { } self_cache = _lazy_value_cache [ self ] if prop in self_cache : return self_cache [ prop ] prop_value = prop ( self ) self_cache [ prop ] = prop_value return prop_value return wrapper
Decorator which helps in creating lazy properties
93
9
228,063
def getRemote ( self , name : str = None , ha : HA = None ) : return self . findInRemotesByName ( name ) if name else self . findInRemotesByHA ( ha )
Find the remote by name or ha .
45
8
228,064
def findInRemotesByName ( self , name : str ) : remotes = [ r for r in self . remotes . values ( ) if r . name == name ] if len ( remotes ) > 1 : raise DuplicateRemotes ( remotes ) if not remotes : raise RemoteNotFound ( name ) return remotes [ 0 ]
Find the remote by name .
74
6
228,065
def removeRemoteByName ( self , name : str ) -> int : remote = self . getRemote ( name ) rid = remote . uid self . removeRemote ( remote ) return rid
Remove the remote by name .
39
6
228,066
def sameAddr ( self , ha , ha2 ) -> bool : if ha == ha2 : return True if ha [ 1 ] != ha2 [ 1 ] : return False return ha [ 0 ] in self . localips and ha2 [ 0 ] in self . localips
Check whether the two arguments correspond to the same address
58
10
228,067
def remotesByConnected ( self ) : conns , disconns = [ ] , [ ] for r in self . remotes . values ( ) : array = conns if self . isRemoteConnected ( r ) else disconns array . append ( r ) return conns , disconns
Partitions the remotes into connected and disconnected
65
9
228,068
def addIdentifier ( self , identifier = None , seed = None , signer = None , alias = None , didMethodName = None ) : dm = self . didMethods . get ( didMethodName ) signer = signer or dm . newSigner ( identifier = identifier , seed = seed ) self . idsToSigners [ signer . identifier ] = signer if self . defaultId is None : # setting this signer as default signer to let use sign* methods # without explicit specification of signer self . defaultId = signer . identifier if alias : signer . alias = alias if signer . alias : self . aliasesToIds [ signer . alias ] = signer . identifier return signer . identifier , signer
Adds signer to the wallet . Requires complete signer identifier or seed .
161
15
228,069
def requiredIdr ( self , idr : Identifier = None , alias : str = None ) : # TODO Need to create a new Identifier type that supports DIDs and CIDs if idr : if ':' in idr : idr = idr . split ( ':' ) [ 1 ] else : idr = self . aliasesToIds [ alias ] if alias else self . defaultId if not idr : raise EmptyIdentifier return idr
Checks whether signer identifier specified or can it be inferred from alias or can be default used instead
98
20
228,070
def signMsg ( self , msg : Dict , identifier : Identifier = None , otherIdentifier : Identifier = None ) : idr = self . requiredIdr ( idr = identifier or otherIdentifier ) signer = self . _signerById ( idr ) signature = signer . sign ( msg ) return signature
Creates signature for message using specified signer
70
9
228,071
def signRequest ( self , req : Request , identifier : Identifier = None ) -> Request : idr = self . requiredIdr ( idr = identifier or req . _identifier ) # idData = self._getIdData(idr) req . _identifier = idr req . reqId = req . gen_req_id ( ) # req.digest = req.getDigest() # QUESTION: `self.ids[idr]` would be overwritten if same identifier # is used to send 2 requests, why is `IdData` persisted? # self.ids[idr] = IdData(idData.signer, req.reqId) req . signature = self . signMsg ( msg = req . signingPayloadState ( identifier = idr ) , identifier = idr , otherIdentifier = req . identifier ) return req
Signs request . Modifies reqId and signature . May modify identifier .
185
15
228,072
def signOp ( self , op : Dict , identifier : Identifier = None ) -> Request : request = Request ( operation = op , protocolVersion = CURRENT_PROTOCOL_VERSION ) return self . signRequest ( request , identifier )
Signs the message if a signer is configured
52
10
228,073
def listIds ( self , exclude = list ( ) ) : lst = list ( self . aliasesToIds . keys ( ) ) others = set ( self . idsToSigners . keys ( ) ) - set ( self . aliasesToIds . values ( ) ) lst . extend ( list ( others ) ) for x in exclude : lst . remove ( x ) return lst
For each signer in this wallet return its alias if present else return its identifier .
85
17
228,074
def saveWallet ( self , wallet , fpath ) : if not fpath : raise ValueError ( "empty path" ) _fpath = self . _normalize ( fpath ) _dpath = _fpath . parent try : _dpath . relative_to ( self . _baseDir ) except ValueError : raise ValueError ( "path {} is not is not relative to the keyrings {}" . format ( fpath , self . _baseDir ) ) self . _createDirIfNotExists ( _dpath ) # ensure permissions from the bottom of the directory hierarchy while _dpath != self . _baseDir : self . _ensurePermissions ( _dpath , self . dmode ) _dpath = _dpath . parent with _fpath . open ( "w" ) as wf : self . _ensurePermissions ( _fpath , self . fmode ) encodedWallet = self . encode ( wallet ) wf . write ( encodedWallet ) logger . debug ( "stored wallet '{}' in {}" . format ( wallet . name , _fpath ) ) return str ( _fpath )
Save wallet into specified localtion .
243
7
228,075
def loadWallet ( self , fpath ) : if not fpath : raise ValueError ( "empty path" ) _fpath = self . _normalize ( fpath ) _dpath = _fpath . parent try : _dpath . relative_to ( self . _baseDir ) except ValueError : raise ValueError ( "path {} is not is not relative to the wallets {}" . format ( fpath , self . _baseDir ) ) with _fpath . open ( ) as wf : wallet = self . decode ( wf . read ( ) ) return wallet
Load wallet from specified localtion .
123
7
228,076
def addVote ( self , prepare : Prepare , voter : str ) -> None : self . _add_msg ( prepare , voter )
Add the specified PREPARE to this replica s list of received PREPAREs .
28
18
228,077
def addVote ( self , commit : Commit , voter : str ) -> None : super ( ) . _add_msg ( commit , voter )
Add the specified COMMIT to this replica s list of received COMMITs .
30
16
228,078
def discard ( self , msg , reason , logMethod = logging . error , cliOutput = False ) : reason = "" if not reason else " because {}" . format ( reason ) logMethod ( "{} discarding message {}{}" . format ( self , msg , reason ) , extra = { "cli" : cliOutput } )
Discard a message and log a reason using the specified logMethod .
72
14
228,079
def toDict ( self , msg : Dict ) -> Dict : if isinstance ( msg , Request ) : tmsg = msg . as_dict elif hasattr ( msg , "_asdict" ) : tmsg = dict ( msg . _asdict ( ) ) elif hasattr ( msg , "__dict__" ) : tmsg = dict ( msg . __dict__ ) elif self . allowDictOnly : raise ValueError ( "Message cannot be converted to an appropriate " "format for transmission" ) else : tmsg = msg return tmsg
Return a dictionary form of the message
121
7
228,080
def update_freshness ( self , ledger_id , ts ) : if ledger_id in self . _ledger_freshness : self . _ledger_freshness [ ledger_id ] . last_updated = ts
Updates the time at which the ledger was updated . Should be called whenever a txn for the ledger is ordered .
48
24
228,081
def get_last_update_time ( self ) : last_updated = { ledger_id : freshness_state . last_updated for ledger_id , freshness_state in self . _ledger_freshness . items ( ) } return OrderedDict ( sorted ( last_updated . items ( ) , key = lambda item : ( item [ 1 ] , item [ 0 ] ) ) )
Gets the time at which each ledger was updated . Can be called at any time to get this information .
86
22
228,082
def serialize ( self , obj , level = 0 , objname = None , topLevelKeysToIgnore = None , toBytes = True ) : res = None if not isinstance ( obj , acceptableTypes ) : error ( "invalid type found {}: {}" . format ( objname , obj ) ) elif isinstance ( obj , str ) : res = obj elif isinstance ( obj , dict ) : if level > 0 : keys = list ( obj . keys ( ) ) else : topLevelKeysToIgnore = topLevelKeysToIgnore or [ ] keys = [ k for k in obj . keys ( ) if k not in topLevelKeysToIgnore ] keys . sort ( ) strs = [ ] for k in keys : onm = "." . join ( [ str ( objname ) , str ( k ) ] ) if objname else k strs . append ( str ( k ) + ":" + self . serialize ( obj [ k ] , level + 1 , onm , toBytes = False ) ) res = "|" . join ( strs ) elif isinstance ( obj , Iterable ) : strs = [ ] for o in obj : strs . append ( self . serialize ( o , level + 1 , objname , toBytes = False ) ) res = "," . join ( strs ) elif obj is None : res = "" else : res = str ( obj ) # logger.trace("serialized msg {} into {}".format(obj, res)) if not toBytes : return res return res . encode ( 'utf-8' )
Create a string representation of the given object .
342
9
228,083
def reset ( self ) : self . getsCounter = 0 # dictionary of processed requests for each client. Value for each # client is a dictionary with request id as key and transaction id as # value self . processedRequests = { } # type: Dict[str, Dict[int, str]] # dictionary of responses to be sent for each client. Value for each # client is an asyncio Queue self . responses = { } # type: Dict[str, asyncio.Queue] # dictionary with key as transaction id and `Reply` as # value self . transactions = { }
Clear the values of all attributes of the transaction store .
124
11
228,084
def stop ( self , timeout : int = 5 ) -> None : self . running = False start = time . perf_counter ( ) while True : if self . getsCounter == 0 : return True elif time . perf_counter ( ) <= start + timeout : time . sleep ( .1 ) else : raise StopTimeout ( "Stop timed out waiting for {} gets to " "complete." . format ( self . getsCounter ) )
Try to stop the transaction store in the given timeout or raise an exception .
90
15
228,085
def addToProcessedTxns ( self , identifier : str , txnId : str , reply : Reply ) -> None : self . transactions [ txnId ] = reply if identifier not in self . processedRequests : self . processedRequests [ identifier ] = { } self . processedRequests [ identifier ] [ reply . reqId ] = txnId
Add a client request to the transaction store s list of processed requests .
76
14
228,086
async def append ( self , reply : Reply ) -> None : result = reply . result identifier = result . get ( f . IDENTIFIER . nm ) txnId = result . get ( TXN_ID ) logger . debug ( "Reply being sent {}" . format ( reply ) ) if self . _isNewTxn ( identifier , reply , txnId ) : self . addToProcessedTxns ( identifier , txnId , reply ) if identifier not in self . responses : self . responses [ identifier ] = asyncio . Queue ( ) await self . responses [ identifier ] . put ( reply )
Add the given Reply to this transaction store s list of responses . Also add to processedRequests if not added previously .
132
24
228,087
def _isNewTxn ( self , identifier , reply , txnId ) -> bool : return ( identifier not in self . processedRequests or reply . reqId not in self . processedRequests [ identifier ] ) and txnId is not None
If client is not in processedRequests or requestId is not there in processed requests and txnId is present then its a new reply
53
28
228,088
def add ( self , req : Request ) : key = req . key if key not in self : self [ key ] = ReqState ( req ) return self [ key ]
Add the specified request to this request store .
37
9
228,089
def ordered_by_replica ( self , request_key ) : state = self . get ( request_key ) if not state : return state . unordered_by_replicas_num -= 1
Should be called by each replica when request is ordered or replica is removed .
43
15
228,090
def mark_as_forwarded ( self , req : Request , to : int ) : self [ req . key ] . forwarded = True self [ req . key ] . forwardedTo = to self [ req . key ] . unordered_by_replicas_num = to
Works together with mark_as_executed and free methods .
58
13
228,091
def add_propagate ( self , req : Request , sender : str ) : data = self . add ( req ) data . propagates [ sender ] = req
Add the specified request to the list of received PROPAGATEs .
34
15
228,092
def votes ( self , req ) -> int : try : votes = len ( self [ req . key ] . propagates ) except KeyError : votes = 0 return votes
Get the number of propagates for a given reqId and identifier .
35
14
228,093
def mark_as_executed ( self , req : Request ) : state = self [ req . key ] state . executed = True self . _clean ( state )
Works together with mark_as_forwarded and free methods .
35
13
228,094
def free ( self , request_key ) : state = self . get ( request_key ) if not state : return state . forwardedTo -= 1 self . _clean ( state )
Works together with mark_as_forwarded and mark_as_executed methods .
38
18
228,095
def has_propagated ( self , req : Request , sender : str ) -> bool : return req . key in self and sender in self [ req . key ] . propagates
Check whether the request specified has already been propagated .
38
11
228,096
def propagate ( self , request : Request , clientName ) : if self . requests . has_propagated ( request , self . name ) : logger . trace ( "{} already propagated {}" . format ( self , request ) ) else : with self . metrics . measure_time ( MetricsName . SEND_PROPAGATE_TIME ) : self . requests . add_propagate ( request , self . name ) propagate = self . createPropagate ( request , clientName ) logger . debug ( "{} propagating request {} from client {}" . format ( self , request . key , clientName ) , extra = { "cli" : True , "tags" : [ "node-propagate" ] } ) self . send ( propagate )
Broadcast a PROPAGATE to all other nodes
160
11
228,097
def createPropagate ( request : Union [ Request , dict ] , client_name ) -> Propagate : if not isinstance ( request , ( Request , dict ) ) : logger . error ( "{}Request not formatted properly to create propagate" . format ( THREE_PC_PREFIX ) ) return logger . trace ( "Creating PROPAGATE for REQUEST {}" . format ( request ) ) request = request . as_dict if isinstance ( request , Request ) else request if isinstance ( client_name , bytes ) : client_name = client_name . decode ( ) return Propagate ( request , client_name )
Create a new PROPAGATE for the given REQUEST .
133
13
228,098
def forward ( self , request : Request ) : key = request . key num_replicas = self . replicas . num_replicas logger . debug ( '{} forwarding request {} to {} replicas' . format ( self , key , num_replicas ) ) self . replicas . pass_message ( ReqKey ( key ) ) self . monitor . requestUnOrdered ( key ) self . requests . mark_as_forwarded ( request , num_replicas )
Forward the specified client REQUEST to the other replicas on this node
102
14
228,099
def recordAndPropagate ( self , request : Request , clientName ) : self . requests . add ( request ) self . propagate ( request , clientName ) self . tryForwarding ( request )
Record the request in the list of requests and propagate .
41
11