idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
40,200
def verify(self, signature, msg):
    """Return True iff *signature* is a valid signature over *msg*.

    Returns False when no key is attached, or when verification raises
    ValueError.
    """
    if not self.key:
        return False
    try:
        # the key expects the signature prepended to the message
        self.key.verify(signature + msg)
    except ValueError:
        return False
    return True
Verify the message
40,201
def encrypt(self, plaintext, nonce, encoder=encoding.RawEncoder):
    """Encrypt *plaintext* under the precomputed shared key with *nonce*.

    Returns an EncryptedMessage whose parts (nonce, ciphertext, and the
    concatenation of both) are encoded with *encoder*.
    Raises ValueError when the nonce is not exactly NONCE_SIZE bytes.
    """
    if len(nonce) != self.NONCE_SIZE:
        raise ValueError("The nonce must be exactly %s bytes long" % self.NONCE_SIZE)
    ciphertext = libnacl.crypto_box_afternm(plaintext, nonce, self._shared_key,)
    encoded_nonce = encoder.encode(nonce)
    encoded_ciphertext = encoder.encode(ciphertext)
    return EncryptedMessage._from_parts(encoded_nonce, encoded_ciphertext, encoder.encode(nonce + ciphertext),)
Encrypts the plaintext message using the given nonce and returns the ciphertext encoded with the encoder .
40,202
def decrypt(self, ciphertext, nonce=None, encoder=encoding.RawEncoder):
    """Decrypt *ciphertext* (decoded with *encoder*) under the shared key.

    When *nonce* is None it is assumed to be prepended to the ciphertext
    and is split off first.
    Raises ValueError when the nonce is not exactly NONCE_SIZE bytes.
    """
    ciphertext = encoder.decode(ciphertext)
    if nonce is None:
        # nonce travels prepended to the ciphertext
        nonce = ciphertext[:self.NONCE_SIZE]
        ciphertext = ciphertext[self.NONCE_SIZE:]
    if len(nonce) != self.NONCE_SIZE:
        raise ValueError("The nonce must be exactly %s bytes long" % self.NONCE_SIZE)
    plaintext = libnacl.crypto_box_open_afternm(ciphertext, nonce, self._shared_key,)
    return plaintext
Decrypts the ciphertext using the given nonce and returns the plaintext message .
40,203
def decrypt(self, cipher, nonce, pubkey, dehex=False):
    """Decrypt *cipher* with a Box built from self.key and *pubkey*.

    *pubkey* may be a PublicKey, 32 raw bytes, or hex-encoded material.
    When *dehex* is True, a HexEncoder is used and the nonce is hex-decoded
    first (unless it is already of raw nonce length).
    """
    if not isinstance(pubkey, PublicKey):
        # 32 bytes is raw key material; anything else is treated as hex
        if len(pubkey) == 32:
            pubkey = PublicKey(pubkey, encoding.RawEncoder)
        else:
            pubkey = PublicKey(pubkey, encoding.HexEncoder)
    box = Box(self.key, pubkey)
    decoder = encoding.HexEncoder if dehex else encoding.RawEncoder
    if dehex and len(nonce) != box.NONCE_SIZE:
        nonce = decoder.decode(nonce)
    return box.decrypt(cipher, nonce, decoder)
Return decrypted msg contained in cypher using nonce and shared key generated from . key and pubkey . If pubkey is hex encoded it is converted first If dehex is True then use HexEncoder otherwise use RawEncoder
40,204
def getInstalledConfig(installDir, configFile):
    """Load *configFile* from *installDir* as a Python module and return it.

    Raises FileNotFoundError when the file does not exist.
    """
    configPath = os.path.join(installDir, configFile)
    if not os.path.exists(configPath):
        raise FileNotFoundError("No file found at location {}".format(configPath))
    spec = spec_from_file_location(configFile, configPath)
    config = module_from_spec(spec)
    spec.loader.exec_module(config)
    return config
Reads config from the installation directory of Plenum .
40,205
def _getConfig(general_config_dir: str = None):
    """Build the effective config: STP defaults overlaid with plenum.config,
    then extended from the external general config file.

    Raises Exception when GENERAL_CONFIG_DIR ends up unset.
    """
    stp_config = STPConfig()
    plenum_config = import_module("plenum.config")
    config = stp_config
    # plenum settings override the STP defaults
    config.__dict__.update(plenum_config.__dict__)
    if general_config_dir:
        config.GENERAL_CONFIG_DIR = general_config_dir
    if not config.GENERAL_CONFIG_DIR:
        raise Exception('GENERAL_CONFIG_DIR must be set')
    extend_with_external_config(config, (config.GENERAL_CONFIG_DIR, config.GENERAL_CONFIG_FILE))
    if not hasattr(config, 'unsafe'):
        setattr(config, 'unsafe', set())
    return config
Reads a file called config . py in the project directory
40,206
def getFunc(self, o: Any) -> Callable:
    """Return the first handler in self.routes whose key type matches *o*.

    Raises RuntimeError (after logging the available handlers) when no
    route matches.
    """
    for cls, func in self.routes.items():
        if isinstance(o, cls):
            return func
    logger.error("Unhandled msg {}, available handlers are:".format(o))
    for cls in self.routes.keys():
        logger.error(" {}".format(cls))
    raise RuntimeError("unhandled msg: {}".format(o))
Get the next function from the list of routes that is capable of processing o s type .
40,207
def handleSync(self, msg: Any) -> Any:
    """Dispatch *msg* to the function registered for its type.

    A plain 2-tuple (not a NamedTuple -- those have _field_types) is
    unpacked: the route is chosen by the first element's type and both
    elements are passed as positional arguments.
    """
    if isinstance(msg, tuple) and len(msg) == 2 and not hasattr(msg, '_field_types'):
        return self.getFunc(msg[0])(*msg)
    else:
        return self.getFunc(msg)(msg)
Pass the message as an argument to the function defined in routes . If the msg is a tuple pass the values as multiple arguments to the function .
40,208
async def handle(self, msg: Any) -> Any:
    """Dispatch *msg*; await the result if the handler was asynchronous."""
    res = self.handleSync(msg)
    if isawaitable(res):
        return await res
    else:
        return res
Handle both sync and async functions .
40,209
async def handleAll(self, deq: deque, limit=None) -> int:
    """Drain *deq* through handle(), stopping after *limit* items if given.

    Returns the number of items handled.
    """
    count = 0
    while deq and (not limit or count < limit):
        count += 1
        item = deq.popleft()
        await self.handle(item)
    return count
Handle all items in a deque . Can call asynchronous handlers .
40,210
def handleAllSync(self, deq: deque, limit=None) -> int:
    """Synchronously drain *deq* through handleSync(), stopping after
    *limit* items if given. Returns the number of items handled."""
    count = 0
    while deq and (not limit or count < limit):
        count += 1
        msg = deq.popleft()
        self.handleSync(msg)
    return count
Synchronously handle all items in a deque .
40,211
def load(self, other: merkle_tree.MerkleTree):
    """Replace this tree's state with *other*'s size and hashes."""
    self._update(other.tree_size, other.hashes)
Load this tree from a dumb data object for serialisation .
40,212
def save(self, other: merkle_tree.MerkleTree):
    """Copy this tree's size and hashes onto *other*.

    NOTE(review): the double-underscore attributes are name-mangled with
    the defining class's name, so they land on *other* under this class's
    private names -- presumably *other* is read back via load(); verify
    against callers.
    """
    other.__tree_size = self.__tree_size
    other.__hashes = self.__hashes
Save this tree into a dumb data object for serialisation .
40,213
def append(self, new_leaf: bytes) -> List[bytes]:
    """Append *new_leaf* to the tree and return the audit path for it.

    The audit path is the pre-append subtree hash list, reversed.
    """
    auditPath = list(reversed(self.__hashes))
    self._push_subtree([new_leaf])
    return auditPath
Append a new leaf onto the end of this tree and return the audit path
40,214
def extend(self, new_leaves: List[bytes]):
    """Extend this tree with *new_leaves* appended to the end.

    Whole minimal subtrees are pushed one at a time; any remainder is
    hashed in bulk and merged with a single _update call.
    """
    size = len(new_leaves)
    final_size = self.tree_size + size
    idx = 0
    while True:
        # size of the smallest complete subtree that can be pushed whole
        max_h = self.__mintree_height
        max_size = 1 << (max_h - 1) if max_h > 0 else 0
        if max_h > 0 and size - idx >= max_size:
            self._push_subtree(new_leaves[idx:idx + max_size])
            idx += max_size
        else:
            break
    if idx < size:
        root_hash, hashes = self.__hasher._hash_full(new_leaves, idx, size)
        self._update(final_size, self.hashes + hashes)
    assert self.tree_size == final_size
Extend this tree with new_leaves on the end .
40,215
def extended(self, new_leaves: List[bytes]):
    """Return a copy of this tree with *new_leaves* appended.

    This tree itself is left unmodified.
    """
    clone = self.__copy__()
    clone.extend(new_leaves)
    return clone
Returns a new tree equal to this tree extended with new_leaves .
40,216
def verify_consistency(self, expected_leaf_count) -> bool:
    """Check that the leaf count equals *expected_leaf_count* and that the
    node count matches what that many leaves imply.

    Raises ConsistencyVerificationFailed on either mismatch; True otherwise.
    """
    if expected_leaf_count != self.leafCount:
        raise ConsistencyVerificationFailed()
    if self.get_expected_node_count(self.leafCount) != self.nodeCount:
        raise ConsistencyVerificationFailed()
    return True
Check that the tree has same leaf count as expected and the number of nodes are also as expected
40,217
def isHex(val: str) -> bool:
    """Return True iff *val* (str or bytes) consists solely of hex digits.

    Bytes input is decoded first; undecodable bytes yield False.
    An empty string is vacuously hex.
    """
    if isinstance(val, bytes):
        try:
            val = val.decode()
        except ValueError:
            return False
    if not isinstance(val, str):
        return False
    return all(ch in string.hexdigits for ch in val)
Return whether the given str represents a hex value or not
40,218
def _accumulate ( self , old_accum , next_val ) : return old_accum * ( 1 - self . alpha ) + next_val * self . alpha
Implement exponential moving average
40,219
def getNodePosition(cls, start, height=None) -> int:
    """Calculate a node's serialized position from *start* and *height*.

    A power-of-two *start* maps directly (with a height adjustment);
    otherwise the position is the sum of the positions of the largest
    power-of-two prefix and the remainder, computed recursively.
    """
    pwr = highest_bit_set(start) - 1
    height = height or pwr
    if count_bits_set(start) == 1:
        # start is a power of two: direct formula
        adj = height - pwr
        return start - 1 + adj
    else:
        c = pow(2, pwr)
        return cls.getNodePosition(c, pwr) + cls.getNodePosition(start - c, height)
Calculates node position based on start and height
40,220
def getPath(cls, seqNo, offset=0):
    """Return (leafs, nodes): the audit path of the leaf at *seqNo*.

    *offset* is the number of leaves already accounted for by enclosing
    recursive calls. Raises ValueError when offset >= seqNo.
    """
    if offset >= seqNo:
        raise ValueError("Offset should be less than serial number")
    pwr = highest_bit_set(seqNo - 1 - offset) - 1
    if pwr <= 0:
        # base case: at most a single sibling leaf remains
        if seqNo % 2 == 0:
            return [seqNo - 1], []
        else:
            return [], []
    c = pow(2, pwr) + offset
    leafs, nodes = cls.getPath(seqNo, c)
    nodes.append(cls.getNodePosition(c, pwr))
    return leafs, nodes
Get the audit path of the leaf at the position specified by seqNo .
40,221
def readNodeByTree(self, start, height=None):
    """Fetch the node hash for the node identified by its *start* leaf and
    *height* within the tree."""
    position = self.getNodePosition(start, height)
    return self.readNode(position)
Fetches nodeHash based on start leaf and height of the node in the tree .
40,222
def is_consistent(self) -> bool:
    """True iff the node count matches what the leaf count implies for a
    compact Merkle tree."""
    # imported locally to avoid a circular import
    from ledger.compact_merkle_tree import CompactMerkleTree
    return self.nodeCount == CompactMerkleTree.get_expected_node_count(self.leafCount)
Returns True if number of nodes are consistent with number of leaves
40,223
def _startNextChunk(self) -> None:
    """Close the current chunk and switch to the next one.

    With no current chunk, fall back to the latest chunk on disk.
    """
    if self.currentChunk is None:
        self._useLatestChunk()
    else:
        self._useChunk(self.currentChunkIndex + self.chunkSize)
Close current and start next chunk
40,224
def get(self, key) -> str:
    """Look up *key*: locate its chunk file, then read the value stored at
    its offset within that chunk."""
    chunk_no, offset = self._get_key_location(key)
    with self._openChunk(chunk_no) as chunk:
        return chunk.get(str(offset))
Determines the file to retrieve the data from and retrieves the data .
40,225
def reset(self) -> None:
    """Delete every file in the data directory and reopen the latest chunk."""
    self.close()
    for f in os.listdir(self.dataDir):
        os.remove(os.path.join(self.dataDir, f))
    self._useLatestChunk()
Clear all data in file storage .
40,226
def _listChunks(self):
    """Return the sorted indices of all chunk files in the data directory.

    Files whose names do not parse as chunk indices are skipped.
    """
    chunks = []
    for fileName in os.listdir(self.dataDir):
        index = ChunkedFileStore._fileNameToChunkIndex(fileName)
        if index is not None:
            chunks.append(index)
    return sorted(chunks)
Lists stored chunks
40,227
def filterMsgs(self, wrappedMsgs: deque) -> deque:
    """Keep only (msg, sender) pairs whose view number matches self.viewNo.

    Messages without a view-number field pass through unchanged; messages
    with a different view number are discarded with a debug log.
    Consumes *wrappedMsgs* and returns a new deque.
    """
    filtered = deque()
    while wrappedMsgs:
        wrappedMsg = wrappedMsgs.popleft()
        msg, sender = wrappedMsg
        if hasattr(msg, f.VIEW_NO.nm):
            reqViewNo = getattr(msg, f.VIEW_NO.nm)
            if reqViewNo == self.viewNo:
                filtered.append(wrappedMsg)
            else:
                self.discard(wrappedMsg, "its view no {} is less than the elector's {}".format(reqViewNo, self.viewNo), logger.debug)
        else:
            filtered.append(wrappedMsg)
    return filtered
Filters messages by view number so that only the messages that have the current view number are retained .
40,228
def get(self, name):
    """Return the registered plugin called *name*.

    Raises RuntimeError when no such plugin is registered.
    """
    try:
        plugin = self.plugins[name]
    except KeyError:
        raise RuntimeError("plugin {} does not exist".format(name))
    return plugin
Retrieve a plugin by name .
40,229
def cmp(cls, v1: 'VersionBase', v2: 'VersionBase') -> int:
    """Three-way compare: 1 when v1 is newer, 0 when equal, -1 when older."""
    if v1._version == v2._version:
        return 0
    return 1 if v1._version > v2._version else -1
Compares two instances .
40,230
def maintainConnections(self, force=False):
    """Retry missing/disconnected connections when the retry timer expired.

    Returns True when a check was performed, False when skipped because
    the next scheduled check is still in the future (and not *force*d).
    """
    now = time.perf_counter()
    if now < self.nextCheck and not force:
        return False
    self.nextCheck = now + (self.config.RETRY_TIMEOUT_NOT_RESTRICTED if self.isKeySharing else self.config.RETRY_TIMEOUT_RESTRICTED)
    # connect to never-connected nodes first, then retry dropped ones
    missing = self.connectToMissing()
    self.retryDisconnected(exclude=missing)
    logger.trace("{} next check for retries in {:.2f} seconds".format(self, self.nextCheck - now))
    return True
Ensure appropriate connections .
40,231
def connectToMissing(self) -> set:
    """Try to connect to every registry node with no current connection.

    Returns the set of missing names (attempted), even when individual
    connect calls fail -- failures are only logged.
    """
    missing = self.reconcileNodeReg()
    if not missing:
        return missing
    logger.info("{}{} found the following missing connections: {}".format(CONNECTION_PREFIX, self, ", ".join(missing)))
    for name in missing:
        try:
            self.connect(name, ha=self.registry[name])
        except (ValueError, KeyError, PublicKeyNotFoundOnDisk, VerKeyNotFoundOnDisk) as ex:
            logger.warning('{}{} cannot connect to {} due to {}'.format(CONNECTION_PREFIX, self, name, ex))
    return missing
Try to connect to the missing nodes
40,232
def emit(self, record):
    """Forward *record* to self.callback when it qualifies.

    Qualification order: an explicit attribute named self.typestr wins
    (its truthiness decides); otherwise records at INFO or above qualify;
    finally the record's tags may force it on (keep scanning) or off
    (stop scanning).
    """
    should_cb = None
    attr_val = None
    if hasattr(record, self.typestr):
        attr_val = getattr(record, self.typestr)
        should_cb = bool(attr_val)
    if should_cb is None and record.levelno >= logging.INFO:
        should_cb = True
    if hasattr(record, 'tags'):
        for t in record.tags:
            if t in self.tags:
                if self.tags[t]:
                    should_cb = True
                    continue
                else:
                    should_cb = False
                    break
    if should_cb:
        self.callback(record, attr_val)
Passes the log record back to the CLI for rendering
40,233
def conns(self, value: Set[str]) -> None:
    """Setter: record the new connection set and, on change, log it and
    notify _connsChanged with the joined (ins) and left (outs) names."""
    if not self._conns == value:
        old = self._conns
        self._conns = value
        ins = value - old
        outs = old - value
        logger.display("{}'s connections changed from {} to {}".format(self, old, value))
        self._connsChanged(ins, outs)
Updates the connection count of this node if not already done .
40,234
def findInNodeRegByHA(self, remoteHa):
    """Return the registry name whose HA matches *remoteHa*, else None.

    Raises RuntimeError when more than one entry shares the address.
    """
    regName = [nm for nm, ha in self.registry.items() if self.sameAddr(ha, remoteHa)]
    if len(regName) > 1:
        raise RuntimeError("more than one node registry entry with the " "same ha {}: {}".format(remoteHa, regName))
    if regName:
        return regName[0]
    return None
Returns the name of the remote by HA if found in the node registry else returns None
40,235
def getRemoteName(self, remote):
    """Return *remote*'s registry name, resolving by HA when its own name
    is not registered (exactly one HA match is asserted)."""
    if remote.name not in self.registry:
        find = [name for name, ha in self.registry.items() if ha == remote.ha]
        assert len(find) == 1
        return find[0]
    return remote.name
Returns the name of the remote object if found in node registry .
40,236
def notConnectedNodes(self) -> Set[str]:
    """Names of registry nodes this node currently has no connection to."""
    registered = set(self.registry.keys())
    return registered - self.conns
Returns the names of nodes in the registry this node is NOT connected to .
40,237
def start(self):
    """Create the ZAP REP socket and bind it at the instance-counted
    inproc ZAP endpoint."""
    self.zap_socket = self.context.socket(zmq.REP)
    self.zap_socket.linger = 1
    zapLoc = 'inproc://zeromq.zap.{}'.format(MultiZapAuthenticator.count)
    self.zap_socket.bind(zapLoc)
    self.log.debug('Starting ZAP at {}'.format(zapLoc))
Create and bind the ZAP socket
40,238
def stop(self):
    """Log the endpoint (when a ZAP socket exists) and delegate the actual
    close to the superclass."""
    if self.zap_socket:
        self.log.debug('Stopping ZAP at {}'.format(self.zap_socket.LAST_ENDPOINT))
    super().stop()
Close the ZAP socket
40,239
def start(self):
    """Start ZAP: bind the socket via the superclass, register it with an
    asyncio poller, and schedule the ZAP-handling task."""
    super().start()
    self.__poller = zmq.asyncio.Poller()
    self.__poller.register(self.zap_socket, zmq.POLLIN)
    self.__task = asyncio.ensure_future(self.__handle_zap())
Start ZAP authentication
40,240
def stop(self):
    """Cancel the ZAP task, unregister the socket from the poller, and
    delegate the socket close to the superclass."""
    if self.__task:
        self.__task.cancel()
    if self.__poller:
        self.__poller.unregister(self.zap_socket)
        self.__poller = None
    super().stop()
Stop ZAP authentication
40,241
def randomString(size: int = 20) -> str:
    """Generate a random hex string of exactly *size* characters.

    Raises PlenumTypeError for a non-int size, PlenumValueError when
    size <= 0.
    """
    def randomStr(size):
        if not isinstance(size, int):
            raise PlenumTypeError('size', size, int)
        if not size > 0:
            raise PlenumValueError('size', size, '> 0')
        # each random byte yields two hex chars; odd sizes are padded with
        # one extra random hex digit
        rv = randombytes(size // 2).hex()
        return rv if size % 2 == 0 else rv + hex(randombytes_uniform(15))[-1]
    return randomStr(size)
Generate a random string in hex of the specified size
40,242
def mostCommonElement(elements: Iterable, to_hashable_f: Callable = None):
    """Find the most frequent element of a collection.

    Unhashable elements are counted via ``to_hashable_f(el)`` when
    supplied, otherwise via their sorted-key JSON serialization.

    :param elements: the collection to scan
    :param to_hashable_f: optional function producing a hashable proxy
        for unhashable elements
    :return: tuple (most_common_element, count)
    """
    # Bug fix: `collections.Hashable` was a deprecated alias removed in
    # Python 3.10; the ABC lives in `collections.abc`.
    class _Hashable(collections.abc.Hashable):
        def __init__(self, orig):
            self.orig = orig
            if isinstance(orig, collections.abc.Hashable):
                self.hashable = orig
            elif to_hashable_f is not None:
                self.hashable = to_hashable_f(orig)
            else:
                # deterministic fallback for plain JSON-able values
                self.hashable = json.dumps(orig, sort_keys=True)

        def __eq__(self, other):
            return self.hashable == other.hashable

        def __hash__(self):
            return hash(self.hashable)

    wrapped = (_Hashable(el) for el in elements)
    most_common, count = Counter(wrapped).most_common(n=1)[0]
    return most_common.orig, count
Find the most frequent element of a collection .
40,243
def objSearchReplace(obj: Any, toFrom: Dict[Any, Any], checked: Set[Any] = None, logMsg: str = None, deepLevel: int = None) -> None:
    """Recursively replace, in *obj*'s attributes/items, every value that
    is identical (by id) to a key of *toFrom* with the mapped value.

    *checked* guards against cycles (by object id); *deepLevel* bounds
    recursion depth (None = unlimited). Mutates *obj* in place.
    """
    if checked is None:
        checked = set()
    checked.add(id(obj))
    # attribute pairs first, then mapping items or sequence elements
    pairs = [(i, getattr(obj, i)) for i in dir(obj) if not i.startswith("__")]
    if isinstance(obj, Mapping):
        pairs += [x for x in iteritems(obj)]
    elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
        pairs += [x for x in enumerate(obj)]
    for nm, o in pairs:
        if id(o) not in checked:
            mutated = False
            for old, new in toFrom.items():
                if id(o) == id(old):
                    logging.debug("{}in object {}, attribute {} changed from {} to {}".format(logMsg + ": " if logMsg else "", obj, nm, old, new))
                    if isinstance(obj, dict):
                        obj[nm] = new
                    else:
                        setattr(obj, nm, new)
                    mutated = True
            if not mutated:
                if deepLevel is not None and deepLevel == 0:
                    continue
                objSearchReplace(o, toFrom, checked, logMsg, deepLevel - 1 if deepLevel is not None else deepLevel)
    checked.remove(id(obj))
Search for an attribute in an object and replace it with another .
40,244
async def runall(corogen):
    """Await every coroutine in *corogen* in order and return the list of
    their results."""
    return [await coro for coro in corogen]
Run an array of coroutines
40,245
def prime_gen() -> Iterator[int]:
    """Yield primes indefinitely, starting from 2 (incremental sieve).

    Fix: the original annotated the return type as ``int``, but this is a
    generator function; the correct annotation is ``Iterator[int]``.

    D maps each upcoming odd composite to a stride (twice one of its
    prime factors) used to reschedule it.
    """
    D = {}
    yield 2
    for q in itertools.islice(itertools.count(3), 0, None, 2):
        p = D.pop(q, None)
        if p is None:
            # q is prime: its square is the first odd composite with
            # q as smallest factor not yet scheduled
            D[q * q] = 2 * q
            yield q
        else:
            # q is composite: reschedule its stride at the next free slot
            x = p + q
            while x in D:
                x += p
            D[x] = p
A generator for prime numbers starting from 2 .
40,246
async def untilTrue(condition, *args, timeout=5) -> bool:
    """Poll *condition*(*args) every 0.1s until it is truthy or *timeout*
    seconds have elapsed.

    Returns the last value produced by the condition (falsy on timeout;
    False when the loop never ran).
    """
    outcome = False
    started = time.perf_counter()
    while time.perf_counter() - started < timeout:
        outcome = condition(*args)
        if outcome:
            break
        await asyncio.sleep(.1)
    return outcome
Keep checking the condition till it is true or a timeout is reached
40,247
def transmitToClient(self, msg: Any, remoteName: str):
    """Serialize and send *msg* to the client connection *remoteName*.

    Failures are logged, never raised.
    """
    payload = self.prepForSending(msg)
    try:
        # the transport layer expects a bytes remote identifier
        if isinstance(remoteName, str):
            remoteName = remoteName.encode()
        self.send(payload, remoteName)
    except Exception as ex:
        logger.error("{}{} unable to send message {} to client {}; Exception: {}".format(CONNECTION_PREFIX, self, msg, remoteName, ex.__repr__()))
Transmit the specified message to the remote client specified by remoteName .
40,248
def _has_valid_catchup_replies(self, seq_no: int, txns_to_process: List[Tuple[int, Any]]) -> Tuple[bool, str, int]:
    """Verify the catchup reply covering *seq_no* against the agreed final
    tree size/hash using the reply's consistency proof.

    Returns (verified, replying_node_name, number_of_txns_covered).
    """
    # the first queued transaction must be the one being validated
    assert seq_no == txns_to_process[0][0]
    node_name, catchup_rep = self._find_catchup_reply_for_seq_no(seq_no)
    txns = catchup_rep.txns
    txns = [self._provider.transform_txn_for_ledger(txn) for s, txn in txns_to_process[:len(txns)] if str(s) in txns]
    # temporary tree with the candidate transactions applied
    temp_tree = self._ledger.treeWithAppliedTxns(txns)
    proof = catchup_rep.consProof
    final_size = self._catchup_till.final_size
    final_hash = self._catchup_till.final_hash
    try:
        logger.info("{} verifying proof for {}, {}, {}, {}, {}".format(self, temp_tree.tree_size, final_size, temp_tree.root_hash, final_hash, proof))
        verified = self._provider.verifier(self._ledger_id).verify_tree_consistency(temp_tree.tree_size, final_size, temp_tree.root_hash, Ledger.strToHash(final_hash), [Ledger.strToHash(p) for p in proof])
    except Exception as ex:
        logger.info("{} could not verify catchup reply {} since {}".format(self, catchup_rep, ex))
        verified = False
    return bool(verified), node_name, len(txns)
Verify the catchup reply covering seq_no against the agreed final tree size and hash using its consistency proof ; returns ( verified , node_name , txn_count ) .
40,249
def _parse_pool_transaction_file(ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators, ledger_size=None):
    """Populate registries, keys and the validator set from NODE txns.

    Mutates nodeReg, cliNodeReg, nodeKeys and activeValidators in place.
    Exits the process when a verkey or identifier fails hex conversion.
    """
    for _, txn in ledger.getAllTxn(to=ledger_size):
        if get_type(txn) == NODE:
            txn_data = get_payload_data(txn)
            nodeName = txn_data[DATA][ALIAS]
            clientStackName = nodeName + CLIENT_STACK_SUFFIX
            nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) else None
            cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) else None
            if nHa:
                nodeReg[nodeName] = HA(*nHa)
            if cHa:
                cliNodeReg[clientStackName] = HA(*cHa)
            try:
                # key_type tracks which field is being validated, for the
                # error message below
                key_type = 'verkey'
                verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
                key_type = 'identifier'
                cryptonymToHex(get_from(txn))
            except ValueError:
                logger.exception('Invalid {}. Rebuild pool transactions.'.format(key_type))
                exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
            nodeKeys[nodeName] = verkey
            services = txn_data[DATA].get(SERVICES)
            if isinstance(services, list):
                if VALIDATOR in services:
                    activeValidators.add(nodeName)
                else:
                    # a later txn may demote a previously active validator
                    activeValidators.discard(nodeName)
helper function for parseLedgerForHaAndKeys
40,250
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int, old_root: bytes, new_root: bytes, proof: Sequence[bytes]):
    """Verify a Merkle consistency proof between two tree states.

    Returns True on success. Raises ValueError for bad sizes,
    error.ConsistencyError or error.ProofError when verification fails.
    """
    old_size = old_tree_size
    new_size = new_tree_size
    if old_size < 0 or new_size < 0:
        raise ValueError("Negative tree size")
    if old_size > new_size:
        raise ValueError("Older tree has bigger size (%d vs %d), did " "you supply inputs in the wrong order?" % (old_size, new_size))
    if old_size == new_size:
        if old_root == new_root:
            if proof:
                logging.debug("Trees are identical, ignoring proof")
            return True
        else:
            raise error.ConsistencyError("Inconsistency: different root " "hashes for the same tree size")
    if old_size == 0:
        # every tree is consistent with the empty tree
        if proof:
            logging.debug("Ignoring non-empty consistency proof for " "empty tree.")
        return True
    # walk up from the rightmost old leaf, stripping levels on which the
    # old node is a right child shared by both trees
    node = old_size - 1
    last_node = new_size - 1
    while node % 2:
        node //= 2
        last_node //= 2
    p = iter(proof)
    try:
        if node:
            new_hash = old_hash = next(p)
        else:
            new_hash = old_hash = old_root
        while node:
            if node % 2:
                # right child: both hashes gain the same left sibling
                next_node = next(p)
                old_hash = self.hasher.hash_children(next_node, old_hash)
                new_hash = self.hasher.hash_children(next_node, new_hash)
            elif node < last_node:
                # left child whose right sibling exists only in the new tree
                new_hash = self.hasher.hash_children(new_hash, next(p))
            node //= 2
            last_node //= 2
        while last_node:
            n = next(p)
            new_hash = self.hasher.hash_children(new_hash, n)
            last_node //= 2
        if new_hash != new_root:
            raise error.ProofError("Bad Merkle proof: second root hash " "does not match. Expected hash: %s " ", computed hash: %s" % (hexlify(new_root).strip(), hexlify(new_hash).strip()))
        elif old_hash != old_root:
            raise error.ConsistencyError("Inconsistency: first root hash " "does not match. Expected hash: " "%s, computed hash: %s" % (hexlify(old_root).strip(), hexlify(old_hash).strip()))
    except StopIteration:
        raise error.ProofError("Merkle proof is too short")
    try:
        next(p)
    except StopIteration:
        pass
    else:
        logging.debug("Proof has extra nodes")
    return True
Verify the consistency between two root hashes .
40,251
def _schedule(self, action: Callable, seconds: int = 0) -> int:
    """Schedule *action* to run after *seconds* (immediately when <= 0).

    Returns the new action id, which is also recorded in self.scheduled.
    """
    self.aid += 1
    if seconds > 0:
        nxt = time.perf_counter() + seconds
        if nxt < self.aqNextCheck:
            # wake up earlier if this action comes due before the next check
            self.aqNextCheck = nxt
        logger.trace("{} scheduling action {} with id {} to run in {} " "seconds".format(self, get_func_name(action), self.aid, seconds))
        self.aqStash.append((nxt, (action, self.aid)))
    else:
        logger.trace("{} scheduling action {} with id {} to run now".format(self, get_func_name(action), self.aid))
        self.actionQueue.append((action, self.aid))
    if action not in self.scheduled:
        self.scheduled[action] = []
    self.scheduled[action].append(self.aid)
    return self.aid
Schedule an action to be executed after seconds seconds .
40,252
def _cancel(self, action: Callable = None, aid: int = None):
    """Cancel scheduled events: every id of *action*, or the single *aid*.

    *action* takes precedence when both are given.
    """
    if action is not None:
        if action in self.scheduled:
            logger.trace("{} cancelling all events for action {}, ids: {}" "".format(self, action, self.scheduled[action]))
            self.scheduled[action].clear()
    elif aid is not None:
        for action, aids in self.scheduled.items():
            try:
                aids.remove(aid)
            except ValueError:
                pass
            else:
                logger.trace("{} cancelled action {} with id {}".format(self, action, aid))
                break
Cancel scheduled events
40,253
def _serviceActions(self) -> int:
    """Move due stashed actions onto the queue, then run each queued
    action whose id was not cancelled.

    Returns the number of actions dequeued (cancelled ones included).
    """
    if self.aqStash:
        tm = time.perf_counter()
        if tm > self.aqNextCheck:
            earliest = float('inf')
            for d in list(self.aqStash):
                nxt, action = d
                if tm > nxt:
                    self.actionQueue.appendleft(action)
                    self.aqStash.remove(d)
                if nxt < earliest:
                    earliest = nxt
            self.aqNextCheck = earliest
    count = len(self.actionQueue)
    while self.actionQueue:
        action, aid = self.actionQueue.popleft()
        assert action in self.scheduled
        if aid in self.scheduled[action]:
            self.scheduled[action].remove(aid)
            logger.trace("{} running action {} with id {}".format(self, get_func_name(action), aid))
            action()
        else:
            # id was cancelled after being queued
            logger.trace("{} not running cancelled action {} with id {}".format(self, get_func_name(action), aid))
    return count
Run all pending actions in the action queue .
40,254
def execute_pool_txns(self, three_pc_batch) -> List:
    """Commit a 3PC batch with the default executor, then apply each
    committed txn to the pool membership manager.

    Returns the committed transactions.
    """
    committed_txns = self.default_executer(three_pc_batch)
    for txn in committed_txns:
        self.poolManager.onPoolMembershipChange(txn)
    return committed_txns
Execute a transaction that involves consensus pool management like adding a node client or a steward .
40,255
def init_state_from_ledger(self, state: State, ledger: Ledger, reqHandler):
    """If *state* is empty, rebuild it by replaying every ledger txn as
    committed, then commit at the resulting head hash."""
    if state.isEmpty:
        logger.info('{} found state to be empty, recreating from ' 'ledger'.format(self))
        for seq_no, txn in ledger.getAllTxn():
            txn = self.update_txn_with_extra_data(txn)
            reqHandler.updateState([txn, ], isCommitted=True)
        state.commit(rootHash=state.headHash)
If the trie is empty then initialize it by applying txns from ledger .
40,256
def on_view_change_start(self):
    """Prepare the node for a view change: notify replicas, reset monitor
    stats, re-process stashed messages, drop primaries and schedule the
    view-change-completion check."""
    self.view_changer.start_view_change_ts = self.utc_epoch()
    for replica in self.replicas.values():
        replica.on_view_change_start()
    logger.info("{} resetting monitor stats at view change start".format(self))
    self.monitor.reset()
    self.processStashedMsgsForView(self.viewNo)
    self.backup_instance_faulty_processor.restore_replicas()
    self.drop_primaries()
    # messages stashed for views up to the current one are no longer "future"
    pop_keys(self.msgsForFutureViews, lambda x: x <= self.viewNo)
    self.logNodeInfo()
    logger.info('{}{} changed to view {}, will start catchup now'.format(VIEW_CHANGE_PREFIX, self, self.viewNo))
    # restart the completion-timeout watchdog for the new view change
    self._cancel(self._check_view_change_completed)
    self._schedule(action=self._check_view_change_completed, seconds=self._view_change_timeout)
    self.catchup_rounds_without_txns = 0
    self.last_sent_pp_store_helper.erase_last_sent_pp_seq_no()
Notifies node about the fact that view changed to let it prepare for election
40,257
def on_view_change_complete(self):
    """Finalize a completed view change: cancel the completion check,
    notify replicas and reset the monitor.

    Raises LogicError when some replica still lacks a primary.
    """
    self.future_primaries_handler.set_node_state()
    if not self.replicas.all_instances_have_primary:
        raise LogicError("{} Not all replicas have " "primaries: {}".format(self, self.replicas.primary_name_by_inst_id))
    self._cancel(self._check_view_change_completed)
    for replica in self.replicas.values():
        replica.on_view_change_done()
    self.view_changer.last_completed_view_no = self.view_changer.view_no
    for replica in self.replicas.values():
        replica.clear_requests_and_fix_last_ordered()
    self.monitor.reset()
View change completes for a replica when it has been decided which was the last ppSeqNo and state and txn root for previous view
40,258
def onStopping(self):
    """Teardown on node stop: log stats, reset, stop every ledger, the
    network stacks, KV stores and the info tool, then fire the
    POST_NODE_STOPPED hook."""
    if self.config.STACK_COMPANION == 1:
        add_stop_time(self.ledger_dir, self.utc_epoch())
    self.logstats()
    self.reset()
    for ledger in self.ledgers:
        try:
            ledger.stop()
        except Exception as ex:
            # best effort: keep stopping the remaining ledgers
            logger.exception('{} got exception while stopping ledger: {}'.format(self, ex))
    self.nodestack.stop()
    self.clientstack.stop()
    self.closeAllKVStores()
    self._info_tool.stop()
    self.mode = None
    self.execute_hook(NodeHooks.POST_NODE_STOPPED)
Actions to be performed on stopping the node .
40,259
async def prod(self, limit: int = None) -> int:
    """One event-loop turn of node work: service replicas, node and client
    messages, actions, timers, monitor, view changer and observers,
    recording per-phase metrics.

    Returns the total number of messages/actions processed.
    """
    c = 0
    if self.last_prod_started:
        self.metrics.add_event(MetricsName.LOOPER_RUN_TIME_SPENT, time.perf_counter() - self.last_prod_started)
    self.last_prod_started = time.perf_counter()
    self.quota_control.update_state({'request_queue_size': len(self.monitor.requestTracker.unordered())})
    if self.status is not Status.stopped:
        c += await self.serviceReplicas(limit)
        c += await self.serviceNodeMsgs(limit)
        c += await self.serviceClientMsgs(limit)
        with self.metrics.measure_time(MetricsName.SERVICE_NODE_ACTIONS_TIME):
            c += self._serviceActions()
        with self.metrics.measure_time(MetricsName.SERVICE_TIMERS_TIME):
            self.timer.service()
        with self.metrics.measure_time(MetricsName.SERVICE_MONITOR_ACTIONS_TIME):
            c += self.monitor._serviceActions()
        c += await self.serviceViewChanger(limit)
        c += await self.service_observable(limit)
        c += await self.service_observer(limit)
        with self.metrics.measure_time(MetricsName.FLUSH_OUTBOXES_TIME):
            self.nodestack.flushOutBoxes()
    if self.isGoing():
        with self.metrics.measure_time(MetricsName.SERVICE_NODE_LIFECYCLE_TIME):
            self.nodestack.serviceLifecycle()
        with self.metrics.measure_time(MetricsName.SERVICE_CLIENT_STACK_TIME):
            self.clientstack.serviceClientStack()
    return c
This function is executed by the node each time it gets its share of CPU time from the event loop .
40,260
async def serviceNodeMsgs(self, limit: int) -> int:
    """Service up to *limit* node-stack messages (within the node quota),
    record the metric, then process the node inbox.

    Returns the number of stack messages serviced.
    """
    with self.metrics.measure_time(MetricsName.SERVICE_NODE_STACK_TIME):
        n = await self.nodestack.service(limit, self.quota_control.node_quota)
    self.metrics.add_event(MetricsName.NODE_STACK_MESSAGES_PROCESSED, n)
    await self.processNodeInBox()
    return n
Process limit number of messages from the nodeInBox .
40,261
async def serviceClientMsgs(self, limit: int) -> int:
    """Service up to *limit* client-stack messages (within the client
    quota), record the metric, then process the client inbox.

    Returns the number of stack messages serviced.
    """
    c = await self.clientstack.service(limit, self.quota_control.client_quota)
    self.metrics.add_event(MetricsName.CLIENT_STACK_MESSAGES_PROCESSED, c)
    await self.processClientInBox()
    return c
Process limit number of messages from the clientInBox .
40,262
async def serviceViewChanger(self, limit) -> int:
    """Service the view changer's outbox and inbox; 0 when not ready."""
    if not self.isReady():
        return 0
    o = self.serviceViewChangerOutBox(limit)
    i = await self.serviceViewChangerInbox(limit)
    return o + i
Service the view_changer s inBox outBox and action queues .
40,263
async def service_observable(self, limit) -> int:
    """Service the observable's outbox and queues; 0 when not ready."""
    if not self.isReady():
        return 0
    o = self._service_observable_out_box(limit)
    i = await self._observable.serviceQueues(limit)
    return o + i
Service the observable s inBox and outBox
40,264
def _service_observable_out_box(self, limit: int = None) -> int:
    """Send at most *limit* messages from the observable's outbox to their
    observer nodes.

    NOTE(review): a *limit* of 0 is treated like None (no limit) by the
    `if limit and ...` guard -- confirm this is intended.
    """
    msg_count = 0
    while True:
        if limit and msg_count >= limit:
            break
        msg = self._observable.get_output()
        if not msg:
            break
        msg_count += 1
        msg, observer_ids = msg
        self.sendToNodes(msg, observer_ids)
    return msg_count
Service at most limit number of messages from the observable s outBox .
40,265
async def service_observer(self, limit) -> int:
    """Service the observer's queues; 0 when the node is not ready."""
    if not self.isReady():
        return 0
    return await self._observer.serviceQueues(limit)
Service the observer s inBox and outBox
40,266
def _ask_for_ledger_status(self, node_name: str, ledger_id):
    """Request the status of *ledger_id* from the single node *node_name*."""
    self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id}, [node_name, ])
    logger.info("{} asking {} for ledger status of ledger {}".format(self, node_name, ledger_id))
Ask other node for LedgerStatus
40,267
def checkInstances(self) -> None:
    """Log the connection state used as the basis for an election decision.

    NOTE(review): this body only logs; replica addition / nomination, if
    any, happens elsewhere.
    """
    logger.debug("{} choosing to start election on the basis of count {} and nodes {}".format(self, self.connectedNodeCount, self.nodestack.conns))
Check if this node has the minimum required number of protocol instances i . e . f + 1 . If not add a replica . If no election is in progress this node will try to nominate one of its replicas as primary . This method is called whenever a connection with a new node is established .
40,268
def adjustReplicas ( self , old_required_number_of_instances : int , new_required_number_of_instances : int ) : replica_num = old_required_number_of_instances while replica_num < new_required_number_of_instances : self . replicas . add_replica ( replica_num ) self . processStashedMsgsForReplica ( replica_num ) replica_num += 1 while replica_num > new_required_number_of_instances : replica_num -= 1 self . replicas . remove_replica ( replica_num ) pop_keys ( self . msgsForFutureReplicas , lambda inst_id : inst_id < new_required_number_of_instances ) if len ( self . primaries_disconnection_times ) < new_required_number_of_instances : self . primaries_disconnection_times . extend ( [ None ] * ( new_required_number_of_instances - len ( self . primaries_disconnection_times ) ) ) elif len ( self . primaries_disconnection_times ) > new_required_number_of_instances : self . primaries_disconnection_times = self . primaries_disconnection_times [ : new_required_number_of_instances ]
Add or remove replicas depending on f
40,269
def _check_view_change_completed ( self ) : logger . info ( '{} running the scheduled check for view change completion' . format ( self ) ) if not self . view_changer . view_change_in_progress : logger . info ( '{} already completion view change' . format ( self ) ) return False self . view_changer . on_view_change_not_completed_in_time ( ) return True
This thing checks whether new primary was elected . If it was not - starts view change again
40,270
def service_replicas_outbox ( self , limit : int = None ) -> int : num_processed = 0 for message in self . replicas . get_output ( limit ) : num_processed += 1 if isinstance ( message , ( PrePrepare , Prepare , Commit , Checkpoint ) ) : self . send ( message ) elif isinstance ( message , Ordered ) : self . try_processing_ordered ( message ) elif isinstance ( message , tuple ) and isinstance ( message [ 1 ] , Reject ) : with self . metrics . measure_time ( MetricsName . NODE_SEND_REJECT_TIME ) : digest , reject = message result_reject = Reject ( reject . identifier , reject . reqId , self . reasonForClientFromException ( reject . reason ) ) if digest in self . requestSender : self . transmitToClient ( result_reject , self . requestSender [ digest ] ) self . doneProcessingReq ( digest ) elif isinstance ( message , Exception ) : self . processEscalatedException ( message ) else : logger . error ( "Received msg {} and don't " "know how to handle it" . format ( message ) ) return num_processed
Process limit number of replica messages
40,271
def master_primary_name ( self ) -> Optional [ str ] : master_primary_name = self . master_replica . primaryName if master_primary_name : return self . master_replica . getNodeName ( master_primary_name ) return None
Return the name of the primary node of the master instance
40,272
def msgHasAcceptableInstId ( self , msg , frm ) -> bool : instId = getattr ( msg , f . INST_ID . nm , None ) if not ( isinstance ( instId , int ) and instId >= 0 ) : return False if instId >= self . requiredNumberOfInstances : if instId not in self . msgsForFutureReplicas : self . msgsForFutureReplicas [ instId ] = deque ( ) self . msgsForFutureReplicas [ instId ] . append ( ( msg , frm ) ) logger . debug ( "{} queueing message {} for future protocol instance {}" . format ( self , msg , instId ) ) return False return True
Return true if the instance id of message corresponds to a correct replica .
40,273
def sendToReplica ( self , msg , frm ) : if self . msgHasAcceptableInstId ( msg , frm ) : self . replicas . pass_message ( ( msg , frm ) , msg . instId )
Send the message to the intended replica .
40,274
def sendToViewChanger ( self , msg , frm ) : if ( isinstance ( msg , InstanceChange ) or self . msgHasAcceptableViewNo ( msg , frm ) ) : logger . debug ( "{} sending message to view changer: {}" . format ( self , ( msg , frm ) ) ) self . msgsToViewChanger . append ( ( msg , frm ) )
Send the message to the intended view changer .
40,275
def send_to_observer ( self , msg , frm ) : logger . debug ( "{} sending message to observer: {}" . format ( self , ( msg , frm ) ) ) self . _observer . append_input ( msg , frm )
Send the message to the observer .
40,276
def handleOneNodeMsg ( self , wrappedMsg ) : try : vmsg = self . validateNodeMsg ( wrappedMsg ) if vmsg : logger . trace ( "{} msg validated {}" . format ( self , wrappedMsg ) , extra = { "tags" : [ "node-msg-validation" ] } ) self . unpackNodeMsg ( * vmsg ) else : logger . debug ( "{} invalidated msg {}" . format ( self , wrappedMsg ) , extra = { "tags" : [ "node-msg-validation" ] } ) except SuspiciousNode as ex : self . reportSuspiciousNodeEx ( ex ) except Exception as ex : msg , frm = wrappedMsg self . discard ( msg , ex , logger . info )
Validate and process one message from a node .
40,277
def validateNodeMsg ( self , wrappedMsg ) : msg , frm = wrappedMsg if self . isNodeBlacklisted ( frm ) : self . discard ( str ( msg ) [ : 256 ] , "received from blacklisted node {}" . format ( frm ) , logger . display ) return None with self . metrics . measure_time ( MetricsName . INT_VALIDATE_NODE_MSG_TIME ) : try : message = node_message_factory . get_instance ( ** msg ) except ( MissingNodeOp , InvalidNodeOp ) as ex : raise ex except Exception as ex : raise InvalidNodeMsg ( str ( ex ) ) try : self . verifySignature ( message ) except BaseExc as ex : raise SuspiciousNode ( frm , ex , message ) from ex logger . debug ( "{} received node message from {}: {}" . format ( self , frm , message ) , extra = { "cli" : False } ) return message , frm
Validate another node s message sent to this node .
40,278
def unpackNodeMsg ( self , msg , frm ) -> None : if isinstance ( msg , Batch ) : logger . trace ( "{} processing a batch {}" . format ( self , msg ) ) with self . metrics . measure_time ( MetricsName . UNPACK_BATCH_TIME ) : for m in msg . messages : try : m = self . nodestack . deserializeMsg ( m ) except Exception as ex : logger . warning ( "Got error {} while processing {} message" . format ( ex , m ) ) continue self . handleOneNodeMsg ( ( m , frm ) ) else : self . postToNodeInBox ( msg , frm )
If the message is a batch message validate each message in the batch otherwise add the message to the node s inbox .
40,279
def postToNodeInBox ( self , msg , frm ) : logger . trace ( "{} appending to nodeInbox {}" . format ( self , msg ) ) self . nodeInBox . append ( ( msg , frm ) )
Append the message to the node inbox
40,280
async def processNodeInBox ( self ) : while self . nodeInBox : m = self . nodeInBox . popleft ( ) await self . process_one_node_message ( m )
Process the messages in the node inbox asynchronously .
40,281
def handleOneClientMsg ( self , wrappedMsg ) : try : vmsg = self . validateClientMsg ( wrappedMsg ) if vmsg : self . unpackClientMsg ( * vmsg ) except BlowUp : raise except Exception as ex : msg , frm = wrappedMsg friendly = friendlyEx ( ex ) if isinstance ( ex , SuspiciousClient ) : self . reportSuspiciousClient ( frm , friendly ) self . handleInvalidClientMsg ( ex , wrappedMsg )
Validate and process a client message
40,282
async def processClientInBox ( self ) : while self . clientInBox : m = self . clientInBox . popleft ( ) req , frm = m logger . debug ( "{} processing {} request {}" . format ( self . clientstack . name , frm , req ) , extra = { "cli" : True , "tags" : [ "node-msg-processing" ] } ) try : await self . clientMsgRouter . handle ( m ) except InvalidClientMessageException as ex : self . handleInvalidClientMsg ( ex , m )
Process the messages in the node s clientInBox asynchronously . All messages in the inBox have already been validated including signature check .
40,283
def is_catchup_needed_during_view_change ( self ) -> bool : if self . caught_up_for_current_view ( ) : logger . info ( '{} is caught up for the current view {}' . format ( self , self . viewNo ) ) return False logger . info ( '{} is not caught up for the current view {}' . format ( self , self . viewNo ) ) if self . num_txns_caught_up_in_last_catchup ( ) == 0 : if self . has_ordered_till_last_prepared_certificate ( ) : logger . info ( '{} ordered till last prepared certificate' . format ( self ) ) return False if self . is_catch_up_limit ( self . config . MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE ) : self . master_replica . last_prepared_before_view_change = None return False return True
Check if received a quorum of view change done messages and if yes check if caught up till the Check if all requests ordered till last prepared certificate Check if last catchup resulted in no txns
40,284
def doDynamicValidation ( self , request : Request ) : self . execute_hook ( NodeHooks . PRE_DYNAMIC_VALIDATION , request = request ) ledger_id , seq_no = self . seqNoDB . get_by_payload_digest ( request . payload_digest ) if ledger_id is not None and seq_no is not None : raise SuspiciousPrePrepare ( 'Trying to order already ordered request' ) ledger = self . getLedger ( self . ledger_id_for_request ( request ) ) for txn in ledger . uncommittedTxns : if get_payload_digest ( txn ) == request . payload_digest : raise SuspiciousPrePrepare ( 'Trying to order already ordered request' ) operation = request . operation req_handler = self . get_req_handler ( txn_type = operation [ TXN_TYPE ] ) req_handler . validate ( request ) self . execute_hook ( NodeHooks . POST_DYNAMIC_VALIDATION , request = request )
State based validation
40,285
def applyReq ( self , request : Request , cons_time : int ) : self . execute_hook ( NodeHooks . PRE_REQUEST_APPLICATION , request = request , cons_time = cons_time ) req_handler = self . get_req_handler ( txn_type = request . operation [ TXN_TYPE ] ) seq_no , txn = req_handler . apply ( request , cons_time ) ledger_id = self . ledger_id_for_request ( request ) self . execute_hook ( NodeHooks . POST_REQUEST_APPLICATION , request = request , cons_time = cons_time , ledger_id = ledger_id , seq_no = seq_no , txn = txn )
Apply request to appropriate ledger and state . cons_time is the UTC epoch at which consensus was reached .
40,286
def processRequest ( self , request : Request , frm : str ) : logger . debug ( "{} received client request: {} from {}" . format ( self . name , request , frm ) ) self . nodeRequestSpikeMonitorData [ 'accum' ] += 1 txn_type = request . operation [ TXN_TYPE ] if self . is_action ( txn_type ) : self . process_action ( request , frm ) elif txn_type == GET_TXN : self . handle_get_txn_req ( request , frm ) self . total_read_request_number += 1 elif self . is_query ( txn_type ) : self . process_query ( request , frm ) self . total_read_request_number += 1 elif self . can_write_txn ( txn_type ) : reply = self . getReplyFromLedgerForRequest ( request ) if reply : logger . debug ( "{} returning reply from already processed " "REQUEST: {}" . format ( self , request ) ) self . transmitToClient ( reply , frm ) return if not self . isProcessingReq ( request . key ) : self . startedProcessingReq ( request . key , frm ) self . handle_request_if_forced ( request ) self . recordAndPropagate ( request , frm ) self . send_ack_to_client ( ( request . identifier , request . reqId ) , frm ) else : raise InvalidClientRequest ( request . identifier , request . reqId , 'Pool is in readonly mode, try again in 60 seconds' )
Handle a REQUEST from the client . If the request has already been executed the node re - sends the reply to the client . Otherwise the node acknowledges the client request adds it to its list of client requests and sends a PROPAGATE to the remaining nodes .
40,287
def processPropagate ( self , msg : Propagate , frm ) : logger . debug ( "{} received propagated request: {}" . format ( self . name , msg ) ) request = TxnUtilConfig . client_request_class ( ** msg . request ) clientName = msg . senderClient if not self . isProcessingReq ( request . key ) : ledger_id , seq_no = self . seqNoDB . get_by_payload_digest ( request . payload_digest ) if ledger_id is not None and seq_no is not None : self . _clean_req_from_verified ( request ) logger . debug ( "{} ignoring propagated request {} " "since it has been already ordered" . format ( self . name , msg ) ) return self . startedProcessingReq ( request . key , clientName ) self . handle_request_if_forced ( request ) else : if clientName is not None and not self . is_sender_known_for_req ( request . key ) : self . set_sender_for_req ( request . key , clientName ) self . requests . add_propagate ( request , frm ) self . propagate ( request , clientName ) self . tryForwarding ( request )
Process one propagateRequest sent to this node asynchronously
40,288
def handle_get_txn_req ( self , request : Request , frm : str ) : ledger_id = request . operation . get ( f . LEDGER_ID . nm , DOMAIN_LEDGER_ID ) if ledger_id not in self . ledger_to_req_handler : self . send_nack_to_client ( ( request . identifier , request . reqId ) , 'Invalid ledger id {}' . format ( ledger_id ) , frm ) return seq_no = request . operation . get ( DATA ) self . send_ack_to_client ( ( request . identifier , request . reqId ) , frm ) ledger = self . getLedger ( ledger_id ) try : txn = self . getReplyFromLedger ( ledger , seq_no ) except KeyError : txn = None if txn is None : logger . debug ( "{} can not handle GET_TXN request: ledger doesn't " "have txn with seqNo={}" . format ( self , str ( seq_no ) ) ) result = { f . IDENTIFIER . nm : request . identifier , f . REQ_ID . nm : request . reqId , TXN_TYPE : request . operation [ TXN_TYPE ] , DATA : None } if txn : result [ DATA ] = txn . result result [ f . SEQ_NO . nm ] = get_seq_no ( txn . result ) self . transmitToClient ( Reply ( result ) , frm )
Handle GET_TXN request
40,289
def processOrdered ( self , ordered : Ordered ) : if ordered . instId not in self . instances . ids : logger . warning ( '{} got ordered request for instance {} which ' 'does not exist' . format ( self , ordered . instId ) ) return False if ordered . instId != self . instances . masterId : logger . trace ( "{} got ordered requests from backup replica {}" . format ( self , ordered . instId ) ) with self . metrics . measure_time ( MetricsName . MONITOR_REQUEST_ORDERED_TIME ) : self . monitor . requestOrdered ( ordered . valid_reqIdr + ordered . invalid_reqIdr , ordered . instId , self . requests , byMaster = False ) return False logger . trace ( "{} got ordered requests from master replica" . format ( self ) ) logger . debug ( "{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}" . format ( self . name , ordered . viewNo , ordered . ppSeqNo , len ( ordered . valid_reqIdr ) , ordered . stateRootHash , ordered . txnRootHash ) ) three_pc_batch = ThreePcBatch . from_ordered ( ordered ) if self . db_manager . ledgers [ AUDIT_LEDGER_ID ] . uncommittedRootHash is None : logger . info ( "{} applying stashed requests for batch {} {} of {} requests; state root {}; txn root {}" . format ( self . name , three_pc_batch . view_no , three_pc_batch . pp_seq_no , len ( three_pc_batch . valid_digests ) , three_pc_batch . state_root , three_pc_batch . txn_root ) ) self . apply_stashed_reqs ( three_pc_batch ) self . executeBatch ( three_pc_batch , ordered . valid_reqIdr , ordered . invalid_reqIdr , ordered . auditTxnRootHash ) with self . metrics . measure_time ( MetricsName . MONITOR_REQUEST_ORDERED_TIME ) : self . monitor . requestOrdered ( ordered . valid_reqIdr + ordered . invalid_reqIdr , ordered . instId , self . requests , byMaster = True ) return True
Execute ordered request
40,290
def force_process_ordered ( self ) : for instance_id , messages in self . replicas . take_ordereds_out_of_turn ( ) : num_processed = 0 for message in messages : self . try_processing_ordered ( message ) num_processed += 1 logger . info ( '{} processed {} Ordered batches for instance {} ' 'before starting catch up' . format ( self , num_processed , instance_id ) )
Take any messages from replica that have been ordered and process them this should be done rarely like before catchup starts so a more current LedgerStatus can be sent . can be called either 1 . when node is participating this happens just before catchup starts so the node can have the latest ledger status or 2 . when node is not participating but a round of catchup is about to be started here is forces all the replica ordered messages to be appended to the stashed ordered requests and the stashed ordered requests are processed with appropriate checks
40,291
def processEscalatedException ( self , ex ) : if isinstance ( ex , SuspiciousNode ) : self . reportSuspiciousNodeEx ( ex ) else : raise RuntimeError ( "unhandled replica-escalated exception" ) from ex
Process an exception escalated from a Replica
40,292
def lost_master_primary ( self ) : self . primaries_disconnection_times [ self . master_replica . instId ] = time . perf_counter ( ) self . _schedule_view_change ( )
Schedule an primary connection check which in turn can send a view change message
40,293
def executeBatch ( self , three_pc_batch : ThreePcBatch , valid_reqs_keys : List , invalid_reqs_keys : List , audit_txn_root ) -> None : three_pc_batch . txn_root = Ledger . hashToStr ( three_pc_batch . txn_root ) three_pc_batch . state_root = Ledger . hashToStr ( three_pc_batch . state_root ) for req_key in valid_reqs_keys : self . execute_hook ( NodeHooks . PRE_REQUEST_COMMIT , req_key = req_key , pp_time = three_pc_batch . pp_time , state_root = three_pc_batch . state_root , txn_root = three_pc_batch . txn_root ) self . execute_hook ( NodeHooks . PRE_BATCH_COMMITTED , ledger_id = three_pc_batch . ledger_id , pp_time = three_pc_batch . pp_time , reqs_keys = valid_reqs_keys , state_root = three_pc_batch . state_root , txn_root = three_pc_batch . txn_root ) try : committedTxns = self . get_executer ( three_pc_batch . ledger_id ) ( three_pc_batch ) except Exception as exc : logger . error ( "{} commit failed for batch request, error {}, view no {}, " "ppSeqNo {}, ledger {}, state root {}, txn root {}, " "requests: {}" . format ( self , repr ( exc ) , three_pc_batch . view_no , three_pc_batch . pp_seq_no , three_pc_batch . ledger_id , three_pc_batch . state_root , three_pc_batch . txn_root , [ req_idr for req_idr in valid_reqs_keys ] ) ) raise for req_key in valid_reqs_keys + invalid_reqs_keys : if req_key in self . requests : self . mark_request_as_executed ( self . requests [ req_key ] . request ) else : logger . debug ( '{} normally executed request {} which object has been dropped ' 'from the requests queue' . format ( self , req_key ) ) pass if not committedTxns : return logger . debug ( "{} committed batch request, view no {}, ppSeqNo {}, " "ledger {}, state root {}, txn root {}, requests: {}" . format ( self , three_pc_batch . view_no , three_pc_batch . pp_seq_no , three_pc_batch . ledger_id , three_pc_batch . state_root , three_pc_batch . txn_root , [ key for key in valid_reqs_keys ] ) ) for txn in committedTxns : self . execute_hook ( NodeHooks . 
POST_REQUEST_COMMIT , txn = txn , pp_time = three_pc_batch . pp_time , state_root = three_pc_batch . state_root , txn_root = three_pc_batch . txn_root ) first_txn_seq_no = get_seq_no ( committedTxns [ 0 ] ) last_txn_seq_no = get_seq_no ( committedTxns [ - 1 ] ) reqs = [ ] reqs_list_built = True for req_key in valid_reqs_keys : if req_key in self . requests : reqs . append ( self . requests [ req_key ] . request . as_dict ) else : logger . warning ( "Could not build requests list for observers due to non-existent requests" ) reqs_list_built = False break if reqs_list_built : batch_committed_msg = BatchCommitted ( reqs , three_pc_batch . ledger_id , 0 , three_pc_batch . view_no , three_pc_batch . pp_seq_no , three_pc_batch . pp_time , three_pc_batch . state_root , three_pc_batch . txn_root , first_txn_seq_no , last_txn_seq_no , audit_txn_root , three_pc_batch . primaries ) self . _observable . append_input ( batch_committed_msg , self . name )
Execute the REQUEST sent to this Node
40,294
def addNewRole ( self , txn ) : if isinstance ( self . clientAuthNr . core_authenticator , SimpleAuthNr ) : txn_data = get_payload_data ( txn ) identifier = txn_data [ TARGET_NYM ] verkey = txn_data . get ( VERKEY ) v = DidVerifier ( verkey , identifier = identifier ) if identifier not in self . clientAuthNr . core_authenticator . clients : role = txn_data . get ( ROLE ) if role not in ( STEWARD , TRUSTEE , None ) : logger . debug ( "Role if present must be {} and not {}" . format ( Roles . STEWARD . name , role ) ) return self . clientAuthNr . core_authenticator . addIdr ( identifier , verkey = v . verkey , role = role )
Adds a new client or steward to this node based on transaction type .
40,295
def ensureKeysAreSetup ( self ) : if not areKeysSetup ( self . name , self . keys_dir ) : raise REx ( REx . reason . format ( self . name ) + self . keygenScript )
Check whether the keys are setup in the local STP keep . Raises KeysNotFoundException if not found .
40,296
def reportSuspiciousNodeEx ( self , ex : SuspiciousNode ) : self . reportSuspiciousNode ( ex . node , ex . reason , ex . code , ex . offendingMsg )
Report suspicion on a node on the basis of an exception
40,297
def reportSuspiciousNode ( self , nodeName : str , reason = None , code : int = None , offendingMsg = None ) : logger . warning ( "{} raised suspicion on node {} for {}; suspicion code " "is {}" . format ( self , nodeName , reason , code ) ) if code in ( s . code for s in ( Suspicions . PPR_DIGEST_WRONG , Suspicions . PPR_REJECT_WRONG , Suspicions . PPR_TXN_WRONG , Suspicions . PPR_STATE_WRONG , Suspicions . PPR_PLUGIN_EXCEPTION , Suspicions . PPR_SUB_SEQ_NO_WRONG , Suspicions . PPR_NOT_FINAL , Suspicions . PPR_WITH_ORDERED_REQUEST , Suspicions . PPR_AUDIT_TXN_ROOT_HASH_WRONG , Suspicions . PPR_BLS_MULTISIG_WRONG , Suspicions . PPR_TIME_WRONG , ) ) : logger . display ( '{}{} got one of primary suspicions codes {}' . format ( VIEW_CHANGE_PREFIX , self , code ) ) self . view_changer . on_suspicious_primary ( Suspicions . get_by_code ( code ) ) if offendingMsg : self . discard ( offendingMsg , reason , logger . debug )
Report suspicion on a node and add it to this node s blacklist .
40,298
def reportSuspiciousClient ( self , clientName : str , reason ) : logger . warning ( "{} raised suspicion on client {} for {}" . format ( self , clientName , reason ) ) self . blacklistClient ( clientName )
Report suspicion on a client and add it to this node s blacklist .
40,299
def blacklistClient ( self , clientName : str , reason : str = None , code : int = None ) : msg = "{} blacklisting client {}" . format ( self , clientName ) if reason : msg += " for reason {}" . format ( reason ) logger . display ( msg ) self . clientBlacklister . blacklist ( clientName )
Add the client specified by clientName to this node s blacklist