idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
228,200
async def service_observer(self, limit) -> int:
    """Service the observer's inBox and outBox.

    :param limit: maximum number of messages to service
    :return: number of messages serviced, or 0 when the node is not ready
    """
    ready = self.isReady()
    return await self._observer.serviceQueues(limit) if ready else 0
Service the observer's inBox and outBox.
40
9
228,201
def _ask_for_ledger_status(self, node_name: str, ledger_id):
    """Ask another node for the LedgerStatus of the given ledger."""
    payload = {f.LEDGER_ID.nm: ledger_id}
    self.request_msg(LEDGER_STATUS, payload, [node_name, ])
    logger.info("{} asking {} for ledger status of ledger {}".format(
        self, node_name, ledger_id))
Ask other node for LedgerStatus
87
7
228,202
def checkInstances(self) -> None:
    """Log election-related state; called whenever a connection with a new node is established.

    NOTE(review): despite the name, the visible body only logs — confirm
    whether additional instance checks live elsewhere.
    """
    # TODO: Is this method really needed?
    logger.debug(
        "{} choosing to start election on the basis of count {} and nodes {}".format(
            self, self.connectedNodeCount, self.nodestack.conns))
Check if this node has the minimum required number of protocol instances i . e . f + 1 . If not add a replica . If no election is in progress this node will try to nominate one of its replicas as primary . This method is called whenever a connection with a new node is established .
61
60
228,203
def adjustReplicas(self, old_required_number_of_instances: int,
                   new_required_number_of_instances: int):
    """Add or remove replicas so the replica count matches the required number."""
    # TODO: refactor this
    # Grow: create new replicas and replay messages stashed for them.
    for inst_id in range(old_required_number_of_instances,
                         new_required_number_of_instances):
        self.replicas.add_replica(inst_id)
        self.processStashedMsgsForReplica(inst_id)
    # Shrink: remove replicas from the highest id downwards.
    for inst_id in range(old_required_number_of_instances - 1,
                         new_required_number_of_instances - 1, -1):
        self.replicas.remove_replica(inst_id)
    pop_keys(self.msgsForFutureReplicas,
             lambda inst_id: inst_id < new_required_number_of_instances)
    # Keep the primaries-disconnection-times list in step with the new count.
    current = len(self.primaries_disconnection_times)
    if current < new_required_number_of_instances:
        self.primaries_disconnection_times.extend(
            [None] * (new_required_number_of_instances - current))
    elif current > new_required_number_of_instances:
        self.primaries_disconnection_times = \
            self.primaries_disconnection_times[:new_required_number_of_instances]
Add or remove replicas depending on f
292
8
228,204
def _check_view_change_completed(self):
    """Scheduled check that a new primary was elected.

    :return: True when the view change was still in progress and was
        escalated; False when it had already completed.
    """
    logger.info('{} running the scheduled check for view change completion'.format(self))
    if not self.view_changer.view_change_in_progress:
        # Fixed garbled log message (was "already completion view change").
        logger.info('{} view change is already completed'.format(self))
        return False
    # Still in progress past the deadline: escalate and restart view change.
    self.view_changer.on_view_change_not_completed_in_time()
    return True
This thing checks whether new primary was elected . If it was not - starts view change again
98
18
228,205
def service_replicas_outbox(self, limit: int = None) -> int:
    """Process up to `limit` messages from the replicas' outbox.

    :return: the number of messages handled
    """
    # TODO: rewrite this using Router
    handled = 0
    for msg in self.replicas.get_output(limit):
        handled += 1
        if isinstance(msg, (PrePrepare, Prepare, Commit, Checkpoint)):
            self.send(msg)
        elif isinstance(msg, Ordered):
            self.try_processing_ordered(msg)
        elif isinstance(msg, tuple) and isinstance(msg[1], Reject):
            with self.metrics.measure_time(MetricsName.NODE_SEND_REJECT_TIME):
                digest, reject = msg
                result_reject = Reject(
                    reject.identifier, reject.reqId,
                    self.reasonForClientFromException(reject.reason))
                # TODO: What the case when reqKey will be not in requestSender dict
                if digest in self.requestSender:
                    self.transmitToClient(result_reject, self.requestSender[digest])
                    self.doneProcessingReq(digest)
        elif isinstance(msg, Exception):
            self.processEscalatedException(msg)
        else:
            # TODO: should not this raise exception?
            logger.error("Received msg {} and don't "
                         "know how to handle it".format(msg))
    return handled
Process limit number of replica messages
304
6
228,206
def master_primary_name(self) -> Optional[str]:
    """Return the name of the primary node of the master instance, or None."""
    primary = self.master_replica.primaryName
    if not primary:
        return None
    return self.master_replica.getNodeName(primary)
Return the name of the primary node of the master instance
57
11
228,207
def msgHasAcceptableInstId(self, msg, frm) -> bool:
    """Return True when the message's instance id maps to an existing replica.

    Messages for not-yet-created (future) instances are stashed and
    False is returned.
    """
    # TODO: refactor this! this should not do anything except checking!
    inst_id = getattr(msg, f.INST_ID.nm, None)
    if not isinstance(inst_id, int) or inst_id < 0:
        return False
    if inst_id >= self.requiredNumberOfInstances:
        self.msgsForFutureReplicas.setdefault(inst_id, deque()).append((msg, frm))
        logger.debug("{} queueing message {} for future protocol instance {}".format(
            self, msg, inst_id))
        return False
    return True
Return true if the instance id of message corresponds to a correct replica .
168
14
228,208
def sendToReplica(self, msg, frm):
    """Route the message to its intended replica when its instance id is acceptable."""
    # TODO: discard or stash messages here instead of doing
    # this in msgHas* methods!!!
    if self.msgHasAcceptableInstId(msg, frm):
        self.replicas.pass_message((msg, frm), msg.instId)
Send the message to the intended replica .
71
8
228,209
def sendToViewChanger(self, msg, frm):
    """Queue the message for the view changer when it is acceptable."""
    acceptable = (isinstance(msg, InstanceChange)
                  or self.msgHasAcceptableViewNo(msg, frm))
    if acceptable:
        logger.debug("{} sending message to view changer: {}".format(self, (msg, frm)))
        self.msgsToViewChanger.append((msg, frm))
Send the message to the intended view changer .
89
10
228,210
def send_to_observer(self, msg, frm):
    """Forward the message and its sender to the observer's input queue."""
    logger.debug("{} sending message to observer: {}".format(self, (msg, frm)))
    self._observer.append_input(msg, frm)
Send the message to the observer .
57
7
228,211
def handleOneNodeMsg(self, wrappedMsg):
    """Validate and process one message received from another node."""
    try:
        validated = self.validateNodeMsg(wrappedMsg)
        if validated:
            logger.trace("{} msg validated {}".format(self, wrappedMsg),
                         extra={"tags": ["node-msg-validation"]})
            self.unpackNodeMsg(*validated)
        else:
            logger.debug("{} invalidated msg {}".format(self, wrappedMsg),
                         extra={"tags": ["node-msg-validation"]})
    except SuspiciousNode as ex:
        self.reportSuspiciousNodeEx(ex)
    except Exception as ex:
        msg, frm = wrappedMsg
        self.discard(msg, ex, logger.info)
Validate and process one message from a node .
162
10
228,212
def validateNodeMsg(self, wrappedMsg):
    """Validate another node's message sent to this node.

    :return: (message, frm) on success, None for blacklisted senders
    :raises InvalidNodeMsg: when the payload cannot be instantiated
    :raises SuspiciousNode: when the signature check fails
    """
    msg, frm = wrappedMsg
    if self.isNodeBlacklisted(frm):
        self.discard(str(msg)[:256],
                     "received from blacklisted node {}".format(frm),
                     logger.display)
        return None
    with self.metrics.measure_time(MetricsName.INT_VALIDATE_NODE_MSG_TIME):
        try:
            message = node_message_factory.get_instance(**msg)
        except (MissingNodeOp, InvalidNodeOp) as ex:
            raise ex
        except Exception as ex:
            raise InvalidNodeMsg(str(ex))
        try:
            self.verifySignature(message)
        except BaseExc as ex:
            raise SuspiciousNode(frm, ex, message) from ex
        logger.debug("{} received node message from {}: {}".format(self, frm, message),
                     extra={"cli": False})
        return message, frm
Validate another node's message sent to this node.
211
11
228,213
def unpackNodeMsg(self, msg, frm) -> None:
    """Unpack a Batch into individual messages; post a single message to the inbox."""
    # TODO: why do we unpack batches here? Batching is a feature of
    # a transport, it should be encapsulated.
    if not isinstance(msg, Batch):
        self.postToNodeInBox(msg, frm)
        return
    logger.trace("{} processing a batch {}".format(self, msg))
    with self.metrics.measure_time(MetricsName.UNPACK_BATCH_TIME):
        for m in msg.messages:
            try:
                m = self.nodestack.deserializeMsg(m)
            except Exception as ex:
                logger.warning("Got error {} while processing {} message".format(ex, m))
                continue
            self.handleOneNodeMsg((m, frm))
If the message is a batch message, validate each message in the batch; otherwise add the message to the node's inbox.
176
23
228,214
def postToNodeInBox(self, msg, frm):
    """Append the message and its sender to the node inbox."""
    logger.trace("{} appending to nodeInbox {}".format(self, msg))
    self.nodeInBox.append((msg, frm))
Append the message to the node inbox
52
8
228,215
async def processNodeInBox(self):
    """Drain the node inbox, processing each queued message asynchronously."""
    while self.nodeInBox:
        await self.process_one_node_message(self.nodeInBox.popleft())
Process the messages in the node inbox asynchronously .
44
11
228,216
def handleOneClientMsg(self, wrappedMsg):
    """Validate and process a single client message."""
    try:
        validated = self.validateClientMsg(wrappedMsg)
        if validated:
            self.unpackClientMsg(*validated)
    except BlowUp:
        # Deliberately fatal — never swallow.
        raise
    except Exception as ex:
        msg, frm = wrappedMsg
        friendly = friendlyEx(ex)
        if isinstance(ex, SuspiciousClient):
            self.reportSuspiciousClient(frm, friendly)
        self.handleInvalidClientMsg(ex, wrappedMsg)
Validate and process a client message
101
7
228,217
async def processClientInBox(self):
    """Process queued client messages; signatures were already validated on receipt."""
    while self.clientInBox:
        entry = self.clientInBox.popleft()
        req, frm = entry
        logger.debug("{} processing {} request {}".format(self.clientstack.name, frm, req),
                     extra={"cli": True, "tags": ["node-msg-processing"]})
        try:
            await self.clientMsgRouter.handle(entry)
        except InvalidClientMessageException as ex:
            self.handleInvalidClientMsg(ex, entry)
Process the messages in the node's clientInBox asynchronously. All messages in the inBox have already been validated, including signature check.
121
28
228,218
def is_catchup_needed_during_view_change(self) -> bool:
    """Decide whether another catchup round is needed during view change.

    NOTE(review): the nesting of the catch-up-limit check was reconstructed
    from flattened source — confirm against upstream.
    """
    if self.caught_up_for_current_view():
        logger.info('{} is caught up for the current view {}'.format(self, self.viewNo))
        return False
    logger.info('{} is not caught up for the current view {}'.format(self, self.viewNo))
    if self.num_txns_caught_up_in_last_catchup() == 0 \
            and self.has_ordered_till_last_prepared_certificate():
        logger.info('{} ordered till last prepared certificate'.format(self))
        return False
    if self.is_catch_up_limit(self.config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE):
        # No more 3PC messages will be processed since maximum catchup
        # rounds have been done
        self.master_replica.last_prepared_before_view_change = None
        return False
    return True
Check if a quorum of view change done messages has been received and, if yes, check whether the node is caught up: all requests ordered till the last prepared certificate, or the last catchup resulted in no txns.
235
39
228,219
def doDynamicValidation(self, request: Request):
    """State-based (dynamic) validation of a request.

    :raises SuspiciousPrePrepare: when the request was already ordered
    """
    self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request)
    # Digest validation: the request must not already be committed...
    ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
    if ledger_id is not None and seq_no is not None:
        raise SuspiciousPrePrepare('Trying to order already ordered request')
    # ...nor sitting among the uncommitted txns.
    ledger = self.getLedger(self.ledger_id_for_request(request))
    for txn in ledger.uncommittedTxns:
        if get_payload_digest(txn) == request.payload_digest:
            raise SuspiciousPrePrepare('Trying to order already ordered request')
    req_handler = self.get_req_handler(txn_type=request.operation[TXN_TYPE])
    req_handler.validate(request)
    self.execute_hook(NodeHooks.POST_DYNAMIC_VALIDATION, request=request)
State based validation
236
3
228,220
def applyReq(self, request: Request, cons_time: int):
    """Apply a request to the appropriate ledger and state.

    :param cons_time: UTC epoch at which consensus was reached
    """
    self.execute_hook(NodeHooks.PRE_REQUEST_APPLICATION, request=request,
                      cons_time=cons_time)
    handler = self.get_req_handler(txn_type=request.operation[TXN_TYPE])
    seq_no, txn = handler.apply(request, cons_time)
    self.execute_hook(NodeHooks.POST_REQUEST_APPLICATION, request=request,
                      cons_time=cons_time,
                      ledger_id=self.ledger_id_for_request(request),
                      seq_no=seq_no, txn=txn)
Apply request to appropriate ledger and state . cons_time is the UTC epoch at which consensus was reached .
164
21
228,221
def processRequest(self, request: Request, frm: str):
    """Handle a client REQUEST.

    Re-sends the reply if the request was already executed; otherwise
    acknowledges it, records it and propagates it to the other nodes.

    NOTE(review): block structure reconstructed from flattened source —
    confirm placement of recordAndPropagate/send_ack against upstream.
    """
    logger.debug("{} received client request: {} from {}".format(self.name, request, frm))
    self.nodeRequestSpikeMonitorData['accum'] += 1

    # TODO: What if client sends requests with same request id quickly so
    # before reply for one is generated, the other comes. In that
    # case we need to keep track of what requests ids node has seen
    # in-memory and once request with a particular request id is processed,
    # it should be removed from that in-memory DS.

    # If request is already processed (there is a reply for the request in
    # the node's transaction store) then return the reply from the
    # transaction store.
    # TODO: What if the reply was a REQNACK? Its not gonna be found in the
    # replies.

    txn_type = request.operation[TXN_TYPE]
    if self.is_action(txn_type):
        self.process_action(request, frm)
    elif txn_type == GET_TXN:
        self.handle_get_txn_req(request, frm)
        self.total_read_request_number += 1
    elif self.is_query(txn_type):
        self.process_query(request, frm)
        self.total_read_request_number += 1
    elif self.can_write_txn(txn_type):
        reply = self.getReplyFromLedgerForRequest(request)
        if reply:
            logger.debug("{} returning reply from already processed "
                         "REQUEST: {}".format(self, request))
            self.transmitToClient(reply, frm)
            return
        # If the node is not already processing the request
        if not self.isProcessingReq(request.key):
            self.startedProcessingReq(request.key, frm)
            # forced request should be processed before consensus
            self.handle_request_if_forced(request)
        # If not already got the propagate request(PROPAGATE) for the
        # corresponding client request(REQUEST)
        self.recordAndPropagate(request, frm)
        self.send_ack_to_client((request.identifier, request.reqId), frm)
    else:
        raise InvalidClientRequest(
            request.identifier, request.reqId,
            'Pool is in readonly mode, try again in 60 seconds')
Handle a REQUEST from the client . If the request has already been executed the node re - sends the reply to the client . Otherwise the node acknowledges the client request adds it to its list of client requests and sends a PROPAGATE to the remaining nodes .
523
53
228,222
def processPropagate(self, msg: Propagate, frm):
    """Process a PROPAGATE message sent to this node by another node."""
    logger.debug("{} received propagated request: {}".format(self.name, msg))
    request = TxnUtilConfig.client_request_class(**msg.request)
    client_name = msg.senderClient
    if not self.isProcessingReq(request.key):
        ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
        if ledger_id is not None and seq_no is not None:
            self._clean_req_from_verified(request)
            logger.debug("{} ignoring propagated request {} "
                         "since it has been already ordered".format(self.name, msg))
            return
        self.startedProcessingReq(request.key, client_name)
        # forced request should be processed before consensus
        self.handle_request_if_forced(request)
    elif client_name is not None and not self.is_sender_known_for_req(request.key):
        # Since some propagates might not include the client name
        self.set_sender_for_req(request.key, client_name)
    self.requests.add_propagate(request, frm)
    self.propagate(request, client_name)
    self.tryForwarding(request)
Process one propagateRequest sent to this node asynchronously
295
11
228,223
def handle_get_txn_req(self, request: Request, frm: str):
    """Handle a GET_TXN request by replying with the requested txn (DATA is None if absent)."""
    ledger_id = request.operation.get(f.LEDGER_ID.nm, DOMAIN_LEDGER_ID)
    if ledger_id not in self.ledger_to_req_handler:
        self.send_nack_to_client((request.identifier, request.reqId),
                                 'Invalid ledger id {}'.format(ledger_id), frm)
        return
    seq_no = request.operation.get(DATA)
    self.send_ack_to_client((request.identifier, request.reqId), frm)
    ledger = self.getLedger(ledger_id)
    try:
        txn = self.getReplyFromLedger(ledger, seq_no)
    except KeyError:
        txn = None
    if txn is None:
        logger.debug("{} can not handle GET_TXN request: ledger doesn't "
                     "have txn with seqNo={}".format(self, str(seq_no)))
    result = {
        f.IDENTIFIER.nm: request.identifier,
        f.REQ_ID.nm: request.reqId,
        TXN_TYPE: request.operation[TXN_TYPE],
        DATA: None,
    }
    if txn:
        result[DATA] = txn.result
        result[f.SEQ_NO.nm] = get_seq_no(txn.result)
    self.transmitToClient(Reply(result), frm)
Handle GET_TXN request
326
6
228,224
def processOrdered(self, ordered: Ordered):
    """Execute an Ordered batch coming from the master replica.

    :return: True when the batch was executed, False for unknown or
        backup instances
    """
    if ordered.instId not in self.instances.ids:
        logger.warning('{} got ordered request for instance {} which '
                       'does not exist'.format(self, ordered.instId))
        return False

    if ordered.instId != self.instances.masterId:
        # Requests from backup replicas are not executed
        logger.trace("{} got ordered requests from backup replica {}".format(
            self, ordered.instId))
        with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
            self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
                                        ordered.instId, self.requests, byMaster=False)
        return False

    logger.trace("{} got ordered requests from master replica".format(self))
    logger.debug("{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}".format(
        self.name, ordered.viewNo, ordered.ppSeqNo, len(ordered.valid_reqIdr),
        ordered.stateRootHash, ordered.txnRootHash))
    three_pc_batch = ThreePcBatch.from_ordered(ordered)
    if self.db_manager.ledgers[AUDIT_LEDGER_ID].uncommittedRootHash is None:
        # if we order request during view change
        # in between catchup rounds, then the 3PC batch will not be applied,
        # since it was reverted before catchup started, and only COMMITs were
        # processed in between catchup that led to this ORDERED msg
        logger.info("{} applying stashed requests for batch {} {} of {} requests; state root {}; txn root {}".format(
            self.name, three_pc_batch.view_no, three_pc_batch.pp_seq_no,
            len(three_pc_batch.valid_digests), three_pc_batch.state_root,
            three_pc_batch.txn_root))
        self.apply_stashed_reqs(three_pc_batch)

    self.executeBatch(three_pc_batch, ordered.valid_reqIdr,
                      ordered.invalid_reqIdr, ordered.auditTxnRootHash)
    with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
        self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
                                    ordered.instId, self.requests, byMaster=True)
    return True
Execute ordered request
572
4
228,225
def force_process_ordered(self):
    """Drain ordered messages from all replicas out of turn and process them.

    Done rarely, e.g. just before catchup starts, so a more current
    LedgerStatus can be sent.
    """
    for instance_id, messages in self.replicas.take_ordereds_out_of_turn():
        count = 0
        for ordered in messages:
            self.try_processing_ordered(ordered)
            count += 1
        logger.info('{} processed {} Ordered batches for instance {} '
                    'before starting catch up'.format(self, count, instance_id))
Take any messages from replica that have been ordered and process them; this should be done rarely, like before catchup starts, so a more current LedgerStatus can be sent. Can be called either 1. when the node is participating — this happens just before catchup starts so the node can have the latest ledger status — or 2. when the node is not participating but a round of catchup is about to be started; here it forces all the replica ordered messages to be appended to the stashed ordered requests, and the stashed ordered requests are processed with appropriate checks.
99
107
228,226
def processEscalatedException(self, ex):
    """Handle an exception escalated from a Replica."""
    if not isinstance(ex, SuspiciousNode):
        raise RuntimeError("unhandled replica-escalated exception") from ex
    self.reportSuspiciousNodeEx(ex)
Process an exception escalated from a Replica
52
8
228,227
def lost_master_primary(self):
    """Record when the master primary was lost and schedule a view-change check."""
    inst_id = self.master_replica.instId
    self.primaries_disconnection_times[inst_id] = time.perf_counter()
    self._schedule_view_change()
Schedule a primary connection check which in turn can send a view change message.
48
15
228,228
def addNewRole(self, txn):
    """Add a new client or steward to this node based on the transaction."""
    # If the client authenticator is a simple authenticator then add verkey.
    # For a custom authenticator, handle appropriately.
    # NOTE: The following code should not be used in production
    if not isinstance(self.clientAuthNr.core_authenticator, SimpleAuthNr):
        return
    txn_data = get_payload_data(txn)
    identifier = txn_data[TARGET_NYM]
    v = DidVerifier(txn_data.get(VERKEY), identifier=identifier)
    if identifier in self.clientAuthNr.core_authenticator.clients:
        return
    role = txn_data.get(ROLE)
    if role not in (STEWARD, TRUSTEE, None):
        logger.debug("Role if present must be {} and not {}".format(
            Roles.STEWARD.name, role))
        return
    self.clientAuthNr.core_authenticator.addIdr(identifier, verkey=v.verkey, role=role)
Adds a new client or steward to this node based on transaction type .
232
15
228,229
def ensureKeysAreSetup(self):
    """Check that keys are set up in the local STP keep.

    :raises REx: when the keys are not found
    """
    if areKeysSetup(self.name, self.keys_dir):
        return
    raise REx(REx.reason.format(self.name) + self.keygenScript)
Check whether the keys are setup in the local STP keep . Raises KeysNotFoundException if not found .
48
23
228,230
def reportSuspiciousNodeEx(self, ex: SuspiciousNode):
    """Report suspicion on a node based on a SuspiciousNode exception."""
    self.reportSuspiciousNode(ex.node, ex.reason, ex.code, ex.offendingMsg)
Report suspicion on a node on the basis of an exception
42
11
228,231
def reportSuspiciousNode(self, nodeName: str, reason=None, code: int = None,
                         offendingMsg=None):
    """Report suspicion on a node; primary-related suspicions notify the view changer."""
    logger.warning("{} raised suspicion on node {} for {}; suspicion code "
                   "is {}".format(self, nodeName, reason, code))
    # TODO need a more general solution here
    # TODO: Should not blacklist client on a single InvalidSignature.
    # Should track if a lot of requests with incorrect signatures have been
    # made in a short amount of time, only then blacklist client.
    # TODO: Consider blacklisting nodes again (previously done via
    # blacklistNode for InvalidSignature / self.suspicions codes).

    primary_suspicions = (Suspicions.PPR_DIGEST_WRONG,
                          Suspicions.PPR_REJECT_WRONG,
                          Suspicions.PPR_TXN_WRONG,
                          Suspicions.PPR_STATE_WRONG,
                          Suspicions.PPR_PLUGIN_EXCEPTION,
                          Suspicions.PPR_SUB_SEQ_NO_WRONG,
                          Suspicions.PPR_NOT_FINAL,
                          Suspicions.PPR_WITH_ORDERED_REQUEST,
                          Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG,
                          Suspicions.PPR_BLS_MULTISIG_WRONG,
                          Suspicions.PPR_TIME_WRONG)
    if code in (s.code for s in primary_suspicions):
        logger.display('{}{} got one of primary suspicions codes {}'.format(
            VIEW_CHANGE_PREFIX, self, code))
        self.view_changer.on_suspicious_primary(Suspicions.get_by_code(code))

    if offendingMsg:
        self.discard(offendingMsg, reason, logger.debug)
Report suspicion on a node and add it to this node's blacklist.
458
14
228,232
def reportSuspiciousClient(self, clientName: str, reason):
    """Report suspicion on a client and blacklist it."""
    logger.warning("{} raised suspicion on client {} for {}".format(
        self, clientName, reason))
    self.blacklistClient(clientName)
Report suspicion on a client and add it to this node s blacklist .
50
14
228,233
def blacklistClient(self, clientName: str, reason: str = None, code: int = None):
    """Add the given client to this node's blacklist.

    NOTE(review): unlike blacklistNode, `code` is accepted but never used
    here — confirm whether that asymmetry is intentional.
    """
    msg = "{} blacklisting client {}".format(self, clientName)
    if reason:
        msg = msg + " for reason {}".format(reason)
    logger.display(msg)
    self.clientBlacklister.blacklist(clientName)
Add the client specified by clientName to this node's blacklist.
74
12
228,234
def blacklistNode(self, nodeName: str, reason: str = None, code: int = None):
    """Add the node specified by nodeName to this node's blacklist."""
    msg = "{} blacklisting node {}".format(self, nodeName)
    if reason:
        msg = msg + " for reason {}".format(reason)
    if code:
        msg = msg + " for code {}".format(code)
    logger.display(msg)
    self.nodeBlacklister.blacklist(nodeName)
Add the node specified by nodeName to this node's blacklist.
89
12
228,235
def logstats(self):
    """Dump this node's current statistics to the log."""
    stats = [
        "node {} current stats".format(self),
        "--------------------------------------------------------",
        "node inbox size : {}".format(len(self.nodeInBox)),
        "client inbox size : {}".format(len(self.clientInBox)),
        "age (seconds) : {}".format(time.time() - self.created),
        "next check for reconnect: {}".format(time.perf_counter() - self.nodestack.nextCheck),
        "node connections : {}".format(self.nodestack.conns),
        "f : {}".format(self.f),
        "master instance : {}".format(self.instances.masterId),
        "replicas : {}".format(len(self.replicas)),
        "view no : {}".format(self.viewNo),
        "rank : {}".format(self.rank),
        "msgs to replicas : {}".format(self.replicas.sum_inbox_len),
        "msgs to view changer : {}".format(len(self.msgsToViewChanger)),
        "action queue : {} {}".format(len(self.actionQueue), id(self.actionQueue)),
        "action queue stash : {} {}".format(len(self.aqStash), id(self.aqStash)),
    ]
    logger.info("\n".join(stats), extra={"cli": False})
Print the node's current statistics to log.
346
9
228,236
def logNodeInfo(self) -> None:
    """Write the node's info to the `node_info` file for the REST backend to read."""
    self.nodeInfo['data'] = self.collectNodeInfo()
    # File objects are already context managers; the original's
    # contextlib.closing() wrapper was redundant.
    with open(os.path.join(self.ledger_dir, 'node_info'), 'w') as logNodeInfoFile:
        logNodeInfoFile.write(json.dumps(self.nodeInfo['data']))
Print the node's info to log for the REST backend to read.
82
14
228,237
def get_collection_sizes(obj, collections: Optional[Tuple] = None,
                         get_only_non_empty=False):
    """Iterate over collection attributes of `obj`.

    :return: list of (attribute name, item count, deep byte size) tuples
    """
    from pympler import asizeof  # third-party; imported lazily on purpose
    collections = collections or (list, dict, set, deque, abc.Sized)
    if not isinstance(collections, tuple):
        collections = tuple(collections)
    sizes = []
    for attr_name in dir(obj):
        attr = getattr(obj, attr_name)
        if isinstance(attr, collections) and (not get_only_non_empty or len(attr) > 0):
            sizes.append((attr_name, len(attr), asizeof.asizeof(attr, detail=1)))
    return sizes
Iterates over collections of the gives object and gives its byte size and number of items in collection
167
19
228,238
def returns_true_or_raises(f):
    """A safety net: wrap *f* so any return value other than True raises RuntimeError."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        result = f(*args, **kwargs)
        if result is not True:
            raise RuntimeError("Unexpected return value %r" % result)
        return True
    return wrapped
A safety net .
73
4
228,239
def backupIds(self) -> Sequence[int]:
    """Return the ids of replicas that do not belong to the master
    protocol instance (instance id 0)."""
    # Renamed the loop variable from `id` to avoid shadowing the builtin;
    # iterating the dict directly yields its keys, `.keys()` was redundant.
    return [inst_id for inst_id in self.started if inst_id != 0]
Return the list of replicas that don't belong to the master protocol instance.
31
15
228,240
def _hasViewChangeQuorum(self):
    """Check whether enough nodes (a quorum) completed view change."""
    # This method should just be present for master instance.
    ready = len(self._view_change_done)
    missing = self.quorum - ready
    if missing > 0:
        logger.info('{} needs {} ViewChangeDone messages'.format(self, missing))
        return False
    logger.info("{} got view change quorum ({} >= {})".format(
        self.name, ready, self.quorum))
    return True
Checks whether n - f nodes completed view change and whether one of them is the next primary
126
19
228,241
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None:
    """Validate and process an InstanceChange request from another node."""
    if frm not in self.provider.connected_nodes():
        self.provider.discard(
            instChg,
            "received instance change request: {} from {} "
            "which is not in connected list: {}".format(
                instChg, frm, self.provider.connected_nodes()),
            logger.info)
        return

    logger.info("{} received instance change request: {} from {}".format(
        self, instChg, frm))
    # TODO: add sender to blacklist?
    if not isinstance(instChg.viewNo, int):
        self.provider.discard(
            instChg,
            "{}field view_no has incorrect type: {}".format(
                VIEW_CHANGE_PREFIX, type(instChg.viewNo)))
    elif instChg.viewNo <= self.view_no:
        self.provider.discard(
            instChg,
            "Received instance change request with view no {} "
            "which is not more than its view no {}".format(
                instChg.viewNo, self.view_no),
            logger.info)
    else:
        # Record instance changes for views but send instance change
        # only when found master to be degraded. if quorum of view changes
        # found then change view even if master not degraded
        self._on_verified_instance_change_msg(instChg, frm)
        if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name):
            logger.info("{} received instance change message {} but has already "
                        "sent an instance change message".format(self, instChg))
        elif not self.provider.is_master_degraded():
            logger.info("{} received instance change message {} but did not "
                        "find the master to be slow".format(self, instChg))
        else:
            logger.display("{}{} found master degraded after receiving instance change"
                           " message from {}".format(VIEW_CHANGE_PREFIX, self, frm))
            self.sendInstanceChange(instChg.viewNo)
Validate and process an instance change request .
485
9
228,242
def process_vchd_msg(self, msg: ViewChangeDone, sender: str) -> bool:
    """Process a ViewChangeDone message; start primary selection once recorded.

    :return: False when the message is discarded; otherwise falls through
        to selection (implicit None, as in the original)
    """
    logger.info("{}'s primary selector started processing of ViewChangeDone msg from {} : {}".format(
        self.name, sender, msg))
    view_no = msg.viewNo
    if self.view_no != view_no:
        self.provider.discard(
            msg,
            '{} got Primary from {} for view no {} '
            'whereas current view no is {}'.format(self, sender, view_no, self.view_no),
            logMethod=logger.info)
        return False
    new_primary_name = msg.name
    if new_primary_name == self.previous_master_primary:
        self.provider.discard(
            msg,
            '{} got Primary from {} for {} who was primary of '
            'master in previous view too'.format(self, sender, new_primary_name),
            logMethod=logger.info)
        return False
    # Since a node can send ViewChangeDone more than one time
    self._on_verified_view_change_done_msg(msg, sender)
    # TODO why do we check that after the message tracking
    if self.provider.has_primary():
        self.provider.discard(
            msg,
            "it already decided primary which is {}".format(
                self.provider.current_primary_name()),
            logger.info)
        return False
    self._start_selection()
Processes ViewChangeDone messages . Once n - f messages have been received decides on a primary for specific replica .
311
23
228,243
def sendInstanceChange(self, view_no: int, suspicion=Suspicions.PRIMARY_DEGRADED):
    """Broadcast an InstanceChange for view_no, throttled to avoid spamming."""
    # If not found any sent instance change messages in last
    # `ViewChangeWindowSize` seconds or the last sent instance change
    # message was sent long enough ago then instance change message can be
    # sent otherwise no.
    can_send, cooldown = self.insChngThrottler.acquire()
    if not can_send:
        logger.info("{} cannot send instance change sooner then {} seconds".format(
            self, cooldown))
        return
    logger.info("{}{} sending an instance change with view_no {}"
                " since {}".format(VIEW_CHANGE_PREFIX, self, view_no, suspicion.reason))
    logger.info("{}{} metrics for monitor: {}".format(
        MONITORING_PREFIX, self, self.provider.pretty_metrics()))
    msg = self._create_instance_change_msg(view_no, suspicion.code)
    self.send(msg)
    # record instance change vote for self and try to change the view
    # if quorum is reached
    self._on_verified_instance_change_msg(msg, self.name)
Broadcast an instance change request to all the remaining nodes
272
11
228,244
def _canViewChange ( self , proposedViewNo : int ) -> ( bool , str ) : msg = None quorum = self . quorums . view_change . value if not self . instance_changes . has_quorum ( proposedViewNo , quorum ) : msg = '{} has no quorum for view {}' . format ( self , proposedViewNo ) elif not proposedViewNo > self . view_no : msg = '{} is in higher view more than {}' . format ( self , proposedViewNo ) return not bool ( msg ) , msg
Return whether there's a quorum for view change for the proposed view number and the current view is less than the proposed view.
124
26
228,245
def start_view_change(self, proposed_view_no: int, continue_vc=False):
    """Switch this node into view-change mode for *proposed_view_no*.

    When a pre-view-change strategy is configured, the first call only
    hands control to the strategy and returns; the strategy re-enters
    with continue_vc=True. The method then advances the view number,
    resets view-change state, notifies the provider and starts catch-up.

    :param proposed_view_no: the view number to change to
    :param continue_vc: True when re-entered after the pre-VC strategy
    """
    # TODO: consider moving this to pool manager
    # TODO: view change is a special case, which can have different
    # implementations - we need to make this logic pluggable
    if self.pre_vc_strategy and (not continue_vc):
        self.pre_view_change_in_progress = True
        self.pre_vc_strategy.prepare_view_change(proposed_view_no)
        return
    elif self.pre_vc_strategy:
        self.pre_vc_strategy.on_strategy_complete()
    self.previous_view_no = self.view_no
    self.view_no = proposed_view_no
    self.pre_view_change_in_progress = False
    self.view_change_in_progress = True
    self.previous_master_primary = self.provider.current_primary_name()
    self.set_defaults()
    self._process_vcd_for_future_view()
    self.initInsChngThrottling()
    self.provider.notify_view_change_start()
    self.provider.start_catchup()
Trigger the view change process .
266
6
228,246
def _verify_primary ( self , new_primary , ledger_info ) : expected_primary = self . provider . next_primary_name ( ) if new_primary != expected_primary : logger . error ( "{}{} expected next primary to be {}, but majority " "declared {} instead for view {}" . format ( PRIMARY_SELECTION_PREFIX , self . name , expected_primary , new_primary , self . view_no ) ) return False self . _primary_verified = True return True
This method is called when sufficient number of ViewChangeDone received and makes steps to switch to the new primary
113
21
228,247
def _send_view_change_done_message(self):
    """Broadcast this node's ViewChangeDone for the current view and
    record its own vote."""
    vcd = ViewChangeDone(self.view_no,
                         self.provider.next_primary_name(),
                         self.provider.ledger_summary())
    logger.info("{} is sending ViewChangeDone msg to all : {}".format(self, vcd))
    self.send(vcd)
    self._on_verified_view_change_done_msg(vcd, self.name)
Sends ViewChangeDone message to other protocol participants
119
10
228,248
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]:
    """Return the ViewChangeDone message(s) for a node that is lagging.

    Prefers the accepted ViewChangeDone, falls back to this node's own
    vote, and returns an empty list (with a log line) when neither
    exists. The list form is only for interface compatibility.
    """
    # TODO: Consider a case where more than one node joins immediately,
    # then one of the node might not have an accepted
    # ViewChangeDone message
    accepted = self._accepted_view_change_done_message
    if accepted:
        return [ViewChangeDone(self.last_completed_view_no, *accepted)]
    if self.name in self._view_change_done:
        return [ViewChangeDone(self.last_completed_view_no,
                               *self._view_change_done[self.name])]
    logger.info('{} has no ViewChangeDone message to send for view {}'.
                format(self, self.view_no))
    return []
Returns the last accepted ViewChangeDone message . If no view change has happened returns ViewChangeDone with view no 0 to a newly joined node
198
28
228,249
def validate(self, val):
    """Validate *val* for this field.

    Skips validation entirely for None on a nullable field. Otherwise
    runs the generic type check followed by the field-specific check,
    returning the first error found, or None when the value is valid.
    """
    if self.nullable and val is None:
        return
    for check in (self.__type_check, self._specific_validation):
        error = check(val)
        if error:
            return error
Performs basic validation of field value and then passes it for specific validation .
64
15
228,250
def sign(self, msg: Dict) -> Dict:
    """Compute and return a base58-encoded signature over *msg*.

    The SIG field itself is excluded from the serialized payload so the
    signature does not depend on any previous signature value.
    """
    payload = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm])
    raw_sig = self.naclSigner.signature(payload)
    return base58.b58encode(raw_sig).decode("utf-8")
Return a signature for the given message .
83
8
228,251
def lastPrePrepareSeqNo(self, n):
    """Monotonically advance _lastPrePrepareSeqNo.

    Only values greater than the current one are accepted; anything
    else is logged and ignored. To forcefully override (e.g. on
    revert), assign self._lastPrePrepareSeqNo directly.
    """
    if n <= self._lastPrePrepareSeqNo:
        self.logger.debug('{} cannot set lastPrePrepareSeqNo to {} as its '
                          'already {}'.format(self, n, self._lastPrePrepareSeqNo))
        return
    self._lastPrePrepareSeqNo = n
This will _lastPrePrepareSeqNo to values greater than its previous values else it will not . To forcefully override as in case of revert directly set self . _lastPrePrepareSeqNo
89
42
228,252
def primaryName(self, value: Optional[str]) -> None:
    """Setter for this replica's primary name in the current view.

    Records *value* under the current view number; when the primary
    actually changes, performs pre-new-view garbage collection and,
    when required, resets the watermarks. A None value only unsets
    the primary (GC is skipped until a primary is decided).

    :param value: name of the new primary, or None to unset it
    """
    if value is not None:
        self.warned_no_primary = False
    self.primaryNames[self.viewNo] = value
    self.compact_primary_names()
    if value != self._primaryName:
        self._primaryName = value
        self.logger.info("{} setting primaryName for view no {} to: {}".
                         format(self, self.viewNo, value))
        if value is None:
            # Since the GC needs to happen after a primary has been
            # decided.
            return
        self._gc_before_new_view()
        if self.__should_reset_watermarks_before_new_view():
            self._reset_watermarks_before_new_view()
Set the value of isPrimary .
167
7
228,253
def get_lowest_probable_prepared_certificate_in_view(self, view_no) -> Optional[int]:
    """Return the lowest pp_seq_no of *view_no* that is present both in
    the stashed PRE-PREPAREs and among stashed PREPAREs that already
    have a prepare quorum — i.e. the lowest batch that could probably
    reach a prepared certificate. Returns None when there is none.
    """
    # TODO: Naive implementation, dont need to iterate over the complete
    # data structures, fix this later
    seq_no_pp = SortedList()  # pp_seq_no of PRE-PREPAREs
    # pp_seq_no of PREPAREs with count of PREPAREs for each
    seq_no_p = set()
    for (v, p) in self.prePreparesPendingPrevPP:
        if v == view_no:
            seq_no_pp.add(p)
        if v > view_no:
            break
    for (v, p), pr in self.preparesWaitingForPrePrepare.items():
        if v == view_no and len(pr) >= self.quorums.prepare.value:
            seq_no_p.add(p)
    # lowest pp_seq_no present in both collections wins
    for n in seq_no_pp:
        if n in seq_no_p:
            return n
    return None
Return lowest pp_seq_no of the view for which can be prepared but choose from unprocessed PRE - PREPAREs and PREPAREs .
226
33
228,254
def is_primary_in_view(self, viewNo: int) -> Optional[bool]:
    """Tell whether this replica was primary in view *viewNo*.

    Returns False when no primary is recorded for that view.
    """
    try:
        return self.primaryNames[viewNo] == self.name
    except KeyError:
        return False
Return whether this replica was primary in the given view
47
10
228,255
def processReqDuringBatch(self, req: Request, cons_time: int):
    """On the master replica only, dynamically validate *req* and apply
    it with consensus time *cons_time*.

    Validation errors propagate to the caller; non-master replicas do
    nothing.
    """
    if not self.isMaster:
        return
    self.node.doDynamicValidation(req)
    self.node.applyReq(req, cons_time)
This method will do dynamic validation and apply requests . If there is any errors during validation it would be raised
52
21
228,256
def serviceQueues(self, limit=None):
    """Drain pending work: stashed PRE-PREPAREs, up to *limit* inbox
    messages, outgoing 3PC batches and scheduled actions.

    Returns the total number of items processed.
    """
    # TODO should handle SuspiciousNode here
    steps = (self.dequeue_pre_prepares(),
             self.inBoxRouter.handleAllSync(self.inBox, limit),
             self.send_3pc_batch(),
             self._serviceActions())
    return sum(steps)
Process limit number of messages in the inBox .
78
10
228,257
def tryPrepare(self, pp: PrePrepare):
    """Send a PREPARE for *pp* when allowed; otherwise log why not."""
    allowed, reason = self.canPrepare(pp)
    if not allowed:
        self.logger.debug("{} cannot send PREPARE since {}".format(self, reason))
        return
    self.doPrepare(pp)
Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase .
65
22
228,258
def processPrepare(self, prepare: Prepare, sender: str) -> None:
    """Validate an incoming PREPARE and, when valid, record the vote
    (which can lead to sending a COMMIT).

    SuspiciousNode raised during validation is reported rather than
    propagated.

    :param prepare: the PREPARE message
    :param sender: name of the node that sent it
    """
    key = (prepare.viewNo, prepare.ppSeqNo)
    self.logger.debug("{} received PREPARE{} from {}".format(self, key, sender))
    # TODO move this try/except up higher
    try:
        if self.validatePrepare(prepare, sender):
            self.addToPrepares(prepare, sender)
            self.stats.inc(TPCStat.PrepareRcvd)
            self.logger.debug("{} processed incoming PREPARE {}".format(
                self, (prepare.viewNo, prepare.ppSeqNo)))
        else:
            # TODO let's have isValidPrepare throw an exception that gets
            # handled and possibly logged higher
            self.logger.trace("{} cannot process incoming PREPARE".format(self))
    except SuspiciousNode as ex:
        self.report_suspicious_node(ex)
Validate and process the PREPARE specified . If validation is successful create a COMMIT and broadcast it .
207
22
228,259
def processCommit ( self , commit : Commit , sender : str ) -> None : self . logger . debug ( "{} received COMMIT{} from {}" . format ( self , ( commit . viewNo , commit . ppSeqNo ) , sender ) ) if self . validateCommit ( commit , sender ) : self . stats . inc ( TPCStat . CommitRcvd ) self . addToCommits ( commit , sender ) self . logger . debug ( "{} processed incoming COMMIT{}" . format ( self , ( commit . viewNo , commit . ppSeqNo ) ) )
Validate and process the COMMIT specified . If validation is successful return the message to the node .
128
20
228,260
def tryCommit(self, prepare: Prepare):
    """Send a COMMIT once the commit preconditions for *prepare* hold;
    otherwise log the reason."""
    allowed, why = self.canCommit(prepare)
    if allowed:
        self.doCommit(prepare)
    else:
        self.logger.debug("{} cannot send COMMIT since {}".format(self, why))
Try to commit if the Prepare message is ready to be passed into the commit phase .
62
17
228,261
def tryOrder(self, commit: Commit):
    """Attempt ordering for *commit*; returns whether ordering happened."""
    ok, why = self.canOrder(commit)
    if not ok:
        self.logger.debug("{} cannot return request to node: {}".format(self, why))
        return ok
    self.logger.trace("{} returning request to node".format(self))
    self.doOrder(commit)
    return ok
Try to order if the Commit message is ready to be ordered .
82
13
228,262
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
    """Return the subset of *reqKeys* that are not yet finalised, i.e.
    that lack sufficient PROPAGATEs."""
    pending = set()
    for key in reqKeys:
        if not self.requests.is_finalised(key):
            pending.add(key)
    return pending
Check if there are any requests which are not finalised i . e for which there are not enough PROPAGATEs
46
25
228,263
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str) -> Optional[int]:
    """Run all eligibility checks for an incoming PRE-PREPARE.

    Returns None when the PRE-PREPARE can be processed, otherwise one of
    the PP_CHECK_* error codes (or a BLS status code) explaining why not.
    """
    # TODO: Check whether it is rejecting PRE-PREPARE from previous view
    # PRE-PREPARE should not be sent from non primary
    if not self.isMsgFromPrimary(pre_prepare, sender):
        return PP_CHECK_NOT_FROM_PRIMARY
    # Already has a PRE-PREPARE with same 3 phase key
    if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
        return PP_CHECK_DUPLICATE
    if not self.is_pre_prepare_time_acceptable(pre_prepare, sender):
        return PP_CHECK_WRONG_TIME
    if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
                        self.__last_pp_3pc) > 0:
        return PP_CHECK_OLD  # ignore old pre-prepare
    if self.nonFinalisedReqs(pre_prepare.reqIdr):
        return PP_CHECK_REQUEST_NOT_FINALIZED
    if not self.__is_next_pre_prepare(pre_prepare.viewNo, pre_prepare.ppSeqNo):
        return PP_CHECK_NOT_NEXT
    if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
            pre_prepare.poolStateRootHash != self.stateRootHash(POOL_LEDGER_ID):
        return PP_CHECK_INCORRECT_POOL_STATE_ROOT
    # BLS multi-sig:
    status = self._bls_bft_replica.validate_pre_prepare(pre_prepare, sender)
    if status is not None:
        return status
    return None
Decide whether this replica is eligible to process a PRE - PREPARE .
411
16
228,264
def addToPrePrepares(self, pp: PrePrepare) -> None:
    """Record accepted PRE-PREPARE *pp*, advance the last pre-prepare
    sequence number and accepted time, release any PREPAREs/COMMITs
    stashed for its 3PC key, and try to send a PREPARE.
    """
    key = (pp.viewNo, pp.ppSeqNo)
    self.prePrepares[key] = pp
    self.lastPrePrepareSeqNo = pp.ppSeqNo
    self.last_accepted_pre_prepare_time = pp.ppTime
    self.dequeue_prepares(*key)
    self.dequeue_commits(*key)
    self.stats.inc(TPCStat.PrePrepareRcvd)
    self.tryPrepare(pp)
Add the specified PRE - PREPARE to this replica s list of received PRE - PREPAREs and try sending PREPARE
124
27
228,265
def canPrepare(self, ppReq) -> (bool, str):
    """Whether a PREPARE may be sent for PRE-PREPARE *ppReq*, i.e. one
    has not already been sent. Returns (allowed, reason)."""
    if not self.has_sent_prepare(ppReq):
        return True, ''
    return False, 'has already sent PREPARE for {}'.format(ppReq)
Return whether the batch of requests in the PRE - PREPARE can proceed to the PREPARE step .
57
22
228,266
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
    """Check an incoming PREPARE for validity.

    Returns True when the PREPARE can be accepted, False when it must
    be stashed (e.g. its PRE-PREPARE has not arrived yet), and raises
    SuspiciousNode for every malicious-looking case (duplicate vote,
    PREPARE from the primary, digest/root-hash mismatch, hook failure).
    """
    key = (prepare.viewNo, prepare.ppSeqNo)
    primaryStatus = self.isPrimaryForMsg(prepare)
    ppReq = self.getPrePrepare(*key)
    # If a non primary replica and receiving a PREPARE request before a
    # PRE-PREPARE request, then proceed
    # PREPARE should not be sent from primary
    if self.isMsgFromPrimary(prepare, sender):
        raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)
    # If non primary replica
    if primaryStatus is False:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE not received for the PREPARE, might be slow
        # network
        if not ppReq:
            self.enqueue_prepare(prepare, sender)
            self._setup_last_ordered_for_non_master()
            return False
    # If primary replica
    if primaryStatus is True:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE was not sent for this PREPARE, certainly
        # malicious behavior
        elif not ppReq:
            raise SuspiciousNode(sender, Suspicions.UNKNOWN_PR_SENT, prepare)
    # primaryStatus None: primary not known yet for this view
    if primaryStatus is None and not ppReq:
        self.enqueue_prepare(prepare, sender)
        self._setup_last_ordered_for_non_master()
        return False
    # Cross-check PREPARE contents against the matching PRE-PREPARE
    if prepare.digest != ppReq.digest:
        raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
    elif prepare.stateRootHash != ppReq.stateRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG, prepare)
    elif prepare.txnRootHash != ppReq.txnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG, prepare)
    elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG, prepare)
    try:
        self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
    except Exception as ex:
        self.logger.warning('{} encountered exception in replica '
                            'hook {} : {}'.format(self, ReplicaHooks.VALIDATE_PR, ex))
        raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION, prepare)
    # BLS multi-sig:
    self._bls_bft_replica.validate_prepare(prepare, sender)
    return True
Return whether the PREPARE specified is valid .
650
10
228,267
def addToPrepares(self, prepare: Prepare, sender: str):
    """Record a valid PREPARE vote from *sender*, release any COMMITs
    stashed for this 3PC key, and try to send a COMMIT.
    """
    # BLS multi-sig:
    self._bls_bft_replica.process_prepare(prepare, sender)
    self.prepares.addVote(prepare, sender)
    self.dequeue_commits(prepare.viewNo, prepare.ppSeqNo)
    self.tryCommit(prepare)
Add the specified PREPARE to this replica's list of received PREPAREs and try sending a COMMIT.
85
22
228,268
def canCommit(self, prepare: Prepare) -> (bool, str):
    """Whether a COMMIT may be sent for *prepare*: requires a PREPARE
    quorum and that no COMMIT has been sent yet. Returns
    (allowed, reason)."""
    if not self.prepares.hasQuorum(prepare, self.quorums.prepare.value):
        return False, 'does not have prepare quorum for {}'.format(prepare)
    if self.hasCommitted(prepare):
        return False, 'has already sent COMMIT for {}'.format(prepare)
    return True, ''
Return whether the specified PREPARE can proceed to the Commit step .
95
14
228,269
def validateCommit(self, commit: Commit, sender: str) -> bool:
    """Check an incoming COMMIT for validity.

    Returns True when the COMMIT can be accepted, False when it is
    stashed because the batch is not yet prepared; raises SuspiciousNode
    on a duplicate COMMIT or a wrong BLS signature.
    """
    key = (commit.viewNo, commit.ppSeqNo)
    if not self.has_prepared(key):
        self.enqueue_commit(commit, sender)
        return False
    if self.commits.hasCommitFrom(commit, sender):
        raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit)
    # BLS multi-sig:
    pre_prepare = self.getPrePrepare(commit.viewNo, commit.ppSeqNo)
    why_not = self._bls_bft_replica.validate_commit(commit, sender, pre_prepare)
    if why_not == BlsBftReplica.CM_BLS_SIG_WRONG:
        self.logger.warning("{} discard Commit message from "
                            "{}:{}".format(self, sender, commit))
        raise SuspiciousNode(sender, Suspicions.CM_BLS_SIG_WRONG, commit)
    elif why_not is not None:
        # unrecognized BLS status codes are logged but not fatal
        self.logger.warning("Unknown error code returned for bls commit "
                            "validation {}".format(why_not))
    return True
Return whether the COMMIT specified is valid .
270
9
228,270
def addToCommits(self, commit: Commit, sender: str):
    """Record a valid COMMIT vote from *sender* and try to order."""
    # BLS multi-sig:
    self._bls_bft_replica.process_commit(commit, sender)
    self.commits.addVote(commit, sender)
    self.tryOrder(commit)
Add the specified COMMIT to this replica s list of received commit requests .
63
15
228,271
def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]:
    """Decide whether *commit* can be ordered now.

    Requires a commit quorum, the batch not to be ordered already, and
    all previous batches to be ordered; out-of-order commits are
    stashed and retried periodically. Returns (can_order, reason);
    reason is None when ordering is possible.
    """
    quorum = self.quorums.commit.value
    if not self.commits.hasQuorum(commit, quorum):
        return False, "no quorum ({}): {} commits where f is {}".format(
            quorum, commit, self.f)
    key = (commit.viewNo, commit.ppSeqNo)
    if self.has_already_ordered(*key):
        return False, "already ordered"
    if commit.ppSeqNo > 1 and not self.all_prev_ordered(commit):
        viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
        if viewNo not in self.stashed_out_of_order_commits:
            self.stashed_out_of_order_commits[viewNo] = {}
        self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit
        # periodically retry the stashed commits until they can be ordered
        self.startRepeating(self.process_stashed_out_of_order_commits,
                            self.config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL)
        return False, "stashing {} since out of order".format(commit)
    return True, None
Return whether the specified commitRequest can be returned to the node .
289
13
228,272
def all_prev_ordered(self, commit: Commit):
    """Return True when every 3PC batch before *commit* has been ordered.

    Fast path: the last ordered batch immediately precedes this commit.
    Otherwise scans all known 3PC keys for unordered batches from
    earlier views or with a lower pp_seq_no in the same view.
    """
    # TODO: This method does a lot of work, choose correct data
    # structures to make it efficient.
    viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
    if self.last_ordered_3pc == (viewNo, ppSeqNo - 1):
        # Last ordered was in same view as this COMMIT
        return True
    # if some PREPAREs/COMMITs were completely missed in the same view
    toCheck = set()
    toCheck.update(set(self.sentPrePrepares.keys()))
    toCheck.update(set(self.prePrepares.keys()))
    toCheck.update(set(self.prepares.keys()))
    toCheck.update(set(self.commits.keys()))
    for (v, p) in toCheck:
        if v < viewNo and (v, p) not in self.ordered:
            # Have commits from previous view that are unordered.
            return False
        if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
            # If unordered commits are found with lower ppSeqNo then this
            # cannot be ordered.
            return False
    return True
Return True if all previous COMMITs have been ordered
271
11
228,273
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool:
    """Route an incoming Checkpoint through the validator and either
    discard, process or stash it.

    Returns True when the checkpoint was processed or discarded,
    False when it was stashed for later.
    """
    self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender))
    result, reason = self.validator.validate_checkpoint_msg(msg)
    if result == DISCARD:
        self.discard(msg,
                     "{} discard message {} from {} "
                     "with the reason: {}".format(self, msg, sender, reason),
                     self.logger.trace)
    elif result == PROCESS:
        self._do_process_checkpoint(msg, sender)
    else:
        # any other result code means the message must wait
        self.logger.debug("{} stashing checkpoint message {} with "
                          "the reason: {}".format(self, msg, reason))
        self.stasher.stash((msg, sender), result)
        return False
    return True
Process checkpoint messages
182
3
228,274
def _process_stashed_pre_prepare_for_time_if_possible(self, key: Tuple[int, int]):
    """Re-process a PRE-PREPARE stashed for an unacceptable timestamp
    once enough PREPAREs agree on a common time.

    Returns True when the stashed PRE-PREPARE was (or had already been)
    re-processed, False otherwise.
    """
    self.logger.debug('{} going to process stashed PRE-PREPAREs with '
                      'incorrect times'.format(self))
    q = self.quorums.f
    if len(self.preparesWaitingForPrePrepare[key]) > q:
        times = [pr.ppTime for (pr, _) in self.preparesWaitingForPrePrepare[key]]
        most_common_time, freq = mostCommonElement(times)
        if self.quorums.timestamp.is_reached(freq):
            self.logger.debug('{} found sufficient PREPAREs for the '
                              'PRE-PREPARE{}'.format(self, key))
            stashed_pp = self.pre_prepares_stashed_for_incorrect_time
            pp, sender, done = stashed_pp[key]
            if done:
                self.logger.debug('{} already processed PRE-PREPARE{}'.format(self, key))
                return True
            # True is set since that will indicate to `is_pre_prepare_time_acceptable`
            # that sufficient PREPAREs are received
            stashed_pp[key] = (pp, sender, True)
            self.process_three_phase_msg(pp, sender)
            return True
    return False
Check if any PRE - PREPAREs that were stashed since their time was not acceptable can now be accepted since enough PREPAREs are received
319
31
228,275
def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
    """Drop all 3-phase messages (and their request keys) for 3PC keys
    up to and including *last_caught_up_3PC*, since catch-up already
    covers those batches.
    """
    # collect PRE-PREPAREs (received and sent) covered by the caught-up key
    outdated_pre_prepares = {}
    for key, pp in self.prePrepares.items():
        if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
            outdated_pre_prepares[key] = pp
    for key, pp in self.sentPrePrepares.items():
        if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
            outdated_pre_prepares[key] = pp
    self.logger.trace('{} going to remove messages for {} 3PC keys'.format(
        self, len(outdated_pre_prepares)))
    for key, pp in outdated_pre_prepares.items():
        self.batches.pop(key, None)
        self.sentPrePrepares.pop(key, None)
        self.prePrepares.pop(key, None)
        self.prepares.pop(key, None)
        self.commits.pop(key, None)
        self._discard_ordered_req_keys(pp)
Remove any 3 phase messages till the last ordered key and also remove any corresponding request keys
256
17
228,276
def _remove_ordered_from_queue(self, last_caught_up_3PC=None):
    """Remove Ordered messages from the outBox and return them in their
    original order.

    When *last_caught_up_3PC* is given, only Ordered messages up to
    (and including) that 3PC key are removed; otherwise all of them.
    """
    to_remove = []
    for i, msg in enumerate(self.outBox):
        if isinstance(msg, Ordered) and \
                (not last_caught_up_3PC or
                 compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0):
            to_remove.append(i)
    self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(
        self, len(to_remove)))
    # Removing Ordered from queue but returning `Ordered` in order that
    # they should be processed.
    removed = []
    # delete from the back so earlier indices stay valid
    for i in reversed(to_remove):
        removed.insert(0, self.outBox[i])
        del self.outBox[i]
    return removed
Remove any Ordered messages that the replica might be sending to the node whose 3PC key is less than or equal to last_caught_up_3PC, if last_caught_up_3PC is passed; otherwise remove all Ordered messages, since catch-up covers them.
203
48
228,277
def _remove_stashed_checkpoints(self, till_3pc_key=None):
    """Remove stashed received checkpoints up to *till_3pc_key*; with no
    key, clear them all.

    For views before the key's view everything is dropped; for the
    key's own view only checkpoint ranges ending at or before its
    pp_seq_no are dropped.
    """
    if till_3pc_key is None:
        self.stashedRecvdCheckpoints.clear()
        self.logger.info('{} removing all stashed checkpoints'.format(self))
        return
    for view_no in list(self.stashedRecvdCheckpoints.keys()):
        if view_no < till_3pc_key[0]:
            self.logger.info('{} removing stashed checkpoints for view {}'.format(
                self, view_no))
            del self.stashedRecvdCheckpoints[view_no]
        elif view_no == till_3pc_key[0]:
            for (s, e) in list(self.stashedRecvdCheckpoints[view_no].keys()):
                if e <= till_3pc_key[1]:
                    self.logger.info('{} removing stashed checkpoints: '
                                     'viewNo={}, seqNoStart={}, seqNoEnd={}'.format(
                                         self, view_no, s, e))
                    del self.stashedRecvdCheckpoints[view_no][(s, e)]
            if len(self.stashedRecvdCheckpoints[view_no]) == 0:
                del self.stashedRecvdCheckpoints[view_no]
Remove stashed received checkpoints up to till_3pc_key if provided otherwise remove all stashed received checkpoints
300
22
228,278
def checkPortAvailable(ha):
    """Check that the (host, port) pair *ha* can be bound for both UDP
    and TCP.

    Raises PortNotAvailable when a bind fails because the address is in
    use or unavailable; other OS errors are re-raised unchanged.
    """
    # Not sure why OS would allow binding to one type and not other.
    # Checking for port available for TCP and UDP.
    sockTypes = (socket.SOCK_DGRAM, socket.SOCK_STREAM)
    for typ in sockTypes:
        sock = socket.socket(socket.AF_INET, typ)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(ha)
            if typ == socket.SOCK_STREAM:
                # SO_LINGER with zero timeout forces an immediate reset
                # on close so the probe does not hold the port in TIME_WAIT
                l_onoff = 1
                l_linger = 0
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                                struct.pack('ii', l_onoff, l_linger))
        except OSError as exc:
            if exc.errno in [errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             WS_SOCKET_BIND_ERROR_ALREADY_IN_USE,
                             WS_SOCKET_BIND_ERROR_NOT_AVAILABLE]:
                raise PortNotAvailable(ha)
            else:
                raise exc
        finally:
            sock.close()
Checks whether the given port is available
261
8
228,279
def evenCompare(a: str, b: str) -> bool:
    """Deterministic comparator with a more even distribution than plain
    lexicographic order: compares SHA-256 digests of the UTF-8
    encodings of *a* and *b*."""
    digest_a = crypto_hash_sha256(a.encode('utf-8'))
    digest_b = crypto_hash_sha256(b.encode('utf-8'))
    return digest_a < digest_b
A deterministic but more evenly distributed comparator than simple alphabetical . Useful when comparing consecutive strings and an even distribution is needed . Provides an even chance of returning true as often as false
67
37
228,280
def center_band(close_data, high_data, low_data, period):
    """Center Band: SMA of the typical price over *period*."""
    return sma(typical_price(close_data, high_data, low_data), period)
Center Band .
53
3
228,281
def simple_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) # Mean of Empty Slice RuntimeWarning doesn't affect output so it is # supressed with warnings . catch_warnings ( ) : warnings . simplefilter ( "ignore" , category = RuntimeWarning ) sma = [ np . mean ( data [ idx - ( period - 1 ) : idx + 1 ] ) for idx in range ( 0 , len ( data ) ) ] sma = fill_for_noncomputable_vals ( data , sma ) return sma
Simple Moving Average .
130
4
228,282
def average_true_range_percent(close_data, period):
    """Average True Range Percent: ATR expressed as a percentage of the
    closing price."""
    catch_errors.check_for_period_error(close_data, period)
    return (atr(close_data, period) / np.array(close_data)) * 100
Average True Range Percent .
64
5
228,283
def on_balance_volume(close_data, volume):
    """On Balance Volume: cumulative volume, added on up-closes and
    subtracted on down-closes (index 0 is seeded with 1)."""
    catch_errors.check_for_input_len_diff(close_data, volume)
    obv = np.zeros(len(volume))
    obv[0] = 1
    for idx in range(1, len(obv)):
        prev = obv[idx - 1]
        # three-way elif chain kept on purpose: NaN closes match no
        # branch and leave the entry at its zero default
        if close_data[idx] > close_data[idx - 1]:
            obv[idx] = prev + volume[idx]
        elif close_data[idx] < close_data[idx - 1]:
            obv[idx] = prev - volume[idx]
        elif close_data[idx] == close_data[idx - 1]:
            obv[idx] = prev
    return obv
On Balance Volume .
188
4
228,284
def rate_of_change(data, period):
    """Rate of Change: percent change of each value versus the value
    period-1 samples earlier; leading values are filled by
    fill_for_noncomputable_vals."""
    catch_errors.check_for_period_error(data, period)
    rocs = []
    for idx in range(period - 1, len(data)):
        base = data[idx - (period - 1)]
        rocs.append(((data[idx] - base) / base) * 100)
    return fill_for_noncomputable_vals(data, rocs)
Rate of Change .
104
4
228,285
def average_true_range(close_data, period):
    """Average True Range: smoothed moving average of the true range,
    seeded with the raw true-range values for the first period-1
    samples."""
    tr = true_range(close_data, period)
    result = smoothed_moving_average(tr, period)
    result[0:period - 1] = tr[0:period - 1]
    return result
Average True Range .
61
4
228,286
def relative_strength_index(data, period):
    """Relative Strength Index using Wilder's smoothing.

    Seeds average gain/loss with simple means over the first *period*
    changes, then updates them recursively; RSI is 100 - 100/(1+RS),
    with 100 when the average loss is zero. Non-computable leading
    values are filled by fill_for_noncomputable_vals.
    """
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    # per-sample price changes
    changes = [data_tup[1] - data_tup[0]
               for data_tup in zip(data[::1], data[1::1])]
    # split changes into gains (non-negative moves) and losses (abs of
    # negative moves)
    filtered_gain = [val < 0 for val in changes]
    gains = [0 if filtered_gain[idx] is True else changes[idx]
             for idx in range(0, len(filtered_gain))]
    filtered_loss = [val > 0 for val in changes]
    losses = [0 if filtered_loss[idx] is True else abs(changes[idx])
              for idx in range(0, len(filtered_loss))]
    avg_gain = np.mean(gains[:period])
    avg_loss = np.mean(losses[:period])
    rsi = []
    if avg_loss == 0:
        rsi.append(100)
    else:
        rs = avg_gain / avg_loss
        rsi.append(100 - (100 / (1 + rs)))
    for idx in range(1, len(data) - period):
        # Wilder's smoothing of the running averages
        avg_gain = ((avg_gain * (period - 1) +
                     gains[idx + (period - 1)]) / period)
        avg_loss = ((avg_loss * (period - 1) +
                     losses[idx + (period - 1)]) / period)
        if avg_loss == 0:
            rsi.append(100)
        else:
            rs = avg_gain / avg_loss
            rsi.append(100 - (100 / (1 + rs)))
    rsi = fill_for_noncomputable_vals(data, rsi)
    return rsi
Relative Strength Index .
393
5
228,287
def vertical_horizontal_filter(data, period):
    """Vertical Horizontal Filter: |window max - window min| divided by
    the sum of absolute successive changes within the window.

    NOTE(review): the inner range starts at 0, so the first difference
    term compares the window's first element with its last (index -1).
    """
    catch_errors.check_for_period_error(data, period)
    vhf = [abs(np.max(data[idx + 1 - period:idx + 1]) -
               np.min(data[idx + 1 - period:idx + 1])) /
           sum([abs(data[idx + 1 - period:idx + 1][i] -
                    data[idx + 1 - period:idx + 1][i - 1])
                for i in range(0, len(data[idx + 1 - period:idx + 1]))])
           for idx in range(period - 1, len(data))]
    vhf = fill_for_noncomputable_vals(data, vhf)
    return vhf
Vertical Horizontal Filter .
187
6
228,288
def buying_pressure(close_data, low_data):
    """Buying Pressure: close minus min(current low, previous close),
    starting from the second sample; the first value is filled by
    fill_for_noncomputable_vals."""
    catch_errors.check_for_input_len_diff(close_data, low_data)
    bp = []
    for idx in range(1, len(close_data)):
        bp.append(close_data[idx] - np.min([low_data[idx], close_data[idx - 1]]))
    return fill_for_noncomputable_vals(close_data, bp)
Buying Pressure .
111
4
228,289
def ultimate_oscillator(close_data, low_data):
    """Ultimate Oscillator: weighted blend (4:2:1) of the 7-, 14- and
    28-period averages, scaled to 0-100."""
    weighted = (4 * average_7(close_data, low_data) +
                2 * average_14(close_data, low_data) +
                average_28(close_data, low_data))
    return 100 * (weighted / 7)
Ultimate Oscillator .
87
5
228,290
def aroon_up(data, period):
    """Aroon Up: 100 * (period - bars since the window high) / period,
    with non-computable leading values filled in."""
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    a_up = []
    for idx in range(period - 1, len(data)):
        window = data[idx + 1 - period:idx + 1]
        since_high = list(reversed(window)).index(np.max(window))
        a_up.append(((period - since_high) / float(period)) * 100)
    return fill_for_noncomputable_vals(data, a_up)
Aroon Up .
135
5
228,291
def aroon_down(data, period):
    """Aroon Down: 100 * (period - bars since the window low) / period,
    with non-computable leading values filled in."""
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    a_down = []
    for idx in range(period - 1, len(data)):
        window = data[idx + 1 - period:idx + 1]
        since_low = list(reversed(window)).index(np.min(window))
        a_down.append(((period - since_low) / float(period)) * 100)
    return fill_for_noncomputable_vals(data, a_down)
Aroon Down .
135
5
228,292
def upper_price_channel(data, period, upper_percent):
    """Upper Price Channel: the EMA shifted up by *upper_percent*
    percent."""
    catch_errors.check_for_period_error(data, period)
    factor = 1 + float(upper_percent) / 100
    return [val * factor for val in ema(data, period)]
Upper Price Channel .
71
5
228,293
def lower_price_channel(data, period, lower_percent):
    """Lower Price Channel: the EMA shifted down by *lower_percent*
    percent."""
    catch_errors.check_for_period_error(data, period)
    factor = 1 - float(lower_percent) / 100
    return [val * factor for val in ema(data, period)]
Lower Price Channel .
71
4
228,294
def exponential_moving_average(data, period):
    """Exponential Moving Average over a sliding window of *period*;
    non-computable leading values are filled in."""
    catch_errors.check_for_period_error(data, period)
    emas = []
    for idx in range(period - 1, len(data)):
        window = data[idx - period + 1:idx + 1]
        emas.append(exponential_moving_average_helper(window, period))
    return fill_for_noncomputable_vals(data, emas)
Exponential Moving Average .
95
5
228,295
def commodity_channel_index(close_data, high_data, low_data, period):
    """Commodity Channel Index: deviation of typical price from its SMA,
    scaled by 0.015 times the mean absolute deviation."""
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data)
    catch_errors.check_for_period_error(close_data, period)
    tp = typical_price(close_data, high_data, low_data)
    mean_abs_dev = np.mean(np.absolute(tp - np.mean(tp)))
    return (tp - sma(tp, period)) / (0.015 * mean_abs_dev)
Commodity Channel Index .
133
6
228,296
def williams_percent_r(close_data):
    """Williams %R over the whole series: scales each close between the
    series high (0) and the series low (-100)."""
    highest_high = np.max(close_data)
    lowest_low = np.min(close_data)
    span = highest_high - lowest_low
    return [((highest_high - close) / span) * -100 for close in close_data]
Williams %R .
72
4
228,297
def moving_average_convergence_divergence(data, short_period, long_period):
    """MACD: short-period EMA minus long-period EMA."""
    catch_errors.check_for_period_error(data, short_period)
    catch_errors.check_for_period_error(data, long_period)
    return ema(data, short_period) - ema(data, long_period)
Moving Average Convergence Divergence .
85
7
228,298
def money_flow_index(close_data, high_data, low_data, volume, period):
    """Money Flow Index: 100 - 100/(1 + positive/negative money-flow
    ratio) over rolling sums of *period* samples.

    NOTE(review): *flow* starts at index 1 of the typical price, so the
    positive/negative split pairs each flow flag with the money-flow
    value one sample earlier.
    """
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data, volume)
    catch_errors.check_for_period_error(close_data, period)
    mf = money_flow(close_data, high_data, low_data, volume)
    tp = typical_price(close_data, high_data, low_data)
    # flow[i] is True when typical price rose versus the previous bar
    flow = [tp[idx] > tp[idx - 1] for idx in range(1, len(tp))]
    pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
    nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]
    # rolling period sums of positive and negative flows
    pmf = [sum(pf[idx + 1 - period:idx + 1])
           for idx in range(period - 1, len(pf))]
    nmf = [sum(nf[idx + 1 - period:idx + 1])
           for idx in range(period - 1, len(nf))]
    # Dividing by 0 is not an issue, it turns the value into NaN which we would
    # want in that case
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        money_ratio = np.array(pmf) / np.array(nmf)
    mfi = 100 - (100 / (1 + money_ratio))
    mfi = fill_for_noncomputable_vals(close_data, mfi)
    return mfi
Money Flow Index .
396
4
228,299
def typical_price(close_data, high_data, low_data):
    """Typical Price: (high + low + close) / 3 for each bar, returned as
    a numpy array."""
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data)
    prices = [(h + l + c) / 3
              for h, l, c in zip(high_data, low_data, close_data)]
    return np.array(prices)
Typical Price .
99
4