Columns: idx (int64, 0 – 63k); question (string, 61 – 4.03k chars); target (string, 6 – 1.23k chars)
40,300
def blacklistNode ( self , nodeName : str , reason : str = None , code : int = None ) : msg = "{} blacklisting node {}" . format ( self , nodeName ) if reason : msg += " for reason {}" . format ( reason ) if code : msg += " for code {}" . format ( code ) logger . display ( msg ) self . nodeBlacklister . blacklist ( nodeName )
Add the node specified by nodeName to this node's blacklist.
40,301
def logstats ( self ) : lines = [ "node {} current stats" . format ( self ) , "--------------------------------------------------------" , "node inbox size : {}" . format ( len ( self . nodeInBox ) ) , "client inbox size : {}" . format ( len ( self . clientInBox ) ) , "age (seconds) : {}" . format ( time . time ( ) - self . created ) , "next check for reconnect: {}" . format ( time . perf_counter ( ) - self . nodestack . nextCheck ) , "node connections : {}" . format ( self . nodestack . conns ) , "f : {}" . format ( self . f ) , "master instance : {}" . format ( self . instances . masterId ) , "replicas : {}" . format ( len ( self . replicas ) ) , "view no : {}" . format ( self . viewNo ) , "rank : {}" . format ( self . rank ) , "msgs to replicas : {}" . format ( self . replicas . sum_inbox_len ) , "msgs to view changer : {}" . format ( len ( self . msgsToViewChanger ) ) , "action queue : {} {}" . format ( len ( self . actionQueue ) , id ( self . actionQueue ) ) , "action queue stash : {} {}" . format ( len ( self . aqStash ) , id ( self . aqStash ) ) , ] logger . info ( "\n" . join ( lines ) , extra = { "cli" : False } )
Print the node's current statistics to the log.
40,302
def logNodeInfo ( self ) : self . nodeInfo [ 'data' ] = self . collectNodeInfo ( ) with closing ( open ( os . path . join ( self . ledger_dir , 'node_info' ) , 'w' ) ) as logNodeInfoFile : logNodeInfoFile . write ( json . dumps ( self . nodeInfo [ 'data' ] ) )
Write the node's info to the node_info file for the REST backend to read.
40,303
def get_collection_sizes ( obj , collections : Optional [ Tuple ] = None , get_only_non_empty = False ) : from pympler import asizeof collections = collections or ( list , dict , set , deque , abc . Sized ) if not isinstance ( collections , tuple ) : collections = tuple ( collections ) result = [ ] for attr_name in dir ( obj ) : attr = getattr ( obj , attr_name ) if isinstance ( attr , collections ) and ( not get_only_non_empty or len ( attr ) > 0 ) : result . append ( ( attr_name , len ( attr ) , asizeof . asizeof ( attr , detail = 1 ) ) ) return result
Iterate over the collection attributes of the given object and report each one's byte size and number of items.
40,304
def returns_true_or_raises ( f ) : @ functools . wraps ( f ) def wrapped ( * args , ** kwargs ) : ret = f ( * args , ** kwargs ) if ret is not True : raise RuntimeError ( "Unexpected return value %r" % ret ) return True return wrapped
A safety net: ensures the wrapped function returns True, raising RuntimeError otherwise.
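A minimal usage sketch of the decorator above; validate_config is a hypothetical function:

@returns_true_or_raises
def validate_config(cfg):
    # Hypothetical check that returns a plain bool.
    return bool(cfg.get("name"))

validate_config({"name": "alpha"})  # -> True
validate_config({})                 # raises RuntimeError("Unexpected return value False")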
40,305
def backupIds ( self ) -> Sequence [ int ] : return [ id for id in self . started . keys ( ) if id != 0 ]
Return the ids of replicas that don't belong to the master protocol instance.
40,306
def _hasViewChangeQuorum ( self ) : num_of_ready_nodes = len ( self . _view_change_done ) diff = self . quorum - num_of_ready_nodes if diff > 0 : logger . info ( '{} needs {} ViewChangeDone messages' . format ( self , diff ) ) return False logger . info ( "{} got view change quorum ({} >= {})" . format ( self . name , num_of_ready_nodes , self . quorum ) ) return True
Check whether n - f nodes have completed the view change and whether one of them is the next primary.
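For intuition, a small sketch of the quorum arithmetic this check relies on; the f = (n - 1) // 3 bound is the standard BFT assumption, not a value taken from this codebase:

# Assumed BFT arithmetic: with n nodes, tolerate f = (n - 1) // 3 faults;
# a view-change quorum then needs n - f ViewChangeDone messages.
def view_change_quorum(n: int) -> int:
    f = (n - 1) // 3
    return n - f

assert view_change_quorum(4) == 3  # 4 nodes, f = 1
assert view_change_quorum(7) == 5  # 7 nodes, f = 2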
40,307
def process_instance_change_msg ( self , instChg : InstanceChange , frm : str ) -> None : if frm not in self . provider . connected_nodes ( ) : self . provider . discard ( instChg , "received instance change request: {} from {} " "which is not in connected list: {}" . format ( instChg , frm , self . provider . connected_nodes ( ) ) , logger . info ) return logger . info ( "{} received instance change request: {} from {}" . format ( self , instChg , frm ) ) if not isinstance ( instChg . viewNo , int ) : self . provider . discard ( instChg , "{}field view_no has incorrect type: {}" . format ( VIEW_CHANGE_PREFIX , type ( instChg . viewNo ) ) ) elif instChg . viewNo <= self . view_no : self . provider . discard ( instChg , "Received instance change request with view no {} " "which is not more than its view no {}" . format ( instChg . viewNo , self . view_no ) , logger . info ) else : self . _on_verified_instance_change_msg ( instChg , frm ) if self . instance_changes . has_inst_chng_from ( instChg . viewNo , self . name ) : logger . info ( "{} received instance change message {} but has already " "sent an instance change message" . format ( self , instChg ) ) elif not self . provider . is_master_degraded ( ) : logger . info ( "{} received instance change message {} but did not " "find the master to be slow" . format ( self , instChg ) ) else : logger . display ( "{}{} found master degraded after receiving instance change" " message from {}" . format ( VIEW_CHANGE_PREFIX , self , frm ) ) self . sendInstanceChange ( instChg . viewNo )
Validate and process an instance change request.
40,308
def process_vchd_msg ( self , msg : ViewChangeDone , sender : str ) -> bool : logger . info ( "{}'s primary selector started processing of ViewChangeDone msg from {} : {}" . format ( self . name , sender , msg ) ) view_no = msg . viewNo if self . view_no != view_no : self . provider . discard ( msg , '{} got Primary from {} for view no {} ' 'whereas current view no is {}' . format ( self , sender , view_no , self . view_no ) , logMethod = logger . info ) return False new_primary_name = msg . name if new_primary_name == self . previous_master_primary : self . provider . discard ( msg , '{} got Primary from {} for {} who was primary of ' 'master in previous view too' . format ( self , sender , new_primary_name ) , logMethod = logger . info ) return False self . _on_verified_view_change_done_msg ( msg , sender ) if self . provider . has_primary ( ) : self . provider . discard ( msg , "it already decided primary which is {}" . format ( self . provider . current_primary_name ( ) ) , logger . info ) return False self . _start_selection ( )
Process ViewChangeDone messages. Once n - f messages have been received, decide on a primary for the specific replica.
40,309
def sendInstanceChange ( self , view_no : int , suspicion = Suspicions . PRIMARY_DEGRADED ) : canSendInsChange , cooldown = self . insChngThrottler . acquire ( ) if canSendInsChange : logger . info ( "{}{} sending an instance change with view_no {}" " since {}" . format ( VIEW_CHANGE_PREFIX , self , view_no , suspicion . reason ) ) logger . info ( "{}{} metrics for monitor: {}" . format ( MONITORING_PREFIX , self , self . provider . pretty_metrics ( ) ) ) msg = self . _create_instance_change_msg ( view_no , suspicion . code ) self . send ( msg ) self . _on_verified_instance_change_msg ( msg , self . name ) else : logger . info ( "{} cannot send instance change sooner then {} seconds" . format ( self , cooldown ) )
Broadcast an instance change request to all the remaining nodes
40,310
def _canViewChange ( self , proposedViewNo : int ) -> ( bool , str ) : msg = None quorum = self . quorums . view_change . value if not self . instance_changes . has_quorum ( proposedViewNo , quorum ) : msg = '{} has no quorum for view {}' . format ( self , proposedViewNo ) elif not proposedViewNo > self . view_no : msg = '{} is in higher view more than {}' . format ( self , proposedViewNo ) return not bool ( msg ) , msg
Return whether there's a quorum for a view change to the proposed view number and whether the current view is less than the proposed view.
40,311
def start_view_change ( self , proposed_view_no : int , continue_vc = False ) : if self . pre_vc_strategy and ( not continue_vc ) : self . pre_view_change_in_progress = True self . pre_vc_strategy . prepare_view_change ( proposed_view_no ) return elif self . pre_vc_strategy : self . pre_vc_strategy . on_strategy_complete ( ) self . previous_view_no = self . view_no self . view_no = proposed_view_no self . pre_view_change_in_progress = False self . view_change_in_progress = True self . previous_master_primary = self . provider . current_primary_name ( ) self . set_defaults ( ) self . _process_vcd_for_future_view ( ) self . initInsChngThrottling ( ) self . provider . notify_view_change_start ( ) self . provider . start_catchup ( )
Trigger the view change process.
40,312
def _verify_primary ( self , new_primary , ledger_info ) : expected_primary = self . provider . next_primary_name ( ) if new_primary != expected_primary : logger . error ( "{}{} expected next primary to be {}, but majority " "declared {} instead for view {}" . format ( PRIMARY_SELECTION_PREFIX , self . name , expected_primary , new_primary , self . view_no ) ) return False self . _primary_verified = True return True
Called when a sufficient number of ViewChangeDone messages have been received; takes the steps needed to switch to the new primary.
40,313
def _send_view_change_done_message ( self ) : new_primary_name = self . provider . next_primary_name ( ) ledger_summary = self . provider . ledger_summary ( ) message = ViewChangeDone ( self . view_no , new_primary_name , ledger_summary ) logger . info ( "{} is sending ViewChangeDone msg to all : {}" . format ( self , message ) ) self . send ( message ) self . _on_verified_view_change_done_msg ( message , self . name )
Send a ViewChangeDone message to the other protocol participants.
40,314
def get_msgs_for_lagged_nodes ( self ) -> List [ ViewChangeDone ] : messages = [ ] accepted = self . _accepted_view_change_done_message if accepted : messages . append ( ViewChangeDone ( self . last_completed_view_no , * accepted ) ) elif self . name in self . _view_change_done : messages . append ( ViewChangeDone ( self . last_completed_view_no , * self . _view_change_done [ self . name ] ) ) else : logger . info ( '{} has no ViewChangeDone message to send for view {}' . format ( self , self . view_no ) ) return messages
Return the last accepted ViewChangeDone message. If no view change has happened, return a ViewChangeDone with view no 0 for a newly joined node.
40,315
def validate ( self , val ) : if self . nullable and val is None : return type_er = self . __type_check ( val ) if type_er : return type_er spec_err = self . _specific_validation ( val ) if spec_err : return spec_err
Perform basic validation of the field value and then pass it on for specific validation.
40,316
def sign ( self , msg : Dict ) -> Dict : ser = serialize_msg_for_signing ( msg , topLevelKeysToIgnore = [ f . SIG . nm ] ) bsig = self . naclSigner . signature ( ser ) sig = base58 . b58encode ( bsig ) . decode ( "utf-8" ) return sig
Return a signature for the given message.
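A self-contained sketch of the same flow using PyNaCl and base58 directly; the JSON serialization step here is a simplifying assumption standing in for serialize_msg_for_signing:

import json
import base58
from nacl.signing import SigningKey

signer = SigningKey.generate()
msg = {"op": "NODE", "data": "..."}
ser = json.dumps(msg, sort_keys=True).encode("utf-8")  # simplified serialization
bsig = signer.sign(ser).signature                      # 64-byte Ed25519 signature
sig = base58.b58encode(bsig).decode("utf-8")           # base58 string, as returned above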
40,317
def lastPrePrepareSeqNo ( self , n ) : if n > self . _lastPrePrepareSeqNo : self . _lastPrePrepareSeqNo = n else : self . logger . debug ( '{} cannot set lastPrePrepareSeqNo to {} as its ' 'already {}' . format ( self , n , self . _lastPrePrepareSeqNo ) )
Set _lastPrePrepareSeqNo only to values greater than its previous value; otherwise leave it unchanged. To forcefully override it (as in the case of a revert), set self._lastPrePrepareSeqNo directly.
40,318
def primaryName ( self , value : Optional [ str ] ) -> None : if value is not None : self . warned_no_primary = False self . primaryNames [ self . viewNo ] = value self . compact_primary_names ( ) if value != self . _primaryName : self . _primaryName = value self . logger . info ( "{} setting primaryName for view no {} to: {}" . format ( self , self . viewNo , value ) ) if value is None : return self . _gc_before_new_view ( ) if self . __should_reset_watermarks_before_new_view ( ) : self . _reset_watermarks_before_new_view ( )
Set the value of primaryName for the current view.
40,319
def get_lowest_probable_prepared_certificate_in_view ( self , view_no ) -> Optional [ int ] : seq_no_pp = SortedList ( ) seq_no_p = set ( ) for ( v , p ) in self . prePreparesPendingPrevPP : if v == view_no : seq_no_pp . add ( p ) if v > view_no : break for ( v , p ) , pr in self . preparesWaitingForPrePrepare . items ( ) : if v == view_no and len ( pr ) >= self . quorums . prepare . value : seq_no_p . add ( p ) for n in seq_no_pp : if n in seq_no_p : return n return None
Return the lowest pp_seq_no in the view for which a prepared certificate is probable, choosing from unprocessed PRE-PREPAREs and PREPAREs.
40,320
def is_primary_in_view ( self , viewNo : int ) -> Optional [ bool ] : if viewNo not in self . primaryNames : return False return self . primaryNames [ viewNo ] == self . name
Return whether this replica was primary in the given view
40,321
def processReqDuringBatch ( self , req : Request , cons_time : int ) : if self . isMaster : self . node . doDynamicValidation ( req ) self . node . applyReq ( req , cons_time )
Perform dynamic validation and apply the request. Any error raised during validation is propagated.
40,322
def serviceQueues ( self , limit = None ) : r = self . dequeue_pre_prepares ( ) r += self . inBoxRouter . handleAllSync ( self . inBox , limit ) r += self . send_3pc_batch ( ) r += self . _serviceActions ( ) return r
Process up to limit messages in the inBox.
40,323
def tryPrepare ( self , pp : PrePrepare ) : rv , msg = self . canPrepare ( pp ) if rv : self . doPrepare ( pp ) else : self . logger . debug ( "{} cannot send PREPARE since {}" . format ( self , msg ) )
Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase.
40,324
def processPrepare ( self , prepare : Prepare , sender : str ) -> None : key = ( prepare . viewNo , prepare . ppSeqNo ) self . logger . debug ( "{} received PREPARE{} from {}" . format ( self , key , sender ) ) try : if self . validatePrepare ( prepare , sender ) : self . addToPrepares ( prepare , sender ) self . stats . inc ( TPCStat . PrepareRcvd ) self . logger . debug ( "{} processed incoming PREPARE {}" . format ( self , ( prepare . viewNo , prepare . ppSeqNo ) ) ) else : self . logger . trace ( "{} cannot process incoming PREPARE" . format ( self ) ) except SuspiciousNode as ex : self . report_suspicious_node ( ex )
Validate and process the specified PREPARE. If validation succeeds, create a COMMIT and broadcast it.
40,325
def processCommit ( self , commit : Commit , sender : str ) -> None : self . logger . debug ( "{} received COMMIT{} from {}" . format ( self , ( commit . viewNo , commit . ppSeqNo ) , sender ) ) if self . validateCommit ( commit , sender ) : self . stats . inc ( TPCStat . CommitRcvd ) self . addToCommits ( commit , sender ) self . logger . debug ( "{} processed incoming COMMIT{}" . format ( self , ( commit . viewNo , commit . ppSeqNo ) ) )
Validate and process the specified COMMIT. If validation succeeds, return the message to the node.
40,326
def tryCommit ( self , prepare : Prepare ) : rv , reason = self . canCommit ( prepare ) if rv : self . doCommit ( prepare ) else : self . logger . debug ( "{} cannot send COMMIT since {}" . format ( self , reason ) )
Try to commit if the Prepare message is ready to be passed into the commit phase.
40,327
def tryOrder ( self , commit : Commit ) : canOrder , reason = self . canOrder ( commit ) if canOrder : self . logger . trace ( "{} returning request to node" . format ( self ) ) self . doOrder ( commit ) else : self . logger . debug ( "{} cannot return request to node: {}" . format ( self , reason ) ) return canOrder
Try to order if the Commit message is ready to be ordered.
40,328
def nonFinalisedReqs ( self , reqKeys : List [ Tuple [ str , int ] ] ) : return { key for key in reqKeys if not self . requests . is_finalised ( key ) }
Check if there are any requests which are not finalised, i.e. for which there are not enough PROPAGATEs.
40,329
def _can_process_pre_prepare ( self , pre_prepare : PrePrepare , sender : str ) -> Optional [ int ] : if not self . isMsgFromPrimary ( pre_prepare , sender ) : return PP_CHECK_NOT_FROM_PRIMARY if ( pre_prepare . viewNo , pre_prepare . ppSeqNo ) in self . prePrepares : return PP_CHECK_DUPLICATE if not self . is_pre_prepare_time_acceptable ( pre_prepare , sender ) : return PP_CHECK_WRONG_TIME if compare_3PC_keys ( ( pre_prepare . viewNo , pre_prepare . ppSeqNo ) , self . __last_pp_3pc ) > 0 : return PP_CHECK_OLD if self . nonFinalisedReqs ( pre_prepare . reqIdr ) : return PP_CHECK_REQUEST_NOT_FINALIZED if not self . __is_next_pre_prepare ( pre_prepare . viewNo , pre_prepare . ppSeqNo ) : return PP_CHECK_NOT_NEXT if f . POOL_STATE_ROOT_HASH . nm in pre_prepare and pre_prepare . poolStateRootHash != self . stateRootHash ( POOL_LEDGER_ID ) : return PP_CHECK_INCORRECT_POOL_STATE_ROOT status = self . _bls_bft_replica . validate_pre_prepare ( pre_prepare , sender ) if status is not None : return status return None
Decide whether this replica is eligible to process a PRE-PREPARE.
40,330
def addToPrePrepares ( self , pp : PrePrepare ) -> None : key = ( pp . viewNo , pp . ppSeqNo ) self . prePrepares [ key ] = pp self . lastPrePrepareSeqNo = pp . ppSeqNo self . last_accepted_pre_prepare_time = pp . ppTime self . dequeue_prepares ( * key ) self . dequeue_commits ( * key ) self . stats . inc ( TPCStat . PrePrepareRcvd ) self . tryPrepare ( pp )
Add the specified PRE-PREPARE to this replica's list of received PRE-PREPAREs and try sending a PREPARE.
40,331
def canPrepare ( self , ppReq ) -> ( bool , str ) : if self . has_sent_prepare ( ppReq ) : return False , 'has already sent PREPARE for {}' . format ( ppReq ) return True , ''
Return whether the batch of requests in the PRE-PREPARE can proceed to the PREPARE step.
40,332
def validatePrepare ( self , prepare : Prepare , sender : str ) -> bool : key = ( prepare . viewNo , prepare . ppSeqNo ) primaryStatus = self . isPrimaryForMsg ( prepare ) ppReq = self . getPrePrepare ( * key ) if self . isMsgFromPrimary ( prepare , sender ) : raise SuspiciousNode ( sender , Suspicions . PR_FRM_PRIMARY , prepare ) if primaryStatus is False : if self . prepares . hasPrepareFrom ( prepare , sender ) : raise SuspiciousNode ( sender , Suspicions . DUPLICATE_PR_SENT , prepare ) if not ppReq : self . enqueue_prepare ( prepare , sender ) self . _setup_last_ordered_for_non_master ( ) return False if primaryStatus is True : if self . prepares . hasPrepareFrom ( prepare , sender ) : raise SuspiciousNode ( sender , Suspicions . DUPLICATE_PR_SENT , prepare ) elif not ppReq : raise SuspiciousNode ( sender , Suspicions . UNKNOWN_PR_SENT , prepare ) if primaryStatus is None and not ppReq : self . enqueue_prepare ( prepare , sender ) self . _setup_last_ordered_for_non_master ( ) return False if prepare . digest != ppReq . digest : raise SuspiciousNode ( sender , Suspicions . PR_DIGEST_WRONG , prepare ) elif prepare . stateRootHash != ppReq . stateRootHash : raise SuspiciousNode ( sender , Suspicions . PR_STATE_WRONG , prepare ) elif prepare . txnRootHash != ppReq . txnRootHash : raise SuspiciousNode ( sender , Suspicions . PR_TXN_WRONG , prepare ) elif prepare . auditTxnRootHash != ppReq . auditTxnRootHash : raise SuspiciousNode ( sender , Suspicions . PR_AUDIT_TXN_ROOT_HASH_WRONG , prepare ) try : self . execute_hook ( ReplicaHooks . VALIDATE_PR , prepare , ppReq ) except Exception as ex : self . logger . warning ( '{} encountered exception in replica ' 'hook {} : {}' . format ( self , ReplicaHooks . VALIDATE_PR , ex ) ) raise SuspiciousNode ( sender , Suspicions . PR_PLUGIN_EXCEPTION , prepare ) self . _bls_bft_replica . validate_prepare ( prepare , sender ) return True
Return whether the specified PREPARE is valid.
40,333
def addToPrepares ( self , prepare : Prepare , sender : str ) : self . _bls_bft_replica . process_prepare ( prepare , sender ) self . prepares . addVote ( prepare , sender ) self . dequeue_commits ( prepare . viewNo , prepare . ppSeqNo ) self . tryCommit ( prepare )
Add the specified PREPARE to this replica's list of received PREPAREs and try sending a COMMIT.
40,334
def canCommit ( self , prepare : Prepare ) -> ( bool , str ) : quorum = self . quorums . prepare . value if not self . prepares . hasQuorum ( prepare , quorum ) : return False , 'does not have prepare quorum for {}' . format ( prepare ) if self . hasCommitted ( prepare ) : return False , 'has already sent COMMIT for {}' . format ( prepare ) return True , ''
Return whether the specified PREPARE can proceed to the Commit step.
40,335
def validateCommit ( self , commit : Commit , sender : str ) -> bool : key = ( commit . viewNo , commit . ppSeqNo ) if not self . has_prepared ( key ) : self . enqueue_commit ( commit , sender ) return False if self . commits . hasCommitFrom ( commit , sender ) : raise SuspiciousNode ( sender , Suspicions . DUPLICATE_CM_SENT , commit ) pre_prepare = self . getPrePrepare ( commit . viewNo , commit . ppSeqNo ) why_not = self . _bls_bft_replica . validate_commit ( commit , sender , pre_prepare ) if why_not == BlsBftReplica . CM_BLS_SIG_WRONG : self . logger . warning ( "{} discard Commit message from " "{}:{}" . format ( self , sender , commit ) ) raise SuspiciousNode ( sender , Suspicions . CM_BLS_SIG_WRONG , commit ) elif why_not is not None : self . logger . warning ( "Unknown error code returned for bls commit " "validation {}" . format ( why_not ) ) return True
Return whether the specified COMMIT is valid.
40,336
def addToCommits ( self , commit : Commit , sender : str ) : self . _bls_bft_replica . process_commit ( commit , sender ) self . commits . addVote ( commit , sender ) self . tryOrder ( commit )
Add the specified COMMIT to this replica's list of received commit requests.
40,337
def canOrder ( self , commit : Commit ) -> Tuple [ bool , Optional [ str ] ] : quorum = self . quorums . commit . value if not self . commits . hasQuorum ( commit , quorum ) : return False , "no quorum ({}): {} commits where f is {}" . format ( quorum , commit , self . f ) key = ( commit . viewNo , commit . ppSeqNo ) if self . has_already_ordered ( * key ) : return False , "already ordered" if commit . ppSeqNo > 1 and not self . all_prev_ordered ( commit ) : viewNo , ppSeqNo = commit . viewNo , commit . ppSeqNo if viewNo not in self . stashed_out_of_order_commits : self . stashed_out_of_order_commits [ viewNo ] = { } self . stashed_out_of_order_commits [ viewNo ] [ ppSeqNo ] = commit self . startRepeating ( self . process_stashed_out_of_order_commits , self . config . PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL ) return False , "stashing {} since out of order" . format ( commit ) return True , None
Return whether the specified commit request can be returned to the node.
40,338
def all_prev_ordered ( self , commit : Commit ) : viewNo , ppSeqNo = commit . viewNo , commit . ppSeqNo if self . last_ordered_3pc == ( viewNo , ppSeqNo - 1 ) : return True toCheck = set ( ) toCheck . update ( set ( self . sentPrePrepares . keys ( ) ) ) toCheck . update ( set ( self . prePrepares . keys ( ) ) ) toCheck . update ( set ( self . prepares . keys ( ) ) ) toCheck . update ( set ( self . commits . keys ( ) ) ) for ( v , p ) in toCheck : if v < viewNo and ( v , p ) not in self . ordered : return False if v == viewNo and p < ppSeqNo and ( v , p ) not in self . ordered : return False return True
Return True if all previous COMMITs have been ordered
40,339
def process_checkpoint ( self , msg : Checkpoint , sender : str ) -> bool : self . logger . info ( '{} processing checkpoint {} from {}' . format ( self , msg , sender ) ) result , reason = self . validator . validate_checkpoint_msg ( msg ) if result == DISCARD : self . discard ( msg , "{} discard message {} from {} " "with the reason: {}" . format ( self , msg , sender , reason ) , self . logger . trace ) elif result == PROCESS : self . _do_process_checkpoint ( msg , sender ) else : self . logger . debug ( "{} stashing checkpoint message {} with " "the reason: {}" . format ( self , msg , reason ) ) self . stasher . stash ( ( msg , sender ) , result ) return False return True
Process checkpoint messages
40,340
def _process_stashed_pre_prepare_for_time_if_possible ( self , key : Tuple [ int , int ] ) : self . logger . debug ( '{} going to process stashed PRE-PREPAREs with ' 'incorrect times' . format ( self ) ) q = self . quorums . f if len ( self . preparesWaitingForPrePrepare [ key ] ) > q : times = [ pr . ppTime for ( pr , _ ) in self . preparesWaitingForPrePrepare [ key ] ] most_common_time , freq = mostCommonElement ( times ) if self . quorums . timestamp . is_reached ( freq ) : self . logger . debug ( '{} found sufficient PREPAREs for the ' 'PRE-PREPARE{}' . format ( self , key ) ) stashed_pp = self . pre_prepares_stashed_for_incorrect_time pp , sender , done = stashed_pp [ key ] if done : self . logger . debug ( '{} already processed PRE-PREPARE{}' . format ( self , key ) ) return True stashed_pp [ key ] = ( pp , sender , True ) self . process_three_phase_msg ( pp , sender ) return True return False
Check whether any PRE-PREPAREs stashed because their time was not acceptable can now be accepted, since enough PREPAREs have been received.
40,341
def _remove_till_caught_up_3pc ( self , last_caught_up_3PC ) : outdated_pre_prepares = { } for key , pp in self . prePrepares . items ( ) : if compare_3PC_keys ( key , last_caught_up_3PC ) >= 0 : outdated_pre_prepares [ key ] = pp for key , pp in self . sentPrePrepares . items ( ) : if compare_3PC_keys ( key , last_caught_up_3PC ) >= 0 : outdated_pre_prepares [ key ] = pp self . logger . trace ( '{} going to remove messages for {} 3PC keys' . format ( self , len ( outdated_pre_prepares ) ) ) for key , pp in outdated_pre_prepares . items ( ) : self . batches . pop ( key , None ) self . sentPrePrepares . pop ( key , None ) self . prePrepares . pop ( key , None ) self . prepares . pop ( key , None ) self . commits . pop ( key , None ) self . _discard_ordered_req_keys ( pp )
Remove any 3-phase messages up to the last caught-up 3PC key, along with any corresponding request keys.
40,342
def _remove_ordered_from_queue ( self , last_caught_up_3PC = None ) : to_remove = [ ] for i , msg in enumerate ( self . outBox ) : if isinstance ( msg , Ordered ) and ( not last_caught_up_3PC or compare_3PC_keys ( ( msg . viewNo , msg . ppSeqNo ) , last_caught_up_3PC ) >= 0 ) : to_remove . append ( i ) self . logger . trace ( '{} going to remove {} Ordered messages from outbox' . format ( self , len ( to_remove ) ) ) removed = [ ] for i in reversed ( to_remove ) : removed . insert ( 0 , self . outBox [ i ] ) del self . outBox [ i ] return removed
Remove any Ordered messages that the replica might be sending to the node which are less than or equal to last_caught_up_3PC, if it is passed; otherwise remove all Ordered messages not needed after catchup.
40,343
def _remove_stashed_checkpoints ( self , till_3pc_key = None ) : if till_3pc_key is None : self . stashedRecvdCheckpoints . clear ( ) self . logger . info ( '{} removing all stashed checkpoints' . format ( self ) ) return for view_no in list ( self . stashedRecvdCheckpoints . keys ( ) ) : if view_no < till_3pc_key [ 0 ] : self . logger . info ( '{} removing stashed checkpoints for view {}' . format ( self , view_no ) ) del self . stashedRecvdCheckpoints [ view_no ] elif view_no == till_3pc_key [ 0 ] : for ( s , e ) in list ( self . stashedRecvdCheckpoints [ view_no ] . keys ( ) ) : if e <= till_3pc_key [ 1 ] : self . logger . info ( '{} removing stashed checkpoints: ' 'viewNo={}, seqNoStart={}, seqNoEnd={}' . format ( self , view_no , s , e ) ) del self . stashedRecvdCheckpoints [ view_no ] [ ( s , e ) ] if len ( self . stashedRecvdCheckpoints [ view_no ] ) == 0 : del self . stashedRecvdCheckpoints [ view_no ]
Remove stashed received checkpoints up to till_3pc_key if provided; otherwise remove all stashed received checkpoints.
40,344
def checkPortAvailable ( ha ) : sockTypes = ( socket . SOCK_DGRAM , socket . SOCK_STREAM ) for typ in sockTypes : sock = socket . socket ( socket . AF_INET , typ ) try : sock . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 ) sock . bind ( ha ) if typ == socket . SOCK_STREAM : l_onoff = 1 l_linger = 0 sock . setsockopt ( socket . SOL_SOCKET , socket . SO_LINGER , struct . pack ( 'ii' , l_onoff , l_linger ) ) except OSError as exc : if exc . errno in [ errno . EADDRINUSE , errno . EADDRNOTAVAIL , WS_SOCKET_BIND_ERROR_ALREADY_IN_USE , WS_SOCKET_BIND_ERROR_NOT_AVAILABLE ] : raise PortNotAvailable ( ha ) else : raise exc finally : sock . close ( )
Checks whether the given port is available
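A usage sketch; PortNotAvailable is the exception raised above, and ha is a (host, port) tuple as implied by sock.bind(ha):

try:
    checkPortAvailable(("0.0.0.0", 9702))  # ha is a (host, port) tuple
    print("port 9702 is free")
except PortNotAvailable:
    print("port 9702 is already taken")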
40,345
def evenCompare ( a : str , b : str ) -> bool : ab = a . encode ( 'utf-8' ) bb = b . encode ( 'utf-8' ) ac = crypto_hash_sha256 ( ab ) bc = crypto_hash_sha256 ( bb ) return ac < bc
A deterministic comparator that is more evenly distributed than simple alphabetical comparison. Useful when comparing consecutive strings and an even distribution is needed; it returns true about as often as false.
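An equivalent standalone sketch using the standard-library hashlib in place of the libsodium binding, for illustration:

import hashlib

def even_compare(a: str, b: str) -> bool:
    # Compare SHA-256 digests instead of the raw strings, so that
    # lexically adjacent names still win/lose roughly half the time.
    return (hashlib.sha256(a.encode("utf-8")).digest()
            < hashlib.sha256(b.encode("utf-8")).digest())

even_compare("Node1", "Node2")  # deterministic, but ~50/50 across inputs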
40,346
def center_band ( close_data , high_data , low_data , period ) : tp = typical_price ( close_data , high_data , low_data ) cb = sma ( tp , period ) return cb
Center Band.
40,347
def simple_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) with warnings . catch_warnings ( ) : warnings . simplefilter ( "ignore" , category = RuntimeWarning ) sma = [ np . mean ( data [ idx - ( period - 1 ) : idx + 1 ] ) for idx in range ( 0 , len ( data ) ) ] sma = fill_for_noncomputable_vals ( data , sma ) return sma
Simple Moving Average.
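A minimal standalone sketch of the computable part of the window, assuming numpy; the library pads the first period - 1 slots via fill_for_noncomputable_vals:

import numpy as np

data = [1, 2, 3, 4, 5]
period = 3
sma = [np.mean(data[idx - period + 1: idx + 1])
       for idx in range(period - 1, len(data))]
# sma == [2.0, 3.0, 4.0]; the first period - 1 outputs are non-computable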
40,348
def average_true_range_percent ( close_data , period ) : catch_errors . check_for_period_error ( close_data , period ) atrp = ( atr ( close_data , period ) / np . array ( close_data ) ) * 100 return atrp
Average True Range Percent.
40,349
def on_balance_volume ( close_data , volume ) : catch_errors . check_for_input_len_diff ( close_data , volume ) obv = np . zeros ( len ( volume ) ) obv [ 0 ] = 1 for idx in range ( 1 , len ( obv ) ) : if close_data [ idx ] > close_data [ idx - 1 ] : obv [ idx ] = obv [ idx - 1 ] + volume [ idx ] elif close_data [ idx ] < close_data [ idx - 1 ] : obv [ idx ] = obv [ idx - 1 ] - volume [ idx ] elif close_data [ idx ] == close_data [ idx - 1 ] : obv [ idx ] = obv [ idx - 1 ] return obv
On Balance Volume.
40,350
def rate_of_change ( data , period ) : catch_errors . check_for_period_error ( data , period ) rocs = [ ( ( data [ idx ] - data [ idx - ( period - 1 ) ] ) / data [ idx - ( period - 1 ) ] ) * 100 for idx in range ( period - 1 , len ( data ) ) ] rocs = fill_for_noncomputable_vals ( data , rocs ) return rocs
Rate of Change.
40,351
def average_true_range ( close_data , period ) : tr = true_range ( close_data , period ) atr = smoothed_moving_average ( tr , period ) atr [ 0 : period - 1 ] = tr [ 0 : period - 1 ] return atr
Average True Range.
40,352
def relative_strength_index ( data , period ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) changes = [ data_tup [ 1 ] - data_tup [ 0 ] for data_tup in zip ( data [ : : 1 ] , data [ 1 : : 1 ] ) ] filtered_gain = [ val < 0 for val in changes ] gains = [ 0 if filtered_gain [ idx ] is True else changes [ idx ] for idx in range ( 0 , len ( filtered_gain ) ) ] filtered_loss = [ val > 0 for val in changes ] losses = [ 0 if filtered_loss [ idx ] is True else abs ( changes [ idx ] ) for idx in range ( 0 , len ( filtered_loss ) ) ] avg_gain = np . mean ( gains [ : period ] ) avg_loss = np . mean ( losses [ : period ] ) rsi = [ ] if avg_loss == 0 : rsi . append ( 100 ) else : rs = avg_gain / avg_loss rsi . append ( 100 - ( 100 / ( 1 + rs ) ) ) for idx in range ( 1 , len ( data ) - period ) : avg_gain = ( ( avg_gain * ( period - 1 ) + gains [ idx + ( period - 1 ) ] ) / period ) avg_loss = ( ( avg_loss * ( period - 1 ) + losses [ idx + ( period - 1 ) ] ) / period ) if avg_loss == 0 : rsi . append ( 100 ) else : rs = avg_gain / avg_loss rsi . append ( 100 - ( 100 / ( 1 + rs ) ) ) rsi = fill_for_noncomputable_vals ( data , rsi ) return rsi
Relative Strength Index.
40,353
def vertical_horizontal_filter ( data , period ) : catch_errors . check_for_period_error ( data , period ) vhf = [ abs ( np . max ( data [ idx + 1 - period : idx + 1 ] ) - np . min ( data [ idx + 1 - period : idx + 1 ] ) ) / sum ( [ abs ( data [ idx + 1 - period : idx + 1 ] [ i ] - data [ idx + 1 - period : idx + 1 ] [ i - 1 ] ) for i in range ( 0 , len ( data [ idx + 1 - period : idx + 1 ] ) ) ] ) for idx in range ( period - 1 , len ( data ) ) ] vhf = fill_for_noncomputable_vals ( data , vhf ) return vhf
Vertical Horizontal Filter.
40,354
def buying_pressure ( close_data , low_data ) : catch_errors . check_for_input_len_diff ( close_data , low_data ) bp = [ close_data [ idx ] - np . min ( [ low_data [ idx ] , close_data [ idx - 1 ] ] ) for idx in range ( 1 , len ( close_data ) ) ] bp = fill_for_noncomputable_vals ( close_data , bp ) return bp
Buying Pressure.
40,355
def ultimate_oscillator ( close_data , low_data ) : a7 = 4 * average_7 ( close_data , low_data ) a14 = 2 * average_14 ( close_data , low_data ) a28 = average_28 ( close_data , low_data ) uo = 100 * ( ( a7 + a14 + a28 ) / 7 ) return uo
Ultimate Oscillator.
40,356
def aroon_up ( data , period ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) a_up = [ ( ( period - list ( reversed ( data [ idx + 1 - period : idx + 1 ] ) ) . index ( np . max ( data [ idx + 1 - period : idx + 1 ] ) ) ) / float ( period ) ) * 100 for idx in range ( period - 1 , len ( data ) ) ] a_up = fill_for_noncomputable_vals ( data , a_up ) return a_up
Aroon Up.
40,357
def aroon_down ( data , period ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) a_down = [ ( ( period - list ( reversed ( data [ idx + 1 - period : idx + 1 ] ) ) . index ( np . min ( data [ idx + 1 - period : idx + 1 ] ) ) ) / float ( period ) ) * 100 for idx in range ( period - 1 , len ( data ) ) ] a_down = fill_for_noncomputable_vals ( data , a_down ) return a_down
Aroon Down.
40,358
def upper_price_channel ( data , period , upper_percent ) : catch_errors . check_for_period_error ( data , period ) emas = ema ( data , period ) upper_channel = [ val * ( 1 + float ( upper_percent ) / 100 ) for val in emas ] return upper_channel
Upper Price Channel.
40,359
def lower_price_channel ( data , period , lower_percent ) : catch_errors . check_for_period_error ( data , period ) emas = ema ( data , period ) lower_channel = [ val * ( 1 - float ( lower_percent ) / 100 ) for val in emas ] return lower_channel
Lower Price Channel.
40,360
def exponential_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) emas = [ exponential_moving_average_helper ( data [ idx - period + 1 : idx + 1 ] , period ) for idx in range ( period - 1 , len ( data ) ) ] emas = fill_for_noncomputable_vals ( data , emas ) return emas
Exponential Moving Average.
40,361
def commodity_channel_index ( close_data , high_data , low_data , period ) : catch_errors . check_for_input_len_diff ( close_data , high_data , low_data ) catch_errors . check_for_period_error ( close_data , period ) tp = typical_price ( close_data , high_data , low_data ) cci = ( ( tp - sma ( tp , period ) ) / ( 0.015 * np . mean ( np . absolute ( tp - np . mean ( tp ) ) ) ) ) return cci
Commodity Channel Index.
40,362
def williams_percent_r ( close_data ) : highest_high = np . max ( close_data ) lowest_low = np . min ( close_data ) wr = [ ( ( highest_high - close ) / ( highest_high - lowest_low ) ) * - 100 for close in close_data ] return wr
Williams %R.
40,363
def moving_average_convergence_divergence ( data , short_period , long_period ) : catch_errors . check_for_period_error ( data , short_period ) catch_errors . check_for_period_error ( data , long_period ) macd = ema ( data , short_period ) - ema ( data , long_period ) return macd
Moving Average Convergence Divergence.
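For intuition, an equivalent computation with pandas exponential weighting; this is a sketch under the assumption that span-based ewm approximates the module's windowed ema helper, so values will differ slightly from the function above:

import pandas as pd

prices = pd.Series(range(1, 41), dtype=float)  # toy price series
macd = (prices.ewm(span=12, adjust=False).mean()
        - prices.ewm(span=26, adjust=False).mean())
# conventional 12/26 short/long periods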
40,364
def money_flow_index ( close_data , high_data , low_data , volume , period ) : catch_errors . check_for_input_len_diff ( close_data , high_data , low_data , volume ) catch_errors . check_for_period_error ( close_data , period ) mf = money_flow ( close_data , high_data , low_data , volume ) tp = typical_price ( close_data , high_data , low_data ) flow = [ tp [ idx ] > tp [ idx - 1 ] for idx in range ( 1 , len ( tp ) ) ] pf = [ mf [ idx ] if flow [ idx ] else 0 for idx in range ( 0 , len ( flow ) ) ] nf = [ mf [ idx ] if not flow [ idx ] else 0 for idx in range ( 0 , len ( flow ) ) ] pmf = [ sum ( pf [ idx + 1 - period : idx + 1 ] ) for idx in range ( period - 1 , len ( pf ) ) ] nmf = [ sum ( nf [ idx + 1 - period : idx + 1 ] ) for idx in range ( period - 1 , len ( nf ) ) ] with warnings . catch_warnings ( ) : warnings . simplefilter ( "ignore" , category = RuntimeWarning ) money_ratio = np . array ( pmf ) / np . array ( nmf ) mfi = 100 - ( 100 / ( 1 + money_ratio ) ) mfi = fill_for_noncomputable_vals ( close_data , mfi ) return mfi
Money Flow Index.
40,365
def typical_price ( close_data , high_data , low_data ) : catch_errors . check_for_input_len_diff ( close_data , high_data , low_data ) tp = [ ( high_data [ idx ] + low_data [ idx ] + close_data [ idx ] ) / 3 for idx in range ( 0 , len ( close_data ) ) ] return np . array ( tp )
Typical Price.
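A worked example, assuming the module's functions are in scope: with high 12, low 8 and close 10, the typical price is (12 + 8 + 10) / 3 = 10:

tp = typical_price(close_data=[10], high_data=[12], low_data=[8])
# tp == array([10.0])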
40,366
def true_range ( close_data , period ) : catch_errors . check_for_period_error ( close_data , period ) tr = [ np . max ( [ np . max ( close_data [ idx + 1 - period : idx + 1 ] ) - np . min ( close_data [ idx + 1 - period : idx + 1 ] ) , abs ( np . max ( close_data [ idx + 1 - period : idx + 1 ] ) - close_data [ idx - 1 ] ) , abs ( np . min ( close_data [ idx + 1 - period : idx + 1 ] ) - close_data [ idx - 1 ] ) ] ) for idx in range ( period - 1 , len ( close_data ) ) ] tr = fill_for_noncomputable_vals ( close_data , tr ) return tr
True Range.
40,367
def double_smoothed_stochastic ( data , period ) : catch_errors . check_for_period_error ( data , period ) lows = [ data [ idx ] - np . min ( data [ idx + 1 - period : idx + 1 ] ) for idx in range ( period - 1 , len ( data ) ) ] sm_lows = ema ( ema ( lows , period ) , period ) highs = [ np . max ( data [ idx + 1 - period : idx + 1 ] ) - np . min ( data [ idx + 1 - period : idx + 1 ] ) for idx in range ( period - 1 , len ( data ) ) ] sm_highs = ema ( ema ( highs , period ) , period ) dss = ( sm_lows / sm_highs ) * 100 dss = fill_for_noncomputable_vals ( data , dss ) return dss
Double Smoothed Stochastic.
40,368
def volume_adjusted_moving_average ( close_data , volume , period ) : catch_errors . check_for_input_len_diff ( close_data , volume ) catch_errors . check_for_period_error ( close_data , period ) avg_vol = np . mean ( volume ) vol_incr = avg_vol * 0.67 vol_ratio = [ val / vol_incr for val in volume ] close_vol = np . array ( close_data ) * vol_ratio vama = [ sum ( close_vol [ idx + 1 - period : idx + 1 ] ) / period for idx in range ( period - 1 , len ( close_data ) ) ] vama = fill_for_noncomputable_vals ( close_data , vama ) return vama
Volume Adjusted Moving Average.
40,369
def double_exponential_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) dema = ( 2 * ema ( data , period ) ) - ema ( ema ( data , period ) , period ) return dema
Double Exponential Moving Average.
40,370
def triangular_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) tma = sma ( sma ( data , period ) , period ) return tma
Triangular Moving Average.
40,371
def weighted_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) k = ( period * ( period + 1 ) ) / 2.0 wmas = [ ] for idx in range ( 0 , len ( data ) - period + 1 ) : product = [ data [ idx + period_idx ] * ( period_idx + 1 ) for period_idx in range ( 0 , period ) ] wma = sum ( product ) / k wmas . append ( wma ) wmas = fill_for_noncomputable_vals ( data , wmas ) return wmas
Weighted Moving Average.
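A worked example of the weighting for period 3; the divisor is k = period * (period + 1) / 2 = 6 and the newest value gets the largest weight:

data = [1, 2, 3]
period = 3
k = period * (period + 1) / 2      # 6.0
wma = (1 * 1 + 2 * 2 + 3 * 3) / k  # (1 + 4 + 9) / 6 = 2.333...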
40,372
def conversion_base_line_helper ( data , period ) : catch_errors . check_for_period_error ( data , period ) cblh = [ ( np . max ( data [ idx + 1 - period : idx + 1 ] ) + np . min ( data [ idx + 1 - period : idx + 1 ] ) ) / 2 for idx in range ( period - 1 , len ( data ) ) ] cblh = fill_for_noncomputable_vals ( data , cblh ) return cblh
The only real difference between TenkanSen and KijunSen is the period value
40,373
def chande_momentum_oscillator ( close_data , period ) : catch_errors . check_for_period_error ( close_data , period ) close_data = np . array ( close_data ) moving_period_diffs = [ [ ( close_data [ idx + 1 - period : idx + 1 ] [ i ] - close_data [ idx + 1 - period : idx + 1 ] [ i - 1 ] ) for i in range ( 1 , len ( close_data [ idx + 1 - period : idx + 1 ] ) ) ] for idx in range ( 0 , len ( close_data ) ) ] sum_up = [ ] sum_down = [ ] for period_diffs in moving_period_diffs : ups = [ val if val > 0 else 0 for val in period_diffs ] sum_up . append ( sum ( ups ) ) downs = [ abs ( val ) if val < 0 else 0 for val in period_diffs ] sum_down . append ( sum ( downs ) ) sum_up = np . array ( sum_up ) sum_down = np . array ( sum_down ) with warnings . catch_warnings ( ) : warnings . simplefilter ( "ignore" , category = RuntimeWarning ) cmo = 100 * ( ( sum_up - sum_down ) / ( sum_up + sum_down ) ) return cmo
Chande Momentum Oscillator.
40,374
def price_oscillator ( data , short_period , long_period ) : catch_errors . check_for_period_error ( data , short_period ) catch_errors . check_for_period_error ( data , long_period ) ema_short = ema ( data , short_period ) ema_long = ema ( data , long_period ) po = ( ( ema_short - ema_long ) / ema_long ) * 100 return po
Price Oscillator.
40,375
def check_for_period_error ( data , period ) : period = int ( period ) data_len = len ( data ) if data_len < period : raise Exception ( "Error: data_len < period" )
Check for Period Error.
40,376
def check_for_input_len_diff ( * args ) : arrays_len = [ len ( arr ) for arr in args ] if not all ( a == arrays_len [ 0 ] for a in arrays_len ) : err_msg = ( "Error: mismatched data lengths, check to ensure that all " "input data is the same length and valid" ) raise Exception ( err_msg )
Check for Input Length Difference.
40,377
def upper_bollinger_band ( data , period , std_mult = 2.0 ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) simple_ma = sma ( data , period ) [ period - 1 : ] upper_bb = [ ] for idx in range ( len ( data ) - period + 1 ) : std_dev = np . std ( data [ idx : idx + period ] ) upper_bb . append ( simple_ma [ idx ] + std_dev * std_mult ) upper_bb = fill_for_noncomputable_vals ( data , upper_bb ) return np . array ( upper_bb )
Upper Bollinger Band.
40,378
def middle_bollinger_band ( data , period , std = 2.0 ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) mid_bb = sma ( data , period ) return mid_bb
Middle Bollinger Band.
40,379
def lower_bollinger_band ( data , period , std = 2.0 ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) simple_ma = sma ( data , period ) [ period - 1 : ] lower_bb = [ ] for idx in range ( len ( data ) - period + 1 ) : std_dev = np . std ( data [ idx : idx + period ] ) lower_bb . append ( simple_ma [ idx ] - std_dev * std ) lower_bb = fill_for_noncomputable_vals ( data , lower_bb ) return np . array ( lower_bb )
Lower Bollinger Band.
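A usage sketch combining the three band functions defined above, assuming they are in scope; the price data is arbitrary:

data = [22.27, 22.19, 22.08, 22.17, 22.18, 22.13, 22.23, 22.43, 22.24, 22.29]
period = 5
upper = upper_bollinger_band(data, period)    # SMA + 2 * rolling std
middle = middle_bollinger_band(data, period)  # the plain SMA
lower = lower_bollinger_band(data, period)    # SMA - 2 * rolling std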
40,380
def percent_bandwidth ( data , period , std = 2.0 ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) percent_bandwidth = ( ( np . array ( data ) - lower_bollinger_band ( data , period , std ) ) / bb_range ( data , period , std ) ) return percent_bandwidth
Percent Bandwidth.
40,381
def standard_deviation ( data , period ) : catch_errors . check_for_period_error ( data , period ) stds = [ np . std ( data [ idx + 1 - period : idx + 1 ] , ddof = 1 ) for idx in range ( period - 1 , len ( data ) ) ] stds = fill_for_noncomputable_vals ( data , stds ) return stds
Standard Deviation.
40,382
def detrended_price_oscillator ( data , period ) : catch_errors . check_for_period_error ( data , period ) period = int ( period ) dop = [ data [ idx ] - np . mean ( data [ idx + 1 - ( int ( period / 2 ) + 1 ) : idx + 1 ] ) for idx in range ( period - 1 , len ( data ) ) ] dop = fill_for_noncomputable_vals ( data , dop ) return dop
Detrended Price Oscillator.
40,383
def smoothed_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) series = pd . Series ( data ) return series . ewm ( alpha = 1.0 / period ) . mean ( ) . values . flatten ( )
Smoothed Moving Average.
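The function above is already a thin wrapper over pandas; an equivalent inline sketch:

import pandas as pd

data = [1.0, 2.0, 3.0, 4.0]
period = 3
smma = pd.Series(data).ewm(alpha=1.0 / period).mean().values
# same result as smoothed_moving_average(data, 3)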
40,384
def chaikin_money_flow ( close_data , high_data , low_data , volume , period ) : catch_errors . check_for_input_len_diff ( close_data , high_data , low_data , volume ) catch_errors . check_for_period_error ( close_data , period ) close_data = np . array ( close_data ) high_data = np . array ( high_data ) low_data = np . array ( low_data ) volume = np . array ( volume ) cmf = [ sum ( ( ( ( close_data [ idx + 1 - period : idx + 1 ] - low_data [ idx + 1 - period : idx + 1 ] ) - ( high_data [ idx + 1 - period : idx + 1 ] - close_data [ idx + 1 - period : idx + 1 ] ) ) / ( high_data [ idx + 1 - period : idx + 1 ] - low_data [ idx + 1 - period : idx + 1 ] ) ) * volume [ idx + 1 - period : idx + 1 ] ) / sum ( volume [ idx + 1 - period : idx + 1 ] ) for idx in range ( period - 1 , len ( close_data ) ) ] cmf = fill_for_noncomputable_vals ( close_data , cmf ) return cmf
Chaikin Money Flow.
40,385
def hull_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) hma = wma ( 2 * wma ( data , int ( period / 2 ) ) - wma ( data , period ) , int ( np . sqrt ( period ) ) ) return hma
Hull Moving Average.
40,386
def standard_variance ( data , period ) : catch_errors . check_for_period_error ( data , period ) sv = [ np . var ( data [ idx + 1 - period : idx + 1 ] , ddof = 1 ) for idx in range ( period - 1 , len ( data ) ) ] sv = fill_for_noncomputable_vals ( data , sv ) return sv
Standard Variance.
40,387
def calculate_up_moves ( high_data ) : up_moves = [ high_data [ idx ] - high_data [ idx - 1 ] for idx in range ( 1 , len ( high_data ) ) ] return [ np . nan ] + up_moves
Up Move.
40,388
def calculate_down_moves ( low_data ) : down_moves = [ low_data [ idx - 1 ] - low_data [ idx ] for idx in range ( 1 , len ( low_data ) ) ] return [ np . nan ] + down_moves
Down Move.
40,389
def average_directional_index ( close_data , high_data , low_data , period ) : avg_di = ( abs ( ( positive_directional_index ( close_data , high_data , low_data , period ) - negative_directional_index ( close_data , high_data , low_data , period ) ) / ( positive_directional_index ( close_data , high_data , low_data , period ) + negative_directional_index ( close_data , high_data , low_data , period ) ) ) ) adx = 100 * smma ( avg_di , period ) return adx
Average Directional Index.
40,390
def linear_weighted_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) idx_period = list ( range ( 1 , period + 1 ) ) lwma = [ ( sum ( [ i * idx_period [ data [ idx - ( period - 1 ) : idx + 1 ] . index ( i ) ] for i in data [ idx - ( period - 1 ) : idx + 1 ] ] ) ) / sum ( range ( 1 , len ( data [ idx + 1 - period : idx + 1 ] ) + 1 ) ) for idx in range ( period - 1 , len ( data ) ) ] lwma = fill_for_noncomputable_vals ( data , lwma ) return lwma
Linear Weighted Moving Average.
40,391
def volume_oscillator ( volume , short_period , long_period ) : catch_errors . check_for_period_error ( volume , short_period ) catch_errors . check_for_period_error ( volume , long_period ) vo = ( 100 * ( ( sma ( volume , short_period ) - sma ( volume , long_period ) ) / sma ( volume , long_period ) ) ) return vo
Volume Oscillator.
40,392
def triple_exponential_moving_average ( data , period ) : catch_errors . check_for_period_error ( data , period ) tema = ( ( 3 * ema ( data , period ) - ( 3 * ema ( ema ( data , period ) , period ) ) ) + ema ( ema ( ema ( data , period ) , period ) , period ) ) return tema
Triple Exponential Moving Average.
40,393
def money_flow ( close_data , high_data , low_data , volume ) : catch_errors . check_for_input_len_diff ( close_data , high_data , low_data , volume ) mf = volume * tp ( close_data , high_data , low_data ) return mf
Money Flow.
40,394
def request_and_check ( self , url , method = 'get' , expected_content_type = None , ** kwargs ) : assert method in [ 'get' , 'post' ] result = self . driver . request ( method , url , ** kwargs ) if result . status_code != requests . codes . ok : raise RuntimeError ( 'Error requesting %r, status = %d' % ( url , result . status_code ) ) if expected_content_type is not None : content_type = result . headers . get ( 'content-type' , '' ) if not re . match ( expected_content_type , content_type ) : raise RuntimeError ( 'Error requesting %r, content type %r does not match %r' % ( url , content_type , expected_content_type ) ) return result
Perform a request and check that the status is OK and that the content-type matches expectations.
40,395
def get_transactions_json ( self , include_investment = False , skip_duplicates = False , start_date = None , id = 0 ) : self . set_user_property ( 'hide_duplicates' , 'T' if skip_duplicates else 'F' ) try : start_date = datetime . strptime ( start_date , '%m/%d/%y' ) except ( TypeError , ValueError ) : start_date = None all_txns = [ ] offset = 0 while 1 : url = MINT_ROOT_URL + '/getJsonData.xevent' params = { 'queryNew' : '' , 'offset' : offset , 'comparableType' : '8' , 'rnd' : Mint . get_rnd ( ) , } if id > 0 or include_investment : params [ 'id' ] = id if include_investment : params [ 'task' ] = 'transactions' else : params [ 'task' ] = 'transactions,txnfilters' params [ 'filterType' ] = 'cash' result = self . request_and_check ( url , headers = JSON_HEADER , params = params , expected_content_type = 'text/json|application/json' ) data = json . loads ( result . text ) txns = data [ 'set' ] [ 0 ] . get ( 'data' , [ ] ) if not txns : break if start_date : last_dt = json_date_to_datetime ( txns [ - 1 ] [ 'odate' ] ) if last_dt < start_date : keep_txns = [ t for t in txns if json_date_to_datetime ( t [ 'odate' ] ) >= start_date ] all_txns . extend ( keep_txns ) break all_txns . extend ( txns ) offset += len ( txns ) return all_txns
Return the raw JSON transaction data as downloaded from Mint. The JSON transaction data includes some additional information missing from the CSV data, such as whether the transaction is pending or completed, but leaves off the year for current-year transactions.
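A hypothetical usage sketch; the Mint constructor shown here is an assumption, not confirmed by this snippet, while the parameter names and the isPending field come from the surrounding code:

mint = Mint("user@example.com", "password")  # hypothetical constructor
txns = mint.get_transactions_json(skip_duplicates=True, start_date="01/01/16")
pending = [t for t in txns if t.get("isPending")]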
40,396
def get_detailed_transactions ( self , include_investment = False , skip_duplicates = False , remove_pending = True , start_date = None ) : assert_pd ( ) result = self . get_transactions_json ( include_investment , skip_duplicates , start_date ) df = pd . DataFrame ( result ) df [ 'odate' ] = df [ 'odate' ] . apply ( json_date_to_datetime ) if remove_pending : df = df [ ~ df . isPending ] df . reset_index ( drop = True , inplace = True ) df . amount = df . apply ( reverse_credit_amount , axis = 1 ) return df
Return the JSON transaction data as a DataFrame, converting current-year and prior-year dates into a consistent datetime format and reversing credit activity.
40,397
def get_transactions_csv ( self , include_investment = False , acct = 0 ) : params = None if include_investment or acct > 0 : params = { 'accountId' : acct } result = self . request_and_check ( '{}/transactionDownload.event' . format ( MINT_ROOT_URL ) , params = params , expected_content_type = 'text/csv' ) return result . content
Returns the raw CSV transaction data as downloaded from Mint.
40,398
def get_transactions ( self , include_investment = False ) : assert_pd ( ) s = StringIO ( self . get_transactions_csv ( include_investment = include_investment ) ) s . seek ( 0 ) df = pd . read_csv ( s , parse_dates = [ 'Date' ] ) df . columns = [ c . lower ( ) . replace ( ' ' , '_' ) for c in df . columns ] df . category = ( df . category . str . lower ( ) . replace ( 'uncategorized' , pd . np . nan ) ) return df
Returns the transaction data as a Pandas DataFrame.
40,399
def payments ( self , cursor = None , order = 'asc' , limit = 10 , sse = False ) : return self . horizon . account_payments ( address = self . address , cursor = cursor , order = order , limit = limit , sse = sse )
Retrieve the payments JSON from this instance's Horizon server.
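A usage sketch, assuming an Address-style wrapper (hypothetical name and constructor) that exposes the method above:

acct = Address(address="GCEZW...674JZ",                        # hypothetical wrapper
               horizon_uri="https://horizon-testnet.stellar.org")
page = acct.payments(order="desc", limit=5)  # most recent five payments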