Handle GET_TXN request
|
def handle_get_txn_req(self, request: Request, frm: str):
"""
Handle GET_TXN request
"""
ledger_id = request.operation.get(f.LEDGER_ID.nm, DOMAIN_LEDGER_ID)
if ledger_id not in self.ledger_to_req_handler:
self.send_nack_to_client((request.identifier, request.reqId),
'Invalid ledger id {}'.format(ledger_id),
frm)
return
seq_no = request.operation.get(DATA)
self.send_ack_to_client((request.identifier, request.reqId), frm)
ledger = self.getLedger(ledger_id)
try:
txn = self.getReplyFromLedger(ledger, seq_no)
except KeyError:
txn = None
if txn is None:
logger.debug(
"{} can not handle GET_TXN request: ledger doesn't "
"have txn with seqNo={}".format(self, str(seq_no)))
result = {
f.IDENTIFIER.nm: request.identifier,
f.REQ_ID.nm: request.reqId,
TXN_TYPE: request.operation[TXN_TYPE],
DATA: None
}
if txn:
result[DATA] = txn.result
result[f.SEQ_NO.nm] = get_seq_no(txn.result)
self.transmitToClient(Reply(result), frm)
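For orientation, a hedged sketch of the Reply result this handler transmits when the requested seqNo exists; the values are placeholders, and the plain string keys are assumptions about what the f.IDENTIFIER.nm / f.REQ_ID.nm / f.SEQ_NO.nm constants resolve to:

request_txn_type = '3'          # placeholder for the txn type echoed from the request
ledger_txn = {'seqNo': 9}       # placeholder for the transaction fetched from the ledger
result = {
    'identifier': 'L5AD5g65TDQr1PPHHRoiGf',   # f.IDENTIFIER.nm (example DID)
    'reqId': 1514215425836443,                # f.REQ_ID.nm
    'type': request_txn_type,                 # TXN_TYPE echoed back
    'data': ledger_txn,                       # DATA, or None when the seqNo is absent
    'seqNo': 9,                               # f.SEQ_NO.nm, set only when the txn was found
}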
|
Execute ordered request
:param ordered: an ordered request
:return: whether executed
|
def processOrdered(self, ordered: Ordered):
"""
Execute ordered request
:param ordered: an ordered request
:return: whether executed
"""
if ordered.instId not in self.instances.ids:
logger.warning('{} got ordered request for instance {} which '
'does not exist'.format(self, ordered.instId))
return False
if ordered.instId != self.instances.masterId:
# Requests from backup replicas are not executed
logger.trace("{} got ordered requests from backup replica {}"
.format(self, ordered.instId))
with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
ordered.instId,
self.requests,
byMaster=False)
return False
logger.trace("{} got ordered requests from master replica"
.format(self))
logger.debug("{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}"
.format(self.name,
ordered.viewNo,
ordered.ppSeqNo,
len(ordered.valid_reqIdr),
ordered.stateRootHash,
ordered.txnRootHash))
three_pc_batch = ThreePcBatch.from_ordered(ordered)
if self.db_manager.ledgers[AUDIT_LEDGER_ID].uncommittedRootHash is None:
# if we order request during view change
# in between catchup rounds, then the 3PC batch will not be applied,
# since it was reverted before catchup started, and only COMMITs were
# processed in between catchup that led to this ORDERED msg
logger.info("{} applying stashed requests for batch {} {} of {} requests; state root {}; txn root {}"
.format(self.name,
three_pc_batch.view_no,
three_pc_batch.pp_seq_no,
len(three_pc_batch.valid_digests),
three_pc_batch.state_root,
three_pc_batch.txn_root))
self.apply_stashed_reqs(three_pc_batch)
self.executeBatch(three_pc_batch,
ordered.valid_reqIdr,
ordered.invalid_reqIdr,
ordered.auditTxnRootHash)
with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
ordered.instId,
self.requests,
byMaster=True)
return True
|
Take any messages from replicas that have been ordered and process
them. This should be done rarely, e.g. before catchup starts,
so a more current LedgerStatus can be sent.
Can be called either:
1. when the node is participating; this happens just before catchup starts
so the node can have the latest ledger status, or
2. when the node is not participating but a round of catchup is about to be
started; here it forces all the replica-ordered messages to be appended
to the stashed ordered requests, and the stashed ordered requests are
processed with appropriate checks
|
def force_process_ordered(self):
"""
Take any messages from replicas that have been ordered and process
them. This should be done rarely, e.g. before catchup starts,
so a more current LedgerStatus can be sent.
Can be called either:
1. when the node is participating; this happens just before catchup starts
so the node can have the latest ledger status, or
2. when the node is not participating but a round of catchup is about to be
started; here it forces all the replica-ordered messages to be appended
to the stashed ordered requests, and the stashed ordered requests are
processed with appropriate checks
"""
for instance_id, messages in self.replicas.take_ordereds_out_of_turn():
num_processed = 0
for message in messages:
self.try_processing_ordered(message)
num_processed += 1
logger.info('{} processed {} Ordered batches for instance {} '
'before starting catch up'
.format(self, num_processed, instance_id))
|
Process an exception escalated from a Replica
|
def processEscalatedException(self, ex):
"""
Process an exception escalated from a Replica
"""
if isinstance(ex, SuspiciousNode):
self.reportSuspiciousNodeEx(ex)
else:
raise RuntimeError("unhandled replica-escalated exception") from ex
|
Checks if any requests have been ordered since last performance check
and updates the performance check data store if needed.
:return: True if new ordered requests, False otherwise
|
def _update_new_ordered_reqs_count(self):
"""
Checks if any requests have been ordered since last performance check
and updates the performance check data store if needed.
:return: True if new ordered requests, False otherwise
"""
last_num_ordered = self._last_performance_check_data.get('num_ordered')
num_ordered = sum(num for num, _ in self.monitor.numOrderedRequests.values())
if num_ordered != last_num_ordered:
self._last_performance_check_data['num_ordered'] = num_ordered
return True
else:
return False
|
Check if the master instance is slow and send an instance change request.
:returns True if master performance is OK, False if performance
has degraded, None if the check could not be performed
|
def checkPerformance(self) -> Optional[bool]:
"""
Check if the master instance is slow and send an instance change request.
:returns True if master performance is OK, False if performance
has degraded, None if the check could not be performed
"""
logger.trace("{} checking its performance".format(self))
# Move ahead only if the node has synchronized its state with other
# nodes
if not self.isParticipating:
return
if self.view_change_in_progress:
return
if not self._update_new_ordered_reqs_count():
logger.trace("{} ordered no new requests".format(self))
return
if self.instances.masterId is not None:
self.sendNodeRequestSpike()
master_throughput, backup_throughput = self.monitor.getThroughputs(0)
if master_throughput is not None:
self.metrics.add_event(MetricsName.MONITOR_AVG_THROUGHPUT, master_throughput)
if backup_throughput is not None:
self.metrics.add_event(MetricsName.BACKUP_MONITOR_AVG_THROUGHPUT, backup_throughput)
avg_lat_master, avg_lat_backup = self.monitor.getLatencies()
if avg_lat_master:
self.metrics.add_event(MetricsName.MONITOR_AVG_LATENCY, avg_lat_master)
if avg_lat_backup:
self.metrics.add_event(MetricsName.BACKUP_MONITOR_AVG_LATENCY, avg_lat_backup)
degraded_backups = self.monitor.areBackupsDegraded()
if degraded_backups:
logger.display('{} backup instances performance degraded'.format(degraded_backups))
self.backup_instance_faulty_processor.on_backup_degradation(degraded_backups)
if self.monitor.isMasterDegraded():
logger.display('{} master instance performance degraded'.format(self))
self.view_changer.on_master_degradation()
return False
else:
logger.trace("{}'s master has higher performance than backups".
format(self))
return True
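As a rough illustration of the degradation test delegated to the monitor above, here is a hedged sketch; the ratio threshold (delta) and the exact formula are assumptions for illustration, not the monitor's actual code:

def is_master_degraded(master_throughput: float, backup_avg_throughput: float,
                       delta: float = 0.4) -> bool:
    # Master is treated as degraded when its throughput drops below a
    # fraction (delta) of the average backup throughput.
    if not backup_avg_throughput:
        return False
    return master_throughput / backup_avg_throughput < delta

assert is_master_degraded(30.0, 100.0) is True    # 0.3 < 0.4 -> degraded
assert is_master_degraded(90.0, 100.0) is False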
|
Schedule a primary connection check which in turn can send a view
change message
|
def lost_master_primary(self):
"""
Schedule a primary connection check which in turn can send a view
change message
"""
self.primaries_disconnection_times[self.master_replica.instId] = time.perf_counter()
self._schedule_view_change()
|
Validate the signature of the request
Note: Batch is whitelisted because the inner messages are checked
:param msg: a message requiring signature verification
:return: None; raises an exception if the signature is not valid
|
def verifySignature(self, msg):
"""
Validate the signature of the request
Note: Batch is whitelisted because the inner messages are checked
:param msg: a message requiring signature verification
:return: None; raises an exception if the signature is not valid
"""
if isinstance(msg, self.authnWhitelist):
return
if isinstance(msg, Propagate):
typ = 'propagate'
req = TxnUtilConfig.client_request_class(**msg.request)
else:
typ = ''
req = msg
key = None
if isinstance(req, Request):
key = req.key
if not isinstance(req, Mapping):
req = req.as_dict
with self.metrics.measure_time(MetricsName.VERIFY_SIGNATURE_TIME):
identifiers = self.authNr(req).authenticate(req, key=key)
logger.debug("{} authenticated {} signature on {} request {}".
format(self, identifiers, typ, req['reqId']),
extra={"cli": True,
"tags": ["node-msg-processing"]})
|
Execute the requests ordered by this Node
:param three_pc_batch: the ordered 3PC batch (carries the view number and
the time at which PRE-PREPARE was sent)
:param valid_reqs_keys: list of valid client request keys
:param invalid_reqs_keys: list of invalid client request keys
:param audit_txn_root: audit ledger root hash for this batch
|
def executeBatch(self, three_pc_batch: ThreePcBatch,
valid_reqs_keys: List, invalid_reqs_keys: List,
audit_txn_root) -> None:
"""
Execute the requests ordered by this Node
:param three_pc_batch: the ordered 3PC batch (carries the view number and
the time at which PRE-PREPARE was sent)
:param valid_reqs_keys: list of valid client request keys
:param invalid_reqs_keys: list of invalid client request keys
:param audit_txn_root: audit ledger root hash for this batch
"""
# We need hashes in apply and str in commit
three_pc_batch.txn_root = Ledger.hashToStr(three_pc_batch.txn_root)
three_pc_batch.state_root = Ledger.hashToStr(three_pc_batch.state_root)
for req_key in valid_reqs_keys:
self.execute_hook(NodeHooks.PRE_REQUEST_COMMIT, req_key=req_key,
pp_time=three_pc_batch.pp_time,
state_root=three_pc_batch.state_root,
txn_root=three_pc_batch.txn_root)
self.execute_hook(NodeHooks.PRE_BATCH_COMMITTED,
ledger_id=three_pc_batch.ledger_id,
pp_time=three_pc_batch.pp_time,
reqs_keys=valid_reqs_keys,
state_root=three_pc_batch.state_root,
txn_root=three_pc_batch.txn_root)
try:
committedTxns = self.get_executer(three_pc_batch.ledger_id)(three_pc_batch)
except Exception as exc:
logger.error(
"{} commit failed for batch request, error {}, view no {}, "
"ppSeqNo {}, ledger {}, state root {}, txn root {}, "
"requests: {}".format(
self, repr(exc), three_pc_batch.view_no, three_pc_batch.pp_seq_no,
three_pc_batch.ledger_id, three_pc_batch.state_root,
three_pc_batch.txn_root, [req_idr for req_idr in valid_reqs_keys]
)
)
raise
for req_key in valid_reqs_keys + invalid_reqs_keys:
if req_key in self.requests:
self.mark_request_as_executed(self.requests[req_key].request)
else:
# This request was dropped from the main requests queue due to a
# timeout, but it was still ordered and executed normally
logger.debug('{} normally executed request {} whose object has been dropped '
'from the requests queue'.format(self, req_key))
# TODO is it possible to get len(committedTxns) != len(valid_reqs)
# someday
if not committedTxns:
return
logger.debug("{} committed batch request, view no {}, ppSeqNo {}, "
"ledger {}, state root {}, txn root {}, requests: {}".
format(self, three_pc_batch.view_no, three_pc_batch.pp_seq_no,
three_pc_batch.ledger_id, three_pc_batch.state_root,
three_pc_batch.txn_root, [key for key in valid_reqs_keys]))
for txn in committedTxns:
self.execute_hook(NodeHooks.POST_REQUEST_COMMIT, txn=txn,
pp_time=three_pc_batch.pp_time, state_root=three_pc_batch.state_root,
txn_root=three_pc_batch.txn_root)
first_txn_seq_no = get_seq_no(committedTxns[0])
last_txn_seq_no = get_seq_no(committedTxns[-1])
reqs = []
reqs_list_built = True
for req_key in valid_reqs_keys:
if req_key in self.requests:
reqs.append(self.requests[req_key].request.as_dict)
else:
logger.warning("Could not build requests list for observers due to non-existent requests")
reqs_list_built = False
break
if reqs_list_built:
batch_committed_msg = BatchCommitted(reqs,
three_pc_batch.ledger_id,
0,
three_pc_batch.view_no,
three_pc_batch.pp_seq_no,
three_pc_batch.pp_time,
three_pc_batch.state_root,
three_pc_batch.txn_root,
first_txn_seq_no,
last_txn_seq_no,
audit_txn_root,
three_pc_batch.primaries)
self._observable.append_input(batch_committed_msg, self.name)
|
A batch of requests has been created and has been applied but not
committed to ledger and state.
:param three_pc_batch: the applied batch, carrying the ledger_id and
the state root after the batch creation
:return:
|
def onBatchCreated(self, three_pc_batch: ThreePcBatch):
"""
A batch of requests has been created and has been applied but not
committed to ledger and state.
:param three_pc_batch: the applied batch, carrying the ledger_id and
the state root after the batch creation
:return:
"""
ledger_id = three_pc_batch.ledger_id
if ledger_id == POOL_LEDGER_ID:
if isinstance(self.poolManager, TxnPoolManager):
self.get_req_handler(POOL_LEDGER_ID).onBatchCreated(three_pc_batch.state_root, three_pc_batch.pp_time)
elif self.get_req_handler(ledger_id):
self.get_req_handler(ledger_id).onBatchCreated(three_pc_batch.state_root, three_pc_batch.pp_time)
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
if ledger_id == POOL_LEDGER_ID:
three_pc_batch.primaries = self.future_primaries_handler.post_batch_applied(three_pc_batch)
elif not three_pc_batch.primaries:
three_pc_batch.primaries = self.future_primaries_handler.get_last_primaries() or self.primaries
self.audit_handler.post_batch_applied(three_pc_batch)
self.execute_hook(NodeHooks.POST_BATCH_CREATED, ledger_id, three_pc_batch.state_root)
|
A batch of requests has been rejected; revert the current
uncommitted batch for the given ledger.
:param ledger_id:
:return:
|
def onBatchRejected(self, ledger_id):
"""
A batch of requests has been rejected; revert the current
uncommitted batch for the given ledger.
:param ledger_id:
:return:
"""
if ledger_id == POOL_LEDGER_ID:
if isinstance(self.poolManager, TxnPoolManager):
self.get_req_handler(POOL_LEDGER_ID).onBatchRejected()
elif self.get_req_handler(ledger_id):
self.get_req_handler(ledger_id).onBatchRejected()
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
self.audit_handler.post_batch_rejected(ledger_id)
self.execute_hook(NodeHooks.POST_BATCH_REJECTED, ledger_id)
|
Adds a new client or steward to this node based on transaction type.
|
def addNewRole(self, txn):
"""
Adds a new client or steward to this node based on transaction type.
"""
# If the client authenticator is a simple authenticator then add verkey.
# For a custom authenticator, handle appropriately.
# NOTE: The following code should not be used in production
if isinstance(self.clientAuthNr.core_authenticator, SimpleAuthNr):
txn_data = get_payload_data(txn)
identifier = txn_data[TARGET_NYM]
verkey = txn_data.get(VERKEY)
v = DidVerifier(verkey, identifier=identifier)
if identifier not in self.clientAuthNr.core_authenticator.clients:
role = txn_data.get(ROLE)
if role not in (STEWARD, TRUSTEE, None):
logger.debug("Role if present must be {} and not {}".
format(Roles.STEWARD.name, role))
return
self.clientAuthNr.core_authenticator.addIdr(identifier,
verkey=v.verkey,
role=role)
|
Check whether the keys are set up in the local STP keep.
Raises KeysNotFoundException if not found.
|
def ensureKeysAreSetup(self):
"""
Check whether the keys are set up in the local STP keep.
Raises KeysNotFoundException if not found.
"""
if not areKeysSetup(self.name, self.keys_dir):
raise REx(REx.reason.format(self.name) + self.keygenScript)
|
Report suspicion on a node on the basis of an exception
|
def reportSuspiciousNodeEx(self, ex: SuspiciousNode):
"""
Report suspicion on a node on the basis of an exception
"""
self.reportSuspiciousNode(ex.node, ex.reason, ex.code, ex.offendingMsg)
|
Report suspicion on a node and add it to this node's blacklist.
:param nodeName: name of the node to report suspicion on
:param reason: the reason for suspicion
|
def reportSuspiciousNode(self,
nodeName: str,
reason=None,
code: int = None,
offendingMsg=None):
"""
Report suspicion on a node and add it to this node's blacklist.
:param nodeName: name of the node to report suspicion on
:param reason: the reason for suspicion
"""
logger.warning("{} raised suspicion on node {} for {}; suspicion code "
"is {}".format(self, nodeName, reason, code))
# TODO need a more general solution here
# TODO: Should not blacklist client on a single InvalidSignature.
# Should track if a lot of requests with incorrect signatures have been
# made in a short amount of time, only then blacklist client.
# if code == InvalidSignature.code:
# self.blacklistNode(nodeName,
# reason=InvalidSignature.reason,
# code=InvalidSignature.code)
# TODO: Consider blacklisting nodes again.
# if code in self.suspicions:
# self.blacklistNode(nodeName,
# reason=self.suspicions[code],
# code=code)
if code in (s.code for s in (Suspicions.PPR_DIGEST_WRONG,
Suspicions.PPR_REJECT_WRONG,
Suspicions.PPR_TXN_WRONG,
Suspicions.PPR_STATE_WRONG,
Suspicions.PPR_PLUGIN_EXCEPTION,
Suspicions.PPR_SUB_SEQ_NO_WRONG,
Suspicions.PPR_NOT_FINAL,
Suspicions.PPR_WITH_ORDERED_REQUEST,
Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG,
Suspicions.PPR_BLS_MULTISIG_WRONG,
Suspicions.PPR_TIME_WRONG,
)):
logger.display('{}{} got one of primary suspicions codes {}'.format(VIEW_CHANGE_PREFIX, self, code))
self.view_changer.on_suspicious_primary(Suspicions.get_by_code(code))
if offendingMsg:
self.discard(offendingMsg, reason, logger.debug)
|
Report suspicion on a client and add it to this node's blacklist.
:param clientName: name of the client to report suspicion on
:param reason: the reason for suspicion
|
def reportSuspiciousClient(self, clientName: str, reason):
"""
Report suspicion on a client and add it to this node's blacklist.
:param clientName: name of the client to report suspicion on
:param reason: the reason for suspicion
"""
logger.warning("{} raised suspicion on client {} for {}"
.format(self, clientName, reason))
self.blacklistClient(clientName)
|
Add the client specified by `clientName` to this node's blacklist
|
def blacklistClient(self, clientName: str,
reason: str = None, code: int = None):
"""
Add the client specified by `clientName` to this node's blacklist
"""
msg = "{} blacklisting client {}".format(self, clientName)
if reason:
msg += " for reason {}".format(reason)
logger.display(msg)
self.clientBlacklister.blacklist(clientName)
|
Add the node specified by `nodeName` to this node's blacklist
|
def blacklistNode(self, nodeName: str, reason: str = None, code: int = None):
"""
Add the node specified by `nodeName` to this node's blacklist
"""
msg = "{} blacklisting node {}".format(self, nodeName)
if reason:
msg += " for reason {}".format(reason)
if code:
msg += " for code {}".format(code)
logger.display(msg)
self.nodeBlacklister.blacklist(nodeName)
|
Print the node's current statistics to log.
|
def logstats(self):
"""
Print the node's current statistics to log.
"""
lines = [
"node {} current stats".format(self),
"--------------------------------------------------------",
"node inbox size : {}".format(len(self.nodeInBox)),
"client inbox size : {}".format(len(self.clientInBox)),
"age (seconds) : {}".format(time.time() - self.created),
"next check for reconnect: {}".format(time.perf_counter() -
self.nodestack.nextCheck),
"node connections : {}".format(self.nodestack.conns),
"f : {}".format(self.f),
"master instance : {}".format(self.instances.masterId),
"replicas : {}".format(len(self.replicas)),
"view no : {}".format(self.viewNo),
"rank : {}".format(self.rank),
"msgs to replicas : {}".format(self.replicas.sum_inbox_len),
"msgs to view changer : {}".format(len(self.msgsToViewChanger)),
"action queue : {} {}".format(len(self.actionQueue),
id(self.actionQueue)),
"action queue stash : {} {}".format(len(self.aqStash),
id(self.aqStash)),
]
logger.info("\n".join(lines), extra={"cli": False})
|
Print the node's info to log for the REST backend to read.
|
def logNodeInfo(self):
"""
Print the node's info to log for the REST backend to read.
"""
self.nodeInfo['data'] = self.collectNodeInfo()
with closing(open(os.path.join(self.ledger_dir, 'node_info'), 'w')) \
as logNodeInfoFile:
logNodeInfoFile.write(json.dumps(self.nodeInfo['data']))
|
Iterates over `collections` of the given object and reports each
collection's byte size and number of items
|
def get_collection_sizes(obj, collections: Optional[Tuple]=None,
get_only_non_empty=False):
"""
Iterates over `collections` of the given object and reports each
collection's byte size and number of items
"""
from pympler import asizeof
collections = collections or (list, dict, set, deque, abc.Sized)
if not isinstance(collections, tuple):
collections = tuple(collections)
result = []
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if isinstance(attr, collections) and (
not get_only_non_empty or len(attr) > 0):
result.append(
(attr_name, len(attr), asizeof.asizeof(attr, detail=1)))
return result
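A small usage sketch, assuming get_collection_sizes above is importable and the pympler package is installed; Holder is a hypothetical class used only for illustration:

from collections import deque

class Holder:
    def __init__(self):
        self.items = [1, 2, 3]
        self.pending = deque()      # empty, filtered out by get_only_non_empty
        self.lookup = {'a': 1}

for name, length, byte_size in get_collection_sizes(Holder(), get_only_non_empty=True):
    print(name, length, byte_size)   # e.g. items 3 <size in bytes>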
|
A safety net.
Decorator for functions that are only allowed to return True or raise
an exception.
Args:
f: A function whose only expected return value is True.
Returns:
A wrapped function whose only guaranteed return value is True.
|
def returns_true_or_raises(f):
"""A safety net.
Decorator for functions that are only allowed to return True or raise
an exception.
Args:
f: A function whose only expected return value is True.
Returns:
A wrapped function whose only guaranteed return value is True.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
ret = f(*args, **kwargs)
if ret is not True:
raise RuntimeError("Unexpected return value %r" % ret)
return True
return wrapped
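A usage sketch, assuming the returns_true_or_raises decorator above is in scope; verify_quorum is a hypothetical function used only to show the contract:

@returns_true_or_raises
def verify_quorum(votes: int, quorum: int) -> bool:
    return votes >= quorum

verify_quorum(3, 3)        # returns True
try:
    verify_quorum(2, 3)    # returns False internally, so the wrapper raises
except RuntimeError as e:
    print(e)               # Unexpected return value False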
|
Return the list of replicas that don't belong to the master protocol
instance
|
def backupIds(self) -> Sequence[int]:
"""
Return the list of replicas that don't belong to the master protocol
instance
"""
return [id for id in self.started.keys() if id != 0]
|
Checks whether n-f nodes completed view change and whether one
of them is the next primary
|
def _hasViewChangeQuorum(self):
# This method should just be present for master instance.
"""
Checks whether n-f nodes completed view change and whether one
of them is the next primary
"""
num_of_ready_nodes = len(self._view_change_done)
diff = self.quorum - num_of_ready_nodes
if diff > 0:
logger.info('{} needs {} ViewChangeDone messages'.format(self, diff))
return False
logger.info("{} got view change quorum ({} >= {})".
format(self.name, num_of_ready_nodes, self.quorum))
return True
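The n-f quorum in the docstring can be sketched as plain arithmetic; the sizing below (f = (n - 1) // 3) is the standard BFT assumption and is shown for illustration rather than taken from this class:

def view_change_quorum(n_nodes: int) -> int:
    f = (n_nodes - 1) // 3     # max faulty nodes tolerated, assuming n >= 3f + 1
    return n_nodes - f         # ViewChangeDone messages needed

assert view_change_quorum(4) == 3
assert view_change_quorum(7) == 5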
|
Validate and process an instance change request.
:param instChg: the instance change request
:param frm: the name of the node that sent this `msg`
|
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None:
"""
Validate and process an instance change request.
:param instChg: the instance change request
:param frm: the name of the node that sent this `msg`
"""
if frm not in self.provider.connected_nodes():
self.provider.discard(
instChg,
"received instance change request: {} from {} "
"which is not in connected list: {}".format(
instChg, frm, self.provider.connected_nodes()), logger.info)
return
logger.info("{} received instance change request: {} from {}".format(self, instChg, frm))
# TODO: add sender to blacklist?
if not isinstance(instChg.viewNo, int):
self.provider.discard(
instChg, "{}field view_no has incorrect type: {}".format(
VIEW_CHANGE_PREFIX, type(instChg.viewNo)))
elif instChg.viewNo <= self.view_no:
self.provider.discard(
instChg,
"Received instance change request with view no {} "
"which is not more than its view no {}".format(
instChg.viewNo, self.view_no), logger.info)
else:
# Record instance changes for views but send instance change
# only when found master to be degraded. if quorum of view changes
# found then change view even if master not degraded
self._on_verified_instance_change_msg(instChg, frm)
if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name):
logger.info("{} received instance change message {} but has already "
"sent an instance change message".format(self, instChg))
elif not self.provider.is_master_degraded():
logger.info("{} received instance change message {} but did not "
"find the master to be slow".format(self, instChg))
else:
logger.display("{}{} found master degraded after receiving instance change"
" message from {}".format(VIEW_CHANGE_PREFIX, self, frm))
self.sendInstanceChange(instChg.viewNo)
|
Processes ViewChangeDone messages. Once n-f messages have been
received, decides on a primary for a specific replica.
:param msg: ViewChangeDone message
:param sender: the name of the node from which this message was sent
|
def process_vchd_msg(self, msg: ViewChangeDone, sender: str) -> bool:
"""
Processes ViewChangeDone messages. Once n-f messages have been
received, decides on a primary for a specific replica.
:param msg: ViewChangeDone message
:param sender: the name of the node from which this message was sent
"""
logger.info("{}'s primary selector started processing of ViewChangeDone msg from {} : {}".
format(self.name, sender, msg))
view_no = msg.viewNo
if self.view_no != view_no:
self.provider.discard(msg, '{} got Primary from {} for view no {} '
'whereas current view no is {}'.
format(self, sender, view_no, self.view_no),
logMethod=logger.info)
return False
new_primary_name = msg.name
if new_primary_name == self.previous_master_primary:
self.provider.discard(msg, '{} got Primary from {} for {} who was primary of '
'master in previous view too'.
format(self, sender, new_primary_name),
logMethod=logger.info)
return False
# Since a node can send ViewChangeDone more than one time
self._on_verified_view_change_done_msg(msg, sender)
# TODO why do we check that after the message tracking
if self.provider.has_primary():
self.provider.discard(msg, "it already decided primary which is {}".
format(self.provider.current_primary_name()), logger.info)
return False
self._start_selection()
|
Service at most `limit` messages from the inBox.
:param limit: the maximum number of messages to service
:return: the number of messages successfully processed
|
async def serviceQueues(self, limit=None) -> int:
"""
Service at most `limit` messages from the inBox.
:param limit: the maximum number of messages to service
:return: the number of messages successfully processed
"""
# do not start any view changes until catch-up is finished!
if not Mode.is_done_syncing(self.provider.node_mode()):
return 0
return await self.inBoxRouter.handleAll(self.inBox, limit)
|
Broadcast an instance change request to all the remaining nodes
:param view_no: the view number when the instance change is requested
|
def sendInstanceChange(self, view_no: int,
suspicion=Suspicions.PRIMARY_DEGRADED):
"""
Broadcast an instance change request to all the remaining nodes
:param view_no: the view number when the instance change is requested
"""
# If not found any sent instance change messages in last
# `ViewChangeWindowSize` seconds or the last sent instance change
# message was sent long enough ago then instance change message can be
# sent otherwise no.
canSendInsChange, cooldown = self.insChngThrottler.acquire()
if canSendInsChange:
logger.info(
"{}{} sending an instance change with view_no {}"
" since {}".format(
VIEW_CHANGE_PREFIX,
self,
view_no,
suspicion.reason))
logger.info("{}{} metrics for monitor: {}"
.format(MONITORING_PREFIX, self,
self.provider.pretty_metrics()))
msg = self._create_instance_change_msg(view_no, suspicion.code)
self.send(msg)
# record instance change vote for self and try to change the view
# if quorum is reached
self._on_verified_instance_change_msg(msg, self.name)
else:
logger.info("{} cannot send instance change sooner then {} seconds".format(self, cooldown))
|
Return whether there is a quorum of instance change requests for the
proposed view number and this node's current view is less than the proposed view
|
def _canViewChange(self, proposedViewNo: int) -> (bool, str):
"""
Return whether there is a quorum of instance change requests for the
proposed view number and this node's current view is less than the proposed view
"""
msg = None
quorum = self.quorums.view_change.value
if not self.instance_changes.has_quorum(proposedViewNo, quorum):
msg = '{} has no quorum for view {}'.format(self, proposedViewNo)
elif not proposedViewNo > self.view_no:
msg = '{} is already in view {} or higher'.format(
self, proposedViewNo)
return not bool(msg), msg
|
Trigger the view change process.
:param proposed_view_no: the new view number after view change.
|
def start_view_change(self, proposed_view_no: int, continue_vc=False):
"""
Trigger the view change process.
:param proposed_view_no: the new view number after view change.
"""
# TODO: consider moving this to pool manager
# TODO: view change is a special case, which can have different
# implementations - we need to make this logic pluggable
if self.pre_vc_strategy and (not continue_vc):
self.pre_view_change_in_progress = True
self.pre_vc_strategy.prepare_view_change(proposed_view_no)
return
elif self.pre_vc_strategy:
self.pre_vc_strategy.on_strategy_complete()
self.previous_view_no = self.view_no
self.view_no = proposed_view_no
self.pre_view_change_in_progress = False
self.view_change_in_progress = True
self.previous_master_primary = self.provider.current_primary_name()
self.set_defaults()
self._process_vcd_for_future_view()
self.initInsChngThrottling()
self.provider.notify_view_change_start()
self.provider.start_catchup()
|
This method is called when a sufficient number of ViewChangeDone
messages have been received and takes steps to switch to the new primary
|
def _verify_primary(self, new_primary, ledger_info):
"""
This method is called when a sufficient number of ViewChangeDone
messages have been received and takes steps to switch to the new primary
"""
expected_primary = self.provider.next_primary_name()
if new_primary != expected_primary:
logger.error("{}{} expected next primary to be {}, but majority "
"declared {} instead for view {}"
.format(PRIMARY_SELECTION_PREFIX, self.name,
expected_primary, new_primary, self.view_no))
return False
self._primary_verified = True
return True
|
Sends ViewChangeDone message to other protocol participants
|
def _send_view_change_done_message(self):
"""
Sends ViewChangeDone message to other protocol participants
"""
new_primary_name = self.provider.next_primary_name()
ledger_summary = self.provider.ledger_summary()
message = ViewChangeDone(self.view_no,
new_primary_name,
ledger_summary)
logger.info("{} is sending ViewChangeDone msg to all : {}".format(self, message))
self.send(message)
self._on_verified_view_change_done_msg(message, self.name)
|
Returns the last accepted `ViewChangeDone` message.
If no view change has happened returns ViewChangeDone
with view no 0 to a newly joined node
|
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]:
# Should not return a list, only done for compatibility with interface
"""
Returns the last accepted `ViewChangeDone` message.
If no view change has happened returns ViewChangeDone
with view no 0 to a newly joined node
"""
# TODO: Consider a case where more than one node joins immediately,
# then one of the node might not have an accepted
# ViewChangeDone message
messages = []
accepted = self._accepted_view_change_done_message
if accepted:
messages.append(ViewChangeDone(self.last_completed_view_no, *accepted))
elif self.name in self._view_change_done:
messages.append(ViewChangeDone(self.last_completed_view_no,
*self._view_change_done[self.name]))
else:
logger.info('{} has no ViewChangeDone message to send for view {}'.
format(self, self.view_no))
return messages
|
Performs basic validation of field value and then passes it for
specific validation.
:param val: field value to validate
:return: error message or None
|
def validate(self, val):
"""
Performs basic validation of field value and then passes it for
specific validation.
:param val: field value to validate
:return: error message or None
"""
if self.nullable and val is None:
return
type_er = self.__type_check(val)
if type_er:
return type_er
spec_err = self._specific_validation(val)
if spec_err:
return spec_err
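A minimal sketch of a concrete field built on this template-method pattern; the class and hook names below mirror the snippet above but are illustrative, not the library's field hierarchy:

class NonNegativeIntField:
    def __init__(self, nullable=False):
        self.nullable = nullable

    def validate(self, val):
        if self.nullable and val is None:
            return
        if not isinstance(val, int):
            return 'expected int, got {}'.format(type(val).__name__)
        return self._specific_validation(val)

    def _specific_validation(self, val):
        if val < 0:
            return 'negative value: {}'.format(val)

assert NonNegativeIntField().validate(5) is None
assert NonNegativeIntField().validate(-1) == 'negative value: -1'
assert NonNegativeIntField(nullable=True).validate(None) is None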
|
Return a signature for the given message.
|
def sign(self, msg: Dict) -> Dict:
"""
Return a signature for the given message.
"""
ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm])
bsig = self.naclSigner.signature(ser)
sig = base58.b58encode(bsig).decode("utf-8")
return sig
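An independent sketch of the same signing flow using PyNaCl and base58 directly; a deterministic JSON dump stands in for serialize_msg_for_signing here, so this illustrates the idea rather than reproducing the node's Signer:

import json
import base58
from nacl.signing import SigningKey

def sign_msg(msg: dict, signing_key: SigningKey) -> str:
    # Drop any existing signature field, serialize deterministically, sign, base58-encode.
    ser = json.dumps({k: v for k, v in msg.items() if k != 'signature'},
                     sort_keys=True).encode('utf-8')
    raw_sig = signing_key.sign(ser).signature
    return base58.b58encode(raw_sig).decode('utf-8')

sig = sign_msg({'reqId': 1, 'operation': {'type': 'GET_TXN'}}, SigningKey.generate())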
|
This will set _lastPrePrepareSeqNo only to values greater than its
previous value; otherwise it leaves it unchanged. To forcefully override,
as in the case of `revert`, directly set `self._lastPrePrepareSeqNo`
|
def lastPrePrepareSeqNo(self, n):
"""
This will set _lastPrePrepareSeqNo only to values greater than its
previous value; otherwise it leaves it unchanged. To forcefully override,
as in the case of `revert`, directly set `self._lastPrePrepareSeqNo`
"""
if n > self._lastPrePrepareSeqNo:
self._lastPrePrepareSeqNo = n
else:
self.logger.debug(
'{} cannot set lastPrePrepareSeqNo to {} as its '
'already {}'.format(
self, n, self._lastPrePrepareSeqNo))
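The monotonic-setter pattern above, shown in isolation as a generic sketch (not the replica class):

class MonotonicSeqNo:
    def __init__(self):
        self._value = 0

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, n):
        # Only ever moves forward; a revert must assign self._value directly.
        if n > self._value:
            self._value = n

seq = MonotonicSeqNo()
seq.value = 5
seq.value = 3          # ignored, still 5
assert seq.value == 5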
|
Create and return the name for a replica using its nodeName and
instanceId.
Ex: Alpha:1
|
def generateName(nodeName: str, instId: int):
"""
Create and return the name for a replica using its nodeName and
instanceId.
Ex: Alpha:1
"""
if isinstance(nodeName, str):
# Because sometimes it is bytes (why?)
if ":" in nodeName:
# Because in some cases (for requested messages) it
# already has ':'. This should be fixed.
return nodeName
return "{}:{}".format(nodeName, instId)
|
Set the name of the primary replica for the current view.
:param value: the name to set primaryName to
|
def primaryName(self, value: Optional[str]) -> None:
"""
Set the name of the primary replica for the current view.
:param value: the name to set primaryName to
"""
if value is not None:
self.warned_no_primary = False
self.primaryNames[self.viewNo] = value
self.compact_primary_names()
if value != self._primaryName:
self._primaryName = value
self.logger.info("{} setting primaryName for view no {} to: {}".
format(self, self.viewNo, value))
if value is None:
# Since the GC needs to happen after a primary has been
# decided.
return
self._gc_before_new_view()
if self.__should_reset_watermarks_before_new_view():
self._reset_watermarks_before_new_view()
|
Return the lowest pp_seq_no of the view for which a prepared certificate
is probable, choosing from unprocessed PRE-PREPAREs and PREPAREs.
|
def get_lowest_probable_prepared_certificate_in_view(
self, view_no) -> Optional[int]:
"""
Return the lowest pp_seq_no of the view for which a prepared certificate
is probable, choosing from unprocessed PRE-PREPAREs and PREPAREs.
"""
# TODO: Naive implementation, dont need to iterate over the complete
# data structures, fix this later
seq_no_pp = SortedList() # pp_seq_no of PRE-PREPAREs
# pp_seq_no of PREPAREs with count of PREPAREs for each
seq_no_p = set()
for (v, p) in self.prePreparesPendingPrevPP:
if v == view_no:
seq_no_pp.add(p)
if v > view_no:
break
for (v, p), pr in self.preparesWaitingForPrePrepare.items():
if v == view_no and len(pr) >= self.quorums.prepare.value:
seq_no_p.add(p)
for n in seq_no_pp:
if n in seq_no_p:
return n
return None
|
Since last ordered view_no and pp_seq_no are only communicated for
master instance, backup instances use this method for restoring
`last_ordered_3pc`
:return:
|
def _setup_last_ordered_for_non_master(self):
"""
Since last ordered view_no and pp_seq_no are only communicated for
master instance, backup instances use this method for restoring
`last_ordered_3pc`
:return:
"""
if not self.isMaster and self.first_batch_after_catchup and \
not self.isPrimary:
# If not master instance choose last ordered seq no to be 1 less
# the lowest prepared certificate in this view
lowest_prepared = self.get_lowest_probable_prepared_certificate_in_view(
self.viewNo)
if lowest_prepared is not None:
# after catchup, last_ordered_3pc[1] is 0;
# it should be changed to lowest_prepared - 1
self.logger.info('{} Setting last ordered for non-master as {}'.
format(self, (self.viewNo, lowest_prepared - 1)))
self.last_ordered_3pc = (self.viewNo, lowest_prepared - 1)
self.update_watermark_from_3pc()
self.first_batch_after_catchup = False
|
Return whether this replica was primary in the given view
|
def is_primary_in_view(self, viewNo: int) -> Optional[bool]:
"""
Return whether this replica was primary in the given view
"""
if viewNo not in self.primaryNames:
return False
return self.primaryNames[viewNo] == self.name
|
Return whether this replica is primary if the request's view number is
equal to this replica's view number and a primary has been selected for
the current view.
Return None otherwise.
:param msg: message
|
def isPrimaryForMsg(self, msg) -> Optional[bool]:
"""
Return whether this replica is primary if the request's view number is
equal to this replica's view number and a primary has been selected for
the current view.
Return None otherwise.
:param msg: message
"""
return self.isPrimary if self.isMsgForCurrentView(msg) \
else self.is_primary_in_view(msg.viewNo)
|
Return whether this message was from primary replica
:param msg:
:param sender:
:return:
|
def isMsgFromPrimary(self, msg, sender: str) -> bool:
"""
Return whether this message was from primary replica
:param msg:
:param sender:
:return:
"""
if self.isMsgForCurrentView(msg):
return self.primaryName == sender
try:
return self.primaryNames[msg.viewNo] == sender
except KeyError:
return False
|
This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised
|
def processReqDuringBatch(
self,
req: Request,
cons_time: int):
"""
This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised
"""
if self.isMaster:
self.node.doDynamicValidation(req)
self.node.applyReq(req, cons_time)
|
TODO: for now default value for fields sub_seq_no is 0 and for final is True
|
def create_3pc_batch(self, ledger_id):
pp_seq_no = self.lastPrePrepareSeqNo + 1
pool_state_root_hash = self.stateRootHash(POOL_LEDGER_ID)
self.logger.debug("{} creating batch {} for ledger {} with state root {}".format(
self, pp_seq_no, ledger_id,
self.stateRootHash(ledger_id, to_str=False)))
if self.last_accepted_pre_prepare_time is None:
last_ordered_ts = self._get_last_timestamp_from_state(ledger_id)
if last_ordered_ts:
self.last_accepted_pre_prepare_time = last_ordered_ts
# DO NOT REMOVE `view_no` argument, used while replay
# tm = self.utc_epoch
tm = self.get_utc_epoch_for_preprepare(self.instId, self.viewNo,
pp_seq_no)
reqs, invalid_indices, rejects = self.consume_req_queue_for_pre_prepare(
ledger_id, tm, self.viewNo, pp_seq_no)
if self.isMaster:
three_pc_batch = ThreePcBatch(ledger_id=ledger_id,
inst_id=self.instId,
view_no=self.viewNo,
pp_seq_no=pp_seq_no,
pp_time=tm,
state_root=self.stateRootHash(ledger_id, to_str=False),
txn_root=self.txnRootHash(ledger_id, to_str=False),
primaries=[],
valid_digests=self.get_valid_req_ids_from_all_requests(
reqs, invalid_indices))
self.node.onBatchCreated(three_pc_batch)
digest = self.batchDigest(reqs)
state_root_hash = self.stateRootHash(ledger_id)
audit_txn_root_hash = self.txnRootHash(AUDIT_LEDGER_ID)
"""TODO: for now default value for fields sub_seq_no is 0 and for final is True"""
params = [
self.instId,
self.viewNo,
pp_seq_no,
tm,
[req.digest for req in reqs],
invalid_index_serializer.serialize(invalid_indices, toBytes=False),
digest,
ledger_id,
state_root_hash,
self.txnRootHash(ledger_id),
0,
True,
pool_state_root_hash,
audit_txn_root_hash
]
# BLS multi-sig:
params = self._bls_bft_replica.update_pre_prepare(params, ledger_id)
pre_prepare = PrePrepare(*params)
if self.isMaster:
rv = self.execute_hook(ReplicaHooks.CREATE_PPR, pre_prepare)
pre_prepare = rv if rv is not None else pre_prepare
self.logger.trace('{} created a PRE-PREPARE with {} requests for ledger {}'.format(
self, len(reqs), ledger_id))
self.lastPrePrepareSeqNo = pp_seq_no
self.last_accepted_pre_prepare_time = tm
if self.isMaster:
self.outBox.extend(rejects)
return pre_prepare
|
Process at most `limit` messages in the inBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
|
def serviceQueues(self, limit=None):
"""
Process at most `limit` messages in the inBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
"""
# TODO should handle SuspiciousNode here
r = self.dequeue_pre_prepares()
r += self.inBoxRouter.handleAllSync(self.inBox, limit)
r += self.send_3pc_batch()
r += self._serviceActions()
return r
|
Process a 3-phase (pre-prepare, prepare and commit) request.
Dispatch the request only if primary has already been decided, otherwise
stash it.
:param msg: the Three Phase message, one of PRE-PREPARE, PREPARE,
COMMIT
:param sender: name of the node that sent this message
|
def process_three_phase_msg(self, msg: ThreePhaseMsg, sender: str):
"""
Process a 3-phase (pre-prepare, prepare and commit) request.
Dispatch the request only if primary has already been decided, otherwise
stash it.
:param msg: the Three Phase message, one of PRE-PREPARE, PREPARE,
COMMIT
:param sender: name of the node that sent this message
"""
sender = self.generateName(sender, self.instId)
pp_key = ((msg.viewNo, msg.ppSeqNo) if
isinstance(msg, PrePrepare) else None)
# the same PrePrepare might come here multiple times
if (pp_key and (msg, sender) not in self.pre_prepare_tss[pp_key]):
# TODO more clean solution would be to set timestamps
# earlier (e.g. in zstack)
self.pre_prepare_tss[pp_key][msg, sender] = self.get_time_for_3pc_batch()
result, reason = self.validator.validate_3pc_msg(msg)
if result == DISCARD:
self.discard(msg, "{} discard message {} from {} "
"with the reason: {}".format(self, msg, sender, reason),
self.logger.trace)
elif result == PROCESS:
self.threePhaseRouter.handleSync((msg, sender))
else:
self.logger.debug("{} stashing 3 phase message {} with "
"the reason: {}".format(self, msg, reason))
self.stasher.stash((msg, sender), result)
|
Validate and process provided PRE-PREPARE, create and
broadcast PREPARE for it.
:param pre_prepare: message
:param sender: name of the node that sent this message
|
def processPrePrepare(self, pre_prepare: PrePrepare, sender: str):
"""
Validate and process provided PRE-PREPARE, create and
broadcast PREPARE for it.
:param pre_prepare: message
:param sender: name of the node that sent this message
"""
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
self.logger.debug("{} received PRE-PREPARE{} from {}".format(self, key, sender))
# TODO: should we still do it?
# Converting each req_idrs from list to tuple
req_idrs = {f.REQ_IDR.nm: [key for key in pre_prepare.reqIdr]}
pre_prepare = updateNamedTuple(pre_prepare, **req_idrs)
def report_suspicious(reason):
ex = SuspiciousNode(sender, reason, pre_prepare)
self.report_suspicious_node(ex)
why_not = self._can_process_pre_prepare(pre_prepare, sender)
if why_not is None:
why_not_applied = \
self._process_valid_preprepare(pre_prepare, sender)
if why_not_applied is not None:
if why_not_applied == PP_APPLY_REJECT_WRONG:
report_suspicious(Suspicions.PPR_REJECT_WRONG)
elif why_not_applied == PP_APPLY_WRONG_DIGEST:
report_suspicious(Suspicions.PPR_DIGEST_WRONG)
elif why_not_applied == PP_APPLY_WRONG_STATE:
report_suspicious(Suspicions.PPR_STATE_WRONG)
elif why_not_applied == PP_APPLY_ROOT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_TXN_WRONG)
elif why_not_applied == PP_APPLY_HOOK_ERROR:
report_suspicious(Suspicions.PPR_PLUGIN_EXCEPTION)
elif why_not_applied == PP_SUB_SEQ_NO_WRONG:
report_suspicious(Suspicions.PPR_SUB_SEQ_NO_WRONG)
elif why_not_applied == PP_NOT_FINAL:
# this is fine, just wait for another
return
elif why_not_applied == PP_APPLY_AUDIT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG)
elif why_not_applied == PP_REQUEST_ALREADY_ORDERED:
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
elif why_not == PP_CHECK_NOT_FROM_PRIMARY:
report_suspicious(Suspicions.PPR_FRM_NON_PRIMARY)
elif why_not == PP_CHECK_TO_PRIMARY:
report_suspicious(Suspicions.PPR_TO_PRIMARY)
elif why_not == PP_CHECK_DUPLICATE:
report_suspicious(Suspicions.DUPLICATE_PPR_SENT)
elif why_not == PP_CHECK_INCORRECT_POOL_STATE_ROOT:
report_suspicious(Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG)
elif why_not == PP_CHECK_OLD:
self.logger.info("PRE-PREPARE {} has ppSeqNo lower "
"then the latest one - ignoring it".format(key))
elif why_not == PP_CHECK_REQUEST_NOT_FINALIZED:
absents = set()
non_fin = set()
non_fin_payload = set()
for key in pre_prepare.reqIdr:
req = self.requests.get(key)
if req is None:
absents.add(key)
elif not req.finalised:
non_fin.add(key)
non_fin_payload.add(req.request.payload_digest)
absent_str = ', '.join(str(key) for key in absents)
non_fin_str = ', '.join(
'{} ({} : {})'.format(str(key),
str(len(self.requests[key].propagates)),
', '.join(self.requests[key].propagates.keys())) for key in non_fin)
self.logger.warning(
"{} found requests in the incoming pp, of {} ledger, that are not finalized. "
"{} of them don't have propagates: {}."
"{} of them don't have enough propagates: {}.".format(self, pre_prepare.ledgerId,
len(absents), absent_str,
len(non_fin), non_fin_str))
def signal_suspicious(req):
self.logger.info("Request digest {} already ordered. Discard {} "
"from {}".format(req, pre_prepare, sender))
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
# checking for payload digest is more effective
for payload_key in non_fin_payload:
if self.node.seqNoDB.get_by_payload_digest(payload_key) != (None, None):
signal_suspicious(payload_key)
return
# for absents we can only check full digest
for full_key in absents:
if self.node.seqNoDB.get_by_full_digest(full_key) is not None:
signal_suspicious(full_key)
return
bad_reqs = absents | non_fin
self.enqueue_pre_prepare(pre_prepare, sender, bad_reqs)
# TODO: An optimisation might be to not request PROPAGATEs
# if some PROPAGATEs are present or a client request is
# present and sufficient PREPAREs and PRE-PREPARE are present,
# then the digest can be compared but this is expensive as the
# PREPARE and PRE-PREPARE contain a combined digest
self._schedule(partial(self.request_propagates_if_needed, bad_reqs, pre_prepare),
self.config.PROPAGATE_REQUEST_DELAY)
elif why_not == PP_CHECK_NOT_NEXT:
pp_view_no = pre_prepare.viewNo
pp_seq_no = pre_prepare.ppSeqNo
last_pp_view_no, last_pp_seq_no = self.__last_pp_3pc
if pp_view_no >= last_pp_view_no and (
self.isMaster or self.last_ordered_3pc[1] != 0):
seq_frm = last_pp_seq_no + 1 if pp_view_no == last_pp_view_no else 1
seq_to = pp_seq_no - 1
if seq_to >= seq_frm >= pp_seq_no - CHK_FREQ + 1:
self.logger.warning(
"{} missing PRE-PREPAREs from {} to {}, "
"going to request".format(self, seq_frm, seq_to))
self._request_missing_three_phase_messages(
pp_view_no, seq_frm, seq_to)
self.enqueue_pre_prepare(pre_prepare, sender)
self._setup_last_ordered_for_non_master()
elif why_not == PP_CHECK_WRONG_TIME:
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
item = (pre_prepare, sender, False)
self.pre_prepares_stashed_for_incorrect_time[key] = item
report_suspicious(Suspicions.PPR_TIME_WRONG)
elif why_not == BlsBftReplica.PPR_BLS_MULTISIG_WRONG:
report_suspicious(Suspicions.PPR_BLS_MULTISIG_WRONG)
else:
self.logger.warning("Unknown PRE-PREPARE check status: {}".format(why_not))
|
Try to send the Prepare message if the PrePrepare message is ready to
be passed into the Prepare phase.
|
def tryPrepare(self, pp: PrePrepare):
"""
Try to send the Prepare message if the PrePrepare message is ready to
be passed into the Prepare phase.
"""
rv, msg = self.canPrepare(pp)
if rv:
self.doPrepare(pp)
else:
self.logger.debug("{} cannot send PREPARE since {}".format(self, msg))
|
Validate and process the PREPARE specified.
If validation is successful, create a COMMIT and broadcast it.
:param prepare: a PREPARE msg
:param sender: name of the node that sent the PREPARE
|
def processPrepare(self, prepare: Prepare, sender: str) -> None:
"""
Validate and process the PREPARE specified.
If validation is successful, create a COMMIT and broadcast it.
:param prepare: a PREPARE msg
:param sender: name of the node that sent the PREPARE
"""
key = (prepare.viewNo, prepare.ppSeqNo)
self.logger.debug("{} received PREPARE{} from {}".format(self, key, sender))
# TODO move this try/except up higher
try:
if self.validatePrepare(prepare, sender):
self.addToPrepares(prepare, sender)
self.stats.inc(TPCStat.PrepareRcvd)
self.logger.debug("{} processed incoming PREPARE {}".format(
self, (prepare.viewNo, prepare.ppSeqNo)))
else:
# TODO let's have isValidPrepare throw an exception that gets
# handled and possibly logged higher
self.logger.trace("{} cannot process incoming PREPARE".format(self))
except SuspiciousNode as ex:
self.report_suspicious_node(ex)
|
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
|
def processCommit(self, commit: Commit, sender: str) -> None:
"""
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
"""
self.logger.debug("{} received COMMIT{} from {}".format(
self, (commit.viewNo, commit.ppSeqNo), sender))
if self.validateCommit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self.addToCommits(commit, sender)
self.logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo)))
|
Try to commit if the Prepare message is ready to be passed into the
commit phase.
|
def tryCommit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self.canCommit(prepare)
if rv:
self.doCommit(prepare)
else:
self.logger.debug("{} cannot send COMMIT since {}".format(self, reason))
|
Try to order if the Commit message is ready to be ordered.
|
def tryOrder(self, commit: Commit):
"""
Try to order if the Commit message is ready to be ordered.
"""
canOrder, reason = self.canOrder(commit)
if canOrder:
self.logger.trace("{} returning request to node".format(self))
self.doOrder(commit)
else:
self.logger.debug("{} cannot return request to node: {}".format(self, reason))
return canOrder
|
Create a commit message from the given Prepare message and trigger the
commit phase
:param p: the prepare message
|
def doCommit(self, p: Prepare):
"""
Create a commit message from the given Prepare message and trigger the
commit phase
:param p: the prepare message
"""
key_3pc = (p.viewNo, p.ppSeqNo)
self.logger.debug("{} Sending COMMIT{} at {}".format(self, key_3pc, self.get_current_time()))
params = [
self.instId, p.viewNo, p.ppSeqNo
]
pre_prepare = self.getPrePrepare(*key_3pc)
# BLS multi-sig:
if p.stateRootHash is not None:
pre_prepare = self.getPrePrepare(*key_3pc)
params = self._bls_bft_replica.update_commit(params, pre_prepare)
commit = Commit(*params)
if self.isMaster:
rv = self.execute_hook(ReplicaHooks.CREATE_CM, commit)
commit = rv if rv is not None else commit
self.send(commit, TPCStat.CommitSent)
self.addToCommits(commit, self.name)
|
Check if there are any requests which are not finalised, i.e. for
which there are not enough PROPAGATEs
|
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
"""
Check if there are any requests which are not finalised, i.e. for
which there are not enough PROPAGATEs
"""
return {key for key in reqKeys if not self.requests.is_finalised(key)}
|
Applies (but does not commit) the requests of the PrePrepare
to the ledger and state
|
def _apply_pre_prepare(self, pre_prepare: PrePrepare):
"""
Applies (but does not commit) the requests of the PrePrepare
to the ledger and state
"""
reqs = []
idx = 0
rejects = []
invalid_indices = []
suspicious = False
# 1. apply each request
for req_key in pre_prepare.reqIdr:
req = self.requests[req_key].finalised
try:
self.processReqDuringBatch(req,
pre_prepare.ppTime)
except (InvalidClientMessageException, UnknownIdentifier, SuspiciousPrePrepare) as ex:
self.logger.warning('{} encountered exception {} while processing {}, '
'will reject'.format(self, ex, req))
rejects.append((req.key, Reject(req.identifier, req.reqId, ex)))
invalid_indices.append(idx)
if isinstance(ex, SuspiciousPrePrepare):
suspicious = True
finally:
reqs.append(req)
idx += 1
# 2. call callback for the applied batch
if self.isMaster:
three_pc_batch = ThreePcBatch.from_pre_prepare(pre_prepare,
state_root=self.stateRootHash(pre_prepare.ledgerId,
to_str=False),
txn_root=self.txnRootHash(pre_prepare.ledgerId,
to_str=False),
primaries=[],
valid_digests=self.get_valid_req_ids_from_all_requests(
reqs, invalid_indices))
self.node.onBatchCreated(three_pc_batch)
return reqs, invalid_indices, rejects, suspicious
|
Decide whether this replica is eligible to process a PRE-PREPARE.
:param pre_prepare: a PRE-PREPARE msg to process
:param sender: the name of the node that sent the PRE-PREPARE msg
|
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str) -> Optional[int]:
"""
Decide whether this replica is eligible to process a PRE-PREPARE.
:param pre_prepare: a PRE-PREPARE msg to process
:param sender: the name of the node that sent the PRE-PREPARE msg
"""
# TODO: Check whether it is rejecting PRE-PREPARE from previous view
# PRE-PREPARE should not be sent from non primary
if not self.isMsgFromPrimary(pre_prepare, sender):
return PP_CHECK_NOT_FROM_PRIMARY
# Already has a PRE-PREPARE with same 3 phase key
if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
return PP_CHECK_DUPLICATE
if not self.is_pre_prepare_time_acceptable(pre_prepare, sender):
return PP_CHECK_WRONG_TIME
if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
self.__last_pp_3pc) > 0:
return PP_CHECK_OLD # ignore old pre-prepare
if self.nonFinalisedReqs(pre_prepare.reqIdr):
return PP_CHECK_REQUEST_NOT_FINALIZED
if not self.__is_next_pre_prepare(pre_prepare.viewNo,
pre_prepare.ppSeqNo):
return PP_CHECK_NOT_NEXT
if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
pre_prepare.poolStateRootHash != self.stateRootHash(POOL_LEDGER_ID):
return PP_CHECK_INCORRECT_POOL_STATE_ROOT
# BLS multi-sig:
status = self._bls_bft_replica.validate_pre_prepare(pre_prepare,
sender)
if status is not None:
return status
return None
|
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
|
def addToPrePrepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
self.prePrepares[key] = pp
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self.dequeue_prepares(*key)
self.dequeue_commits(*key)
self.stats.inc(TPCStat.PrePrepareRcvd)
self.tryPrepare(pp)
|
Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes
|
def canPrepare(self, ppReq) -> (bool, str):
"""
Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes
"""
if self.has_sent_prepare(ppReq):
return False, 'has already sent PREPARE for {}'.format(ppReq)
return True, ''
|
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
|
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
"""
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
"""
key = (prepare.viewNo, prepare.ppSeqNo)
primaryStatus = self.isPrimaryForMsg(prepare)
ppReq = self.getPrePrepare(*key)
# If a non primary replica and receiving a PREPARE request before a
# PRE-PREPARE request, then proceed
# PREPARE should not be sent from primary
if self.isMsgFromPrimary(prepare, sender):
raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)
# If non primary replica
if primaryStatus is False:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
# If PRE-PREPARE not received for the PREPARE, might be slow
# network
if not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
# If primary replica
if primaryStatus is True:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
# If PRE-PREPARE was not sent for this PREPARE, certainly
# malicious behavior
elif not ppReq:
raise SuspiciousNode(
sender, Suspicions.UNKNOWN_PR_SENT, prepare)
if primaryStatus is None and not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
if prepare.digest != ppReq.digest:
raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
elif prepare.stateRootHash != ppReq.stateRootHash:
raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
prepare)
elif prepare.txnRootHash != ppReq.txnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
prepare)
elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
prepare)
try:
self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
except Exception as ex:
self.logger.warning('{} encountered exception in replica '
'hook {} : {}'.
format(self, ReplicaHooks.VALIDATE_PR, ex))
raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION,
prepare)
# BLS multi-sig:
self._bls_bft_replica.validate_prepare(prepare, sender)
return True
|
Add the specified PREPARE to this replica's list of received
PREPAREs and try sending COMMIT
:param prepare: the PREPARE to add to the list
|
def addToPrepares(self, prepare: Prepare, sender: str):
"""
Add the specified PREPARE to this replica's list of received
PREPAREs and try sending COMMIT
:param prepare: the PREPARE to add to the list
"""
# BLS multi-sig:
self._bls_bft_replica.process_prepare(prepare, sender)
self.prepares.addVote(prepare, sender)
self.dequeue_commits(prepare.viewNo, prepare.ppSeqNo)
self.tryCommit(prepare)
|
Return whether the specified PREPARE can proceed to the Commit
step.
Decision criteria:
- If this replica has got just n-f-1 PREPARE requests then commit request.
- If less than n-f-1 PREPARE requests then probably there's no consensus on
the request; don't commit
- If more than n-f-1 then already sent COMMIT; don't commit
:param prepare: the PREPARE
|
def canCommit(self, prepare: Prepare) -> (bool, str):
"""
Return whether the specified PREPARE can proceed to the Commit
step.
Decision criteria:
- If this replica has got just n-f-1 PREPARE requests then commit request.
- If less than n-f-1 PREPARE requests then probably there's no consensus on
the request; don't commit
- If more than n-f-1 then already sent COMMIT; don't commit
:param prepare: the PREPARE
"""
quorum = self.quorums.prepare.value
if not self.prepares.hasQuorum(prepare, quorum):
return False, 'does not have prepare quorum for {}'.format(prepare)
if self.hasCommitted(prepare):
return False, 'has already sent COMMIT for {}'.format(prepare)
return True, ''
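A minimal arithmetic sketch of the threshold described in the criteria above (illustrative only; the actual value is taken from self.quorums.prepare):
# Illustrative arithmetic only; the real threshold comes from self.quorums.prepare.
n, f = 4, 1                    # a 4-node pool tolerating one faulty node
prepare_quorum = n - f - 1     # as described in the decision criteria above
assert prepare_quorum == 2     # two PREPAREs allow this replica to send COMMIT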
|
Return whether the COMMIT specified is valid.
:param commit: the COMMIT to validate
:return: True if the COMMIT is valid, False otherwise
|
def validateCommit(self, commit: Commit, sender: str) -> bool:
"""
Return whether the COMMIT specified is valid.
:param commit: the COMMIT to validate
        :return: True if the COMMIT is valid, False otherwise
"""
key = (commit.viewNo, commit.ppSeqNo)
if not self.has_prepared(key):
self.enqueue_commit(commit, sender)
return False
if self.commits.hasCommitFrom(commit, sender):
raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit)
# BLS multi-sig:
pre_prepare = self.getPrePrepare(commit.viewNo, commit.ppSeqNo)
why_not = self._bls_bft_replica.validate_commit(commit, sender, pre_prepare)
if why_not == BlsBftReplica.CM_BLS_SIG_WRONG:
self.logger.warning("{} discard Commit message from "
"{}:{}".format(self, sender, commit))
raise SuspiciousNode(sender,
Suspicions.CM_BLS_SIG_WRONG,
commit)
elif why_not is not None:
self.logger.warning("Unknown error code returned for bls commit "
"validation {}".format(why_not))
return True
|
Add the specified COMMIT to this replica's list of received
commit requests.
:param commit: the COMMIT to add to the list
:param sender: the name of the node that sent the COMMIT
|
def addToCommits(self, commit: Commit, sender: str):
"""
Add the specified COMMIT to this replica's list of received
commit requests.
:param commit: the COMMIT to add to the list
:param sender: the name of the node that sent the COMMIT
"""
# BLS multi-sig:
self._bls_bft_replica.process_commit(commit, sender)
self.commits.addVote(commit, sender)
self.tryOrder(commit)
|
Return whether the specified commitRequest can be returned to the node.
Decision criteria:
- If have got just n-f Commit requests then return request to node
- If less than n-f of commit requests then probably don't have
consensus on the request; don't return request to node
- If more than n-f then already returned to node; don't return request
to node
:param commit: the COMMIT
|
def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]:
"""
Return whether the specified commitRequest can be returned to the node.
Decision criteria:
- If have got just n-f Commit requests then return request to node
- If less than n-f of commit requests then probably don't have
consensus on the request; don't return request to node
- If more than n-f then already returned to node; don't return request
to node
:param commit: the COMMIT
"""
quorum = self.quorums.commit.value
if not self.commits.hasQuorum(commit, quorum):
return False, "no quorum ({}): {} commits where f is {}". \
format(quorum, commit, self.f)
key = (commit.viewNo, commit.ppSeqNo)
if self.has_already_ordered(*key):
return False, "already ordered"
if commit.ppSeqNo > 1 and not self.all_prev_ordered(commit):
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if viewNo not in self.stashed_out_of_order_commits:
self.stashed_out_of_order_commits[viewNo] = {}
self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit
self.startRepeating(self.process_stashed_out_of_order_commits,
self.config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL)
return False, "stashing {} since out of order". \
format(commit)
return True, None
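A matching sketch for the ordering threshold (illustrative only; the actual value is taken from self.quorums.commit):
# Illustrative arithmetic only; the real threshold comes from self.quorums.commit.
n, f = 4, 1
commit_quorum = n - f          # as described in the decision criteria above
assert commit_quorum == 3      # three COMMITs allow the batch to be ordered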
|
Return True if all previous COMMITs have been ordered
|
def all_prev_ordered(self, commit: Commit):
"""
Return True if all previous COMMITs have been ordered
"""
# TODO: This method does a lot of work, choose correct data
# structures to make it efficient.
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if self.last_ordered_3pc == (viewNo, ppSeqNo - 1):
# Last ordered was in same view as this COMMIT
return True
# if some PREPAREs/COMMITs were completely missed in the same view
toCheck = set()
toCheck.update(set(self.sentPrePrepares.keys()))
toCheck.update(set(self.prePrepares.keys()))
toCheck.update(set(self.prepares.keys()))
toCheck.update(set(self.commits.keys()))
for (v, p) in toCheck:
if v < viewNo and (v, p) not in self.ordered:
# Have commits from previous view that are unordered.
return False
if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
# If unordered commits are found with lower ppSeqNo then this
# cannot be ordered.
return False
return True
|
Process checkpoint messages
:return: whether processed (True) or stashed (False)
|
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool:
"""
Process checkpoint messages
:return: whether processed (True) or stashed (False)
"""
self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender))
result, reason = self.validator.validate_checkpoint_msg(msg)
if result == DISCARD:
self.discard(msg, "{} discard message {} from {} "
"with the reason: {}".format(self, msg, sender, reason),
self.logger.trace)
elif result == PROCESS:
self._do_process_checkpoint(msg, sender)
else:
self.logger.debug("{} stashing checkpoint message {} with "
"the reason: {}".format(self, msg, reason))
self.stasher.stash((msg, sender), result)
return False
return True
|
:param msg: the Checkpoint whose seqNoEnd is checked
:return: True if the checkpoint's seqNoEnd is less than or equal to the
last stable checkpoint, False otherwise
|
def is_pp_seq_no_stable(self, msg: Checkpoint):
"""
        :param msg: the Checkpoint whose seqNoEnd is checked
        :return: True if the checkpoint's seqNoEnd is less than or equal to the
        last stable checkpoint, False otherwise
"""
pp_seq_no = msg.seqNoEnd
ck = self.firstCheckPoint
if ck:
_, ckState = ck
return ckState.isStable and ckState.seqNo >= pp_seq_no
else:
return False
|
Dequeue any received PRE-PREPAREs whose requests had not been finalised
or for which the replica was missing an earlier PRE-PREPARE
:return:
|
def dequeue_pre_prepares(self):
"""
        Dequeue any received PRE-PREPAREs whose requests had not been finalised
        or for which the replica was missing an earlier PRE-PREPARE
:return:
"""
ppsReady = []
# Check if any requests have become finalised belonging to any stashed
# PRE-PREPAREs.
for i, (pp, sender, reqIds) in enumerate(
self.prePreparesPendingFinReqs):
finalised = set()
for r in reqIds:
if self.requests.is_finalised(r):
finalised.add(r)
diff = reqIds.difference(finalised)
# All requests become finalised
if not diff:
ppsReady.append(i)
self.prePreparesPendingFinReqs[i] = (pp, sender, diff)
for i in sorted(ppsReady, reverse=True):
pp, sender, _ = self.prePreparesPendingFinReqs.pop(i)
self.prePreparesPendingPrevPP[pp.viewNo, pp.ppSeqNo] = (pp, sender)
r = 0
while self.prePreparesPendingPrevPP and self.__is_next_pre_prepare(
*self.prePreparesPendingPrevPP.iloc[0]):
_, (pp, sender) = self.prePreparesPendingPrevPP.popitem(last=False)
if not self.can_pp_seq_no_be_in_view(pp.viewNo, pp.ppSeqNo):
self.discard(pp, "Pre-Prepare from a previous view",
self.logger.debug)
continue
self.logger.info("{} popping stashed PREPREPARE{} from sender {}".format(self, pp, sender))
self.process_three_phase_msg(pp, sender)
r += 1
return r
|
Checks if the `pp_seq_no` could have been in view `view_no`. It will
return False when the `pp_seq_no` belongs to a later view than
`view_no` else will return True
:return:
|
def can_pp_seq_no_be_in_view(self, view_no, pp_seq_no):
"""
Checks if the `pp_seq_no` could have been in view `view_no`. It will
return False when the `pp_seq_no` belongs to a later view than
`view_no` else will return True
:return:
"""
if view_no > self.viewNo:
raise PlenumValueError(
'view_no', view_no,
"<= current view_no {}".format(self.viewNo),
prefix=self
)
return view_no == self.viewNo or (
view_no < self.viewNo and self.last_prepared_before_view_change and compare_3PC_keys(
(view_no, pp_seq_no), self.last_prepared_before_view_change) >= 0)
|
Request preprepare
|
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
Request preprepare
"""
recipients = self.primaryName
return self._request_three_phase_msg(three_pc_key,
self.requested_pre_prepares,
PREPREPARE,
recipients,
stash_data)
|
Request prepare
|
def _request_prepare(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None,
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
        Request prepare
"""
if recipients is None:
recipients = self.node.nodestack.connecteds.copy()
primaryName = self.primaryName[:self.primaryName.rfind(":")]
recipients.discard(primaryName)
return self._request_three_phase_msg(three_pc_key, self.requested_prepares, PREPARE, recipients, stash_data)
|
Request commit
|
def _request_commit(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None) -> bool:
"""
Request commit
"""
return self._request_three_phase_msg(three_pc_key, self.requested_commits, COMMIT, recipients)
|
Check if an acceptable PRE-PREPARE is already stashed; if not, then
check the count of PREPAREs, make sure more than f consistent PREPAREs
are found, and store the acceptable PREPARE state (digest, roots) for
verification of the PRE-PREPARE once it is received
|
def _request_pre_prepare_for_prepare(self, three_pc_key) -> bool:
"""
        Check if an acceptable PRE-PREPARE is already stashed; if not, then
        check the count of PREPAREs, make sure more than f consistent PREPAREs
        are found, and store the acceptable PREPARE state (digest, roots) for
        verification of the PRE-PREPARE once it is received
"""
if three_pc_key in self.prePreparesPendingPrevPP:
self.logger.debug('{} not requesting a PRE-PREPARE since already found '
'stashed for {}'.format(self, three_pc_key))
return False
if len(
self.preparesWaitingForPrePrepare[three_pc_key]) < self.quorums.prepare.value:
self.logger.debug(
'{} not requesting a PRE-PREPARE because does not have'
' sufficient PREPAREs for {}'.format(
self, three_pc_key))
return False
digest, state_root, txn_root, _ = \
self.get_acceptable_stashed_prepare_state(three_pc_key)
# Choose a better data structure for `prePreparesPendingFinReqs`
pre_prepares = [pp for pp, _, _ in self.prePreparesPendingFinReqs
if (pp.viewNo, pp.ppSeqNo) == three_pc_key]
if pre_prepares:
if [pp for pp in pre_prepares if (
pp.digest, pp.stateRootHash, pp.txnRootHash) == (
digest, state_root, txn_root)]:
self.logger.debug('{} not requesting a PRE-PREPARE since already '
'found stashed for {}'.format(self, three_pc_key))
return False
self._request_pre_prepare(three_pc_key,
stash_data=(digest, state_root, txn_root))
return True
|
Check if this PRE-PREPARE is not older than (not checking for greater
than, since batches may be sent in less than 1 second) the last PRE-PREPARE
and in a sufficient range of local clock's UTC time.
:param pp:
:return:
|
def is_pre_prepare_time_correct(self, pp: PrePrepare, sender: str) -> bool:
"""
Check if this PRE-PREPARE is not older than (not checking for greater
        than, since batches may be sent in less than 1 second) the last PRE-PREPARE
and in a sufficient range of local clock's UTC time.
:param pp:
:return:
"""
tpcKey = (pp.viewNo, pp.ppSeqNo)
if (self.last_accepted_pre_prepare_time and
pp.ppTime < self.last_accepted_pre_prepare_time):
return False
elif ((tpcKey not in self.pre_prepare_tss) or
((pp, sender) not in self.pre_prepare_tss[tpcKey])):
return False
else:
return (
abs(pp.ppTime - self.pre_prepare_tss[tpcKey][pp, sender]) <=
self.config.ACCEPTABLE_DEVIATION_PREPREPARE_SECS
)
|
Returns True or False depending on whether the time in the PRE-PREPARE
is acceptable. Can return True if time is not acceptable but sufficient
PREPAREs are found to support the PRE-PREPARE
:param pp:
:return:
|
def is_pre_prepare_time_acceptable(self, pp: PrePrepare, sender: str) -> bool:
"""
        Returns True or False depending on whether the time in the PRE-PREPARE
is acceptable. Can return True if time is not acceptable but sufficient
PREPAREs are found to support the PRE-PREPARE
:param pp:
:return:
"""
key = (pp.viewNo, pp.ppSeqNo)
if key in self.requested_pre_prepares:
# Special case for requested PrePrepares
return True
correct = self.is_pre_prepare_time_correct(pp, sender)
if not correct:
if key in self.pre_prepares_stashed_for_incorrect_time and \
self.pre_prepares_stashed_for_incorrect_time[key][-1]:
self.logger.debug('{} marking time as correct for {}'.format(self, pp))
correct = True
else:
self.logger.warning('{} found {} to have incorrect time.'.format(self, pp))
return correct
|
Check if any PRE-PREPAREs that were stashed since their time was not
acceptable, can now be accepted since enough PREPAREs are received
|
def _process_stashed_pre_prepare_for_time_if_possible(
self, key: Tuple[int, int]):
"""
Check if any PRE-PREPAREs that were stashed since their time was not
acceptable, can now be accepted since enough PREPAREs are received
"""
self.logger.debug('{} going to process stashed PRE-PREPAREs with '
'incorrect times'.format(self))
q = self.quorums.f
if len(self.preparesWaitingForPrePrepare[key]) > q:
times = [pr.ppTime for (pr, _) in
self.preparesWaitingForPrePrepare[key]]
most_common_time, freq = mostCommonElement(times)
if self.quorums.timestamp.is_reached(freq):
self.logger.debug('{} found sufficient PREPAREs for the '
'PRE-PREPARE{}'.format(self, key))
stashed_pp = self.pre_prepares_stashed_for_incorrect_time
pp, sender, done = stashed_pp[key]
if done:
self.logger.debug('{} already processed PRE-PREPARE{}'.format(self, key))
return True
# True is set since that will indicate to `is_pre_prepare_time_acceptable`
# that sufficient PREPAREs are received
stashed_pp[key] = (pp, sender, True)
self.process_three_phase_msg(pp, sender)
return True
return False
|
Send a message to the node on which this replica resides.
:param msg: the message to send
:param stat: the stat to increment, if provided
|
def send(self, msg, stat=None) -> None:
"""
        Send a message to the node on which this replica resides.
        :param msg: the message to send
        :param stat: the stat to increment, if provided
"""
self.logger.trace("{} sending {}".format(self, msg.__class__.__name__),
extra={"cli": True, "tags": ['sending']})
self.logger.trace("{} sending {}".format(self, msg))
if stat:
self.stats.inc(stat)
self.outBox.append(msg)
|
Revert changes to ledger (uncommitted) and state made by any requests
that have not been ordered.
|
def revert_unordered_batches(self):
"""
Revert changes to ledger (uncommitted) and state made by any requests
that have not been ordered.
"""
i = 0
for key in sorted(self.batches.keys(), reverse=True):
if compare_3PC_keys(self.last_ordered_3pc, key) > 0:
ledger_id, discarded, _, prevStateRoot, len_reqIdr = self.batches.pop(key)
discarded = invalid_index_serializer.deserialize(discarded)
self.logger.debug('{} reverting 3PC key {}'.format(self, key))
self.revert(ledger_id, prevStateRoot, len_reqIdr - len(discarded))
i += 1
else:
break
return i
|
Remove any 3 phase messages till the last ordered key and also remove
any corresponding request keys
|
def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
"""
Remove any 3 phase messages till the last ordered key and also remove
any corresponding request keys
"""
outdated_pre_prepares = {}
for key, pp in self.prePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
for key, pp in self.sentPrePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
self.logger.trace('{} going to remove messages for {} 3PC keys'.format(
self, len(outdated_pre_prepares)))
for key, pp in outdated_pre_prepares.items():
self.batches.pop(key, None)
self.sentPrePrepares.pop(key, None)
self.prePrepares.pop(key, None)
self.prepares.pop(key, None)
self.commits.pop(key, None)
self._discard_ordered_req_keys(pp)
|
Remove any Ordered message queued for the node whose 3PC key is
less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is
passed, else remove all Ordered messages; needed during catchup
|
def _remove_ordered_from_queue(self, last_caught_up_3PC=None):
"""
        Remove any Ordered message queued for the node whose 3PC key is
        less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is
        passed, else remove all Ordered messages; needed during catchup
"""
to_remove = []
for i, msg in enumerate(self.outBox):
if isinstance(msg, Ordered) and \
(not last_caught_up_3PC or
compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0):
to_remove.append(i)
self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove)))
# Removing Ordered from queue but returning `Ordered` in order that
# they should be processed.
removed = []
for i in reversed(to_remove):
removed.insert(0, self.outBox[i])
del self.outBox[i]
return removed
|
Remove stashed received checkpoints up to `till_3pc_key` if provided,
otherwise remove all stashed received checkpoints
|
def _remove_stashed_checkpoints(self, till_3pc_key=None):
"""
Remove stashed received checkpoints up to `till_3pc_key` if provided,
otherwise remove all stashed received checkpoints
"""
if till_3pc_key is None:
self.stashedRecvdCheckpoints.clear()
self.logger.info('{} removing all stashed checkpoints'.format(self))
return
for view_no in list(self.stashedRecvdCheckpoints.keys()):
if view_no < till_3pc_key[0]:
self.logger.info('{} removing stashed checkpoints for view {}'.format(self, view_no))
del self.stashedRecvdCheckpoints[view_no]
elif view_no == till_3pc_key[0]:
for (s, e) in list(self.stashedRecvdCheckpoints[view_no].keys()):
if e <= till_3pc_key[1]:
self.logger.info('{} removing stashed checkpoints: '
'viewNo={}, seqNoStart={}, seqNoEnd={}'.
format(self, view_no, s, e))
del self.stashedRecvdCheckpoints[view_no][(s, e)]
if len(self.stashedRecvdCheckpoints[view_no]) == 0:
del self.stashedRecvdCheckpoints[view_no]
|
Checks whether the given port is available
|
def checkPortAvailable(ha):
"""Checks whether the given port is available"""
# Not sure why OS would allow binding to one type and not other.
# Checking for port available for TCP and UDP.
sockTypes = (socket.SOCK_DGRAM, socket.SOCK_STREAM)
for typ in sockTypes:
sock = socket.socket(socket.AF_INET, typ)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(ha)
if typ == socket.SOCK_STREAM:
l_onoff = 1
l_linger = 0
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', l_onoff, l_linger))
except OSError as exc:
if exc.errno in [
errno.EADDRINUSE, errno.EADDRNOTAVAIL,
WS_SOCKET_BIND_ERROR_ALREADY_IN_USE,
WS_SOCKET_BIND_ERROR_NOT_AVAILABLE
]:
raise PortNotAvailable(ha)
else:
raise exc
finally:
sock.close()
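A hypothetical usage sketch, assuming checkPortAvailable and PortNotAvailable are importable from this module and that ha is a (host, port) pair accepted by socket.bind():
# Hypothetical usage: raises PortNotAvailable if either the TCP or UDP bind fails.
try:
    checkPortAvailable(("0.0.0.0", 9702))
    print("port 9702 is free for both TCP and UDP")
except PortNotAvailable:
    print("port 9702 is already taken, pick another one")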
|
A deterministic but more evenly distributed comparator than simple alphabetical.
Useful when comparing consecutive strings and an even distribution is needed.
Provides an even chance of returning True or False.
|
def evenCompare(a: str, b: str) -> bool:
"""
A deterministic but more evenly distributed comparator than simple alphabetical.
Useful when comparing consecutive strings and an even distribution is needed.
    Provides an even chance of returning True or False.
"""
ab = a.encode('utf-8')
bb = b.encode('utf-8')
ac = crypto_hash_sha256(ab)
bc = crypto_hash_sha256(bb)
return ac < bc
|
Create a map where every node is connected to every other node.
Assume each key in the returned dictionary to be connected to each item in
its value (a list).
:param names: a list of node names
:return: a dictionary of name -> list(name).
|
def distributedConnectionMap(names: List[str]) -> OrderedDict:
"""
    Create a map where every node is connected to every other node.
    Assume each key in the returned dictionary to be connected to each item in
    its value (a list).
:param names: a list of node names
:return: a dictionary of name -> list(name).
"""
names.sort()
combos = list(itertools.combinations(names, 2))
maxPer = math.ceil(len(list(combos)) / len(names))
# maxconns = math.ceil(len(names) / 2)
connmap = OrderedDict((n, []) for n in names)
for a, b in combos:
if len(connmap[a]) < maxPer:
connmap[a].append(b)
else:
connmap[b].append(a)
return connmap
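A worked example of the distribution (illustrative; assumes the function is imported from this module):
# Four nodes give six undirected pairs, spread so no node initiates more
# than ceil(6 / 4) = 2 connections.
conns = distributedConnectionMap(['A', 'B', 'C', 'D'])
# expected: OrderedDict([('A', ['B', 'C']), ('B', ['C', 'D']), ('C', ['D']), ('D', ['A'])])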
|
Bandwidth.
Formula:
BW = SMA(H - L)
|
def band_width(high_data, low_data, period):
"""
Bandwidth.
Formula:
BW = SMA(H - L)
"""
catch_errors.check_for_input_len_diff(high_data, low_data)
diff = np.array(high_data) - np.array(low_data)
bw = sma(diff, period)
return bw
|
Center Band.
Formula:
CB = SMA(TP)
|
def center_band(close_data, high_data, low_data, period):
"""
Center Band.
Formula:
CB = SMA(TP)
"""
tp = typical_price(close_data, high_data, low_data)
cb = sma(tp, period)
return cb
|
Upper Band.
Formula:
UB = CB + BW
|
def upper_band(close_data, high_data, low_data, period):
"""
Upper Band.
Formula:
UB = CB + BW
"""
cb = center_band(close_data, high_data, low_data, period)
bw = band_width(high_data, low_data, period)
ub = cb + bw
return ub
|
Simple Moving Average.
Formula:
    SMA = SUM(N most recent values) / N
|
def simple_moving_average(data, period):
"""
Simple Moving Average.
Formula:
    SMA = SUM(N most recent values) / N
"""
catch_errors.check_for_period_error(data, period)
# Mean of Empty Slice RuntimeWarning doesn't affect output so it is
    # suppressed
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sma = [np.mean(data[idx-(period-1):idx+1]) for idx in range(0, len(data))]
sma = fill_for_noncomputable_vals(data, sma)
return sma
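A worked example of the mean-of-last-N definition, computed by hand rather than through the function itself (the first period - 1 entries are left to fill_for_noncomputable_vals, assumed here to be NaN padding):
data, period = [1, 2, 3, 4, 5], 3
expected_tail = [2.0, 3.0, 4.0]   # mean(1,2,3), mean(2,3,4), mean(3,4,5)
computed = [sum(data[i - period + 1:i + 1]) / period for i in range(period - 1, len(data))]
assert computed == expected_tail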
|
Lower Band.
Formula:
LB = CB - BW
|
def lower_band(close_data, high_data, low_data, period):
"""
Lower Band.
Formula:
LB = CB - BW
"""
cb = center_band(close_data, high_data, low_data, period)
bw = band_width(high_data, low_data, period)
lb = cb - bw
return lb
|
Average True Range Percent.
Formula:
ATRP = (ATR / CLOSE) * 100
|
def average_true_range_percent(close_data, period):
"""
Average True Range Percent.
Formula:
ATRP = (ATR / CLOSE) * 100
"""
catch_errors.check_for_period_error(close_data, period)
atrp = (atr(close_data, period) / np.array(close_data)) * 100
return atrp
|
On Balance Volume.
Formula:
start = 1
if CLOSEt > CLOSEt-1
obv = obvt-1 + volumet
elif CLOSEt < CLOSEt-1
obv = obvt-1 - volumet
    elif CLOSEt == CLOSEt-1
obv = obvt-1
|
def on_balance_volume(close_data, volume):
"""
On Balance Volume.
Formula:
start = 1
if CLOSEt > CLOSEt-1
obv = obvt-1 + volumet
elif CLOSEt < CLOSEt-1
obv = obvt-1 - volumet
    elif CLOSEt == CLOSEt-1
obv = obvt-1
"""
catch_errors.check_for_input_len_diff(close_data, volume)
obv = np.zeros(len(volume))
obv[0] = 1
for idx in range(1, len(obv)):
if close_data[idx] > close_data[idx-1]:
obv[idx] = obv[idx-1] + volume[idx]
elif close_data[idx] < close_data[idx-1]:
obv[idx] = obv[idx-1] - volume[idx]
elif close_data[idx] == close_data[idx-1]:
obv[idx] = obv[idx-1]
return obv
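A worked example of the up/down/flat rules above, with expected values derived by hand (obv[0] is seeded with 1):
close  = [10.0, 11.0, 10.5, 10.5]
volume = [100, 200, 150, 120]
# index 1: close rose -> 1 + 200 = 201
# index 2: close fell -> 201 - 150 = 51
# index 3: close flat -> 51
# so on_balance_volume(close, volume) is expected to yield [1, 201, 51, 51]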
|
Rate of Change.
Formula:
(Close - Close n periods ago) / (Close n periods ago) * 100
|
def rate_of_change(data, period):
"""
Rate of Change.
Formula:
(Close - Close n periods ago) / (Close n periods ago) * 100
"""
catch_errors.check_for_period_error(data, period)
rocs = [((data[idx] - data[idx - (period - 1)]) /
data[idx - (period - 1)]) * 100 for idx in range(period - 1, len(data))]
rocs = fill_for_noncomputable_vals(data, rocs)
return rocs
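A worked example computed by hand; note that the implementation above looks back period - 1 bars:
data, period = [10, 11, 12, 13], 3
roc_tail = [(data[i] - data[i - (period - 1)]) / data[i - (period - 1)] * 100
            for i in range(period - 1, len(data))]
assert [round(v, 2) for v in roc_tail] == [20.0, 18.18]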
|
Average True Range.
Formula:
    ATRt = (ATRt-1 * (n - 1) + TRt) / n
|
def average_true_range(close_data, period):
"""
Average True Range.
Formula:
    ATRt = (ATRt-1 * (n - 1) + TRt) / n
"""
tr = true_range(close_data, period)
atr = smoothed_moving_average(tr, period)
atr[0:period-1] = tr[0:period-1]
return atr
|
Accumulation/Distribution.
Formula:
    A/D = ((Ct - Lt) - (Ht - Ct)) / (Ht - Lt) * Vt + A/Dt-1
|
def accumulation_distribution(close_data, high_data, low_data, volume):
"""
Accumulation/Distribution.
Formula:
    A/D = ((Ct - Lt) - (Ht - Ct)) / (Ht - Lt) * Vt + A/Dt-1
"""
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
ad = np.zeros(len(close_data))
for idx in range(1, len(close_data)):
ad[idx] = (
(((close_data[idx] - low_data[idx]) -
(high_data[idx] - close_data[idx])) /
(high_data[idx] - low_data[idx]) *
volume[idx]) +
ad[idx-1]
)
return ad
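A worked single step of the formula, using hypothetical bar values:
high, low, close, volume = 12.0, 10.0, 11.5, 1000
clv = ((close - low) - (high - close)) / (high - low)   # money-flow multiplier
assert clv == 0.5
step = clv * volume                                      # amount added to the prior A/D total
assert step == 500.0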
|
Relative Strength Index.
Formula:
    RSI = 100 - (100 / (1 + (prevGain / prevLoss)))
|
def relative_strength_index(data, period):
"""
Relative Strength Index.
Formula:
    RSI = 100 - (100 / (1 + (prevGain / prevLoss)))
"""
catch_errors.check_for_period_error(data, period)
period = int(period)
changes = [data_tup[1] - data_tup[0] for data_tup in zip(data[::1], data[1::1])]
filtered_gain = [val < 0 for val in changes]
gains = [0 if filtered_gain[idx] is True else changes[idx] for idx in range(0, len(filtered_gain))]
filtered_loss = [val > 0 for val in changes]
losses = [0 if filtered_loss[idx] is True else abs(changes[idx]) for idx in range(0, len(filtered_loss))]
avg_gain = np.mean(gains[:period])
avg_loss = np.mean(losses[:period])
rsi = []
if avg_loss == 0:
rsi.append(100)
else:
rs = avg_gain / avg_loss
rsi.append(100 - (100 / (1 + rs)))
for idx in range(1, len(data) - period):
avg_gain = ((avg_gain * (period - 1) +
gains[idx + (period - 1)]) / period)
avg_loss = ((avg_loss * (period - 1) +
losses[idx + (period - 1)]) / period)
if avg_loss == 0:
rsi.append(100)
else:
rs = avg_gain / avg_loss
rsi.append(100 - (100 / (1 + rs)))
rsi = fill_for_noncomputable_vals(data, rsi)
return rsi
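A worked example of the RS-to-RSI step, using hypothetical average gain and loss:
avg_gain, avg_loss = 1.0, 0.5
rs = avg_gain / avg_loss            # 2.0
rsi_value = 100 - (100 / (1 + rs))  # about 66.67
assert round(rsi_value, 2) == 66.67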
|
Vertical Horizontal Filter.
Formula:
ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
|
def vertical_horizontal_filter(data, period):
"""
Vertical Horizontal Filter.
Formula:
ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
"""
catch_errors.check_for_period_error(data, period)
vhf = [abs(np.max(data[idx+1-period:idx+1]) -
np.min(data[idx+1-period:idx+1])) /
sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))]
vhf = fill_for_noncomputable_vals(data, vhf)
return vhf
|
Buying Pressure.
Formula:
    BP = current close - min(current low, previous close)
|
def buying_pressure(close_data, low_data):
"""
Buying Pressure.
Formula:
    BP = current close - min(current low, previous close)
"""
catch_errors.check_for_input_len_diff(close_data, low_data)
bp = [close_data[idx] - np.min([low_data[idx], close_data[idx-1]]) for idx in range(1, len(close_data))]
bp = fill_for_noncomputable_vals(close_data, bp)
return bp
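A worked example using two hypothetical bars:
close = [10.0, 12.0]
low = [9.0, 11.0]
bp_1 = close[1] - min(low[1], close[0])   # 12 - min(11, 10)
assert bp_1 == 2.0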
|
Ultimate Oscillator.
Formula:
UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
|
def ultimate_oscillator(close_data, low_data):
"""
Ultimate Oscillator.
Formula:
UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
"""
a7 = 4 * average_7(close_data, low_data)
a14 = 2 * average_14(close_data, low_data)
a28 = average_28(close_data, low_data)
uo = 100 * ((a7 + a14 + a28) / 7)
return uo
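A worked example of the 4:2:1 weighting, using hypothetical average values:
avg7, avg14, avg28 = 0.6, 0.5, 0.4
uo_value = 100 * ((4 * avg7) + (2 * avg14) + avg28) / (4 + 2 + 1)
assert round(uo_value, 1) == 54.3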
|
Aroon Up.
Formula:
AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100
|
def aroon_up(data, period):
"""
Aroon Up.
Formula:
AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100
"""
catch_errors.check_for_period_error(data, period)
period = int(period)
a_up = [((period -
list(reversed(data[idx+1-period:idx+1])).index(np.max(data[idx+1-period:idx+1]))) /
float(period)) * 100 for idx in range(period-1, len(data))]
a_up = fill_for_noncomputable_vals(data, a_up)
return a_up
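A worked example of the formula with hypothetical inputs:
# With period = 5 and the highest value of the window occurring 2 bars ago,
# Aroon Up = ((5 - 2) / 5) * 100 = 60.
period, bars_since_high = 5, 2
aroon_up_value = ((period - bars_since_high) / float(period)) * 100
assert round(aroon_up_value, 6) == 60.0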
|