INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Return a dictionary form of the message :param msg: the message to be sent :raises: ValueError if msg cannot be converted to an appropriate format for transmission
def toDict(self, msg: Dict) -> Dict:
    """
    Return a dictionary form of the message.

    :param msg: the message to be sent; may be a Request, a
        namedtuple, an object exposing ``__dict__``, or already a dict
    :return: the message as a plain dict
    :raises ValueError: if msg cannot be converted to an appropriate
        format for transmission
    """
    if isinstance(msg, Request):
        return msg.as_dict
    if hasattr(msg, "_asdict"):
        # namedtuple-like message
        return dict(msg._asdict())
    if hasattr(msg, "__dict__"):
        return dict(msg.__dict__)
    if self.allowDictOnly:
        raise ValueError("Message cannot be converted to an appropriate "
                         "format for transmission")
    # assume the message is already in a transmissible form
    return msg
Get all ledger IDs for which A) not updated for more than Freshness Timeout B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout Should be called whenever we need to decide if ledgers need to be updated. :param ts: the current time check the freshness against :return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest) and then by ledger ID (in case of equal update time)
def check_freshness(self, ts):
    '''
    Collect the IDs of all ledgers that look stale at time `ts`.

    A ledger is considered stale when it has neither been updated nor
    already been reported as outdated (by a previous call to this
    method) within the freshness timeout. Call this whenever we need
    to decide if ledgers must be updated.

    :param ts: the current time to check the freshness against
    :return: an OrderedDict mapping outdated ledger IDs to the time
        since their last update, sorted from oldest to newest and then
        by ledger ID when update times are equal
    '''
    stale = {}
    for lid, state in self._ledger_freshness.items():
        fresh_enough = ts - state.last_updated <= self.freshness_timeout
        recently_reported = \
            ts - state.last_marked_as_outdated <= self.freshness_timeout
        if fresh_enough or recently_reported:
            continue
        stale[lid] = ts - state.last_updated
        state.last_marked_as_outdated = ts
    # oldest first; ties broken by ledger id
    return OrderedDict(sorted(stale.items(),
                              key=lambda kv: (-kv[1], kv[0])))
Updates the time at which the ledger was updated. Should be called whenever a txn for the ledger is ordered. :param ledger_id: the ID of the ledgers a txn was ordered for :param ts: the current time :return: None
def update_freshness(self, ledger_id, ts):
    '''
    Record that the given ledger was updated at time `ts`.

    Should be called whenever a txn for the ledger is ordered.
    Unknown ledger IDs are ignored.

    :param ledger_id: the ID of the ledger a txn was ordered for
    :param ts: the current time
    :return: None
    '''
    state = self._ledger_freshness.get(ledger_id)
    if state is not None:
        state.last_updated = ts
Gets the time at which each ledger was updated. Can be called at any time to get this information. :return: an ordered dict of outdated ledgers sorted by last update time (from old to new) and then by ledger ID (in case of equal update time)
def get_last_update_time(self):
    '''
    Return the last update time of every tracked ledger.

    Can be called at any time to get this information.

    :return: an OrderedDict mapping ledger IDs to their last update
        time, sorted by update time (oldest first) and then by ledger
        ID when update times are equal
    '''
    times = ((lid, st.last_updated)
             for lid, st in self._ledger_freshness.items())
    return OrderedDict(sorted(times, key=lambda kv: (kv[1], kv[0])))
Create a string representation of the given object. Examples: :: >>> serialize("str") 'str' >>> serialize([1,2,3,4,5]) '1,2,3,4,5' >>> serialize({1:'a', 2:'b'}) '1:a|2:b' >>> serialize({1:'a', 2:'b', 3:[1,{2:'k'}]}) '1:a|2:b|3:1,2:k' :param obj: the object to serialize :param level: a parameter used internally for recursion to serialize nested data structures :param topLevelKeysToIgnore: the list of top level keys to ignore for serialization :return: a string representation of `obj`
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None,
              toBytes=True):
    """
    Create a string representation of the given object.

    Examples::

        >>> serialize("str")
        'str'
        >>> serialize([1,2,3,4,5])
        '1,2,3,4,5'
        >>> serialize({1:'a', 2:'b'})
        '1:a|2:b'
        >>> serialize({1:'a', 2:'b', 3:[1,{2:'k'}]})
        '1:a|2:b|3:1,2:k'

    :param obj: the object to serialize
    :param level: a parameter used internally for recursion to
        serialize nested data structures
    :param objname: name of the object being serialized; used to
        qualify nested dict keys and in error reporting
    :param topLevelKeysToIgnore: the list of top level keys to ignore
        for serialization (only honoured at level 0)
    :param toBytes: if True, return UTF-8 encoded bytes instead of str
    :return: a string (or bytes) representation of `obj`
    """
    res = None
    if not isinstance(obj, acceptableTypes):
        error("invalid type found {}: {}".format(objname, obj))
    elif isinstance(obj, str):
        res = obj
    elif isinstance(obj, dict):
        if level > 0:
            keys = list(obj.keys())
        else:
            # only the very top level honours topLevelKeysToIgnore
            topLevelKeysToIgnore = topLevelKeysToIgnore or []
            keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore]
        keys.sort()
        strs = []
        for k in keys:
            onm = ".".join([str(objname), str(k)]) if objname else k
            strs.append(
                str(k) + ":" + self.serialize(obj[k], level + 1, onm,
                                              toBytes=False))
        res = "|".join(strs)
    elif isinstance(obj, Iterable):
        strs = []
        for o in obj:
            strs.append(self.serialize(o, level + 1, objname,
                                       toBytes=False))
        res = ",".join(strs)
    elif obj is None:
        res = ""
    else:
        res = str(obj)
    if not toBytes:
        return res
    return res.encode('utf-8')
The number of txns from the beginning of `uncommittedTxns` to commit :param count: :return: a tuple of 2 seqNos indicating the start and end of sequence numbers of the committed txns
def commitTxns(self, count: int) -> Tuple[Tuple[int, int], List]:
    """
    Commit the first `count` txns from `uncommittedTxns`.

    :param count: the number of txns from the beginning of
        `uncommittedTxns` to commit
    :return: a tuple of 2 seqNos indicating the start and end of
        sequence numbers of the committed txns, plus the list of
        committed txns
    """
    committedSize = self.size
    committedTxns = []
    pending = self.uncommittedTxns
    for txn in pending[:count]:
        txn.update(self.append(txn))
        committedTxns.append(txn)
    self.uncommittedTxns = pending[count:]
    logger.debug('Committed {} txns, {} are uncommitted'.
                 format(len(committedTxns), len(self.uncommittedTxns)))
    if not self.uncommittedTxns:
        self.uncommittedTree = None
        self.uncommittedRootHash = None
    # Do not change `uncommittedTree` or `uncommittedRootHash`
    # if there are any `uncommittedTxns` since the ledger still has a
    # valid uncommittedTree and a valid root hash which are
    # different from the committed ones
    if committedTxns:
        return (committedSize + 1, committedSize + count), committedTxns
    return (committedSize, committedSize), committedTxns
The number of txns in `uncommittedTxns` which have to be discarded :param count: :return:
def discardTxns(self, count: int):
    """
    Drop the last `count` txns from `uncommittedTxns` and recompute
    the uncommitted tree state.

    :param count: the number of txns in `uncommittedTxns` which have
        to be discarded
    :return: None
    :raises LogicError: if `count` exceeds the number of uncommitted
        txns
    """
    # TODO: This can be optimised if multiple discards are combined
    # together since merkle root computation will be done only once.
    if count == 0:
        return
    available = len(self.uncommittedTxns)
    if count > available:
        raise LogicError("expected to revert {} txns while there are only {}".
                         format(count, available))
    old_hash = self.uncommittedRootHash
    self.uncommittedTxns = self.uncommittedTxns[:-count]
    if self.uncommittedTxns:
        self.uncommittedTree = self.treeWithAppliedTxns(self.uncommittedTxns)
        self.uncommittedRootHash = self.uncommittedTree.root_hash
    else:
        self.uncommittedTree = None
        self.uncommittedRootHash = None
    logger.info('Discarding {} txns and root hash {} and new root hash is {}. {} are still uncommitted'.
                format(count, Ledger.hashToStr(old_hash),
                       Ledger.hashToStr(self.uncommittedRootHash),
                       len(self.uncommittedTxns)))
Return a copy of merkle tree after applying the txns :param txns: :return:
def treeWithAppliedTxns(self, txns: List, currentTree=None):
    """
    Return a copy of the merkle tree with the given txns appended.

    :param txns: the txns to apply on top of `currentTree`
    :param currentTree: tree to start from; defaults to `self.tree`
    :return: a new tree containing the appended txns
    """
    baseTree = currentTree or self.tree
    # Copying the tree is cheap since it is a Compact Merkle Tree:
    # roughly 32*lg(n) bytes for n leaves (txns).
    result = copy(baseTree)
    for txn in txns:
        result.append(self.serialize_for_tree(txn))
    return result
Clear the values of all attributes of the transaction store.
def reset(self):
    """
    Clear the values of all attributes of the transaction store.
    """
    self.getsCounter = 0
    # per-client map of {request id -> transaction id} for requests
    # that have been processed
    self.processedRequests = {}  # type: Dict[str, Dict[int, str]]
    # per-client asyncio Queue of responses waiting to be sent
    self.responses = {}  # type: Dict[str, asyncio.Queue]
    # map of {transaction id -> Reply}
    self.transactions = {}
Try to stop the transaction store in the given timeout or raise an exception.
def stop(self, timeout: int = 5) -> bool:
    """
    Try to stop the transaction store within the given timeout.

    Polls until all in-flight gets have completed.

    :param timeout: seconds to wait for pending gets to finish
    :return: True once no gets remain pending
        (the original annotated ``-> None`` but always returned True)
    :raises StopTimeout: if gets are still pending when the timeout
        expires
    """
    self.running = False
    deadline = time.perf_counter() + timeout
    while True:
        if self.getsCounter == 0:
            return True
        if time.perf_counter() <= deadline:
            time.sleep(.1)
        else:
            raise StopTimeout("Stop timed out waiting for {} gets to "
                              "complete.".format(self.getsCounter))
Add a client request to the transaction store's list of processed requests.
def addToProcessedTxns(self, identifier: str, txnId: str,
                       reply: Reply) -> None:
    """
    Record a client request in the store's processed-requests map and
    index its reply by transaction id.

    :param identifier: the client identifier
    :param txnId: the transaction id assigned to the request
    :param reply: the Reply produced for the request
    """
    self.transactions[txnId] = reply
    clientReqs = self.processedRequests.setdefault(identifier, {})
    clientReqs[reply.reqId] = txnId
Add the given Reply to this transaction store's list of responses. Also add to processedRequests if not added previously.
async def append(self, reply: Reply) -> None:
    """
    Queue the given Reply for its client and record it in
    processedRequests if it has not been seen before.

    :param reply: the Reply to store and enqueue
    """
    result = reply.result
    identifier = result.get(f.IDENTIFIER.nm)
    txnId = result.get(TXN_ID)
    logger.debug("Reply being sent {}".format(reply))
    if self._isNewTxn(identifier, reply, txnId):
        self.addToProcessedTxns(identifier, txnId, reply)
    queue = self.responses.get(identifier)
    if queue is None:
        queue = asyncio.Queue()
        self.responses[identifier] = queue
    await queue.put(reply)
If client is not in `processedRequests` or requestId is not there in processed requests and txnId is present then its a new reply
def _isNewTxn(self, identifier, reply, txnId) -> bool: """ If client is not in `processedRequests` or requestId is not there in processed requests and txnId is present then its a new reply """ return (identifier not in self.processedRequests or reply.reqId not in self.processedRequests[identifier]) and \ txnId is not None
Add the specified request to this request store.
def add(self, req: Request):
    """
    Add the specified request to this request store.

    An existing state for the same key is kept untouched.

    :param req: the request to add
    :return: the (possibly pre-existing) state stored under the
        request's key
    """
    key = req.key
    try:
        return self[key]
    except KeyError:
        self[key] = ReqState(req)
        return self[key]
Should be called by each replica when request is ordered or replica is removed.
def ordered_by_replica(self, request_key):
    """
    Note that one more replica has ordered the request (or has been
    removed); decrements the outstanding-replicas counter.

    Unknown request keys are ignored.
    """
    state = self.get(request_key)
    if state:
        state.unordered_by_replicas_num -= 1
Works together with 'mark_as_executed' and 'free' methods. It marks request as forwarded to 'to' replicas. To let request be removed, it should be marked as executed and each of 'to' replicas should call 'free'.
def mark_as_forwarded(self, req: Request, to: int):
    """
    Mark the request as forwarded to `to` replicas.

    Works together with 'mark_as_executed' and 'free': the request can
    only be removed after it is marked as executed and each of the
    `to` replicas has called 'free'.
    """
    state = self[req.key]
    state.forwarded = True
    state.forwardedTo = to
    state.unordered_by_replicas_num = to
Add the specified request to the list of received PROPAGATEs. :param req: the REQUEST to add :param sender: the name of the node sending the msg
def add_propagate(self, req: Request, sender: str):
    """
    Record a received PROPAGATE for the given request.

    :param req: the REQUEST to add
    :param sender: the name of the node sending the msg
    """
    state = self.add(req)
    state.propagates[sender] = req
Get the number of propagates for a given reqId and identifier.
def votes(self, req) -> int:
    """
    Return the number of PROPAGATEs received for the given request,
    or 0 when the request is unknown.
    """
    try:
        state = self[req.key]
        return len(state.propagates)
    except KeyError:
        return 0
Works together with the 'mark_as_forwarded' and 'free' methods. It marks the request as executed, allowing it to be removed once all replicas the request was forwarded to have freed it.
def mark_as_executed(self, req: Request):
    """
    Mark the request as executed and attempt cleanup.

    Works together with 'mark_as_forwarded' and 'free': the request is
    removed once all replicas it was forwarded to have freed it.
    """
    state = self[req.key]
    state.executed = True
    self._clean(state)
Works together with the 'mark_as_forwarded' and 'mark_as_executed' methods. It records that one replica has freed the request, allowing it to be removed once it has been marked as executed and all replicas the request was forwarded to have freed it.
def free(self, request_key):
    """
    Note that one replica has freed the request and attempt cleanup.

    Works together with 'mark_as_forwarded' and 'mark_as_executed':
    the request is removed once it is marked executed and every
    replica it was forwarded to has freed it. Unknown keys are
    ignored.
    """
    state = self.get(request_key)
    if state:
        state.forwardedTo -= 1
        self._clean(state)
Check whether the request specified has already been propagated.
def has_propagated(self, req: Request, sender: str) -> bool:
    """
    Tell whether `sender` has already sent a PROPAGATE for `req`.
    """
    if req.key not in self:
        return False
    return sender in self[req.key].propagates
Broadcast a PROPAGATE to all other nodes :param request: the REQUEST to propagate
def propagate(self, request: Request, clientName):
    """
    Broadcast a PROPAGATE for the given request to all other nodes,
    unless this node has already propagated it.

    :param request: the REQUEST to propagate
    :param clientName: name of the client the request came from
    """
    if self.requests.has_propagated(request, self.name):
        logger.trace("{} already propagated {}".format(self, request))
        return
    with self.metrics.measure_time(MetricsName.SEND_PROPAGATE_TIME):
        self.requests.add_propagate(request, self.name)
        propagate = self.createPropagate(request, clientName)
        logger.debug("{} propagating request {} from client {}".format(self, request.key, clientName),
                     extra={"cli": True, "tags": ["node-propagate"]})
        self.send(propagate)
Create a new PROPAGATE for the given REQUEST. :param request: the client REQUEST :return: a new PROPAGATE msg
def createPropagate(
        request: Union[Request, dict], client_name) -> Propagate:
    """
    Create a new PROPAGATE for the given REQUEST.

    :param request: the client REQUEST (or its dict form)
    :param client_name: name of the client the request came from
    :return: a new PROPAGATE msg, or None if `request` is malformed
    """
    if not isinstance(request, (Request, dict)):
        logger.error("{}Request not formatted properly to create propagate"
                     .format(THREE_PC_PREFIX))
        return
    logger.trace("Creating PROPAGATE for REQUEST {}".format(request))
    if isinstance(request, Request):
        request = request.as_dict
    if isinstance(client_name, bytes):
        client_name = client_name.decode()
    return Propagate(request, client_name)
Determine whether to forward client REQUESTs to replicas, based on the following logic: - If exactly f+1 PROPAGATE requests are received, then forward. - If less than f+1 of requests then probably there's no consensus on the REQUEST, don't forward. - If more than f+1 then already forwarded to replicas, don't forward Even if the node hasn't received the client REQUEST itself, if it has received enough number of PROPAGATE messages for the same, the REQUEST can be forwarded. :param request: the client REQUEST
def canForward(self, request: Request):
    """
    Decide whether a client REQUEST may be forwarded to replicas.

    Forward only when exactly the propagate quorum (f+1 PROPAGATEs) is
    reached: fewer means there is probably no consensus yet, more
    means the request was already forwarded. The node need not have
    received the client REQUEST itself if enough PROPAGATEs arrived.

    :param request: the client REQUEST
    :return: None when forwarding is allowed, otherwise a string
        describing why it cannot be forwarded
    """
    if self.requests.forwarded(request):
        return 'already forwarded'
    # Without a quorum of propagates there is nothing to compare yet
    if not self.quorums.propagate.is_reached(self.requests.votes(request)):
        return 'not finalised'
    req = self.requests.req_with_acceptable_quorum(request,
                                                   self.quorums.propagate)
    if not req:
        return 'not finalised'
    self.requests.set_finalised(req)
    return None
Forward the specified client REQUEST to the other replicas on this node :param request: the REQUEST to propagate
def forward(self, request: Request):
    """
    Forward the specified client REQUEST to all replicas on this node
    and start tracking its ordering.

    :param request: the REQUEST to forward
    """
    req_key = request.key
    replica_count = self.replicas.num_replicas
    logger.debug('{} forwarding request {} to {} replicas'
                 .format(self, req_key, replica_count))
    self.replicas.pass_message(ReqKey(req_key))
    self.monitor.requestUnOrdered(req_key)
    self.requests.mark_as_forwarded(request, replica_count)
Record the request in the list of requests and propagate. :param request: :param clientName:
def recordAndPropagate(self, request: Request, clientName):
    """
    Record the request in the request store, propagate it to the other
    nodes and then try forwarding it to this node's replicas.

    :param request: the client REQUEST
    :param clientName: name of the client the request came from
    """
    self.requests.add(request)
    self.propagate(request, clientName)
    self.tryForwarding(request)
Try to forward the request if the required conditions are met. See the method `canForward` for the conditions to check before forwarding a request.
def tryForwarding(self, request: Request):
    """
    Forward the request if the conditions checked by `canForward` are
    met; otherwise log why it was not forwarded.

    :param request: the client REQUEST
    """
    reason_not_to = self.canForward(request)
    if reason_not_to is not None:
        logger.trace("{} not forwarding request {} to its replicas "
                     "since {}".format(self, request, reason_not_to))
        return
    # Enough PROPAGATEs gathered (the REQUEST itself may not have
    # arrived yet) - pass it on to the replicas
    self.forward(request)
Request PROPAGATEs for the given request keys. Since replicas can request PROPAGATEs independently of each other, check if it has been requested recently :param req_keys: :return:
def request_propagates(self, req_keys):
    """
    Request PROPAGATEs for the given request keys, skipping any that
    were already requested recently (replicas may ask for PROPAGATEs
    independently of each other).

    :param req_keys: digests of the requests to ask PROPAGATEs for
    :return: the number of PROPAGATEs actually requested
    """
    requested = 0
    for digest in req_keys:
        if digest in self.requested_propagates_for:
            logger.debug('{} already requested PROPAGATE recently for {}'.
                         format(self, digest))
            continue
        if digest not in self.requests:
            # Unknown request: ask every node
            self.request_msg(PROPAGATE, {f.DIGEST.nm: digest})
        else:
            # Ask only the nodes that have not sent a PROPAGATE yet
            send_to = [conn for conn in self.nodestack.connecteds
                       if conn not in self.requests[digest].propagates.keys()]
            self.request_msg(PROPAGATE, {f.DIGEST.nm: digest}, frm=send_to)
        self._add_to_recently_requested(digest)
        requested += 1
    return requested
Currently not using clear
def removeRemote(self, remote: Remote, clear=True):
    """
    Remove the given remote and its keys from this stack's lookup
    maps.

    :param remote: the remote to remove
    :param clear: currently unused
    """
    name = remote.name
    pkey = remote.publicKey
    vkey = remote.verKey
    if name in self.remotes:
        self.remotes.pop(name)
        self.remotesByKeys.pop(pkey, None)
        self.verifiers.pop(vkey, None)
    else:
        # BUG FIX: the name was never interpolated into the message
        # (the original logged the literal '{}' placeholder)
        logger.info('No remote named {} present'.format(name))
Service `limit` number of received messages in this stack. :param limit: the maximum number of messages to be processed. If None, processes all of the messages in rxMsgs. :return: the number of messages processed.
async def service(self, limit=None, quota: Optional[Quota] = None) -> int:
    """
    Service up to `limit` received messages in this stack.

    :param limit: the maximum number of messages to be processed. If
        None, processes all of the messages in rxMsgs.
    :param quota: receive quota passed down to the stack servicing
    :return: the number of messages processed.
    """
    if self.listener:
        await self._serviceStack(self.age, quota)
    else:
        logger.info("{} is stopped".format(self))
    if not self.rxMsgs:
        return 0
    return self.processReceived(limit if limit else sys.maxsize)
Receives messages from listener :param quota: number of messages to receive :return: number of received messages
def _receiveFromListener(self, quota: Quota) -> int:
    """
    Receive messages from the listener socket, bounded by the quota.

    :param quota: limits both the number of messages and the total
        byte size to receive
    :return: number of received messages
    """
    received = 0
    received_bytes = 0
    while received < quota.count and received_bytes < quota.size:
        try:
            ident, msg = self.listener.recv_multipart(flags=zmq.NOBLOCK)
        except zmq.Again:
            break
        if not msg:
            # Router probing sends empty message on connection
            continue
        received_bytes += len(msg)
        received += 1
        self._verifyAndAppend(msg, ident)
    if received > 0:
        logger.trace('{} got {} messages through listener'.
                     format(self, received))
    return received
Receives messages from remotes :param quotaPerRemote: number of messages to receive from one remote :return: number of received messages
def _receiveFromRemotes(self, quotaPerRemote) -> int:
    """
    Receive messages from all remotes, up to `quotaPerRemote` each.

    :param quotaPerRemote: number of messages to receive from one
        remote (must be truthy)
    :return: total number of received messages
    """
    assert quotaPerRemote
    totalReceived = 0
    for ident, remote in self.remotesByKeys.items():
        if not remote.socket:
            continue
        sock = remote.socket
        received = 0
        while received < quotaPerRemote:
            try:
                msg, = sock.recv_multipart(flags=zmq.NOBLOCK)
            except zmq.Again:
                break
            if not msg:
                # Router probing sends empty message on connection
                continue
            received += 1
            self._verifyAndAppend(msg, ident)
        if received > 0:
            logger.trace('{} got {} messages through remote {}'.
                         format(self, received, remote))
        totalReceived += received
    return totalReceived
Connect to the node specified by name.
def connect(self, name=None, remoteId=None, ha=None, verKeyRaw=None,
            publicKeyRaw=None):
    """
    Connect to the node specified by name.

    :param name: name of the remote to connect to (required)
    :param remoteId: currently unused here
    :param ha: host/port address pair; required when the remote is new
    :param verKeyRaw: raw verification key; z85-encoded if given
    :param publicKeyRaw: raw public key; z85-encoded if given
    :return: the uid of the connected remote
    :raises ValueError: if name is missing, or a previously-unknown
        remote lacks ha/public key/verkey information
    """
    if not name:
        raise ValueError('Remote name should be specified')
    publicKey = None
    if name in self.remotes:
        # NOTE(review): for an already-known remote, publicKey stays
        # None, so the stashed-pong resend below is skipped - confirm
        # this is intended
        remote = self.remotes[name]
    else:
        publicKey = z85.encode(
            publicKeyRaw) if publicKeyRaw else self.getPublicKey(name)
        verKey = z85.encode(
            verKeyRaw) if verKeyRaw else self.getVerKey(name)
        if not ha or not publicKey or (self.isRestricted and not verKey):
            raise ValueError('{} doesnt have enough info to connect. '
                             'Need ha, public key and verkey. {} {} {}'.
                             format(name, ha, verKey, publicKey))
        remote = self.addRemote(name, ha, verKey, publicKey)
    public, secret = self.selfEncKeys
    remote.connect(self.ctx, public, secret)
    logger.info("{}{} looking for {} at {}:{}"
                .format(CONNECTION_PREFIX, self, name or remote.name,
                        *remote.ha),
                extra={"cli": "PLAIN", "tags": ["node-looking"]})
    # This should be scheduled as an async task
    self.sendPingPong(remote, is_ping=True)
    # re-send previously stashed pings/pongs from unknown remotes
    logger.trace("{} stashed pongs: {}".format(self.name,
                                               str(self._stashed_pongs)))
    if publicKey in self._stashed_pongs:
        logger.trace("{} sending stashed pongs to {}".format(
            self.name, str(z85_to_friendly(publicKey))))
        self._stashed_pongs.discard(publicKey)
        # NOTE(review): passes `name` (a str) where the call above
        # passes the Remote object - confirm sendPingPong accepts both
        self.sendPingPong(name, is_ping=False)
    return remote.uid
Disconnect remote and connect to it again :param remote: instance of Remote from self.remotes :param remoteName: name of remote :return:
def reconnectRemote(self, remote):
    """
    Drop the connection to the given remote, establish it again and
    ping it.

    :param remote: instance of Remote from self.remotes
    :return: None
    :raises PlenumTypeError: if `remote` is not a Remote
    """
    if not isinstance(remote, Remote):
        raise PlenumTypeError('remote', remote, Remote)
    logger.info('{} reconnecting to {}'.format(self, remote))
    public, secret = self.selfEncKeys
    remote.disconnect()
    remote.connect(self.ctx, public, secret)
    self.sendPingPong(remote, is_ping=True)
Returns a list of hashes with serial numbers between start and end, both inclusive.
def _readMultiple(self, start, end, db): """ Returns a list of hashes with serial numbers between start and end, both inclusive. """ self._validatePos(start, end) # Converting any bytearray to bytes return [bytes(db.get(str(pos))) for pos in range(start, end + 1)]
pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator
def pack_nibbles(nibbles):
    """Pack a nibble sequence into its binary (hex-prefix) encoding.

    :param nibbles: a nibbles sequence; may end with a terminator
    :return: the packed bytes, with the flag nibble(s) prepended
    """
    flags = 0
    if nibbles[-1] == NIBBLE_TERMINATOR:
        flags = 2
        nibbles = nibbles[:-1]
    odd = len(nibbles) % 2
    flags |= odd  # lowest flag bit marks an odd number of nibbles
    # prepend the flag nibble, padding with a zero nibble when even
    # so the total length is always even
    prefix = [flags] if odd else [flags, 0]
    nibbles = prefix + nibbles
    packed = b''
    for hi, lo in zip(nibbles[::2], nibbles[1::2]):
        packed += ascii_chr(16 * hi + lo)
    return packed
get last node for the given prefix, also update `seen_prfx` to track the path already traversed :param node: node in form of list, or BLANK_NODE :param key_prfx: prefix to look for :param seen_prfx: prefix already seen, updates with each call :return: BLANK_NODE if does not exist, otherwise value or hash
def _get_last_node_for_prfx(self, node, key_prfx, seen_prfx):
    """
    Get the last node for the given prefix, also updating `seen_prfx`
    to track the path already traversed.

    :param node: node in form of list, or BLANK_NODE
    :param key_prfx: prefix to look for
    :param seen_prfx: prefix already seen, updates with each call
    :return: BLANK_NODE if it does not exist, otherwise value or hash
    """
    node_type = self._get_node_type(node)

    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE

    if node_type == NODE_TYPE_BRANCH:
        # already reach the expected node
        if not key_prfx:
            return node
        # descend into the child addressed by the first prefix nibble
        sub_node = self._decode_to_node(node[key_prfx[0]])
        seen_prfx.append(key_prfx[0])
        return self._get_last_node_for_prfx(sub_node,
                                            key_prfx[1:],
                                            seen_prfx)

    # key value node
    curr_key = key_nibbles_from_key_value_node(node)

    if node_type == NODE_TYPE_LEAF:
        # Return this node only if the complete prefix is part of the
        # current key
        if starts_with(curr_key, key_prfx):
            # Do not update `seen_prefix` as node has the prefix
            return node
        else:
            return BLANK_NODE

    if node_type == NODE_TYPE_EXTENSION:
        # traverse child nodes
        if len(key_prfx) > len(curr_key):
            # prefix is longer than this extension's key: consume the
            # key and recurse into the inner node
            if starts_with(key_prfx, curr_key):
                sub_node = self._get_inner_node_from_extension(node)
                seen_prfx.extend(curr_key)
                return self._get_last_node_for_prfx(
                    sub_node, key_prfx[len(curr_key):], seen_prfx)
            else:
                return BLANK_NODE
        else:
            # prefix ends within this extension's key
            if starts_with(curr_key, key_prfx):
                # Do not update `seen_prefix` as node has the prefix
                return node
            else:
                return BLANK_NODE
yield (key, value) stored in this and the descendant nodes :param node: node in form of list, or BLANK_NODE .. note:: Here key is in full form, rather than key of the individual node
def _iter_branch(self, node):
    """Yield (key, value) pairs stored in this node and its
    descendants.

    :param node: node in form of list, or BLANK_NODE

    .. note::
        Keys are yielded in full form (nibbles joined with ``b'+'``),
        rather than as the key of the individual node.
    """
    if node == BLANK_NODE:
        # BUG FIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); a plain return
        # ends the iteration correctly.
        return
    node_type = self._get_node_type(node)
    if is_key_value_type(node_type):
        nibbles = key_nibbles_from_key_value_node(node)
        key = b'+'.join([to_string(x) for x in nibbles])
        if node_type == NODE_TYPE_EXTENSION:
            sub_tree = self._iter_branch(
                self._get_inner_node_from_extension(node))
        else:
            sub_tree = [(to_string(NIBBLE_TERMINATOR), node[1])]
        # prepend key of this node to the keys of children
        for sub_key, sub_value in sub_tree:
            full_key = (key + b'+' + sub_key).strip(b'+')
            yield (full_key, sub_value)
    elif node_type == NODE_TYPE_BRANCH:
        for i in range(16):
            sub_tree = self._iter_branch(self._decode_to_node(node[i]))
            for sub_key, sub_value in sub_tree:
                full_key = (str_to_bytes(str(i)) + b'+' + sub_key).strip(b'+')
                yield (full_key, sub_value)
        if node[16]:
            yield (to_string(NIBBLE_TERMINATOR), node[-1])
Get value of a key when the root node was `root_node` :param root_node: :param key: :return:
def get_at(self, root_node, key):
    """
    Get the value of `key` as of the state rooted at `root_node`.

    :param root_node: root node to read from
    :param key: the key to look up
    :return: the stored value
    """
    nibbles = bin_to_nibbles(to_string(key))
    return self._get(root_node, nibbles)
Calculate and return the metrics.
def metrics(self):
    """
    Calculate and return the monitor's metrics as a list of
    (name, value) pairs.
    """
    masterThrp, backupThrp = self.getThroughputs(self.instances.masterId)
    ratio = self.instance_throughput_ratio(self.instances.masterId)
    ordered_counts = {i: v[0] for i, v in self.numOrderedRequests.items()}
    ordered_durations = {i: v[1] for i, v in self.numOrderedRequests.items()}
    latencies = {i: self.getLatency(i) for i in self.instances.ids}
    throughputs = {i: self.getThroughput(i) for i in self.instances.ids}
    return [
        ("{} Monitor metrics:".format(self), None),
        ("Delta", self.Delta),
        ("Lambda", self.Lambda),
        ("Omega", self.Omega),
        ("instances started", self.instances.started),
        ("ordered request counts", ordered_counts),
        ("ordered request durations", ordered_durations),
        ("master request latencies", self.masterReqLatencies),
        ("client avg request latencies", latencies),
        ("throughput", throughputs),
        ("master throughput", masterThrp),
        ("total requests", self.totalRequests),
        ("avg backup throughput", backupThrp),
        ("master throughput ratio", ratio),
    ]
Pretty printing for metrics
def prettymetrics(self) -> str:
    """
    Render the monitor metrics as a human-readable multi-line string.
    """
    lines = ("{}: {}".format(*metric) for metric in self.metrics())
    return "\n ".join(lines)
Reset the monitor. Sets all monitored values to defaults.
def reset(self):
    """
    Reset the monitor, setting all monitored values back to defaults.
    """
    logger.debug("{}'s Monitor being reset".format(self))
    instances_ids = self.instances.started.keys()
    self.numOrderedRequests = {inst_id: (0, 0) for inst_id in instances_ids}
    self.requestTracker.reset()
    self.masterReqLatencies = {}
    self.masterReqLatencyTooHigh = False
    self.totalViewChanges += 1
    self.lastKnownTraffic = self.calculateTraffic()
    if self.acc_monitor:
        self.acc_monitor.reset()
    # fresh throughput / latency trackers per instance
    for inst_id in instances_ids:
        self.throughputs[inst_id] = \
            self.create_throughput_measurement(self.config)
        self.clientAvgReqLatencies[inst_id] = \
            self.latency_measurement_cls(self.config)
Add one protocol instance for monitoring.
def addInstance(self, inst_id):
    """
    Register one more protocol instance for monitoring and create its
    throughput and latency trackers.
    """
    self.instances.add(inst_id)
    self.requestTracker.add_instance(inst_id)
    self.numOrderedRequests[inst_id] = (0, 0)
    self.throughputs[inst_id] = \
        self.create_throughput_measurement(self.config)
    self.clientAvgReqLatencies[inst_id] = \
        self.latency_measurement_cls(self.config)
    if self.acc_monitor:
        self.acc_monitor.add_instance(inst_id)
Measure the time taken for ordering of a request and return it. Monitor might have been reset due to view change due to which this method returns None
def requestOrdered(self, reqIdrs: List[str], instId: int, requests, byMaster: bool = False) -> Dict:
    """
    Record that the given requests were ordered by instance `instId` and
    return a dict mapping request key -> ordering duration in seconds.

    Untracked keys (e.g. after a monitor reset caused by a view change)
    are skipped with a debug log.

    :param reqIdrs: keys (digests) of the ordered requests
    :param instId: id of the instance that ordered them
    :param requests: mapping of key -> request wrapper (used for client
        latency bookkeeping)
    :param byMaster: currently unused in this body — kept for interface
        compatibility
    """
    now = time.perf_counter()
    if self.acc_monitor:
        self.acc_monitor.update_time(now)
    durations = {}
    for key in reqIdrs:
        if key not in self.requestTracker:
            logger.debug("Got untracked ordered request with digest {}".
                         format(key))
            continue
        if self.acc_monitor:
            self.acc_monitor.request_ordered(key, instId)
        # Log consensus time only the first time the request is ordered.
        if key in self.requestTracker.handled_unordered():
            started = self.requestTracker.started(key)
            logger.info('Consensus for ReqId: {} was achieved by {}:{} in {} seconds.'
                        .format(key, self.name, instId, now - started))
        duration = self.requestTracker.order(instId, key, now)
        self.throughputs[instId].add_request(now)
        if key in requests:
            identifier = requests[key].request.identifier
            self.clientAvgReqLatencies[instId].add_duration(identifier, duration)
        durations[key] = duration
    reqs, tm = self.numOrderedRequests[instId]
    orderedNow = len(durations)
    self.numOrderedRequests[instId] = (reqs + orderedNow,
                                       tm + sum(durations.values()))
    # TODO: Inefficient, as on every request a minimum of a large list is
    # calculated
    if min(r[0] for r in self.numOrderedRequests.values()) == (reqs + orderedNow):
        # If these requests is ordered by the last instance then increment
        # total requests, but why is this important, why cant is ordering
        # by master not enough?
        self.totalRequests += orderedNow
        self.postOnReqOrdered()
        if 0 == reqs:
            self.postOnNodeStarted(self.started)
    return durations
Record the time at which request ordering started.
def requestUnOrdered(self, key: str):
    """
    Record that ordering of the request identified by `key` has started.

    :param key: digest of the request to start tracking
    """
    started_at = time.perf_counter()
    if self.acc_monitor:
        self.acc_monitor.update_time(started_at)
        self.acc_monitor.request_received(key)
    self.requestTracker.start(key, started_at)
Return whether the master instance is slow.
def isMasterDegraded(self):
    """
    Return whether the master instance is considered slow.

    With an accuracy monitor configured, the decision is delegated to it;
    otherwise the master is degraded when either its throughput is too
    low or its average request latency is too high (the max-latency check
    is deliberately disabled, see the TODO below).
    """
    if self.acc_monitor:
        self.acc_monitor.update_time(time.perf_counter())
        return self.acc_monitor.is_master_degraded()
    else:
        return (self.instances.masterId is not None and
                (self.isMasterThroughputTooLow() or
                 # TODO for now, view_change procedure can take more that 15 minutes
                 # (5 minutes for catchup and 10 minutes for primary's answer).
                 # Therefore, view_change triggering by max latency now is not indicative.
                 # self.isMasterReqLatencyTooHigh() or
                 self.isMasterAvgReqLatencyTooHigh()))
Return the list of backup instances that are currently considered slow (degraded).
def areBackupsDegraded(self):
    """
    Return the list of backup instance ids currently considered slow.
    """
    # Pick the degradation predicate once, then scan all backups with it.
    if self.acc_monitor:
        is_slow = self.acc_monitor.is_instance_degraded
    else:
        is_slow = self.is_instance_throughput_too_low
    return [backup_id for backup_id in self.instances.backupIds
            if is_slow(backup_id)]
The relative throughput of an instance compared to the backup instances.
def instance_throughput_ratio(self, inst_id):
    """
    Ratio of this instance's throughput to the (averaged) throughput of
    the other instances.

    :return: the ratio, or None when the other instances' throughput is
        zero/unknown or this instance's throughput is unknown
    """
    own, others_avg = self.getThroughputs(inst_id)
    # Division is only meaningful when the denominator is non-zero and
    # our own throughput has actually been measured.
    if not others_avg or own is None:
        return None
    return own / others_avg
Return whether the throughput ratio of the given instance has dropped below the acceptable threshold (Delta).
def is_instance_throughput_too_low(self, inst_id):
    """
    Return whether the given instance's throughput ratio (its throughput
    relative to the average of the other instances) has dropped below
    the Delta threshold.

    :return: True/False, or None when the ratio cannot be measured yet
    """
    r = self.instance_throughput_ratio(inst_id)
    if r is None:
        logger.debug("{} instance {} throughput is not "
                     "measurable.".format(self, inst_id))
        return None
    too_low = r < self.Delta
    if too_low:
        logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.".
                       format(MONITORING_PREFIX, self, inst_id, r, self.Delta))
    else:
        logger.trace("{} instance {} throughput ratio {} is acceptable.".
                     format(self, inst_id, r))
    return too_low
Return whether the request latency of the master instance is greater than the acceptable threshold
def isMasterReqLatencyTooHigh(self): """ Return whether the request latency of the master instance is greater than the acceptable threshold """ # TODO for now, view_change procedure can take more that 15 minutes # (5 minutes for catchup and 10 minutes for primary's answer). # Therefore, view_change triggering by max latency is not indicative now. r = self.masterReqLatencyTooHigh or \ next(((key, lat) for key, lat in self.masterReqLatencies.items() if lat > self.Lambda), None) if r: logger.display("{}{} found master's latency {} to be higher than the threshold for request {}.". format(MONITORING_PREFIX, self, r[1], r[0])) else: logger.trace("{} found master's latency to be lower than the " "threshold for all requests.".format(self)) return r
Return whether the average request latency of an instance is greater than the acceptable threshold
def is_instance_avg_req_latency_too_high(self, inst_id):
    """
    Return whether the difference between the average request latency and
    the other instances' average exceeds the Omega threshold. Returns
    False when either average is unavailable.

    NOTE(review): `inst_id` only influences the log message here — the
    latency figures themselves come from getLatencies() regardless of
    the id. Confirm this is intended.
    """
    avg_lat, avg_lat_others = self.getLatencies()
    if not avg_lat or not avg_lat_others:
        return False
    d = avg_lat - avg_lat_others
    if d < self.Omega:
        return False
    if inst_id == self.instances.masterId:
        logger.info("{}{} found difference between master's and "
                    "backups's avg latency {} to be higher than the "
                    "threshold".format(MONITORING_PREFIX, self, d))
    logger.trace(
        "{}'s master's avg request latency is {} and backup's "
        "avg request latency is {}".format(self, avg_lat, avg_lat_others))
    return True
Return a tuple of the throughput of the given instance and the average throughput of the remaining instances. :param instId: the id of the protocol instance
def getThroughputs(self, desired_inst_id: int):
    """
    Return a tuple of (throughput of the given instance, averaged
    throughput of the remaining instances).

    :param desired_inst_id: the id of the protocol instance
    """
    instance_thrp = self.getThroughput(desired_inst_id)
    totalReqs, totalTm = self.getInstanceMetrics(forAllExcept=desired_inst_id)
    # Average the other replicas' throughput; a different averaging
    # strategy is used when comparing against the master.
    if len(self.throughputs) > 1:
        thrs = []
        for inst_id, thr_obj in self.throughputs.items():
            if inst_id == desired_inst_id:
                continue
            thr = self.getThroughput(inst_id)
            if thr is not None:
                thrs.append(thr)
        if thrs:
            if desired_inst_id == self.instances.masterId:
                other_thrp = self.throughput_avg_strategy_cls.get_avg(thrs)
            else:
                other_thrp = self.backup_throughput_avg_strategy_cls.get_avg(thrs)
        else:
            other_thrp = None
    else:
        other_thrp = None
    if instance_thrp == 0:
        if self.numOrderedRequests[desired_inst_id] == (0, 0):
            # This instance has ordered nothing yet; if the others have
            # ordered roughly one request each at most, it is too early
            # to judge, so report "unknown" instead of zero.
            avgReqsPerInst = (totalReqs or 0) / self.instances.count
            if avgReqsPerInst <= 1:
                # too early to tell if we need an instance change
                instance_thrp = None
    return instance_thrp, other_thrp
Return the throughput of the specified instance. :param instId: the id of the protocol instance
def getThroughput(self, instId: int) -> float:
    """
    Return the current throughput of the given protocol instance, or
    None when the instance id is unknown.

    :param instId: the id of the protocol instance
    """
    # Throughput is read against the current wall clock; all instances on
    # a node start at almost the same time, so values are comparable.
    if instId not in self.instances.ids:
        return None
    return self.throughputs[instId].get_throughput(time.perf_counter())
Calculate and return the average throughput of all the instances except the one specified as `forAllExcept`.
def getInstanceMetrics(
        self, forAllExcept: int) -> Tuple[Optional[int], Optional[float]]:
    """
    Return (total ordered-request count, total ordering time) summed over
    every instance except `forAllExcept`, or (None, None) when no other
    instance exists.
    """
    total_reqs = 0
    total_tm = 0
    seen_other = False
    for inst_id, (reqs, tm) in self.numOrderedRequests.items():
        if inst_id == forAllExcept:
            continue
        seen_other = True
        total_reqs += reqs
        total_tm += tm
    return (total_reqs, total_tm) if seen_other else (None, None)
Return the average client request latency calculated for the given instance.
def getLatency(self, instId: int) -> float:
    """
    Return the average client request latency measured for the given
    instance, or 0.0 when no latencies have been recorded at all.
    """
    if not self.clientAvgReqLatencies:
        return 0.0
    return self.clientAvgReqLatencies[instId].get_avg_latency()
Enqueue the message into the remote's queue. :param msg: the message to enqueue :param rid: the id of the remote node
def _enqueue(self, msg: Any, rid: int, signer: Signer) -> None:
    """
    Append the message to the outbox of the given remote, creating the
    outbox on first use.

    :param msg: the message to enqueue
    :param rid: the id of the remote node
    """
    outbox = self.outBoxes.get(rid)
    if outbox is None:
        outbox = deque()
        self.outBoxes[rid] = outbox
    outbox.append(msg)
Enqueue the specified message into all the remotes in the nodestack. :param msg: the message to enqueue
def _enqueueIntoAllRemotes(self, msg: Any, signer: Signer) -> None:
    """
    Enqueue the given message into the outbox of every known remote.

    :param msg: the message to enqueue
    """
    for remote_id in self.remotes:
        self._enqueue(msg, remote_id, signer)
Enqueue the given message into the outBoxes of the specified remotes or into the outBoxes of all the remotes if rids is None :param msg: the message to enqueue :param rids: ids of the remotes to whose outBoxes this message must be enqueued :param message_splitter: callable that splits msg on two smaller messages
def send(self, msg: Any, * rids: Iterable[int], signer: Signer = None, message_splitter=None) -> None: """ Enqueue the given message into the outBoxes of the specified remotes or into the outBoxes of all the remotes if rids is None :param msg: the message to enqueue :param rids: ids of the remotes to whose outBoxes this message must be enqueued :param message_splitter: callable that splits msg on two smaller messages """ # Signing (if required) and serializing before enqueueing otherwise # each call to `_enqueue` will have to sign it and `transmit` will try # to serialize it which is waste of resources message_parts, err_msg = \ self.prepare_for_sending(msg, signer, message_splitter) # TODO: returning breaks contract of super class if err_msg is not None: return False, err_msg if rids: for r in rids: for part in message_parts: self._enqueue(part, r, signer) else: for part in message_parts: self._enqueueIntoAllRemotes(part, signer) return True, None
Clear the outBoxes and transmit batched messages to remotes.
def flushOutBoxes(self) -> None:
    """
    Drain every remote's outbox, transmitting queued messages (batched
    where appropriate). Messages for remotes that have disappeared are
    discarded and their outboxes dropped.
    """
    removedRemotes = []
    for rid, msgs in self.outBoxes.items():
        try:
            dest = self.remotes[rid].name
        except KeyError:
            # Remote vanished; handle after the loop so we don't mutate
            # outBoxes while iterating it.
            removedRemotes.append(rid)
            continue
        if msgs:
            if self._should_batch(msgs):
                logger.trace(
                    "{} batching {} msgs to {} into fewer transmissions".
                    format(self, len(msgs), dest))
                logger.trace("    messages: {}".format(msgs))
                batches = split_messages_on_batches(list(msgs),
                                                    self._make_batch,
                                                    self._test_batch_len,
                                                    )
                msgs.clear()
                if batches:
                    for batch, size in batches:
                        logger.trace("{} sending payload to {}: {}".format(
                            self, dest, batch))
                        self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, size)
                        # Setting timeout to never expire
                        self.transmit(
                            batch, rid, timeout=self.messageTimeout,
                            serialized=True)
                else:
                    logger.error("{} cannot create batch(es) for {}".format(self, dest))
            else:
                # Too few/small messages to be worth batching; send one
                # at a time.
                while msgs:
                    msg = msgs.popleft()
                    logger.trace(
                        "{} sending msg {} to {}".format(self, msg, dest))
                    self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, 1)
                    # Setting timeout to never expire
                    self.transmit(msg, rid, timeout=self.messageTimeout,
                                  serialized=True)
    for rid in removedRemotes:
        logger.warning("{}{} has removed rid {}"
                       .format(CONNECTION_PREFIX, self, z85_to_friendly(rid)),
                       extra={"cli": False})
        msgs = self.outBoxes[rid]
        if msgs:
            self.discard(msgs, "{}rid {} no longer available"
                         .format(CONNECTION_PREFIX, z85_to_friendly(rid)),
                         logMethod=logger.debug)
        del self.outBoxes[rid]
Call `prod` once for each Prodable in this Looper :return: the sum of the number of events executed successfully
async def prodAllOnce(self):
    """
    Call `prod` once on every Prodable registered with this Looper.

    :return: the total number of events executed successfully
    """
    # TODO: looks like limit is always None???
    limit = None
    total = 0
    for prodable in self.prodables:
        total += await prodable.prod(limit)
    return total
Add one Prodable object to this Looper's list of Prodables :param prodable: the Prodable object to add
def add(self, prodable: Prodable) -> None:
    """
    Register a Prodable with this Looper, starting it immediately when
    autoStart is enabled.

    :param prodable: the Prodable object to add
    :raises ProdableAlreadyAdded: when one with the same name is already
        registered
    """
    known_names = [p.name for p in self.prodables]
    if prodable.name in known_names:
        raise ProdableAlreadyAdded("Prodable {} already added.".
                                   format(prodable.name))
    self.prodables.append(prodable)
    if self.autoStart:
        prodable.start(self.loop)
Remove the specified Prodable object from this Looper's list of Prodables :param prodable: the Prodable to remove
def removeProdable(self, prodable: Prodable=None, name: str=None) -> Optional[Prodable]:
    """
    Remove a Prodable from this Looper, identified either directly or by
    name.

    :param prodable: the Prodable to remove
    :param name: alternatively, the name of the Prodable to remove
    :return: the removed Prodable, or None when nothing matched
    """
    if prodable:
        self.prodables.remove(prodable)
        return prodable
    if name:
        found = None
        for candidate in self.prodables:
            if hasattr(candidate, "name") and getattr(candidate, "name") == name:
                found = candidate
                break
        if found:
            self.prodables.remove(found)
            return found
        logger.warning("Trying to remove a prodable {} which is not present"
                       .format(found))
        return None
    logger.error("Provide a prodable object or a prodable name")
Execute `runOnce` with a small tolerance of 0.01 seconds so that the Prodables can complete their other asynchronous tasks not running on the event-loop.
async def runOnceNicely(self):
    """
    Run `prodAllOnce` once with a small tolerance of 0.01 seconds so that
    the Prodables can complete their other asynchronous tasks not running
    on the event-loop.

    Logs when a single pass takes 15 seconds or more.
    """
    start = time.perf_counter()
    msgsProcessed = await self.prodAllOnce()
    if msgsProcessed == 0:
        # Nothing to do: yield to the event loop so other tasks progress.
        # FIX: the `loop` argument of asyncio.sleep was deprecated in
        # Python 3.8 and removed in 3.10; sleep always uses the running
        # loop, so it must not be passed explicitly.
        await asyncio.sleep(0.01)
    dur = time.perf_counter() - start
    if dur >= 15:
        logger.info("it took {:.3f} seconds to run once nicely".
                    format(dur), extra={"cli": False})
Runs an arbitrary list of coroutines in order and then quits the loop, if not running as a context manager.
def run(self, *coros: CoroWrapper):
    """
    Run an arbitrary list of coroutines (or plain callables) in order and
    return their result(s): a single value when one coroutine is given,
    a list otherwise. With no arguments, runs this Looper's main future
    until completion.

    :raises RuntimeError: when the Looper is not running, or when an
        argument is neither awaitable nor a function
    """
    if not self.running:
        raise RuntimeError("not running!")

    async def wrapper():
        results = []
        for coro in coros:
            try:
                # Awaitables are awaited directly; plain functions are
                # called and their result awaited if it is awaitable.
                if inspect.isawaitable(coro):
                    results.append(await coro)
                elif inspect.isfunction(coro):
                    res = coro()
                    if inspect.isawaitable(res):
                        results.append(await res)
                    else:
                        results.append(res)
                else:
                    raise RuntimeError(
                        "don't know how to run {}".format(coro))
            except Exception as ex:
                logger.error("Error while running coroutine {}: {}".format(coro.__name__, ex.__repr__()))
                raise ex
        if len(results) == 1:
            return results[0]
        return results
    if coros:
        what = wrapper()
    else:
        # if no coros supplied, then assume we run forever
        what = self.runFut
    return self.loop.run_until_complete(what)
Shut down this Looper.
async def shutdown(self):
    """
    Shut down this Looper: stop the run loop, wait for the main future,
    stop all Prodables, and detach the signal handlers that were set up.
    """
    logger.display("Looper shutting down now...",
                   extra={"cli": False})
    self.running = False
    start = time.perf_counter()
    if not self.runFut.done():
        await self.runFut
    self.stopall()
    logger.display("Looper shut down in {:.3f} seconds.".
                   format(time.perf_counter() - start),
                   extra={"cli": False})

    # Unset signal handlers, bug: https://bugs.python.org/issue23548
    for sig_name in self.signals:
        logger.debug("Unsetting handler for {}".format(sig_name))
        sig_num = getattr(signal, sig_name)
        self.loop.remove_signal_handler(sig_num)
Creates a default BLS factory to instantiate BLS BFT classes. :param node: Node instance :return: BLS factory instance
def create_default_bls_bft_factory(node):
    '''
    Creates a default BLS factory to instantiate BLS BFT classes.

    :param node: Node instance
    :return: BLS factory instance
    '''
    # BLS keys live in a per-node subdirectory of the node's keys dir.
    keys_path = os.path.join(node.keys_dir, node.name)
    crypto_factory = create_default_bls_crypto_factory(keys_path)
    return BlsFactoryBftPlenum(crypto_factory, node)
Verifies the signature of a signed message, returning the message if it has not been tampered with else raising :class:`~ValueError`. :param smessage: [:class:`bytes`] Either the original message or a signature and message concatenated together. :param signature: [:class:`bytes`] If an unsigned message is given for smessage then the detached signature must be provided. :param encoder: A class that is able to decode the secret message and signature. :rtype: :class:`bytes`
def verify(self, smessage, signature=None, encoder=encoding.RawEncoder):
    """
    Check the signature on a signed message and return the original
    message, raising :class:`~ValueError` when it has been tampered with.

    :param smessage: [:class:`bytes`] either a combined signature+message
        blob, or the bare message when `signature` is given separately
    :param signature: [:class:`bytes`] the detached signature, when not
        already concatenated with the message
    :param encoder: a class able to decode the signed message
    :rtype: :class:`bytes`
    """
    # Normalise to the combined signature||message form first.
    combined = smessage if signature is None else signature + smessage
    decoded = encoder.decode(combined)
    return libnacl.crypto_sign_open(decoded, self._key)
Generates a random :class:`~SigningKey` object. :rtype: :class:`~SigningKey`
def generate(cls):
    """
    Create a :class:`~SigningKey` from a freshly generated random seed.

    :rtype: :class:`~SigningKey`
    """
    seed = libnacl.randombytes(libnacl.crypto_sign_SEEDBYTES)
    return cls(seed, encoder=encoding.RawEncoder)
Sign a message using this key. :param message: [:class:`bytes`] The data to be signed. :param encoder: A class that is used to encode the signed message. :rtype: :class:`~SignedMessage`
def sign(self, message, encoder=encoding.RawEncoder):
    """
    Sign a message using this key.

    :param message: [:class:`bytes`] the data to be signed
    :param encoder: a class used to encode the signed message parts
    :rtype: :class:`~SignedMessage`
    """
    raw_signed = libnacl.crypto_sign(message, self._signing_key)
    # crypto_sign returns signature || message; split so callers can use
    # either part independently.
    sig_part = raw_signed[:libnacl.crypto_sign_BYTES]
    msg_part = raw_signed[libnacl.crypto_sign_BYTES:]
    return SignedMessage._from_parts(encoder.encode(sig_part),
                                     encoder.encode(msg_part),
                                     encoder.encode(raw_signed))
Verify the message
def verify(self, signature, msg):
    '''
    Return True iff `signature` is a valid signature over `msg` for this
    verifier's key. Returns False when no key is set or when
    verification fails.
    '''
    key = self.key
    if not key:
        return False
    try:
        key.verify(signature + msg)
        return True
    except ValueError:
        return False
Generates a random :class:`~PrivateKey` object :rtype: :class:`~PrivateKey`
def generate(cls):
    """
    Create a :class:`~PrivateKey` from freshly generated random bytes.

    :rtype: :class:`~PrivateKey`
    """
    raw = libnacl.randombytes(PrivateKey.SIZE)
    return cls(raw, encoder=encoding.RawEncoder)
Encrypts the plaintext message using the given `nonce` and returns the ciphertext encoded with the encoder. .. warning:: It is **VITALLY** important that the nonce is a nonce, i.e. it is a number used only once for any given key. If you fail to do this, you compromise the privacy of the messages encrypted. :param plaintext: [:class:`bytes`] The plaintext message to encrypt :param nonce: [:class:`bytes`] The nonce to use in the encryption :param encoder: The encoder to use to encode the ciphertext :rtype: [:class:`nacl.utils.EncryptedMessage`]
def encrypt(self, plaintext, nonce, encoder=encoding.RawEncoder):
    """
    Encrypts the plaintext message using the given `nonce` and returns
    the ciphertext encoded with the encoder.

    .. warning:: It is **VITALLY** important that the nonce is a nonce,
        i.e. it is a number used only once for any given key. If you fail
        to do this, you compromise the privacy of the messages encrypted.

    :param plaintext: [:class:`bytes`] The plaintext message to encrypt
    :param nonce: [:class:`bytes`] The nonce to use in the encryption
    :param encoder: The encoder to use to encode the ciphertext
    :rtype: [:class:`nacl.utils.EncryptedMessage`]
    """
    if len(nonce) != self.NONCE_SIZE:
        raise ValueError("The nonce must be exactly %s bytes long" %
                         self.NONCE_SIZE)

    ciphertext = libnacl.crypto_box_afternm(
        plaintext,
        nonce,
        self._shared_key,
    )

    encoded_nonce = encoder.encode(nonce)
    encoded_ciphertext = encoder.encode(ciphertext)

    # The combined form is nonce || ciphertext, matching what decrypt()
    # splits apart when called without an explicit nonce.
    return EncryptedMessage._from_parts(
        encoded_nonce,
        encoded_ciphertext,
        encoder.encode(nonce + ciphertext),
    )
Decrypts the ciphertext using the given nonce and returns the plaintext message. :param ciphertext: [:class:`bytes`] The encrypted message to decrypt :param nonce: [:class:`bytes`] The nonce used when encrypting the ciphertext :param encoder: The encoder used to decode the ciphertext. :rtype: [:class:`bytes`]
def decrypt(self, ciphertext, nonce=None, encoder=encoding.RawEncoder): """ Decrypts the ciphertext using the given nonce and returns the plaintext message. :param ciphertext: [:class:`bytes`] The encrypted message to decrypt :param nonce: [:class:`bytes`] The nonce used when encrypting the ciphertext :param encoder: The encoder used to decode the ciphertext. :rtype: [:class:`bytes`] """ # Decode our ciphertext ciphertext = encoder.decode(ciphertext) if nonce is None: # If we were given the nonce and ciphertext combined, split them. nonce = ciphertext[:self.NONCE_SIZE] ciphertext = ciphertext[self.NONCE_SIZE:] if len(nonce) != self.NONCE_SIZE: raise ValueError("The nonce must be exactly %s bytes long" % self.NONCE_SIZE) plaintext = libnacl.crypto_box_open_afternm( ciphertext, nonce, self._shared_key, ) return plaintext
Return a pair of (ciphertext, nonce) resulting from encrypting the message using a shared key generated from the .key and the pubkey. If pubkey is hex encoded it is converted first. If enhex is True then use HexEncoder otherwise use RawEncoder. Intended for the owner of the passed-in public key. msg is a string; pubkey is a public key (or Publican instance).
def encrypt(self, msg, pubkey, enhex=False):
    '''
    Return a pair of (ciphertext, nonce) resulting from encrypting the
    message using a shared key generated from the .key and the pubkey.
    If pubkey is hex encoded it is converted first.
    If enhex is True then use HexEncoder otherwise use RawEncoder.

    Intended for the owner of the passed in public key.

    :param msg: message string/bytes to encrypt
    :param pubkey: recipient's public key (PublicKey, raw 32 bytes, or
        hex-encoded bytes)
    :param enhex: encode the outputs in hex when True
    '''
    if not isinstance(pubkey, PublicKey):
        # A 32-byte value is the raw key; anything else is assumed hex.
        if len(pubkey) == 32:
            pubkey = PublicKey(pubkey, encoding.RawEncoder)
        else:
            pubkey = PublicKey(pubkey, encoding.HexEncoder)
    box = Box(self.key, pubkey)
    nonce = self.nonce()
    encoder = encoding.HexEncoder if enhex else encoding.RawEncoder
    encrypted = box.encrypt(msg, nonce, encoder)
    return (encrypted.ciphertext, encrypted.nonce)
Return decrypted msg contained in cypher using nonce and shared key generated from .key and pubkey. If pubkey is hex encoded it is converted first If dehex is True then use HexEncoder otherwise use RawEncoder Intended for the owner of .key cypher is string nonce is string pub is Publican instance
def decrypt(self, cipher, nonce, pubkey, dehex=False):
    '''
    Return the decrypted message contained in cipher using nonce and the
    shared key generated from .key and pubkey.
    If pubkey is hex encoded it is converted first.
    If dehex is True then use HexEncoder otherwise use RawEncoder.

    Intended for the owner of .key.

    :param cipher: ciphertext to decrypt
    :param nonce: nonce used at encryption time (possibly hex encoded)
    :param pubkey: sender's public key (PublicKey, raw 32 bytes, or
        hex-encoded bytes)
    :param dehex: decode the inputs from hex when True
    '''
    if not isinstance(pubkey, PublicKey):
        # A 32-byte value is the raw key; anything else is assumed hex.
        if len(pubkey) == 32:
            pubkey = PublicKey(pubkey, encoding.RawEncoder)
        else:
            pubkey = PublicKey(pubkey, encoding.HexEncoder)
    box = Box(self.key, pubkey)
    decoder = encoding.HexEncoder if dehex else encoding.RawEncoder
    # A hex nonce is twice the raw size; only decode when still encoded.
    if dehex and len(nonce) != box.NONCE_SIZE:
        nonce = decoder.decode(nonce)
    return box.decrypt(cipher, nonce, decoder)
Add the leaf (transaction) to the log and the merkle tree. Note: Currently data is serialised same way for inserting it in the log as well as the merkle tree, only difference is the tree needs binary data to the textual (utf-8) representation is converted to bytes.
def add(self, leaf):
    """
    Append the given transaction both to the transaction log and to the
    merkle tree, returning the merkle proof info for the new leaf.

    The leaf is serialized once per destination up front: the log takes
    the textual (utf-8) form while the tree needs binary data.
    """
    log_bytes = self.serialize_for_txn_log(leaf)
    self._addToStore(log_bytes, serialized=True)

    tree_bytes = self.serialize_for_tree(leaf)
    return self._addToTree(tree_bytes, serialized=True)
:param source: some iterable source (list, file, etc) :param lineSep: string of separators (chars) that must be removed :return: list of non empty lines with removed separators
def cleanLines(source, lineSep=os.linesep):
    """
    Yield the non-empty lines of `source` with separator characters
    stripped from both ends.

    :param source: some iterable source (list, file, etc)
    :param lineSep: string of separator (chars) that must be removed
    :return: generator of non-empty, separator-stripped lines
    """
    for raw in source:
        trimmed = raw.strip(lineSep)
        if trimmed:
            yield trimmed
Reads config from the installation directory of Plenum. :param installDir: installation directory of Plenum :param configFile: name of the configuration file :raises: FileNotFoundError :return: the configuration as a python object
def getInstalledConfig(installDir, configFile):
    """
    Reads config from the installation directory of Plenum.

    :param installDir: installation directory of Plenum
    :param configFile: name of the configuration file
    :raises FileNotFoundError: when the config file is absent
    :return: the configuration loaded as a python module object
    """
    configPath = os.path.join(installDir, configFile)
    if not os.path.exists(configPath):
        raise FileNotFoundError("No file found at location {}".
                                format(configPath))
    # Load the file as a python module without requiring it on sys.path.
    spec = spec_from_file_location(configFile, configPath)
    config = module_from_spec(spec)
    spec.loader.exec_module(config)
    return config
Reads a file called config.py in the project directory :raises: FileNotFoundError :return: the configuration as a python object
def _getConfig(general_config_dir: str = None):
    """
    Build the effective configuration object: STP defaults overlaid with
    plenum.config, then extended with the external general config file.

    :param general_config_dir: optional override for the directory that
        holds the general config file
    :raises Exception: when no GENERAL_CONFIG_DIR ends up configured
    :return: the configuration as a python object
    """
    stp_config = STPConfig()
    plenum_config = import_module("plenum.config")
    # plenum.config values win over the STP defaults.
    config = stp_config
    config.__dict__.update(plenum_config.__dict__)

    if general_config_dir:
        config.GENERAL_CONFIG_DIR = general_config_dir
    if not config.GENERAL_CONFIG_DIR:
        raise Exception('GENERAL_CONFIG_DIR must be set')
    extend_with_external_config(config,
                                (config.GENERAL_CONFIG_DIR,
                                 config.GENERAL_CONFIG_FILE))

    # "unsafe" is a set of attributes that can set certain behaviors that
    # are not safe, for example, 'disable_view_change' disables view changes
    # from happening. This might be useful in testing scenarios, but never
    # in a live network.
    if not hasattr(config, 'unsafe'):
        setattr(config, 'unsafe', set())
    return config
Get the next function from the list of routes that is capable of processing o's type. :param o: the object to process :return: the next function
def getFunc(self, o: Any) -> Callable:
    """
    Find the registered handler capable of processing o's type.

    :param o: the object to process
    :return: the handler function for o's type
    :raises RuntimeError: when no registered type matches
    """
    for handled_type, handler in self.routes.items():
        if isinstance(o, handled_type):
            return handler
    # No route matched: log what we do know before failing loudly.
    logger.error("Unhandled msg {}, available handlers are:".format(o))
    for handled_type in self.routes.keys():
        logger.error("  {}".format(handled_type))
    raise RuntimeError("unhandled msg: {}".format(o))
Pass the message as an argument to the function defined in `routes`. If the msg is a tuple, pass the values as multiple arguments to the function. :param msg: tuple of object and callable
def handleSync(self, msg: Any) -> Any:
    """
    Dispatch msg to the handler registered for its type. A plain 2-tuple
    (i.e. not a namedtuple) is treated as (object, extra): the handler is
    looked up for the first element and both are passed as separate
    arguments.

    :param msg: the message (or 2-tuple) to process
    """
    # TODO: non-obvious tuple, re-factor!
    is_plain_pair = (isinstance(msg, tuple) and len(msg) == 2 and
                     not hasattr(msg, '_field_types'))
    if is_plain_pair:
        handler = self.getFunc(msg[0])
        return handler(*msg)
    return self.getFunc(msg)(msg)
Handle both sync and async functions. :param msg: a message :return: the result of execution of the function corresponding to this message's type
async def handle(self, msg: Any) -> Any:
    """
    Dispatch msg via handleSync, awaiting the result when the registered
    handler is asynchronous.

    :param msg: a message
    :return: the handler's (possibly awaited) result
    """
    outcome = self.handleSync(msg)
    if isawaitable(outcome):
        outcome = await outcome
    return outcome
Handle all items in a deque. Can call asynchronous handlers. :param deq: a deque of items to be handled by this router :param limit: the number of items in the deque to the handled :return: the number of items handled successfully
async def handleAll(self, deq: deque, limit=None) -> int:
    """
    Drain items from deq (up to `limit`), awaiting the handler for each.
    A falsy limit means "no limit".

    :param deq: a deque of items to be handled by this router
    :param limit: the maximum number of items to handle
    :return: the number of items handled
    """
    handled = 0
    while deq and (not limit or handled < limit):
        handled += 1
        await self.handle(deq.popleft())
    return handled
Synchronously handle all items in a deque. :param deq: a deque of items to be handled by this router :param limit: the number of items in the deque to the handled :return: the number of items handled successfully
def handleAllSync(self, deq: deque, limit=None) -> int:
    """
    Synchronously drain items from deq (up to `limit`) through
    handleSync. A falsy limit means "no limit".

    :param deq: a deque of items to be handled by this router
    :param limit: the maximum number of items to handle
    :return: the number of items handled
    """
    handled = 0
    while deq and (not limit or handled < limit):
        handled += 1
        self.handleSync(deq.popleft())
    return handled
Load this tree from a dumb data object for serialisation. The object must have attributes tree_size:int and hashes:list.
def load(self, other: merkle_tree.MerkleTree):
    """Replace this tree's state with that of `other`, a simple data
    object exposing tree_size:int and hashes:list.
    """
    size, hashes = other.tree_size, other.hashes
    self._update(size, hashes)
Save this tree into a dumb data object for serialisation. The object must have attributes tree_size:int and hashes:list.
def save(self, other: merkle_tree.MerkleTree):
    """Save this tree into a dumb data object for serialisation.
    The object must have attributes tree_size:int and hashes:list.
    """
    # NOTE(review): due to Python name mangling these assignments set
    # `_<ClassName>__tree_size` / `_<ClassName>__hashes` on `other`,
    # not plain `tree_size` / `hashes` as the docstring suggests. This
    # is only symmetric with load() (which reads other.tree_size) if
    # `other` is an instance of the same class exposing those names via
    # properties — confirm against callers.
    other.__tree_size = self.__tree_size
    other.__hashes = self.__hashes
Returns the root hash of this tree. (Only re-computed on change.)
def root_hash(self):
    """Returns the root hash of this tree. (Only re-computed on change.)"""
    # Lazily computed and cached; an empty tree hashes to the hasher's
    # defined empty value.
    if self.__root_hash is None:
        self.__root_hash = (
            self.__hasher._hash_fold(self.__hashes)
            if self.__hashes else self.__hasher.hash_empty())
    return self.__root_hash
Append a new leaf onto the end of this tree and return the audit path
def append(self, new_leaf: bytes) -> List[bytes]:
    """Append new_leaf onto the end of this tree and return the audit
    path for it: the subtree hashes as they were before the append,
    deepest first."""
    # Capture the pre-append hashes; pushing the leaf mutates them.
    audit_path = list(reversed(self.__hashes))
    self._push_subtree([new_leaf])
    return audit_path
Extend this tree with new_leaves on the end. The algorithm works by using _push_subtree() as a primitive, calling it with the maximum number of allowed leaves until we can add the remaining leaves as a valid entire (non-full) subtree in one go.
def extend(self, new_leaves: List[bytes]):
    """Append new_leaves onto the end of this tree.

    Works by repeatedly pushing the largest complete subtree the current
    tree shape allows (via _push_subtree), then hashing whatever leaves
    remain as one final, possibly non-full, subtree in a single pass.
    """
    n_new = len(new_leaves)
    target_size = self.tree_size + n_new
    consumed = 0
    # Push maximal full subtrees while the shape permits one and enough
    # leaves remain to fill it completely.
    while True:
        height = self.__mintree_height
        if height <= 0:
            break
        subtree_size = 1 << (height - 1)
        if n_new - consumed < subtree_size:
            break
        self._push_subtree(new_leaves[consumed:consumed + subtree_size])
        consumed += subtree_size
    # Remaining leaves fit as one partial subtree; hash them in one go.
    if consumed < n_new:
        _, extra_hashes = self.__hasher._hash_full(new_leaves, consumed, n_new)
        self._update(target_size, self.hashes + extra_hashes)
    assert self.tree_size == target_size
Returns a new tree equal to this tree extended with new_leaves.
def extended(self, new_leaves: List[bytes]):
    """Return a copy of this tree with new_leaves appended.

    The receiver itself is left unmodified.
    """
    clone = self.__copy__()
    clone.extend(new_leaves)
    return clone
Check that the tree has same leaf count as expected and the number of nodes are also as expected
def verify_consistency(self, expected_leaf_count) -> bool:
    """Verify this tree's internal consistency.

    Checks that the leaf count equals ``expected_leaf_count`` and that the
    stored node count matches what that many leaves should produce.

    :raises ConsistencyVerificationFailed: on either mismatch
    :return: True when both checks pass
    """
    if expected_leaf_count != self.leafCount:
        raise ConsistencyVerificationFailed()
    if self.nodeCount != self.get_expected_node_count(self.leafCount):
        raise ConsistencyVerificationFailed()
    return True
Return whether the given str represents a hex value or not :param val: the string to check :return: whether the given str represents a hex value
def isHex(val: str) -> bool:
    """
    Return whether the given str represents a hex value or not

    :param val: the string to check
    :return: whether the given str represents a hex value
    """
    if isinstance(val, bytes):
        # Only utf-8 decodable bytes are considered; anything else is not hex.
        try:
            val = val.decode()
        except ValueError:
            return False
    if not isinstance(val, str):
        return False
    return all(ch in string.hexdigits for ch in val)
Generate client and server CURVE certificate files
def generate_certificates(base_dir, *peer_names, pubKeyDir=None,
                          secKeyDir=None, sigKeyDir=None, verkeyDir=None,
                          clean=True):
    """Generate client and server CURVE certificate files.

    Creates encryption and signing key pairs for every peer in
    ``peer_names`` inside temporary staging directories under ``base_dir``,
    then moves the public/secret halves into their destination directories
    and removes the staging areas.
    """
    pub_dir = os.path.join(base_dir, pubKeyDir or 'public_keys')
    sec_dir = os.path.join(base_dir, secKeyDir or 'private_keys')
    ver_dir = os.path.join(base_dir, verkeyDir or 'verif_keys')
    sig_dir = os.path.join(base_dir, sigKeyDir or 'sig_keys')
    enc_staging = os.path.join(base_dir, '_enc')
    sig_staging = os.path.join(base_dir, '_sig')

    # Start from a clean slate (when requested) for every directory involved.
    for directory in (enc_staging, sig_staging, pub_dir, sec_dir,
                      ver_dir, sig_dir):
        if clean and os.path.exists(directory):
            shutil.rmtree(directory)
        os.makedirs(directory, exist_ok=True)

    # Generate fresh encryption and signing key pairs for each peer.
    for peer_name in peer_names:
        createEncAndSigKeys(enc_staging, sig_staging, peer_name)

    # Sort the generated key files into their final public/secret locations,
    # then drop the staging directories.
    moveKeyFilesToCorrectLocations(enc_staging, pub_dir, sec_dir)
    moveKeyFilesToCorrectLocations(sig_staging, ver_dir, sig_dir)
    shutil.rmtree(enc_staging)
    shutil.rmtree(sig_staging)

    print('Public keys in {}'.format(pub_dir))
    print('Private keys in {}'.format(sec_dir))
    print('Verification keys in {}'.format(ver_dir))
    print('Signing keys in {}'.format(sig_dir))
Queries state for data on specified path :param path: path to data :param is_committed: queries the committed state root if True else the uncommitted root :param with_proof: creates proof if True :return: data
def lookup(self, path, is_committed=True, with_proof=False):
    """
    Queries state for data on specified path

    :param path: path to data
    :param is_committed: queries the committed state root if True else the
        uncommitted root
    :param with_proof: creates proof if True
    :return: 4-tuple ``(value, last_seq_no, last_update_time, proof)``;
        the first three elements are None when nothing is stored at
        ``path`` (the previous ``(str, int)`` annotation was wrong — the
        function has always returned four items)
    """
    # A None path is a programming error, not a user error, hence assert.
    assert path is not None
    head_hash = self.state.committedHeadHash if is_committed else self.state.headHash
    encoded, proof = self._get_value_from_state(path, head_hash, with_proof=with_proof)
    if encoded:
        value, last_seq_no, last_update_time = decode_state_value(encoded)
        return value, last_seq_no, last_update_time, proof
    return None, None, None, proof
Implement exponential moving average
def _accumulate(self, old_accum, next_val): """ Implement exponential moving average """ return old_accum * (1 - self.alpha) + next_val * self.alpha
Calculates node position based on start and height :param start: The sequence number of the first leaf under this tree. :param height: Height of this node in the merkle tree :return: the node's position
def getNodePosition(cls, start, height=None) -> int:
    """
    Calculates node position based on start and height

    :param start: The sequence number of the first leaf under this tree.
    :param height: Height of this node in the merkle tree
    :return: the node's position
    """
    pwr = highest_bit_set(start) - 1
    # NOTE: `or` means an explicit height of 0 also falls back to pwr.
    height = height or pwr
    if count_bits_set(start) == 1:
        # start is an exact power of two: position follows directly.
        return start - 1 + (height - pwr)
    # Otherwise split off the largest power-of-two prefix and recurse on
    # both halves.
    largest = pow(2, pwr)
    return cls.getNodePosition(largest, pwr) + \
        cls.getNodePosition(start - largest, height)
Get the audit path of the leaf at the position specified by seqNo. :param seqNo: sequence number of the leaf to calculate the path for :param offset: the sequence number of the node from where the path should begin. :return: tuple of leafs and nodes
def getPath(cls, seqNo, offset=0):
    """
    Get the audit path of the leaf at the position specified by seqNo.

    :param seqNo: sequence number of the leaf to calculate the path for
    :param offset: the sequence number of the node from where the path
        should begin.
    :return: tuple of leafs and nodes
    """
    if offset >= seqNo:
        raise ValueError("Offset should be less than serial number")
    pwr = highest_bit_set(seqNo - 1 - offset) - 1
    if pwr <= 0:
        # Base case: at most the sibling leaf remains on the path.
        return ([seqNo - 1], []) if seqNo % 2 == 0 else ([], [])
    subtree_start = pow(2, pwr) + offset
    leafs, nodes = cls.getPath(seqNo, subtree_start)
    nodes.append(cls.getNodePosition(subtree_start, pwr))
    return leafs, nodes
Fetches nodeHash based on start leaf and height of the node in the tree. :return: the nodeHash
def readNodeByTree(self, start, height=None):
    """Fetch a node hash given the subtree's first leaf and node height.

    :return: the nodeHash
    """
    return self.readNode(self.getNodePosition(start, height))
Returns True if number of nodes are consistent with number of leaves
def is_consistent(self) -> bool:
    """
    Returns True if number of nodes are consistent with number of leaves
    """
    # Local import — presumably to avoid an import cycle at module load
    # time; confirm before moving it to the top of the file.
    from ledger.compact_merkle_tree import CompactMerkleTree
    expected = CompactMerkleTree.get_expected_node_count(self.leafCount)
    return expected == self.nodeCount
Close current and start next chunk
def _startNextChunk(self) -> None: """ Close current and start next chunk """ if self.currentChunk is None: self._useLatestChunk() else: self._useChunk(self.currentChunkIndex + self.chunkSize)