| INSTRUCTION | RESPONSE |
|---|---|
Return chunk no and 1-based offset of key
:param key:
:return:
|
def _get_key_location(self, key) -> (int, int):
"""
Return chunk no and 1-based offset of key
:param key:
:return:
"""
key = int(key)
if key == 0:
return 1, 0
remainder = key % self.chunkSize
addend = ChunkedFileStore.firstChunkIndex
chunk_no = key - remainder + addend if remainder \
else key - self.chunkSize + addend
offset = remainder or self.chunkSize
return chunk_no, offset
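A minimal standalone sketch of the same arithmetic, with hypothetical values chunk_size=5 and first_chunk_index=1 (both assumed for illustration); chunks are keyed by the first key they contain, so keys 1-5 live in chunk 1 and key 6 opens chunk 6:
def _get_key_location_demo(key, chunk_size=5, first_chunk_index=1):
    # mirror of the logic above: map a 1-based key to (chunk_no, offset)
    key = int(key)
    if key == 0:
        return 1, 0
    remainder = key % chunk_size
    chunk_no = key - remainder + first_chunk_index if remainder \
        else key - chunk_size + first_chunk_index
    offset = remainder or chunk_size
    return chunk_no, offset

assert _get_key_location_demo(5) == (1, 5)
assert _get_key_location_demo(6) == (6, 1)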
|
Determines the file to retrieve the data from and retrieves the data.
:return: value corresponding to specified key
|
def get(self, key) -> str:
"""
Determines the file to retrieve the data from and retrieves the data.
:return: value corresponding to specified key
"""
# TODO: get is creating files when a key is given which is more than
# the store size
chunk_no, offset = self._get_key_location(key)
with self._openChunk(chunk_no) as chunk:
return chunk.get(str(offset))
|
Clear all data in file storage.
|
def reset(self) -> None:
"""
Clear all data in file storage.
"""
self.close()
for f in os.listdir(self.dataDir):
os.remove(os.path.join(self.dataDir, f))
self._useLatestChunk()
|
Lines in a store (all chunks)
:return: lines
|
def _lines(self):
"""
Lines in a store (all chunks)
:return: lines
"""
chunkIndices = self._listChunks()
for chunkIndex in chunkIndices:
with self._openChunk(chunkIndex) as chunk:
yield from chunk._lines()
|
Lists stored chunks
:return: sorted list of available chunk indices
|
def _listChunks(self):
"""
Lists stored chunks
:return: sorted list of available chunk indices
"""
chunks = []
for fileName in os.listdir(self.dataDir):
index = ChunkedFileStore._fileNameToChunkIndex(fileName)
if index is not None:
chunks.append(index)
return sorted(chunks)
|
Filters messages by view number so that only the messages that have the
current view number are retained.
:param wrappedMsgs: the messages to filter
|
def filterMsgs(self, wrappedMsgs: deque) -> deque:
"""
Filters messages by view number so that only the messages that have the
current view number are retained.
:param wrappedMsgs: the messages to filter
"""
filtered = deque()
while wrappedMsgs:
wrappedMsg = wrappedMsgs.popleft()
msg, sender = wrappedMsg
if hasattr(msg, f.VIEW_NO.nm):
reqViewNo = getattr(msg, f.VIEW_NO.nm)
if reqViewNo == self.viewNo:
filtered.append(wrappedMsg)
else:
self.discard(wrappedMsg,
"its view no {} is less than the elector's {}"
.format(reqViewNo, self.viewNo),
logger.debug)
else:
filtered.append(wrappedMsg)
return filtered
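A minimal standalone sketch of the same idea, filtering a deque of (msg, sender) pairs by a viewNo attribute (all names hypothetical, not the production message types):
from collections import deque
from types import SimpleNamespace

def filter_by_view(wrapped_msgs, current_view_no):
    # keep messages whose viewNo matches, or that carry no viewNo at all
    filtered = deque()
    while wrapped_msgs:
        msg, sender = wrapped_msgs.popleft()
        view_no = getattr(msg, 'viewNo', None)
        if view_no is None or view_no == current_view_no:
            filtered.append((msg, sender))
    return filtered

msgs = deque([(SimpleNamespace(viewNo=2), 'NodeA'), (SimpleNamespace(viewNo=3), 'NodeB')])
assert len(filter_by_view(msgs, 2)) == 1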
|
Service at most `limit` messages from the inBox.
:param limit: the maximum number of messages to service
:return: the number of messages successfully processed
|
async def serviceQueues(self, limit=None) -> int:
"""
Service at most `limit` messages from the inBox.
:param limit: the maximum number of messages to service
:return: the number of messages successfully processed
"""
return await self.inBoxRouter.handleAll(self.filterMsgs(self.inBox),
limit)
|
Notifies the primary decider that the view has changed so that it can
prepare for the election, which will then be started from outside by
calling decidePrimaries()
|
def view_change_started(self, viewNo: int):
"""
Notifies the primary decider that the view has changed so that it can
prepare for the election, which will then be started from outside by
calling decidePrimaries()
"""
if viewNo <= self.viewNo:
logger.warning("{}Provided view no {} is not greater"
" than the current view no {}"
.format(VIEW_CHANGE_PREFIX, viewNo, self.viewNo))
return False
self.previous_master_primary = self.node.master_primary_name
for replica in self.replicas.values():
replica.primaryName = None
return True
|
Send a message to the node on which this replica resides.
:param msg: the message to send
|
def send(self, msg):
"""
Send a message to the node on which this replica resides.
:param msg: the message to send
"""
logger.debug("{}'s elector sending {}".format(self.name, msg))
self.outBox.append(msg)
|
Authenticates the given request data by verifying signatures with the
registered authenticators. If the request is a query, it returns
immediately; if no registered authenticator can authenticate the request,
an exception is raised.
:param req_data:
:return:
|
def authenticate(self, req_data, key=None):
"""
Authenticates the given request data by verifying signatures with the
registered authenticators. If the request is a query, it returns
immediately; if no registered authenticator can authenticate the request,
an exception is raised.
:param req_data:
:return:
"""
identifiers = set()
typ = req_data.get(OPERATION, {}).get(TXN_TYPE)
if key and self._check_and_verify_existing_req(req_data, key):
return self._verified_reqs[key]['identifiers']
for authenticator in self._authenticators:
if authenticator.is_query(typ):
return set()
if not (authenticator.is_write(typ) or
authenticator.is_action(typ)):
continue
rv = authenticator.authenticate(deepcopy(req_data)) or set()
identifiers.update(rv)
if not identifiers:
raise NoAuthenticatorFound
if key:
self._verified_reqs[key] = {'signature': req_data.get(f.SIG.nm)}
self._verified_reqs[key]['identifiers'] = identifiers
return identifiers
|
Retrieve a plugin by name.
|
def get(self, name):
"""Retrieve a plugin by name."""
try:
return self.plugins[name]
except KeyError:
raise RuntimeError("plugin {} does not exist".format(name))
|
Authenticate the client's message with the signature provided.
:param identifier: some unique identifier; if None, then try to use
msg['identifier'] as the identifier
:param signature: a utf-8 and base58 encoded signature
:param msg: the message to authenticate
:param threshold: the number of successful signature verifications
required; by default all signatures are required to be verified
:param key: the key of the request, used for storing in internal maps
:return: the identifier; an exception of type SigningException is
raised if the signature is not valid
|
def authenticate(self,
msg: Dict,
identifier: Optional[str] = None,
signature: Optional[str] = None,
threshold: Optional[int] = None,
key: Optional[str] = None) -> str:
"""
Authenticate the client's message with the signature provided.
:param identifier: some unique identifier; if None, then try to use
msg['identifier'] as the identifier
:param signature: a utf-8 and base58 encoded signature
:param msg: the message to authenticate
:param threshold: the number of successful signature verifications
required; by default all signatures are required to be verified
:param key: the key of the request, used for storing in internal maps
:return: the identifier; an exception of type SigningException is
raised if the signature is not valid
"""
|
:param msg:
:param signatures: A mapping from identifiers to signatures.
:param threshold: the number of successful signature verifications
required; by default all signatures are required to be verified
:return: the identifiers whose signatures matched and were correct;
a SigningException is raised if the threshold was not met
|
def authenticate_multi(self, msg: Dict, signatures: Dict[str, str],
threshold: Optional[int] = None):
"""
:param msg:
:param signatures: A mapping from identifiers to signatures.
:param threshold: the number of successful signature verifications
required; by default all signatures are required to be verified
:return: the identifiers whose signatures matched and were correct;
a SigningException is raised if the threshold was not met
"""
|
Prepares the data to be serialised for signing and then verifies the
signature
:param req_data:
:param identifier:
:param signature:
:param verifier:
:return:
|
def authenticate(self, req_data, identifier: Optional[str]=None,
signature: Optional[str]=None, threshold: Optional[int] = None,
verifier: Verifier=DidVerifier):
"""
Prepares the data to be serialised for signing and then verifies the
signature
:param req_data:
:param identifier:
:param signature:
:param verifier:
:return:
"""
to_serialize = {k: v for k, v in req_data.items()
if k not in self.excluded_from_signing}
if req_data.get(f.SIG.nm) is None and \
req_data.get(f.SIGS.nm) is None and \
signature is None:
raise MissingSignature
if req_data.get(f.IDENTIFIER.nm) and (req_data.get(f.SIG.nm) or
signature):
try:
identifier = identifier or self._extract_identifier(req_data)
signature = signature or self._extract_signature(req_data)
signatures = {identifier: signature}
except Exception as ex:
# re-raise known signing errors with the identifier and signature attached
if isinstance(ex, (MissingSignature, EmptySignature, MissingIdentifier,
EmptyIdentifier)):
ex = type(ex)(req_data.get(f.IDENTIFIER.nm), req_data.get(f.SIG.nm))
raise ex
else:
signatures = req_data.get(f.SIGS.nm, None)
return self.authenticate_multi(to_serialize, signatures=signatures,
threshold=threshold, verifier=verifier)
|
Compares two instances.
|
def cmp(cls, v1: 'VersionBase', v2: 'VersionBase') -> int:
""" Compares two instances. """
# TODO types checking
if v1._version > v2._version:
return 1
elif v1._version == v2._version:
return 0
else:
return -1
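A sketch of the comparator contract using a tiny stand-in class with the same _version tuple convention (DemoVersion is hypothetical, not the real VersionBase):
class DemoVersion:
    def __init__(self, *parts):
        self._version = parts

    @classmethod
    def cmp(cls, v1, v2):
        # 1 if v1 > v2, 0 if equal, -1 otherwise (bools subtract to ints)
        return (v1._version > v2._version) - (v1._version < v2._version)

assert DemoVersion.cmp(DemoVersion(1, 2, 0), DemoVersion(1, 10, 0)) == -1
assert DemoVersion.cmp(DemoVersion(2, 0, 0), DemoVersion(2, 0, 0)) == 0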
|
Ensure appropriate connections.
|
def maintainConnections(self, force=False):
"""
Ensure appropriate connections.
"""
now = time.perf_counter()
if now < self.nextCheck and not force:
return False
self.nextCheck = now + (self.config.RETRY_TIMEOUT_NOT_RESTRICTED
if self.isKeySharing
else self.config.RETRY_TIMEOUT_RESTRICTED)
missing = self.connectToMissing()
self.retryDisconnected(exclude=missing)
logger.trace("{} next check for retries in {:.2f} seconds"
.format(self, self.nextCheck - now))
return True
|
Check whether the registry contains addresses that were never connected to
:return: the names of registry entries without a matching connected remote
|
def reconcileNodeReg(self) -> set:
"""
Check whether the registry contains addresses that were never connected to
:return: the names of registry entries without a matching connected remote
"""
matches = set()
for name, remote in self.remotes.items():
if name not in self.registry:
continue
if self.sameAddr(remote.ha, self.registry[name]):
matches.add(name)
logger.debug("{} matched remote {} {}".
format(self, remote.uid, remote.ha))
return self.registry.keys() - matches - {self.name}
|
Try to connect to the missing nodes
|
def connectToMissing(self) -> set:
"""
Try to connect to the missing nodes
"""
missing = self.reconcileNodeReg()
if not missing:
return missing
logger.info("{}{} found the following missing connections: {}".
format(CONNECTION_PREFIX, self, ", ".join(missing)))
for name in missing:
try:
self.connect(name, ha=self.registry[name])
except (ValueError, KeyError, PublicKeyNotFoundOnDisk, VerKeyNotFoundOnDisk) as ex:
logger.warning('{}{} cannot connect to {} due to {}'.
format(CONNECTION_PREFIX, self, name, ex))
return missing
|
Passes the log record back to the CLI for rendering
|
def emit(self, record):
"""
Passes the log record back to the CLI for rendering
"""
should_cb = None
attr_val = None
if hasattr(record, self.typestr):
attr_val = getattr(record, self.typestr)
should_cb = bool(attr_val)
if should_cb is None and record.levelno >= logging.INFO:
should_cb = True
if hasattr(record, 'tags'):
for t in record.tags:
if t in self.tags:
if self.tags[t]:
should_cb = True
continue
else:
should_cb = False
break
if should_cb:
self.callback(record, attr_val)
|
Choose a schema for the client request operation and validate the
operation field. If no schema is found, validation is skipped.
:param dct: the operation field from a client request
:raises: an exception if the request is invalid
|
def validate(self, dct):
"""
Choose a schema for the client request operation and validate the
operation field. If no schema is found, validation is skipped.
:param dct: the operation field from a client request
:raises: an exception if the request is invalid
"""
if not isinstance(dct, dict):
# TODO this check should be in side of the validator not here
self._raise_invalid_fields('', dct, 'wrong type')
txn_type = dct.get(TXN_TYPE)
if txn_type is None:
self._raise_missed_fields(TXN_TYPE)
if txn_type in self.operations:
# check only if the schema is defined
op = self.operations[txn_type]
op.validate(dct)
|
Updates the set of connections of this node if it has changed.
|
def conns(self, value: Set[str]) -> None:
"""
Updates the set of connections of this node if it has changed.
"""
if not self._conns == value:
old = self._conns
self._conns = value
ins = value - old
outs = old - value
logger.display("{}'s connections changed from {} to {}".format(self, old, value))
self._connsChanged(ins, outs)
|
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkProtocolInstances()`
:param ins: new nodes connected
:param outs: nodes no longer connected
|
def _connsChanged(self, ins: Set[str], outs: Set[str]) -> None:
"""
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkProtocolInstances()`
:param ins: new nodes connected
:param outs: nodes no longer connected
"""
for o in outs:
logger.display("{}{} disconnected from {}".format(CONNECTION_PREFIX, self, o),
extra={"cli": "IMPORTANT", "tags": ["connected"]})
for i in ins:
logger.display("{}{} now connected to {}".format(CONNECTION_PREFIX, self, i),
extra={"cli": "IMPORTANT", "tags": ["connected"]})
# remove remotes for same ha when a connection is made
remote = self.getRemote(i)
others = [r for r in self.remotes.values()
if r.ha == remote.ha and r.name != i]
for o in others:
logger.debug("{} removing other remote".format(self))
self.removeRemote(o)
self.onConnsChanged(ins, outs)
|
Returns the name of the remote by HA if found in the node registry, else
returns None
|
def findInNodeRegByHA(self, remoteHa):
"""
Returns the name of the remote by HA if found in the node registry, else
returns None
"""
regName = [nm for nm, ha in self.registry.items()
if self.sameAddr(ha, remoteHa)]
if len(regName) > 1:
raise RuntimeError("more than one node registry entry with the "
"same ha {}: {}".format(remoteHa, regName))
if regName:
return regName[0]
return None
|
Returns the name of the remote object if found in node registry.
:param remote: the remote object
|
def getRemoteName(self, remote):
"""
Returns the name of the remote object if found in node registry.
:param remote: the remote object
"""
if remote.name not in self.registry:
find = [name for name, ha in self.registry.items()
if ha == remote.ha]
assert len(find) == 1
return find[0]
return remote.name
|
Returns the names of nodes in the registry this node is NOT connected
to.
|
def notConnectedNodes(self) -> Set[str]:
"""
Returns the names of nodes in the registry this node is NOT connected
to.
"""
return set(self.registry.keys()) - self.conns
|
Create and bind the ZAP socket
|
def start(self):
"""Create and bind the ZAP socket"""
self.zap_socket = self.context.socket(zmq.REP)
self.zap_socket.linger = 1
zapLoc = 'inproc://zeromq.zap.{}'.format(MultiZapAuthenticator.count)
self.zap_socket.bind(zapLoc)
self.log.debug('Starting ZAP at {}'.format(zapLoc))
|
Close the ZAP socket
|
def stop(self):
"""Close the ZAP socket"""
if self.zap_socket:
self.log.debug(
'Stopping ZAP at {}'.format(self.zap_socket.LAST_ENDPOINT))
super().stop()
|
Start ZAP authentication
|
def start(self):
"""Start ZAP authentication"""
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap())
|
Stop ZAP authentication
|
def stop(self):
"""Stop ZAP authentication"""
if self.__task:
self.__task.cancel()
if self.__poller:
self.__poller.unregister(self.zap_socket)
self.__poller = None
super().stop()
|
Generate a random hex string of the specified size.
DO NOT use Python's built-in random module: it is a pseudo-random number
generator and not secure enough for our needs.
:param size: size of the random string to generate
:return: the hexadecimal random string
|
def randomString(size: int = 20) -> str:
"""
Generate a random hex string of the specified size.
DO NOT use Python's built-in random module: it is a pseudo-random number
generator and not secure enough for our needs.
:param size: size of the random string to generate
:return: the hexadecimal random string
"""
def randomStr(size):
if not isinstance(size, int):
raise PlenumTypeError('size', size, int)
if not size > 0:
raise PlenumValueError('size', size, '> 0')
# Approach 1
rv = randombytes(size // 2).hex()
return rv if size % 2 == 0 else rv + hex(randombytes_uniform(15))[-1]
# Approach 2 this is faster than Approach 1, but lovesh had a doubt
# that part of a random may not be truly random, so until
# we have definite proof going to retain it commented
# rstr = randombytes(size).hex()
# return rstr[:size]
return randomStr(size)
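For comparison only, a standard-library sketch with the same contract built on the secrets module; this is not the libsodium-backed helper above, just an illustration that the result is a hex string of exactly `size` characters:
import secrets

def random_hex_string(size: int = 20) -> str:
    if not isinstance(size, int) or size <= 0:
        raise ValueError('size must be a positive int')
    rv = secrets.token_bytes(size // 2).hex()
    # odd sizes need one extra random hex digit
    return rv if size % 2 == 0 else rv + secrets.choice('0123456789abcdef')

assert len(random_hex_string(7)) == 7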
|
Takes *size* random elements from provided alphabet
:param size:
:param alphabet:
|
def random_from_alphabet(size, alphabet):
"""
Takes *size* random elements from provided alphabet
:param size:
:param alphabet:
"""
import random
return list(random.choice(alphabet) for _ in range(size))
|
Find the most frequent element of a collection.
:param elements: An iterable of elements
:param to_hashable_f: (optional) if defined, will be used to get a
hashable representation of non-hashable elements; otherwise json.dumps
is used with sort_keys=True
:return: element which is the most frequent in the collection and
the number of its occurrences
|
def mostCommonElement(elements: Iterable[T], to_hashable_f: Callable=None):
"""
Find the most frequent element of a collection.
:param elements: An iterable of elements
:param to_hashable_f: (optional) if defined, will be used to get a
hashable representation of non-hashable elements; otherwise json.dumps
is used with sort_keys=True
:return: element which is the most frequent in the collection and
the number of its occurrences
"""
class _Hashable(collections.abc.Hashable):
def __init__(self, orig):
self.orig = orig
if isinstance(orig, collections.abc.Hashable):
self.hashable = orig
elif to_hashable_f is not None:
self.hashable = to_hashable_f(orig)
else:
self.hashable = json.dumps(orig, sort_keys=True)
def __eq__(self, other):
return self.hashable == other.hashable
def __hash__(self):
return hash(self.hashable)
_elements = (_Hashable(el) for el in elements)
most_common, counter = Counter(_elements).most_common(n=1)[0]
return most_common.orig, counter
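A usage sketch, assuming mostCommonElement is importable; dicts are not hashable, so they fall back to the json.dumps representation:
votes = [{'ledger': 1, 'root': 'abc'},
         {'ledger': 1, 'root': 'abc'},
         {'ledger': 1, 'root': 'xyz'}]
winner, count = mostCommonElement(votes)
assert winner == {'ledger': 1, 'root': 'abc'} and count == 2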
|
Search for an attribute in an object and replace it with another.
:param obj: the object to search for the attribute
:param toFrom: dictionary of the attribute name before and after search and replace i.e. search for the key and replace with the value
:param checked: set of attributes of the object for recursion. optional. defaults to `set()`
:param logMsg: a custom log message
|
def objSearchReplace(obj: Any,
toFrom: Dict[Any, Any],
checked: Set[Any]=None,
logMsg: str=None,
deepLevel: int=None) -> None:
"""
Search for an attribute in an object and replace it with another.
:param obj: the object to search for the attribute
:param toFrom: dictionary of the attribute name before and after search and replace i.e. search for the key and replace with the value
:param checked: set of attributes of the object for recursion. optional. defaults to `set()`
:param logMsg: a custom log message
"""
if checked is None:
checked = set()
checked.add(id(obj))
pairs = [(i, getattr(obj, i)) for i in dir(obj) if not i.startswith("__")]
if isinstance(obj, Mapping):
pairs += [x for x in iteritems(obj)]
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
pairs += [x for x in enumerate(obj)]
for nm, o in pairs:
if id(o) not in checked:
mutated = False
for old, new in toFrom.items():
if id(o) == id(old):
logging.debug(
"{}in object {}, attribute {} changed from {} to {}". format(
logMsg + ": " if logMsg else "", obj, nm, old, new))
if isinstance(obj, dict):
obj[nm] = new
else:
setattr(obj, nm, new)
mutated = True
if not mutated:
if deepLevel is not None and deepLevel == 0:
continue
objSearchReplace(o, toFrom, checked, logMsg, deepLevel -
1 if deepLevel is not None else deepLevel)
checked.remove(id(obj))
|
A generator for prime numbers starting from 2.
|
def prime_gen() -> int:
# credit to David Eppstein, Wolfgang Beneicke, Paul Hofstra
"""
A generator for prime numbers starting from 2.
"""
D = {}
yield 2
for q in itertools.islice(itertools.count(3), 0, None, 2):
p = D.pop(q, None)
if p is None:
D[q * q] = 2 * q
yield q
else:
x = p + q
while x in D:
x += p
D[x] = p
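A usage sketch of the incremental sieve above, assuming prime_gen is importable:
import itertools

first_primes = list(itertools.islice(prime_gen(), 8))
assert first_primes == [2, 3, 5, 7, 11, 13, 17, 19]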
|
Run an array of coroutines
:param corogen: a generator that generates coroutines
:return: list of the coroutines' return values
|
async def runall(corogen):
"""
Run an array of coroutines
:param corogen: a generator that generates coroutines
:return: list of the coroutines' return values
"""
results = []
for c in corogen:
result = await c
results.append(result)
return results
|
Keep checking the condition till it is true or a timeout is reached
:param condition: the condition to check (a function that returns bool)
:param args: the arguments to the condition
:return: True if the condition is met in the given timeout, False otherwise
|
async def untilTrue(condition, *args, timeout=5) -> bool:
"""
Keep checking the condition till it is true or a timeout is reached
:param condition: the condition to check (a function that returns bool)
:param args: the arguments to the condition
:return: True if the condition is met in the given timeout, False otherwise
"""
result = False
start = time.perf_counter()
elapsed = 0
while elapsed < timeout:
result = condition(*args)
if result:
break
await asyncio.sleep(.1)
elapsed = time.perf_counter() - start
return result
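A usage sketch, assuming untilTrue is importable; the condition flips to True shortly before the timeout expires:
import asyncio
import time

async def demo():
    deadline = time.perf_counter() + 0.3
    ok = await untilTrue(lambda: time.perf_counter() > deadline, timeout=2)
    assert ok is True

asyncio.run(demo())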
|
Compare the provided fields of two named tuples and return True if they are all equal
:param tuple1:
:param tuple2:
:param fields:
:return:
|
def compareNamedTuple(tuple1: NamedTuple, tuple2: NamedTuple, *fields):
"""
Compare the provided fields of two named tuples and return True if they are all equal
:param tuple1:
:param tuple2:
:param fields:
:return:
"""
tuple1 = tuple1._asdict()
tuple2 = tuple2._asdict()
comp = []
for field in fields:
comp.append(tuple1[field] == tuple2[field])
return all(comp)
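A usage sketch, assuming compareNamedTuple is importable:
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y', 'label'])
a = Point(1, 2, 'first')
b = Point(1, 2, 'second')
assert compareNamedTuple(a, b, 'x', 'y') is True
assert compareNamedTuple(a, b, 'label') is False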
|
Take a datetime object or an int epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc.
|
def prettyDateDifference(startTime, finishTime=None):
"""
Take a datetime object or an int epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc.
"""
from datetime import datetime
if startTime is None:
return None
if not isinstance(startTime, (int, datetime)):
raise RuntimeError("Cannot parse time")
endTime = finishTime or datetime.now()
if isinstance(startTime, int):
diff = endTime - datetime.fromtimestamp(startTime)
elif isinstance(startTime, datetime):
diff = endTime - startTime
else:
diff = endTime - endTime
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return "just now"
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(int(second_diff / 60)) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(int(second_diff / 3600)) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
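A usage sketch, assuming prettyDateDifference is importable:
from datetime import datetime, timedelta

now = datetime.now()
assert prettyDateDifference(now - timedelta(seconds=5), now) == "just now"
assert prettyDateDifference(now - timedelta(minutes=5), now) == "5 minutes ago"
assert prettyDateDifference(now - timedelta(days=1), now) == "Yesterday"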
|
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
|
def compare_3PC_keys(key1, key2) -> int:
"""
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
"""
if key1[0] == key2[0]:
return key2[1] - key1[1]
else:
return key2[0] - key1[0]
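3PC keys are (viewNo, ppSeqNo) pairs; a quick sketch of the comparator contract, assuming compare_3PC_keys is importable:
# key2 is ahead of key1 within the same view
assert compare_3PC_keys((0, 5), (0, 7)) > 0
# key2 belongs to an earlier view, so it is behind key1
assert compare_3PC_keys((1, 1), (0, 100)) < 0
assert compare_3PC_keys((2, 3), (2, 3)) == 0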
|
Create and return a hashStore implementation based on configuration
|
def initHashStore(data_dir, name, config=None, read_only=False) -> HashStore:
"""
Create and return a hashStore implementation based on configuration
"""
config = config or getConfig()
hsConfig = config.hashStore['type'].lower()
if hsConfig == HS_FILE:
return FileHashStore(dataDir=data_dir,
fileNamePrefix=name)
elif hsConfig == HS_LEVELDB or hsConfig == HS_ROCKSDB:
return DbHashStore(dataDir=data_dir,
fileNamePrefix=name,
db_type=hsConfig,
read_only=read_only,
config=config)
else:
return MemoryHashStore()
|
:param didMethodName: name of DID Method
:param required: if True and the DID method is not found, an exception is raised; otherwise None is returned
:return: DID Method
|
def get(self, didMethodName, required=True) -> DidMethod:
"""
:param didMethodName: name of DID Method
:param required: if True and the DID method is not found, an exception is raised; otherwise None is returned
:return: DID Method
"""
dm = self.d.get(didMethodName) if didMethodName else self.default
if not dm and required:
raise DidMethodNotFound
return dm
|
Transmit the specified message to the remote client specified by `remoteName`.
:param msg: a message
:param remoteName: the name of the remote
|
def transmitToClient(self, msg: Any, remoteName: str):
"""
Transmit the specified message to the remote client specified by `remoteName`.
:param msg: a message
:param remoteName: the name of the remote
"""
payload = self.prepForSending(msg)
try:
if isinstance(remoteName, str):
remoteName = remoteName.encode()
self.send(payload, remoteName)
except Exception as ex:
# TODO: This should not be an error since the client might not have
# sent the request to all nodes but only some nodes and other
# nodes might have got this request through PROPAGATE and thus
# might not have connection with the client.
logger.error(
"{}{} unable to send message {} to client {}; Exception: {}" .format(
CONNECTION_PREFIX, self, msg, remoteName, ex.__repr__()))
|
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
|
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed)
totalTimeout: float,
retryWait: float=0.1,
acceptableExceptions=None,
acceptableFails: int=0,
override_timeout_limit=False):
# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will
# exhaust the entire timeout.
"""
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
"""
start = time.perf_counter()
def remaining():
return totalTimeout + start - time.perf_counter()
funcNames = []
others = 0
fails = 0
rem = None
for cf in coroFuncs:
if len(funcNames) < 2:
funcNames.append(get_func_name(cf))
else:
others += 1
# noinspection PyBroadException
try:
rem = remaining()
if rem <= 0:
break
await eventually(cf,
retryWait=retryWait,
timeout=rem,
acceptableExceptions=acceptableExceptions,
verbose=True,
override_timeout_limit=override_timeout_limit)
except Exception as ex:
if acceptableExceptions and type(ex) not in acceptableExceptions:
raise
fails += 1
logger.debug("a coro {} with args {} timed out without succeeding; fail count: "
"{}, acceptable: {}".
format(get_func_name(cf), get_func_args(cf), fails, acceptableFails))
if fails > acceptableFails:
raise
if rem is not None and rem <= 0:
fails += 1
if fails > acceptableFails:
err = 'All checks could not complete successfully since total timeout ' \
'expired {} sec ago'.format(-1 * rem if rem < 0 else 0)
raise EventuallyTimeoutException(err)
if others:
funcNames.append("and {} others".format(others))
desc = ", ".join(funcNames)
logger.debug("{} succeeded with {:.2f} seconds to spare".
format(desc, remaining()))
|
Merge any newly received txns during catchup with already received txns
:param existing_txns:
:param new_txns:
:return:
|
def _merge_catchup_txns(existing_txns, new_txns):
"""
Merge any newly received txns during catchup with already received txns
:param existing_txns:
:param new_txns:
:return:
"""
# TODO: Can we replace this with SortedDict and subtract existing transactions from new before merging?
idx_to_remove = []
start_seq_no = new_txns[0][0]
end_seq_no = new_txns[-1][0]
for seq_no, _ in existing_txns:
if seq_no < start_seq_no:
continue
if seq_no > end_seq_no:
break
idx_to_remove.append(seq_no - start_seq_no)
for idx in reversed(idx_to_remove):
new_txns.pop(idx)
return list(merge(existing_txns, new_txns, key=lambda v: v[0]))
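A usage sketch with (seq_no, txn) pairs, assuming _merge_catchup_txns is importable; seq_no 2 already exists, so its copy in the new batch is dropped before the merge:
existing = [(1, 'txn-1'), (2, 'txn-2')]
new = [(2, 'txn-2-dup'), (3, 'txn-3')]
assert _merge_catchup_txns(existing, new) == [(1, 'txn-1'), (2, 'txn-2'), (3, 'txn-3')]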
|
Transforms transactions for the ledger!
Returns:
Whether the catchup reply corresponding to seq_no is valid
Name of the node from which the txns came
Number of transactions ready to be processed
|
def _has_valid_catchup_replies(self, seq_no: int, txns_to_process: List[Tuple[int, Any]]) -> Tuple[bool, str, int]:
"""
Transforms transactions for the ledger!
Returns:
Whether the catchup reply corresponding to seq_no is valid
Name of the node from which the txns came
Number of transactions ready to be processed
"""
# TODO: Remove after stop passing seqNo here
assert seq_no == txns_to_process[0][0]
# Here seqNo has to be the seqNo of first transaction of
# `catchupReplies`
# Get the transactions in the catchup reply which has sequence
# number `seqNo`
node_name, catchup_rep = self._find_catchup_reply_for_seq_no(seq_no)
txns = catchup_rep.txns
# Add only those transaction in the temporary tree from the above
# batch which are not present in the ledger
# Integer keys being converted to strings when marshaled to JSON
txns = [self._provider.transform_txn_for_ledger(txn)
for s, txn in txns_to_process[:len(txns)]
if str(s) in txns]
# Creating a temporary tree which will be used to verify consistency
# proof, by inserting transactions. Duplicating a merkle tree is not
# expensive since we are using a compact merkle tree.
temp_tree = self._ledger.treeWithAppliedTxns(txns)
proof = catchup_rep.consProof
final_size = self._catchup_till.final_size
final_hash = self._catchup_till.final_hash
try:
logger.info("{} verifying proof for {}, {}, {}, {}, {}".
format(self, temp_tree.tree_size, final_size,
temp_tree.root_hash, final_hash, proof))
verified = self._provider.verifier(self._ledger_id).verify_tree_consistency(
temp_tree.tree_size,
final_size,
temp_tree.root_hash,
Ledger.strToHash(final_hash),
[Ledger.strToHash(p) for p in proof]
)
except Exception as ex:
logger.info("{} could not verify catchup reply {} since {}".format(self, catchup_rep, ex))
verified = False
return bool(verified), node_name, len(txns)
|
Returns validator IPs, ports and keys
:param ledger:
:param returnActive: If returnActive is True, return only those
validators which are not out of service
:return:
|
def parseLedgerForHaAndKeys(ledger, returnActive=True, ledger_size=None):
"""
Returns validator IPs, ports and keys
:param ledger:
:param returnActive: If returnActive is True, return only those
validators which are not out of service
:return:
"""
nodeReg = OrderedDict()
cliNodeReg = OrderedDict()
nodeKeys = {}
activeValidators = set()
try:
TxnStackManager._parse_pool_transaction_file(
ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
ledger_size=ledger_size)
except ValueError:
errMsg = 'Pool transaction file corrupted. Rebuild pool transactions.'
logger.exception(errMsg)
exit(errMsg)
if returnActive:
allNodes = tuple(nodeReg.keys())
for nodeName in allNodes:
if nodeName not in activeValidators:
nodeReg.pop(nodeName, None)
cliNodeReg.pop(nodeName + CLIENT_STACK_SUFFIX, None)
nodeKeys.pop(nodeName, None)
return nodeReg, cliNodeReg, nodeKeys
else:
return nodeReg, cliNodeReg, nodeKeys, activeValidators
|
Makes sure that we have integers as keys after possible deserialization from JSON
:param txn: txn to be transformed
:return: transformed txn
|
def transform_txn_for_ledger(txn):
'''
Makes sure that we have integers as keys after possible deserialization from JSON
:param txn: txn to be transformed
:return: transformed txn
'''
txn_data = get_payload_data(txn)
txn_data[AUDIT_TXN_LEDGERS_SIZE] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGERS_SIZE].items()}
txn_data[AUDIT_TXN_LEDGER_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGER_ROOT].items()}
txn_data[AUDIT_TXN_STATE_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_STATE_ROOT].items()}
return txn
|
helper function for parseLedgerForHaAndKeys
|
def _parse_pool_transaction_file(
ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
ledger_size=None):
"""
helper function for parseLedgerForHaAndKeys
"""
for _, txn in ledger.getAllTxn(to=ledger_size):
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
nodeName = txn_data[DATA][ALIAS]
clientStackName = nodeName + CLIENT_STACK_SUFFIX
nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
else None
cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
else None
if nHa:
nodeReg[nodeName] = HA(*nHa)
if cHa:
cliNodeReg[clientStackName] = HA(*cHa)
try:
# TODO: Need to handle abbreviated verkey
key_type = 'verkey'
verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
key_type = 'identifier'
cryptonymToHex(get_from(txn))
except ValueError:
logger.exception(
'Invalid {}. Rebuild pool transactions.'.format(key_type))
exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
nodeKeys[nodeName] = verkey
services = txn_data[DATA].get(SERVICES)
if isinstance(services, list):
if VALIDATOR in services:
activeValidators.add(nodeName)
else:
activeValidators.discard(nodeName)
|
:param txn_count: The number of requests to commit (The actual requests
are picked up from the uncommitted list from the ledger)
:param state_root: The state trie root after the txns are committed
:param txn_root: The txn merkle root after the txns are committed
:return: list of committed transactions
|
def commit_batch(self, three_pc_batch, prev_handler_result=None):
"""
:param txn_count: The number of requests to commit (The actual requests
are picked up from the uncommitted list from the ledger)
:param state_root: The state trie root after the txns are committed
:param txn_root: The txn merkle root after the txns are committed
:return: list of committed transactions
"""
return self._commit(self.ledger, self.state, three_pc_batch)
|
Return the hash to revert to and the count of reverted txns
:return: the root hash and txn root to revert to (needed in the revertToHead method) and the count of reverted txns
|
def reject_batch(self):
"""
Return the hash to revert to and the count of reverted txns
:return: the root hash and txn root to revert to (needed in the revertToHead method) and the count of reverted txns
"""
prev_size = 0
if len(self.un_committed) == 0:
raise LogicError("No items to return")
if len(self.un_committed) > 0:
_, _, prev_size = self.un_committed.pop()
if len(self.un_committed) == 0:
committed_hash, committed_root, committed_size = self.last_committed
return committed_hash, committed_root, prev_size - committed_size
else:
lhash, ltxn_root, lsize = self.un_committed[-1]
return lhash, ltxn_root, prev_size - lsize
|
Serializes a dict to bytes preserving the order (in sorted order)
:param data: the data to be serialized
:return: serialized data as bytes
|
def serialize(self, data: Dict, fields=None, toBytes=True):
"""
Serializes a dict to bytes preserving the order (in sorted order)
:param data: the data to be serialized
:return: serialized data as bytes
"""
if isinstance(data, Dict):
data = self._sort_dict(data)
return msgpack.packb(data, use_bin_type=True)
|
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
|
def deserialize(self, data, fields=None):
"""
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
"""
# TODO: it can be that we returned data by `get_lines`, that is already deserialized
if not isinstance(data, (bytes, bytearray)):
return data
return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted)
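A minimal round-trip sketch using msgpack directly (current API; the methods above use the older encoding= keyword and a custom object_pairs_hook). Sorting keys before packing is what makes logically equal dicts serialize to identical bytes; this sketch only sorts the top level:
from collections import OrderedDict
import msgpack

def pack_sorted(data: dict) -> bytes:
    return msgpack.packb(OrderedDict(sorted(data.items())), use_bin_type=True)

a = pack_sorted({'b': 2, 'a': 1})
b = pack_sorted({'a': 1, 'b': 2})
assert a == b
assert msgpack.unpackb(a, raw=False, object_pairs_hook=OrderedDict) == OrderedDict([('a', 1), ('b', 2)])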
|
:param txnCount: The number of requests to commit (The actual requests
are picked up from the uncommitted list from the ledger)
:param stateRoot: The state trie root after the txns are committed
:param txnRoot: The txn merkle root after the txns are committed
:return: list of committed transactions
|
def commit(self, txnCount, stateRoot, txnRoot, ppTime) -> List:
"""
:param txnCount: The number of requests to commit (The actual requests
are picked up from the uncommitted list from the ledger)
:param stateRoot: The state trie root after the txns are committed
:param txnRoot: The txn merkle root after the txns are committed
:return: list of committed transactions
"""
return self._commit(self.ledger, self.state, txnCount, stateRoot,
txnRoot, ppTime, ts_store=self.ts_store)
|
Get a value (and optionally a proof) for the given path in the state trie.
Does not return the proof if there is no aggregate signature for it.
:param path: the path to generate a state proof for
:param head_hash: the root to create the proof against
:param with_proof: whether to return a proof along with the value
:return: a tuple of the value and the state proof (the proof is None when it is not requested or cannot be built)
|
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
'''
Get a value (and optionally a proof) for the given path in the state trie.
Does not return the proof if there is no aggregate signature for it.
:param path: the path to generate a state proof for
:param head_hash: the root to create the proof against
:param with_proof: whether to return a proof along with the value
:return: a tuple of the value and the state proof (the proof is None when it is not requested or cannot be built)
'''
root_hash = head_hash if head_hash else self.state.committedHeadHash
encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
if not with_proof:
return self.state.get_for_root_hash(root_hash, path), None
if not multi_sig:
# Just return the value and not proof
try:
return self.state.get_for_root_hash(root_hash, path), None
except KeyError:
return None, None
else:
try:
proof, value = self.state.generate_state_proof(key=path,
root=self.state.get_head_by_hash(root_hash),
serialize=True,
get_value=True)
value = self.state.get_decoded(value) if value else value
encoded_proof = proof_nodes_serializer.serialize(proof)
proof = {
ROOT_HASH: encoded_root_hash,
MULTI_SIGNATURE: multi_sig.as_dict(),
PROOF_NODES: encoded_proof
}
return value, proof
except KeyError:
return None, None
|
Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid.
|
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int,
old_root: bytes, new_root: bytes,
proof: Sequence[bytes]):
"""Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid.
"""
old_size = old_tree_size
new_size = new_tree_size
if old_size < 0 or new_size < 0:
raise ValueError("Negative tree size")
if old_size > new_size:
raise ValueError("Older tree has bigger size (%d vs %d), did "
"you supply inputs in the wrong order?" %
(old_size, new_size))
if old_size == new_size:
if old_root == new_root:
if proof:
logging.debug("Trees are identical, ignoring proof")
return True
else:
raise error.ConsistencyError("Inconsistency: different root "
"hashes for the same tree size")
if old_size == 0:
if proof:
# A consistency proof with an empty tree is an empty proof.
# Anything is consistent with an empty tree, so ignore whatever
# bogus proof was supplied. Note we do not verify here that the
# root hash is a valid hash for an empty tree.
logging.debug("Ignoring non-empty consistency proof for "
"empty tree.")
return True
# Now 0 < old_size < new_size
# A consistency proof is essentially an audit proof for the node with
# index old_size - 1 in the newer tree. The sole difference is that
# the path is already hashed together into a single hash up until the
# first audit node that occurs in the newer tree only.
node = old_size - 1
last_node = new_size - 1
# While we are the right child, everything is in both trees,
# so move one level up.
while node % 2:
node //= 2
last_node //= 2
p = iter(proof)
try:
if node:
# Compute the two root hashes in parallel.
new_hash = old_hash = next(p)
else:
# The old tree was balanced (2**k nodes), so we already have
# the first root hash.
new_hash = old_hash = old_root
while node:
if node % 2:
# node is a right child: left sibling exists in both trees.
next_node = next(p)
old_hash = self.hasher.hash_children(next_node, old_hash)
new_hash = self.hasher.hash_children(next_node, new_hash)
elif node < last_node:
# node is a left child: right sibling only exists in the
# newer tree.
new_hash = self.hasher.hash_children(new_hash, next(p))
# else node == last_node: node is a left child with no sibling
# in either tree.
node //= 2
last_node //= 2
# Now old_hash is the hash of the first subtree. If the two trees
# have different height, continue the path until the new root.
while last_node:
n = next(p)
new_hash = self.hasher.hash_children(new_hash, n)
last_node //= 2
# If the second hash does not match, the proof is invalid for the
# given pair. If, on the other hand, the newer hash matches but the
# older one doesn't, then the proof (together with the signatures
# on the hashes) is proof of inconsistency.
# Continue to find out.
if new_hash != new_root:
raise error.ProofError("Bad Merkle proof: second root hash "
"does not match. Expected hash: %s "
", computed hash: %s" %
(hexlify(new_root).strip(),
hexlify(new_hash).strip()))
elif old_hash != old_root:
raise error.ConsistencyError("Inconsistency: first root hash "
"does not match. Expected hash: "
"%s, computed hash: %s" %
(hexlify(old_root).strip(),
hexlify(old_hash).strip())
)
except StopIteration:
raise error.ProofError("Merkle proof is too short")
# We've already verified consistency, so accept the proof even if
# there's garbage left over (but log a warning).
try:
next(p)
except StopIteration:
pass
else:
logging.debug("Proof has extra nodes")
return True
|
Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf_hash: The hash of the leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
|
def verify_leaf_hash_inclusion(self, leaf_hash: bytes, leaf_index: int,
proof: List[bytes], sth: STH):
"""Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf_hash: The hash of the leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
"""
leaf_index = int(leaf_index)
tree_size = int(sth.tree_size)
# TODO(eranm): Verify signature over STH
if tree_size <= leaf_index:
raise ValueError("Provided STH is for a tree that is smaller "
"than the leaf index. Tree size: %d Leaf "
"index: %d" % (tree_size, leaf_index))
if tree_size < 0 or leaf_index < 0:
raise ValueError("Negative tree size or leaf index: "
"Tree size: %d Leaf index: %d" %
(tree_size, leaf_index))
calculated_root_hash = self._calculate_root_hash_from_audit_path(
leaf_hash, leaf_index, proof[:], tree_size)
if calculated_root_hash == sth.sha256_root_hash:
return True
raise error.ProofError("Constructed root hash differs from provided "
"root hash. Constructed: %s Expected: %s" %
(hexlify(calculated_root_hash).strip(),
hexlify(sth.sha256_root_hash).strip()))
|
Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf: The leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
|
def verify_leaf_inclusion(self, leaf: bytes, leaf_index: int,
proof: List[bytes], sth: STH):
"""Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf: The leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
"""
leaf_hash = self.hasher.hash_leaf(leaf)
return self.verify_leaf_hash_inclusion(leaf_hash, leaf_index, proof,
sth)
|
Schedule an action to be executed after `seconds` seconds.
:param action: a callable to be scheduled
:param seconds: the time in seconds after which the action must be executed
|
def _schedule(self, action: Callable, seconds: int=0) -> int:
"""
Schedule an action to be executed after `seconds` seconds.
:param action: a callable to be scheduled
:param seconds: the time in seconds after which the action must be executed
"""
self.aid += 1
if seconds > 0:
nxt = time.perf_counter() + seconds
if nxt < self.aqNextCheck:
self.aqNextCheck = nxt
logger.trace("{} scheduling action {} with id {} to run in {} "
"seconds".format(self, get_func_name(action),
self.aid, seconds))
self.aqStash.append((nxt, (action, self.aid)))
else:
logger.trace("{} scheduling action {} with id {} to run now".
format(self, get_func_name(action), self.aid))
self.actionQueue.append((action, self.aid))
if action not in self.scheduled:
self.scheduled[action] = []
self.scheduled[action].append(self.aid)
return self.aid
|
Run all pending actions in the action queue.
:return: number of actions executed.
|
def _serviceActions(self) -> int:
"""
Run all pending actions in the action queue.
:return: number of actions executed.
"""
if self.aqStash:
tm = time.perf_counter()
if tm > self.aqNextCheck:
earliest = float('inf')
for d in list(self.aqStash):
nxt, action = d
if tm > nxt:
self.actionQueue.appendleft(action)
self.aqStash.remove(d)
if nxt < earliest:
earliest = nxt
self.aqNextCheck = earliest
count = len(self.actionQueue)
while self.actionQueue:
action, aid = self.actionQueue.popleft()
assert action in self.scheduled
if aid in self.scheduled[action]:
self.scheduled[action].remove(aid)
logger.trace("{} running action {} with id {}".
format(self, get_func_name(action), aid))
action()
else:
logger.trace("{} not running cancelled action {} with id {}".
format(self, get_func_name(action), aid))
return count
|
Execute a transaction that involves consensus pool management, like
adding a node, client or a steward.
:param ppTime: PrePrepare request time
:param reqs_keys: requests keys to be committed
|
def execute_pool_txns(self, three_pc_batch) -> List:
"""
Execute a transaction that involves consensus pool management, like
adding a node, client or a steward.
:param ppTime: PrePrepare request time
:param reqs_keys: requests keys to be committed
"""
committed_txns = self.default_executer(three_pc_batch)
for txn in committed_txns:
self.poolManager.onPoolMembershipChange(txn)
return committed_txns
|
If the trie is empty then initialize it by applying
txns from ledger.
|
def init_state_from_ledger(self, state: State, ledger: Ledger, reqHandler):
"""
If the trie is empty then initialize it by applying
txns from ledger.
"""
if state.isEmpty:
logger.info('{} found state to be empty, recreating from '
'ledger'.format(self))
for seq_no, txn in ledger.getAllTxn():
txn = self.update_txn_with_extra_data(txn)
reqHandler.updateState([txn, ], isCommitted=True)
state.commit(rootHash=state.headHash)
|
Notifies the node that the view has changed so that it can
prepare for the election
|
def on_view_change_start(self):
"""
Notifies the node that the view has changed so that it can
prepare for the election
"""
self.view_changer.start_view_change_ts = self.utc_epoch()
for replica in self.replicas.values():
replica.on_view_change_start()
logger.info("{} resetting monitor stats at view change start".format(self))
self.monitor.reset()
self.processStashedMsgsForView(self.viewNo)
self.backup_instance_faulty_processor.restore_replicas()
self.drop_primaries()
pop_keys(self.msgsForFutureViews, lambda x: x <= self.viewNo)
self.logNodeInfo()
# Keep on doing catchup until > (n-f) nodes have the same LedgerStatus or
# have a prepared certificate for the first PRE-PREPARE of the new view
logger.info('{}{} changed to view {}, will start catchup now'.
format(VIEW_CHANGE_PREFIX, self, self.viewNo))
self._cancel(self._check_view_change_completed)
self._schedule(action=self._check_view_change_completed,
seconds=self._view_change_timeout)
# Set to 0 even when set to 0 in `on_view_change_complete` since
# catchup might be started due to several reasons.
self.catchup_rounds_without_txns = 0
self.last_sent_pp_store_helper.erase_last_sent_pp_seq_no()
|
View change completes for a replica when the last ppSeqNo and the state
and txn roots for the previous view have been decided
|
def on_view_change_complete(self):
"""
View change completes for a replica when the last ppSeqNo and the state
and txn roots for the previous view have been decided
"""
self.future_primaries_handler.set_node_state()
if not self.replicas.all_instances_have_primary:
raise LogicError(
"{} Not all replicas have "
"primaries: {}".format(self, self.replicas.primary_name_by_inst_id)
)
self._cancel(self._check_view_change_completed)
for replica in self.replicas.values():
replica.on_view_change_done()
self.view_changer.last_completed_view_no = self.view_changer.view_no
# Remove already ordered requests from requests list after view change
# If view change happen when one half of nodes ordered on master
# instance and backup but other only on master then we need to clear
# requests list. We do this to stop transactions ordering on backup
# replicas that have already been ordered on master.
# Test for this case in plenum/test/view_change/
# test_no_propagate_request_on_different_last_ordered_before_vc.py
for replica in self.replicas.values():
replica.clear_requests_and_fix_last_ordered()
self.monitor.reset()
|
Create and return a hashStore implementation based on configuration
|
def getHashStore(self, name) -> HashStore:
"""
Create and return a hashStore implementation based on configuration
"""
return initHashStore(self.dataLocation, name, self.config)
|
Actions to be performed on stopping the node.
- Close the UDP socket of the nodestack
|
def onStopping(self):
"""
Actions to be performed on stopping the node.
- Close the UDP socket of the nodestack
"""
# Log stats should happen before any kind of reset or clearing
if self.config.STACK_COMPANION == 1:
add_stop_time(self.ledger_dir, self.utc_epoch())
self.logstats()
self.reset()
# Stop the ledgers
for ledger in self.ledgers:
try:
ledger.stop()
except Exception as ex:
logger.exception('{} got exception while stopping ledger: {}'.format(self, ex))
self.nodestack.stop()
self.clientstack.stop()
self.closeAllKVStores()
self._info_tool.stop()
self.mode = None
self.execute_hook(NodeHooks.POST_NODE_STOPPED)
|
This function is executed by the node each time it gets its share of
CPU time from the event loop.
:param limit: the number of items to be serviced in this attempt
:return: total number of messages serviced by this node
|
async def prod(self, limit: int = None) -> int:
""".opened
This function is executed by the node each time it gets its share of
CPU time from the event loop.
:param limit: the number of items to be serviced in this attempt
:return: total number of messages serviced by this node
"""
c = 0
if self.last_prod_started:
self.metrics.add_event(MetricsName.LOOPER_RUN_TIME_SPENT, time.perf_counter() - self.last_prod_started)
self.last_prod_started = time.perf_counter()
self.quota_control.update_state({
'request_queue_size': len(self.monitor.requestTracker.unordered())}
)
if self.status is not Status.stopped:
c += await self.serviceReplicas(limit)
c += await self.serviceNodeMsgs(limit)
c += await self.serviceClientMsgs(limit)
with self.metrics.measure_time(MetricsName.SERVICE_NODE_ACTIONS_TIME):
c += self._serviceActions()
with self.metrics.measure_time(MetricsName.SERVICE_TIMERS_TIME):
self.timer.service()
with self.metrics.measure_time(MetricsName.SERVICE_MONITOR_ACTIONS_TIME):
c += self.monitor._serviceActions()
c += await self.serviceViewChanger(limit)
c += await self.service_observable(limit)
c += await self.service_observer(limit)
with self.metrics.measure_time(MetricsName.FLUSH_OUTBOXES_TIME):
self.nodestack.flushOutBoxes()
if self.isGoing():
with self.metrics.measure_time(MetricsName.SERVICE_NODE_LIFECYCLE_TIME):
self.nodestack.serviceLifecycle()
with self.metrics.measure_time(MetricsName.SERVICE_CLIENT_STACK_TIME):
self.clientstack.serviceClientStack()
return c
|
Process `limit` number of messages from the nodeInBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
|
async def serviceNodeMsgs(self, limit: int) -> int:
"""
Process `limit` number of messages from the nodeInBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
"""
with self.metrics.measure_time(MetricsName.SERVICE_NODE_STACK_TIME):
n = await self.nodestack.service(limit, self.quota_control.node_quota)
self.metrics.add_event(MetricsName.NODE_STACK_MESSAGES_PROCESSED, n)
await self.processNodeInBox()
return n
|
Process `limit` number of messages from the clientInBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
|
async def serviceClientMsgs(self, limit: int) -> int:
"""
Process `limit` number of messages from the clientInBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
"""
c = await self.clientstack.service(limit, self.quota_control.client_quota)
self.metrics.add_event(MetricsName.CLIENT_STACK_MESSAGES_PROCESSED, c)
await self.processClientInBox()
return c
|
Service the view_changer's inBox, outBox and action queues.
:return: the number of messages successfully serviced
|
async def serviceViewChanger(self, limit) -> int:
"""
Service the view_changer's inBox, outBox and action queues.
:return: the number of messages successfully serviced
"""
if not self.isReady():
return 0
o = self.serviceViewChangerOutBox(limit)
i = await self.serviceViewChangerInbox(limit)
return o + i
|
Service the observable's inBox and outBox
:return: the number of messages successfully serviced
|
async def service_observable(self, limit) -> int:
"""
Service the observable's inBox and outBox
:return: the number of messages successfully serviced
"""
if not self.isReady():
return 0
o = self._service_observable_out_box(limit)
i = await self._observable.serviceQueues(limit)
return o + i
|
Service at most `limit` number of messages from the observable's outBox.
:return: the number of messages successfully serviced.
|
def _service_observable_out_box(self, limit: int = None) -> int:
"""
Service at most `limit` number of messages from the observable's outBox.
:return: the number of messages successfully serviced.
"""
msg_count = 0
while True:
if limit and msg_count >= limit:
break
msg = self._observable.get_output()
if not msg:
break
msg_count += 1
msg, observer_ids = msg
# TODO: it's assumed that all Observers are connected the same way as Validators
self.sendToNodes(msg, observer_ids)
return msg_count
|
Service the observer's inBox and outBox
:return: the number of messages successfully serviced
|
async def service_observer(self, limit) -> int:
"""
Service the observer's inBox and outBox
:return: the number of messages successfully serviced
"""
if not self.isReady():
return 0
return await self._observer.serviceQueues(limit)
|
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
    - Set status to one of started, started_hungry or starting depending on
      the number of connected nodes.
- Check protocol instances. See `checkInstances()`
|
def onConnsChanged(self, joined: Set[str], left: Set[str]):
"""
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
        - Set status to one of started, started_hungry or starting depending on
          the number of connected nodes.
- Check protocol instances. See `checkInstances()`
"""
_prev_status = self.status
if self.isGoing():
if self.connectedNodeCount == self.totalNodes:
self.status = Status.started
elif self.connectedNodeCount >= self.minimumNodes:
self.status = Status.started_hungry
else:
self.status = Status.starting
self.elector.nodeCount = self.connectedNodeCount
if self.master_primary_name in joined:
self.primaries_disconnection_times[self.master_replica.instId] = None
if self.master_primary_name in left:
logger.display('{} lost connection to primary of master'.format(self))
self.lost_master_primary()
elif _prev_status == Status.starting and self.status == Status.started_hungry \
and self.primaries_disconnection_times[self.master_replica.instId] is not None \
and self.master_primary_name is not None:
"""
            Such a situation may occur if the pool has come back to reachable consensus but
            the primary is still disconnected, so a view change proposal makes sense now.
"""
self._schedule_view_change()
for inst_id, replica in self.replicas.items():
if not replica.isMaster and replica.primaryName is not None:
primary_node_name = replica.primaryName.split(':')[0]
if primary_node_name in joined:
self.primaries_disconnection_times[inst_id] = None
elif primary_node_name in left:
self.primaries_disconnection_times[inst_id] = time.perf_counter()
self._schedule_replica_removal(inst_id)
if self.isReady():
self.checkInstances()
else:
logger.info("{} joined nodes {} but status is {}".format(self, joined, self.status))
# Send ledger status whether ready (connected to enough nodes) or not
for node in joined:
self.send_ledger_status_to_newly_connected_node(node)
for node in left:
self.network_i3pc_watcher.disconnect(node)
for node in joined:
self.network_i3pc_watcher.connect(node)
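
# A hedged sketch of the status decision made above, assuming the usual BFT
# sizing where f = (total - 1) // 3 and a minimum of total - f connected nodes
# is needed for consensus; the real thresholds come from the node's config.
from enum import Enum

class _Status(Enum):
    starting = 1
    started_hungry = 2
    started = 3

def node_status(connected: int, total: int) -> _Status:
    f = (total - 1) // 3
    minimum = total - f
    if connected == total:
        return _Status.started          # every node in the pool is reachable
    if connected >= minimum:
        return _Status.started_hungry   # enough for consensus, but not all
    return _Status.starting             # below the consensus threshold

assert node_status(4, 4) is _Status.started
assert node_status(3, 4) is _Status.started_hungry
assert node_status(2, 4) is _Status.starting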
|
    Ask another node for LedgerStatus
|
def _ask_for_ledger_status(self, node_name: str, ledger_id):
"""
        Ask another node for LedgerStatus
"""
self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id},
[node_name, ])
logger.info("{} asking {} for ledger status of ledger {}".format(self, node_name, ledger_id))
|
Check if this node has the minimum required number of protocol
instances, i.e. f+1. If not, add a replica. If no election is in
progress, this node will try to nominate one of its replicas as primary.
This method is called whenever a connection with a new node is
established.
|
def checkInstances(self) -> None:
# TODO: Is this method really needed?
"""
Check if this node has the minimum required number of protocol
instances, i.e. f+1. If not, add a replica. If no election is in
progress, this node will try to nominate one of its replicas as primary.
This method is called whenever a connection with a new node is
established.
"""
logger.debug("{} choosing to start election on the basis of count {} and nodes {}".
format(self, self.connectedNodeCount, self.nodestack.conns))
|
Add or remove replicas depending on `f`
|
def adjustReplicas(self,
old_required_number_of_instances: int,
new_required_number_of_instances: int):
"""
Add or remove replicas depending on `f`
"""
# TODO: refactor this
replica_num = old_required_number_of_instances
while replica_num < new_required_number_of_instances:
self.replicas.add_replica(replica_num)
self.processStashedMsgsForReplica(replica_num)
replica_num += 1
while replica_num > new_required_number_of_instances:
replica_num -= 1
self.replicas.remove_replica(replica_num)
pop_keys(self.msgsForFutureReplicas, lambda inst_id: inst_id < new_required_number_of_instances)
if len(self.primaries_disconnection_times) < new_required_number_of_instances:
self.primaries_disconnection_times.extend(
[None] * (new_required_number_of_instances - len(self.primaries_disconnection_times)))
elif len(self.primaries_disconnection_times) > new_required_number_of_instances:
self.primaries_disconnection_times = self.primaries_disconnection_times[:new_required_number_of_instances]
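
# A hedged sketch of the grow/shrink logic in adjustReplicas above: add replicas
# until the required number of instances is reached, drop the highest-numbered
# ones otherwise, and keep the per-instance bookkeeping list the same length.
# Names are illustrative, not the project's API.
from typing import Callable, List, Optional

def adjust_instances(replicas: List[str],
                     disconnection_times: List[Optional[float]],
                     required: int,
                     make_replica: Callable[[int], str]) -> None:
    while len(replicas) < required:
        replicas.append(make_replica(len(replicas)))
    while len(replicas) > required:
        replicas.pop()
    if len(disconnection_times) < required:
        disconnection_times.extend([None] * (required - len(disconnection_times)))
    else:
        del disconnection_times[required:]

_reps, _times = ["replica:0", "replica:1"], [None, None]
adjust_instances(_reps, _times, 3, lambda i: "replica:{}".format(i))
assert _reps == ["replica:0", "replica:1", "replica:2"] and len(_times) == 3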
|
    Check whether a new primary was elected.
    If it was not, start view change again
|
def _check_view_change_completed(self):
"""
        Check whether a new primary was elected.
        If it was not, start view change again
"""
logger.info('{} running the scheduled check for view change completion'.format(self))
if not self.view_changer.view_change_in_progress:
            logger.info('{} has already completed view change'.format(self))
return False
self.view_changer.on_view_change_not_completed_in_time()
return True
|
Process `limit` number of replica messages
|
def service_replicas_outbox(self, limit: int = None) -> int:
"""
Process `limit` number of replica messages
"""
# TODO: rewrite this using Router
num_processed = 0
for message in self.replicas.get_output(limit):
num_processed += 1
if isinstance(message, (PrePrepare, Prepare, Commit, Checkpoint)):
self.send(message)
elif isinstance(message, Ordered):
self.try_processing_ordered(message)
elif isinstance(message, tuple) and isinstance(message[1], Reject):
with self.metrics.measure_time(MetricsName.NODE_SEND_REJECT_TIME):
digest, reject = message
result_reject = Reject(
reject.identifier,
reject.reqId,
self.reasonForClientFromException(
reject.reason))
# TODO: What the case when reqKey will be not in requestSender dict
if digest in self.requestSender:
self.transmitToClient(result_reject, self.requestSender[digest])
self.doneProcessingReq(digest)
elif isinstance(message, Exception):
self.processEscalatedException(message)
else:
# TODO: should not this raise exception?
logger.error("Received msg {} and don't "
"know how to handle it".format(message))
return num_processed
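
# The TODO above suggests replacing the isinstance chain with a router. A hedged
# sketch of what a type-keyed dispatch table could look like; this is not the
# project's Router class, and the message types here are stand-ins.
from collections import namedtuple
from typing import Any, Callable, Dict, Type

_PrePrepare = namedtuple('_PrePrepare', ['view_no', 'pp_seq_no'])
_Ordered = namedtuple('_Ordered', ['view_no', 'pp_seq_no'])

class OutboxRouter:
    def __init__(self, routes: Dict[Type, Callable[[Any], None]]):
        self._routes = routes

    def handle(self, msg: Any) -> None:
        handler = self._routes.get(type(msg))
        if handler is None:
            # The original code only logs here; raising makes the gap explicit.
            raise TypeError("no handler for {}".format(type(msg).__name__))
        handler(msg)

_router = OutboxRouter({
    _PrePrepare: lambda m: print("broadcast", m),
    _Ordered: lambda m: print("execute", m),
})
_router.handle(_Ordered(view_no=0, pp_seq_no=1))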
|
Service at most `limit` number of messages from the view_changer's outBox.
:return: the number of messages successfully serviced.
|
def serviceViewChangerOutBox(self, limit: int = None) -> int:
"""
Service at most `limit` number of messages from the view_changer's outBox.
:return: the number of messages successfully serviced.
"""
msgCount = 0
while self.view_changer.outBox and (not limit or msgCount < limit):
msgCount += 1
msg = self.view_changer.outBox.popleft()
if isinstance(msg, (InstanceChange, ViewChangeDone)):
self.send(msg)
else:
logger.error("Received msg {} and don't know how to handle it".
format(msg))
return msgCount
|
    Service at most `limit` number of messages from the view_changer's inBox.
:return: the number of messages successfully serviced.
|
async def serviceViewChangerInbox(self, limit: int = None) -> int:
"""
        Service at most `limit` number of messages from the view_changer's inBox.
:return: the number of messages successfully serviced.
"""
msgCount = 0
while self.msgsToViewChanger and (not limit or msgCount < limit):
msgCount += 1
msg = self.msgsToViewChanger.popleft()
self.view_changer.inBox.append(msg)
await self.view_changer.serviceQueues(limit)
return msgCount
|
Return the name of the primary node of the master instance
|
def master_primary_name(self) -> Optional[str]:
"""
Return the name of the primary node of the master instance
"""
master_primary_name = self.master_replica.primaryName
if master_primary_name:
return self.master_replica.getNodeName(master_primary_name)
return None
|
    Return true if the instance id of the message corresponds to a correct
replica.
:param msg: the node message to validate
:return:
|
def msgHasAcceptableInstId(self, msg, frm) -> bool:
"""
        Return true if the instance id of the message corresponds to a correct
replica.
:param msg: the node message to validate
:return:
"""
# TODO: refactor this! this should not do anything except checking!
instId = getattr(msg, f.INST_ID.nm, None)
if not (isinstance(instId, int) and instId >= 0):
return False
if instId >= self.requiredNumberOfInstances:
if instId not in self.msgsForFutureReplicas:
self.msgsForFutureReplicas[instId] = deque()
self.msgsForFutureReplicas[instId].append((msg, frm))
logger.debug("{} queueing message {} for future protocol instance {}".format(self, msg, instId))
return False
return True
|
    Return true if the view no of the message corresponds to the current view
no or a view no in the future
:param msg: the node message to validate
:return:
|
def msgHasAcceptableViewNo(self, msg, frm) -> bool:
"""
        Return true if the view no of the message corresponds to the current view
no or a view no in the future
:param msg: the node message to validate
:return:
"""
# TODO: refactor this! this should not do anything except checking!
view_no = getattr(msg, f.VIEW_NO.nm, None)
if not (isinstance(view_no, int) and view_no >= 0):
return False
if self.viewNo - view_no > 1:
self.discard(msg, "un-acceptable viewNo {}"
.format(view_no), logMethod=logger.warning)
if isinstance(msg, ViewChangeDone) and view_no < self.viewNo:
self.discard(msg, "Proposed viewNo {} less, then current {}"
.format(view_no, self.viewNo), logMethod=logger.warning)
elif (view_no > self.viewNo) or self._is_initial_view_change_now():
if view_no not in self.msgsForFutureViews:
self.msgsForFutureViews[view_no] = deque()
logger.debug('{} stashing a message for a future view: {}'.format(self, msg))
self.msgsForFutureViews[view_no].append((msg, frm))
if isinstance(msg, ViewChangeDone):
future_vcd_msg = FutureViewChangeDone(vcd_msg=msg)
self.msgsToViewChanger.append((future_vcd_msg, frm))
else:
return True
return False
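
# A hedged and simplified sketch of the accept/stash decision above: accept
# messages for the current view, stash messages for future views keyed by view
# number, and report anything else as not acceptable. The real method also
# special-cases ViewChangeDone and an initial view change.
from collections import deque
from typing import Any, Deque, Dict, Tuple

def stash_or_accept(stash: Dict[int, Deque[Tuple[Any, str]]],
                    current_view: int, msg_view: int,
                    msg: Any, frm: str) -> bool:
    if msg_view == current_view:
        return True                                   # process now
    if msg_view > current_view:
        stash.setdefault(msg_view, deque()).append((msg, frm))
    return False                                      # stashed or stale

_stash = {}
assert stash_or_accept(_stash, current_view=2, msg_view=2, msg="Prepare", frm="Node2")
assert not stash_or_accept(_stash, current_view=2, msg_view=3, msg="Prepare", frm="Node3")
assert len(_stash[3]) == 1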
|
Send the message to the intended replica.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
|
def sendToReplica(self, msg, frm):
"""
Send the message to the intended replica.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
"""
# TODO: discard or stash messages here instead of doing
# this in msgHas* methods!!!
if self.msgHasAcceptableInstId(msg, frm):
self.replicas.pass_message((msg, frm), msg.instId)
|
Send the message to the intended view changer.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
|
def sendToViewChanger(self, msg, frm):
"""
Send the message to the intended view changer.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
"""
if (isinstance(msg, InstanceChange) or
self.msgHasAcceptableViewNo(msg, frm)):
logger.debug("{} sending message to view changer: {}".
format(self, (msg, frm)))
self.msgsToViewChanger.append((msg, frm))
|
Send the message to the observer.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
|
def send_to_observer(self, msg, frm):
"""
Send the message to the observer.
:param msg: the message to send
:param frm: the name of the node which sent this `msg`
"""
logger.debug("{} sending message to observer: {}".
format(self, (msg, frm)))
self._observer.append_input(msg, frm)
|
Validate and process one message from a node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
|
def handleOneNodeMsg(self, wrappedMsg):
"""
Validate and process one message from a node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
"""
try:
vmsg = self.validateNodeMsg(wrappedMsg)
if vmsg:
logger.trace("{} msg validated {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
self.unpackNodeMsg(*vmsg)
else:
logger.debug("{} invalidated msg {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
except SuspiciousNode as ex:
self.reportSuspiciousNodeEx(ex)
except Exception as ex:
msg, frm = wrappedMsg
self.discard(msg, ex, logger.info)
|
Validate another node's message sent to this node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
:return: Tuple of message from node and name of the node
|
def validateNodeMsg(self, wrappedMsg):
"""
Validate another node's message sent to this node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
:return: Tuple of message from node and name of the node
"""
msg, frm = wrappedMsg
if self.isNodeBlacklisted(frm):
self.discard(str(msg)[:256], "received from blacklisted node {}".format(frm), logger.display)
return None
with self.metrics.measure_time(MetricsName.INT_VALIDATE_NODE_MSG_TIME):
try:
message = node_message_factory.get_instance(**msg)
except (MissingNodeOp, InvalidNodeOp) as ex:
raise ex
except Exception as ex:
raise InvalidNodeMsg(str(ex))
try:
self.verifySignature(message)
except BaseExc as ex:
raise SuspiciousNode(frm, ex, message) from ex
logger.debug("{} received node message from {}: {}".format(self, frm, message), extra={"cli": False})
return message, frm
|
    If the message is a batch message, validate each message in the batch,
otherwise add the message to the node's inbox.
:param msg: a node message
:param frm: the name of the node that sent this `msg`
|
def unpackNodeMsg(self, msg, frm) -> None:
"""
        If the message is a batch message, validate each message in the batch,
otherwise add the message to the node's inbox.
:param msg: a node message
:param frm: the name of the node that sent this `msg`
"""
# TODO: why do we unpack batches here? Batching is a feature of
# a transport, it should be encapsulated.
if isinstance(msg, Batch):
logger.trace("{} processing a batch {}".format(self, msg))
with self.metrics.measure_time(MetricsName.UNPACK_BATCH_TIME):
for m in msg.messages:
try:
m = self.nodestack.deserializeMsg(m)
except Exception as ex:
logger.warning("Got error {} while processing {} message".format(ex, m))
continue
self.handleOneNodeMsg((m, frm))
else:
self.postToNodeInBox(msg, frm)
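
# A hedged sketch of the batch-unpacking loop above: each entry is deserialized
# independently so one malformed message does not poison the rest of the batch.
# JSON stands in for whatever serialization the node stack actually uses.
import json
from typing import Callable, List

def unpack_and_handle(raw_msgs: List[str],
                      handle_one: Callable[[dict, str], None],
                      frm: str) -> int:
    handled = 0
    for raw in raw_msgs:
        try:
            msg = json.loads(raw)
        except ValueError as ex:
            # Mirror the warn-and-continue behaviour of the loop above.
            print("skipping malformed message: {}".format(ex))
            continue
        handle_one(msg, frm)
        handled += 1
    return handled

_batch = ['{"op": "PREPARE"}', 'not-json', '{"op": "COMMIT"}']
assert unpack_and_handle(_batch, lambda m, f: None, "NodeA") == 2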
|
Append the message to the node inbox
:param msg: a node message
:param frm: the name of the node that sent this `msg`
|
def postToNodeInBox(self, msg, frm):
"""
Append the message to the node inbox
:param msg: a node message
:param frm: the name of the node that sent this `msg`
"""
logger.trace("{} appending to nodeInbox {}".format(self, msg))
self.nodeInBox.append((msg, frm))
|
Process the messages in the node inbox asynchronously.
|
async def processNodeInBox(self):
"""
Process the messages in the node inbox asynchronously.
"""
while self.nodeInBox:
m = self.nodeInBox.popleft()
await self.process_one_node_message(m)
|
Validate and process a client message
:param wrappedMsg: a message from a client
|
def handleOneClientMsg(self, wrappedMsg):
"""
Validate and process a client message
:param wrappedMsg: a message from a client
"""
try:
vmsg = self.validateClientMsg(wrappedMsg)
if vmsg:
self.unpackClientMsg(*vmsg)
except BlowUp:
raise
except Exception as ex:
msg, frm = wrappedMsg
friendly = friendlyEx(ex)
if isinstance(ex, SuspiciousClient):
self.reportSuspiciousClient(frm, friendly)
self.handleInvalidClientMsg(ex, wrappedMsg)
|
Validate a message sent by a client.
:param wrappedMsg: a message from a client
:return: Tuple of clientMessage and client address
|
def validateClientMsg(self, wrappedMsg):
"""
Validate a message sent by a client.
:param wrappedMsg: a message from a client
:return: Tuple of clientMessage and client address
"""
msg, frm = wrappedMsg
if self.isClientBlacklisted(frm):
self.discard(str(msg)[:256], "received from blacklisted client {}".format(frm), logger.display)
return None
needStaticValidation = False
if all([msg.get(OPERATION), msg.get(f.REQ_ID.nm),
idr_from_req_data(msg)]):
cls = self.client_request_class
needStaticValidation = True
elif OP_FIELD_NAME in msg:
op = msg[OP_FIELD_NAME]
cls = node_message_factory.get_type(op)
if cls not in (Batch, LedgerStatus, CatchupReq):
raise InvalidClientMsgType(cls, msg.get(f.REQ_ID.nm))
else:
raise InvalidClientRequest(msg.get(f.IDENTIFIER.nm),
msg.get(f.REQ_ID.nm))
try:
cMsg = cls(**msg)
except TypeError as ex:
raise InvalidClientRequest(msg.get(f.IDENTIFIER.nm),
msg.get(f.REQ_ID.nm),
str(ex))
except Exception as ex:
raise InvalidClientRequest(msg.get(f.IDENTIFIER.nm),
msg.get(f.REQ_ID.nm)) from ex
if needStaticValidation:
self.doStaticValidation(cMsg)
self.execute_hook(NodeHooks.PRE_SIG_VERIFICATION, cMsg)
self.verifySignature(cMsg)
self.execute_hook(NodeHooks.POST_SIG_VERIFICATION, cMsg)
# Suspicions should only be raised when lot of sig failures are
# observed
# try:
# self.verifySignature(cMsg)
# except UnknownIdentifier as ex:
# raise
# except Exception as ex:
# raise SuspiciousClient from ex
logger.trace("{} received CLIENT message: {}".
format(self.clientstack.name, cMsg))
return cMsg, frm
|
    If the message is a batch message, validate each message in the batch;
    otherwise add the message to the node's clientInBox.
    The node returns a Nack message if a View Change is in progress.
:param msg: a client message
:param frm: the name of the client that sent this `msg`
|
def unpackClientMsg(self, msg, frm):
"""
        If the message is a batch message, validate each message in the batch;
        otherwise add the message to the node's clientInBox.
        The node returns a Nack message if a View Change is in progress.
:param msg: a client message
:param frm: the name of the client that sent this `msg`
"""
if isinstance(msg, Batch):
for m in msg.messages:
# This check is done since Client uses NodeStack (which can
# send and receive BATCH) to talk to nodes but Node uses
# ClientStack (which cannot send or receive BATCH).
# TODO: The solution is to have both kind of stacks be able to
# parse BATCH messages
if m in (ZStack.pingMessage, ZStack.pongMessage):
continue
m = self.clientstack.deserializeMsg(m)
self.handleOneClientMsg((m, frm))
else:
msg_dict = msg.as_dict if isinstance(msg, Request) else msg
if isinstance(msg_dict, dict):
if self.view_changer.view_change_in_progress and self.is_request_need_quorum(msg_dict):
self.discard(msg_dict,
reason="view change in progress",
logMethod=logger.debug)
self.send_nack_to_client((idr_from_req_data(msg_dict),
msg_dict.get(f.REQ_ID.nm, None)),
"Client request is discarded since view "
"change is in progress", frm)
return
self.postToClientInBox(msg, frm)
|
Process the messages in the node's clientInBox asynchronously.
All messages in the inBox have already been validated, including
signature check.
|
async def processClientInBox(self):
"""
Process the messages in the node's clientInBox asynchronously.
All messages in the inBox have already been validated, including
signature check.
"""
while self.clientInBox:
m = self.clientInBox.popleft()
req, frm = m
logger.debug("{} processing {} request {}".
format(self.clientstack.name, frm, req),
extra={"cli": True,
"tags": ["node-msg-processing"]})
try:
await self.clientMsgRouter.handle(m)
except InvalidClientMessageException as ex:
self.handleInvalidClientMsg(ex, m)
|
    Check if a quorum of view change done messages has been received and,
    if yes, check if this node has caught up for the current view.
    Check if all requests have been ordered till the last prepared certificate.
    Check if the last catchup resulted in no txns.
|
def is_catchup_needed_during_view_change(self) -> bool:
"""
        Check if a quorum of view change done messages has been received and,
        if yes, check if this node has caught up for the current view.
        Check if all requests have been ordered till the last prepared certificate.
        Check if the last catchup resulted in no txns.
"""
if self.caught_up_for_current_view():
logger.info('{} is caught up for the current view {}'.format(self, self.viewNo))
return False
logger.info('{} is not caught up for the current view {}'.format(self, self.viewNo))
if self.num_txns_caught_up_in_last_catchup() == 0:
if self.has_ordered_till_last_prepared_certificate():
logger.info('{} ordered till last prepared certificate'.format(self))
return False
if self.is_catch_up_limit(self.config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE):
# No more 3PC messages will be processed since maximum catchup
# rounds have been done
self.master_replica.last_prepared_before_view_change = None
return False
return True
|
    State-based validation
|
def doDynamicValidation(self, request: Request):
"""
        State-based validation
"""
self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request)
# Digest validation
ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to order already ordered request')
ledger = self.getLedger(self.ledger_id_for_request(request))
for txn in ledger.uncommittedTxns:
if get_payload_digest(txn) == request.payload_digest:
raise SuspiciousPrePrepare('Trying to order already ordered request')
operation = request.operation
req_handler = self.get_req_handler(txn_type=operation[TXN_TYPE])
req_handler.validate(request)
self.execute_hook(NodeHooks.POST_DYNAMIC_VALIDATION, request=request)
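
# A hedged sketch of the duplicate check performed above: a request is rejected
# if its payload digest is already mapped to (ledger_id, seq_no) in the ordered
# index or appears among the uncommitted transactions awaiting ordering.
from typing import Dict, Iterable, Tuple

def is_already_ordered(payload_digest: str,
                       committed_index: Dict[str, Tuple[int, int]],
                       uncommitted_digests: Iterable[str]) -> bool:
    if payload_digest in committed_index:
        return True
    return payload_digest in set(uncommitted_digests)

_committed = {"abc123": (1, 42)}
assert is_already_ordered("abc123", _committed, [])
assert is_already_ordered("def456", _committed, ["def456"])
assert not is_already_ordered("fresh", _committed, [])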
|
Apply request to appropriate ledger and state. `cons_time` is the
UTC epoch at which consensus was reached.
|
def applyReq(self, request: Request, cons_time: int):
"""
Apply request to appropriate ledger and state. `cons_time` is the
UTC epoch at which consensus was reached.
"""
self.execute_hook(NodeHooks.PRE_REQUEST_APPLICATION, request=request,
cons_time=cons_time)
req_handler = self.get_req_handler(txn_type=request.operation[TXN_TYPE])
seq_no, txn = req_handler.apply(request, cons_time)
ledger_id = self.ledger_id_for_request(request)
self.execute_hook(NodeHooks.POST_REQUEST_APPLICATION, request=request,
cons_time=cons_time, ledger_id=ledger_id,
seq_no=seq_no, txn=txn)
|
Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST
|
def processRequest(self, request: Request, frm: str):
"""
Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST
"""
logger.debug("{} received client request: {} from {}".
format(self.name, request, frm))
self.nodeRequestSpikeMonitorData['accum'] += 1
# TODO: What if client sends requests with same request id quickly so
# before reply for one is generated, the other comes. In that
# case we need to keep track of what requests ids node has seen
# in-memory and once request with a particular request id is processed,
# it should be removed from that in-memory DS.
        # If the request has already been processed (i.e. there is a reply
        # for the request in the node's transaction store), return the reply
        # from the transaction store.
# TODO: What if the reply was a REQNACK? Its not gonna be found in the
# replies.
txn_type = request.operation[TXN_TYPE]
if self.is_action(txn_type):
self.process_action(request, frm)
elif txn_type == GET_TXN:
self.handle_get_txn_req(request, frm)
self.total_read_request_number += 1
elif self.is_query(txn_type):
self.process_query(request, frm)
self.total_read_request_number += 1
elif self.can_write_txn(txn_type):
reply = self.getReplyFromLedgerForRequest(request)
if reply:
logger.debug("{} returning reply from already processed "
"REQUEST: {}".format(self, request))
self.transmitToClient(reply, frm)
return
# If the node is not already processing the request
if not self.isProcessingReq(request.key):
self.startedProcessingReq(request.key, frm)
# forced request should be processed before consensus
self.handle_request_if_forced(request)
# If not already got the propagate request(PROPAGATE) for the
# corresponding client request(REQUEST)
self.recordAndPropagate(request, frm)
self.send_ack_to_client((request.identifier, request.reqId), frm)
else:
raise InvalidClientRequest(
request.identifier,
request.reqId,
'Pool is in readonly mode, try again in 60 seconds')
|
Process one propagateRequest sent to this node asynchronously
- If this propagateRequest hasn't been seen by this node, then broadcast
      it to all nodes after verifying the signature.
- Add the client to blacklist if its signature is invalid
:param msg: the propagateRequest
:param frm: the name of the node which sent this `msg`
|
def processPropagate(self, msg: Propagate, frm):
"""
Process one propagateRequest sent to this node asynchronously
- If this propagateRequest hasn't been seen by this node, then broadcast
          it to all nodes after verifying the signature.
- Add the client to blacklist if its signature is invalid
:param msg: the propagateRequest
:param frm: the name of the node which sent this `msg`
"""
logger.debug("{} received propagated request: {}".
format(self.name, msg))
request = TxnUtilConfig.client_request_class(**msg.request)
clientName = msg.senderClient
if not self.isProcessingReq(request.key):
ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
self._clean_req_from_verified(request)
logger.debug("{} ignoring propagated request {} "
"since it has been already ordered"
.format(self.name, msg))
return
self.startedProcessingReq(request.key, clientName)
# forced request should be processed before consensus
self.handle_request_if_forced(request)
else:
if clientName is not None and \
not self.is_sender_known_for_req(request.key):
# Since some propagates might not include the client name
self.set_sender_for_req(request.key,
clientName)
self.requests.add_propagate(request, frm)
self.propagate(request, clientName)
self.tryForwarding(request)
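
# A hedged sketch of the propagate-tracking idea behind processPropagate and
# tryForwarding: remember which nodes propagated each request key and forward
# the request to the replicas once enough distinct senders have been seen. The
# f + 1 threshold here is an assumption for illustration, not the exact quorum
# used by the project.
from collections import defaultdict
from typing import Dict, Set

class PropagateTracker:
    def __init__(self, f: int):
        self.quorum = f + 1
        self.senders: Dict[str, Set[str]] = defaultdict(set)
        self.forwarded: Set[str] = set()

    def on_propagate(self, req_key: str, frm: str) -> bool:
        self.senders[req_key].add(frm)
        if req_key not in self.forwarded and len(self.senders[req_key]) >= self.quorum:
            self.forwarded.add(req_key)
            return True   # the caller would now forward the request to its replicas
        return False

_tracker = PropagateTracker(f=1)
assert not _tracker.on_propagate("req-1", "NodeB")
assert _tracker.on_propagate("req-1", "NodeC")
assert not _tracker.on_propagate("req-1", "NodeD")  # already forwarded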
|