repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
ethereum/py-evm | eth/db/storage.py | AccountStorageDB._validate_flushed | def _validate_flushed(self) -> None:
"""
Will raise an exception if there are some changes made since the last persist.
"""
journal_diff = self._journal_storage.diff()
if len(journal_diff) > 0:
raise ValidationError(
"StorageDB had a dirty journal when it needed to be clean: %r" % journal_diff
) | python | def _validate_flushed(self) -> None:
"""
Will raise an exception if there are some changes made since the last persist.
"""
journal_diff = self._journal_storage.diff()
if len(journal_diff) > 0:
raise ValidationError(
"StorageDB had a dirty journal when it needed to be clean: %r" % journal_diff
) | [
"def",
"_validate_flushed",
"(",
"self",
")",
"->",
"None",
":",
"journal_diff",
"=",
"self",
".",
"_journal_storage",
".",
"diff",
"(",
")",
"if",
"len",
"(",
"journal_diff",
")",
">",
"0",
":",
"raise",
"ValidationError",
"(",
"\"StorageDB had a dirty journa... | Will raise an exception if there are some changes made since the last persist. | [
"Will",
"raise",
"an",
"exception",
"if",
"there",
"are",
"some",
"changes",
"made",
"since",
"the",
"last",
"persist",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/storage.py#L243-L251 | train | 225,000 |
ethereum/py-evm | eth/vm/memory.py | Memory.write | def write(self, start_position: int, size: int, value: bytes) -> None:
"""
Write `value` into memory.
"""
if size:
validate_uint256(start_position)
validate_uint256(size)
validate_is_bytes(value)
validate_length(value, length=size)
validate_lte(start_position + size, maximum=len(self))
for idx, v in enumerate(value):
self._bytes[start_position + idx] = v | python | def write(self, start_position: int, size: int, value: bytes) -> None:
"""
Write `value` into memory.
"""
if size:
validate_uint256(start_position)
validate_uint256(size)
validate_is_bytes(value)
validate_length(value, length=size)
validate_lte(start_position + size, maximum=len(self))
for idx, v in enumerate(value):
self._bytes[start_position + idx] = v | [
"def",
"write",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
",",
"value",
":",
"bytes",
")",
"->",
"None",
":",
"if",
"size",
":",
"validate_uint256",
"(",
"start_position",
")",
"validate_uint256",
"(",
"size",
")",
"validat... | Write `value` into memory. | [
"Write",
"value",
"into",
"memory",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/memory.py#L49-L61 | train | 225,001 |
ethereum/py-evm | eth/vm/memory.py | Memory.read | def read(self, start_position: int, size: int) -> memoryview:
"""
Return a view into the memory
"""
return memoryview(self._bytes)[start_position:start_position + size] | python | def read(self, start_position: int, size: int) -> memoryview:
"""
Return a view into the memory
"""
return memoryview(self._bytes)[start_position:start_position + size] | [
"def",
"read",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
")",
"->",
"memoryview",
":",
"return",
"memoryview",
"(",
"self",
".",
"_bytes",
")",
"[",
"start_position",
":",
"start_position",
"+",
"size",
"]"
] | Return a view into the memory | [
"Return",
"a",
"view",
"into",
"the",
"memory"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/memory.py#L63-L67 | train | 225,002 |
ethereum/py-evm | eth/vm/memory.py | Memory.read_bytes | def read_bytes(self, start_position: int, size: int) -> bytes:
"""
Read a value from memory and return a fresh bytes instance
"""
return bytes(self._bytes[start_position:start_position + size]) | python | def read_bytes(self, start_position: int, size: int) -> bytes:
"""
Read a value from memory and return a fresh bytes instance
"""
return bytes(self._bytes[start_position:start_position + size]) | [
"def",
"read_bytes",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
")",
"->",
"bytes",
":",
"return",
"bytes",
"(",
"self",
".",
"_bytes",
"[",
"start_position",
":",
"start_position",
"+",
"size",
"]",
")"
] | Read a value from memory and return a fresh bytes instance | [
"Read",
"a",
"value",
"from",
"memory",
"and",
"return",
"a",
"fresh",
"bytes",
"instance"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/memory.py#L69-L73 | train | 225,003 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.extend_memory | def extend_memory(self, start_position: int, size: int) -> None:
"""
Extend the size of the memory to be at minimum ``start_position + size``
bytes in length. Raise `eth.exceptions.OutOfGas` if there is not enough
gas to pay for extending the memory.
"""
validate_uint256(start_position, title="Memory start position")
validate_uint256(size, title="Memory size")
before_size = ceil32(len(self._memory))
after_size = ceil32(start_position + size)
before_cost = memory_gas_cost(before_size)
after_cost = memory_gas_cost(after_size)
if self.logger.show_debug2:
self.logger.debug2(
"MEMORY: size (%s -> %s) | cost (%s -> %s)",
before_size,
after_size,
before_cost,
after_cost,
)
if size:
if before_cost < after_cost:
gas_fee = after_cost - before_cost
self._gas_meter.consume_gas(
gas_fee,
reason=" ".join((
"Expanding memory",
str(before_size),
"->",
str(after_size),
))
)
self._memory.extend(start_position, size) | python | def extend_memory(self, start_position: int, size: int) -> None:
"""
Extend the size of the memory to be at minimum ``start_position + size``
bytes in length. Raise `eth.exceptions.OutOfGas` if there is not enough
gas to pay for extending the memory.
"""
validate_uint256(start_position, title="Memory start position")
validate_uint256(size, title="Memory size")
before_size = ceil32(len(self._memory))
after_size = ceil32(start_position + size)
before_cost = memory_gas_cost(before_size)
after_cost = memory_gas_cost(after_size)
if self.logger.show_debug2:
self.logger.debug2(
"MEMORY: size (%s -> %s) | cost (%s -> %s)",
before_size,
after_size,
before_cost,
after_cost,
)
if size:
if before_cost < after_cost:
gas_fee = after_cost - before_cost
self._gas_meter.consume_gas(
gas_fee,
reason=" ".join((
"Expanding memory",
str(before_size),
"->",
str(after_size),
))
)
self._memory.extend(start_position, size) | [
"def",
"extend_memory",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
")",
"->",
"None",
":",
"validate_uint256",
"(",
"start_position",
",",
"title",
"=",
"\"Memory start position\"",
")",
"validate_uint256",
"(",
"size",
",",
"titl... | Extend the size of the memory to be at minimum ``start_position + size``
bytes in length. Raise `eth.exceptions.OutOfGas` if there is not enough
gas to pay for extending the memory. | [
"Extend",
"the",
"size",
"of",
"the",
"memory",
"to",
"be",
"at",
"minimum",
"start_position",
"+",
"size",
"bytes",
"in",
"length",
".",
"Raise",
"eth",
".",
"exceptions",
".",
"OutOfGas",
"if",
"there",
"is",
"not",
"enough",
"gas",
"to",
"pay",
"for",... | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L205-L242 | train | 225,004 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.memory_read | def memory_read(self, start_position: int, size: int) -> memoryview:
"""
Read and return a view of ``size`` bytes from memory starting at ``start_position``.
"""
return self._memory.read(start_position, size) | python | def memory_read(self, start_position: int, size: int) -> memoryview:
"""
Read and return a view of ``size`` bytes from memory starting at ``start_position``.
"""
return self._memory.read(start_position, size) | [
"def",
"memory_read",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
")",
"->",
"memoryview",
":",
"return",
"self",
".",
"_memory",
".",
"read",
"(",
"start_position",
",",
"size",
")"
] | Read and return a view of ``size`` bytes from memory starting at ``start_position``. | [
"Read",
"and",
"return",
"a",
"view",
"of",
"size",
"bytes",
"from",
"memory",
"starting",
"at",
"start_position",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L250-L254 | train | 225,005 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.memory_read_bytes | def memory_read_bytes(self, start_position: int, size: int) -> bytes:
"""
Read and return ``size`` bytes from memory starting at ``start_position``.
"""
return self._memory.read_bytes(start_position, size) | python | def memory_read_bytes(self, start_position: int, size: int) -> bytes:
"""
Read and return ``size`` bytes from memory starting at ``start_position``.
"""
return self._memory.read_bytes(start_position, size) | [
"def",
"memory_read_bytes",
"(",
"self",
",",
"start_position",
":",
"int",
",",
"size",
":",
"int",
")",
"->",
"bytes",
":",
"return",
"self",
".",
"_memory",
".",
"read_bytes",
"(",
"start_position",
",",
"size",
")"
] | Read and return ``size`` bytes from memory starting at ``start_position``. | [
"Read",
"and",
"return",
"size",
"bytes",
"from",
"memory",
"starting",
"at",
"start_position",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L256-L260 | train | 225,006 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.consume_gas | def consume_gas(self, amount: int, reason: str) -> None:
"""
Consume ``amount`` of gas from the remaining gas.
Raise `eth.exceptions.OutOfGas` if there is not enough gas remaining.
"""
return self._gas_meter.consume_gas(amount, reason) | python | def consume_gas(self, amount: int, reason: str) -> None:
"""
Consume ``amount`` of gas from the remaining gas.
Raise `eth.exceptions.OutOfGas` if there is not enough gas remaining.
"""
return self._gas_meter.consume_gas(amount, reason) | [
"def",
"consume_gas",
"(",
"self",
",",
"amount",
":",
"int",
",",
"reason",
":",
"str",
")",
"->",
"None",
":",
"return",
"self",
".",
"_gas_meter",
".",
"consume_gas",
"(",
"amount",
",",
"reason",
")"
] | Consume ``amount`` of gas from the remaining gas.
Raise `eth.exceptions.OutOfGas` if there is not enough gas remaining. | [
"Consume",
"amount",
"of",
"gas",
"from",
"the",
"remaining",
"gas",
".",
"Raise",
"eth",
".",
"exceptions",
".",
"OutOfGas",
"if",
"there",
"is",
"not",
"enough",
"gas",
"remaining",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L268-L273 | train | 225,007 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.stack_pop | def stack_pop(self, num_items: int=1, type_hint: str=None) -> Any:
# TODO: Needs to be replaced with
# `Union[int, bytes, Tuple[Union[int, bytes], ...]]` if done properly
"""
Pop and return a number of items equal to ``num_items`` from the stack.
``type_hint`` can be either ``'uint256'`` or ``'bytes'``. The return value
will be an ``int`` or ``bytes`` type depending on the value provided for
the ``type_hint``.
Raise `eth.exceptions.InsufficientStack` if there are not enough items on
the stack.
"""
return self._stack.pop(num_items, type_hint) | python | def stack_pop(self, num_items: int=1, type_hint: str=None) -> Any:
# TODO: Needs to be replaced with
# `Union[int, bytes, Tuple[Union[int, bytes], ...]]` if done properly
"""
Pop and return a number of items equal to ``num_items`` from the stack.
``type_hint`` can be either ``'uint256'`` or ``'bytes'``. The return value
will be an ``int`` or ``bytes`` type depending on the value provided for
the ``type_hint``.
Raise `eth.exceptions.InsufficientStack` if there are not enough items on
the stack.
"""
return self._stack.pop(num_items, type_hint) | [
"def",
"stack_pop",
"(",
"self",
",",
"num_items",
":",
"int",
"=",
"1",
",",
"type_hint",
":",
"str",
"=",
"None",
")",
"->",
"Any",
":",
"# TODO: Needs to be replaced with",
"# `Union[int, bytes, Tuple[Union[int, bytes], ...]]` if done properly",
"return",
"self",
"... | Pop and return a number of items equal to ``num_items`` from the stack.
``type_hint`` can be either ``'uint256'`` or ``'bytes'``. The return value
will be an ``int`` or ``bytes`` type depending on the value provided for
the ``type_hint``.
Raise `eth.exceptions.InsufficientStack` if there are not enough items on
the stack. | [
"Pop",
"and",
"return",
"a",
"number",
"of",
"items",
"equal",
"to",
"num_items",
"from",
"the",
"stack",
".",
"type_hint",
"can",
"be",
"either",
"uint256",
"or",
"bytes",
".",
"The",
"return",
"value",
"will",
"be",
"an",
"int",
"or",
"bytes",
"type",
... | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L311-L323 | train | 225,008 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.stack_push | def stack_push(self, value: Union[int, bytes]) -> None:
"""
Push ``value`` onto the stack.
Raise `eth.exceptions.StackDepthLimit` if the stack is full.
"""
return self._stack.push(value) | python | def stack_push(self, value: Union[int, bytes]) -> None:
"""
Push ``value`` onto the stack.
Raise `eth.exceptions.StackDepthLimit` if the stack is full.
"""
return self._stack.push(value) | [
"def",
"stack_push",
"(",
"self",
",",
"value",
":",
"Union",
"[",
"int",
",",
"bytes",
"]",
")",
"->",
"None",
":",
"return",
"self",
".",
"_stack",
".",
"push",
"(",
"value",
")"
] | Push ``value`` onto the stack.
Raise `eth.exceptions.StackDepthLimit` if the stack is full. | [
"Push",
"value",
"onto",
"the",
"stack",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L325-L331 | train | 225,009 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.prepare_child_message | def prepare_child_message(self,
gas: int,
to: Address,
value: int,
data: BytesOrView,
code: bytes,
**kwargs: Any) -> Message:
"""
Helper method for creating a child computation.
"""
kwargs.setdefault('sender', self.msg.storage_address)
child_message = Message(
gas=gas,
to=to,
value=value,
data=data,
code=code,
depth=self.msg.depth + 1,
**kwargs
)
return child_message | python | def prepare_child_message(self,
gas: int,
to: Address,
value: int,
data: BytesOrView,
code: bytes,
**kwargs: Any) -> Message:
"""
Helper method for creating a child computation.
"""
kwargs.setdefault('sender', self.msg.storage_address)
child_message = Message(
gas=gas,
to=to,
value=value,
data=data,
code=code,
depth=self.msg.depth + 1,
**kwargs
)
return child_message | [
"def",
"prepare_child_message",
"(",
"self",
",",
"gas",
":",
"int",
",",
"to",
":",
"Address",
",",
"value",
":",
"int",
",",
"data",
":",
"BytesOrView",
",",
"code",
":",
"bytes",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"Message",
":",
"k... | Helper method for creating a child computation. | [
"Helper",
"method",
"for",
"creating",
"a",
"child",
"computation",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L369-L390 | train | 225,010 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.apply_child_computation | def apply_child_computation(self, child_msg: Message) -> 'BaseComputation':
"""
Apply the vm message ``child_msg`` as a child computation.
"""
child_computation = self.generate_child_computation(child_msg)
self.add_child_computation(child_computation)
return child_computation | python | def apply_child_computation(self, child_msg: Message) -> 'BaseComputation':
"""
Apply the vm message ``child_msg`` as a child computation.
"""
child_computation = self.generate_child_computation(child_msg)
self.add_child_computation(child_computation)
return child_computation | [
"def",
"apply_child_computation",
"(",
"self",
",",
"child_msg",
":",
"Message",
")",
"->",
"'BaseComputation'",
":",
"child_computation",
"=",
"self",
".",
"generate_child_computation",
"(",
"child_msg",
")",
"self",
".",
"add_child_computation",
"(",
"child_computat... | Apply the vm message ``child_msg`` as a child computation. | [
"Apply",
"the",
"vm",
"message",
"child_msg",
"as",
"a",
"child",
"computation",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L392-L398 | train | 225,011 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation._get_log_entries | def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]:
"""
Return the log entries for this computation and its children.
They are sorted in the same order they were emitted during the transaction processing, and
include the sequential counter as the first element of the tuple representing every entry.
"""
if self.is_error:
return []
else:
return sorted(itertools.chain(
self._log_entries,
*(child._get_log_entries() for child in self.children)
)) | python | def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]:
"""
Return the log entries for this computation and its children.
They are sorted in the same order they were emitted during the transaction processing, and
include the sequential counter as the first element of the tuple representing every entry.
"""
if self.is_error:
return []
else:
return sorted(itertools.chain(
self._log_entries,
*(child._get_log_entries() for child in self.children)
)) | [
"def",
"_get_log_entries",
"(",
"self",
")",
"->",
"List",
"[",
"Tuple",
"[",
"int",
",",
"bytes",
",",
"List",
"[",
"int",
"]",
",",
"bytes",
"]",
"]",
":",
"if",
"self",
".",
"is_error",
":",
"return",
"[",
"]",
"else",
":",
"return",
"sorted",
... | Return the log entries for this computation and its children.
They are sorted in the same order they were emitted during the transaction processing, and
include the sequential counter as the first element of the tuple representing every entry. | [
"Return",
"the",
"log",
"entries",
"for",
"this",
"computation",
"and",
"its",
"children",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L463-L476 | train | 225,012 |
ethereum/py-evm | eth/vm/computation.py | BaseComputation.apply_computation | def apply_computation(cls,
state: BaseState,
message: Message,
transaction_context: BaseTransactionContext) -> 'BaseComputation':
"""
Perform the computation that would be triggered by the VM message.
"""
with cls(state, message, transaction_context) as computation:
# Early exit on pre-compiles
if message.code_address in computation.precompiles:
computation.precompiles[message.code_address](computation)
return computation
show_debug2 = computation.logger.show_debug2
for opcode in computation.code:
opcode_fn = computation.get_opcode_fn(opcode)
if show_debug2:
computation.logger.debug2(
"OPCODE: 0x%x (%s) | pc: %s",
opcode,
opcode_fn.mnemonic,
max(0, computation.code.pc - 1),
)
try:
opcode_fn(computation=computation)
except Halt:
break
return computation | python | def apply_computation(cls,
state: BaseState,
message: Message,
transaction_context: BaseTransactionContext) -> 'BaseComputation':
"""
Perform the computation that would be triggered by the VM message.
"""
with cls(state, message, transaction_context) as computation:
# Early exit on pre-compiles
if message.code_address in computation.precompiles:
computation.precompiles[message.code_address](computation)
return computation
show_debug2 = computation.logger.show_debug2
for opcode in computation.code:
opcode_fn = computation.get_opcode_fn(opcode)
if show_debug2:
computation.logger.debug2(
"OPCODE: 0x%x (%s) | pc: %s",
opcode,
opcode_fn.mnemonic,
max(0, computation.code.pc - 1),
)
try:
opcode_fn(computation=computation)
except Halt:
break
return computation | [
"def",
"apply_computation",
"(",
"cls",
",",
"state",
":",
"BaseState",
",",
"message",
":",
"Message",
",",
"transaction_context",
":",
"BaseTransactionContext",
")",
"->",
"'BaseComputation'",
":",
"with",
"cls",
"(",
"state",
",",
"message",
",",
"transaction... | Perform the computation that would be triggered by the VM message. | [
"Perform",
"the",
"computation",
"that",
"would",
"be",
"triggered",
"by",
"the",
"VM",
"message",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L562-L592 | train | 225,013 |
ethereum/py-evm | eth/vm/forks/homestead/headers.py | compute_homestead_difficulty | def compute_homestead_difficulty(parent_header: BlockHeader, timestamp: int) -> int:
"""
Computes the difficulty for a homestead block based on the parent block.
"""
parent_tstamp = parent_header.timestamp
validate_gt(timestamp, parent_tstamp, title="Header.timestamp")
offset = parent_header.difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR
sign = max(
1 - (timestamp - parent_tstamp) // HOMESTEAD_DIFFICULTY_ADJUSTMENT_CUTOFF,
-99)
difficulty = int(max(
parent_header.difficulty + offset * sign,
min(parent_header.difficulty, DIFFICULTY_MINIMUM)))
num_bomb_periods = (
(parent_header.block_number + 1) // BOMB_EXPONENTIAL_PERIOD
) - BOMB_EXPONENTIAL_FREE_PERIODS
if num_bomb_periods >= 0:
return max(difficulty + 2**num_bomb_periods, DIFFICULTY_MINIMUM)
else:
return difficulty | python | def compute_homestead_difficulty(parent_header: BlockHeader, timestamp: int) -> int:
"""
Computes the difficulty for a homestead block based on the parent block.
"""
parent_tstamp = parent_header.timestamp
validate_gt(timestamp, parent_tstamp, title="Header.timestamp")
offset = parent_header.difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR
sign = max(
1 - (timestamp - parent_tstamp) // HOMESTEAD_DIFFICULTY_ADJUSTMENT_CUTOFF,
-99)
difficulty = int(max(
parent_header.difficulty + offset * sign,
min(parent_header.difficulty, DIFFICULTY_MINIMUM)))
num_bomb_periods = (
(parent_header.block_number + 1) // BOMB_EXPONENTIAL_PERIOD
) - BOMB_EXPONENTIAL_FREE_PERIODS
if num_bomb_periods >= 0:
return max(difficulty + 2**num_bomb_periods, DIFFICULTY_MINIMUM)
else:
return difficulty | [
"def",
"compute_homestead_difficulty",
"(",
"parent_header",
":",
"BlockHeader",
",",
"timestamp",
":",
"int",
")",
"->",
"int",
":",
"parent_tstamp",
"=",
"parent_header",
".",
"timestamp",
"validate_gt",
"(",
"timestamp",
",",
"parent_tstamp",
",",
"title",
"=",... | Computes the difficulty for a homestead block based on the parent block. | [
"Computes",
"the",
"difficulty",
"for",
"a",
"homestead",
"block",
"based",
"on",
"the",
"parent",
"block",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/forks/homestead/headers.py#L39-L58 | train | 225,014 |
ethereum/py-evm | eth/vm/state.py | BaseState.snapshot | def snapshot(self) -> Tuple[Hash32, UUID]:
"""
Perform a full snapshot of the current state.
Snapshots are a combination of the :attr:`~state_root` at the time of the
snapshot and the id of the changeset from the journaled DB.
"""
return self.state_root, self._account_db.record() | python | def snapshot(self) -> Tuple[Hash32, UUID]:
"""
Perform a full snapshot of the current state.
Snapshots are a combination of the :attr:`~state_root` at the time of the
snapshot and the id of the changeset from the journaled DB.
"""
return self.state_root, self._account_db.record() | [
"def",
"snapshot",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Hash32",
",",
"UUID",
"]",
":",
"return",
"self",
".",
"state_root",
",",
"self",
".",
"_account_db",
".",
"record",
"(",
")"
] | Perform a full snapshot of the current state.
Snapshots are a combination of the :attr:`~state_root` at the time of the
snapshot and the id of the changeset from the journaled DB. | [
"Perform",
"a",
"full",
"snapshot",
"of",
"the",
"current",
"state",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L225-L232 | train | 225,015 |
ethereum/py-evm | eth/vm/state.py | BaseState.revert | def revert(self, snapshot: Tuple[Hash32, UUID]) -> None:
"""
Revert the VM to the state at the snapshot
"""
state_root, account_snapshot = snapshot
# first revert the database state root.
self._account_db.state_root = state_root
# now roll the underlying database back
self._account_db.discard(account_snapshot) | python | def revert(self, snapshot: Tuple[Hash32, UUID]) -> None:
"""
Revert the VM to the state at the snapshot
"""
state_root, account_snapshot = snapshot
# first revert the database state root.
self._account_db.state_root = state_root
# now roll the underlying database back
self._account_db.discard(account_snapshot) | [
"def",
"revert",
"(",
"self",
",",
"snapshot",
":",
"Tuple",
"[",
"Hash32",
",",
"UUID",
"]",
")",
"->",
"None",
":",
"state_root",
",",
"account_snapshot",
"=",
"snapshot",
"# first revert the database state root.",
"self",
".",
"_account_db",
".",
"state_root"... | Revert the VM to the state at the snapshot | [
"Revert",
"the",
"VM",
"to",
"the",
"state",
"at",
"the",
"snapshot"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L234-L243 | train | 225,016 |
ethereum/py-evm | eth/vm/state.py | BaseState.get_computation | def get_computation(self,
message: Message,
transaction_context: 'BaseTransactionContext') -> 'BaseComputation':
"""
Return a computation instance for the given `message` and `transaction_context`
"""
if self.computation_class is None:
raise AttributeError("No `computation_class` has been set for this State")
else:
computation = self.computation_class(self, message, transaction_context)
return computation | python | def get_computation(self,
message: Message,
transaction_context: 'BaseTransactionContext') -> 'BaseComputation':
"""
Return a computation instance for the given `message` and `transaction_context`
"""
if self.computation_class is None:
raise AttributeError("No `computation_class` has been set for this State")
else:
computation = self.computation_class(self, message, transaction_context)
return computation | [
"def",
"get_computation",
"(",
"self",
",",
"message",
":",
"Message",
",",
"transaction_context",
":",
"'BaseTransactionContext'",
")",
"->",
"'BaseComputation'",
":",
"if",
"self",
".",
"computation_class",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"N... | Return a computation instance for the given `message` and `transaction_context` | [
"Return",
"a",
"computation",
"instance",
"for",
"the",
"given",
"message",
"and",
"transaction_context"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L283-L293 | train | 225,017 |
ethereum/py-evm | eth/vm/state.py | BaseState.apply_transaction | def apply_transaction(
self,
transaction: BaseOrSpoofTransaction) -> 'BaseComputation':
"""
Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation
"""
if self.state_root != BLANK_ROOT_HASH and not self._account_db.has_root(self.state_root):
raise StateRootNotFound(self.state_root)
else:
return self.execute_transaction(transaction) | python | def apply_transaction(
self,
transaction: BaseOrSpoofTransaction) -> 'BaseComputation':
"""
Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation
"""
if self.state_root != BLANK_ROOT_HASH and not self._account_db.has_root(self.state_root):
raise StateRootNotFound(self.state_root)
else:
return self.execute_transaction(transaction) | [
"def",
"apply_transaction",
"(",
"self",
",",
"transaction",
":",
"BaseOrSpoofTransaction",
")",
"->",
"'BaseComputation'",
":",
"if",
"self",
".",
"state_root",
"!=",
"BLANK_ROOT_HASH",
"and",
"not",
"self",
".",
"_account_db",
".",
"has_root",
"(",
"self",
"."... | Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation | [
"Apply",
"transaction",
"to",
"the",
"vm",
"state"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L311-L323 | train | 225,018 |
ethereum/py-evm | eth/_utils/env.py | get_env_value | def get_env_value(name: str, required: bool=False, default: Any=empty) -> str:
"""
Core function for extracting the environment variable.
Enforces mutual exclusivity between `required` and `default` keywords.
The `empty` sentinal value is used as the default `default` value to allow
other function to handle default/empty logic in the appropriate way.
"""
if required and default is not empty:
raise ValueError("Using `default` with `required=True` is invalid")
elif required:
try:
value = os.environ[name]
except KeyError:
raise KeyError(
"Must set environment variable {0}".format(name)
)
else:
value = os.environ.get(name, default)
return value | python | def get_env_value(name: str, required: bool=False, default: Any=empty) -> str:
"""
Core function for extracting the environment variable.
Enforces mutual exclusivity between `required` and `default` keywords.
The `empty` sentinal value is used as the default `default` value to allow
other function to handle default/empty logic in the appropriate way.
"""
if required and default is not empty:
raise ValueError("Using `default` with `required=True` is invalid")
elif required:
try:
value = os.environ[name]
except KeyError:
raise KeyError(
"Must set environment variable {0}".format(name)
)
else:
value = os.environ.get(name, default)
return value | [
"def",
"get_env_value",
"(",
"name",
":",
"str",
",",
"required",
":",
"bool",
"=",
"False",
",",
"default",
":",
"Any",
"=",
"empty",
")",
"->",
"str",
":",
"if",
"required",
"and",
"default",
"is",
"not",
"empty",
":",
"raise",
"ValueError",
"(",
"... | Core function for extracting the environment variable.
Enforces mutual exclusivity between `required` and `default` keywords.
The `empty` sentinal value is used as the default `default` value to allow
other function to handle default/empty logic in the appropriate way. | [
"Core",
"function",
"for",
"extracting",
"the",
"environment",
"variable",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/_utils/env.py#L36-L56 | train | 225,019 |
ethereum/py-evm | eth/vm/base.py | BaseVM.make_receipt | def make_receipt(self,
base_header: BlockHeader,
transaction: BaseTransaction,
computation: BaseComputation,
state: BaseState) -> Receipt:
"""
Generate the receipt resulting from applying the transaction.
:param base_header: the header of the block before the transaction was applied.
:param transaction: the transaction used to generate the receipt
:param computation: the result of running the transaction computation
:param state: the resulting state, after executing the computation
:return: receipt
"""
raise NotImplementedError("VM classes must implement this method") | python | def make_receipt(self,
base_header: BlockHeader,
transaction: BaseTransaction,
computation: BaseComputation,
state: BaseState) -> Receipt:
"""
Generate the receipt resulting from applying the transaction.
:param base_header: the header of the block before the transaction was applied.
:param transaction: the transaction used to generate the receipt
:param computation: the result of running the transaction computation
:param state: the resulting state, after executing the computation
:return: receipt
"""
raise NotImplementedError("VM classes must implement this method") | [
"def",
"make_receipt",
"(",
"self",
",",
"base_header",
":",
"BlockHeader",
",",
"transaction",
":",
"BaseTransaction",
",",
"computation",
":",
"BaseComputation",
",",
"state",
":",
"BaseState",
")",
"->",
"Receipt",
":",
"raise",
"NotImplementedError",
"(",
"\... | Generate the receipt resulting from applying the transaction.
:param base_header: the header of the block before the transaction was applied.
:param transaction: the transaction used to generate the receipt
:param computation: the result of running the transaction computation
:param state: the resulting state, after executing the computation
:return: receipt | [
"Generate",
"the",
"receipt",
"resulting",
"from",
"applying",
"the",
"transaction",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L155-L170 | train | 225,020 |
ethereum/py-evm | eth/vm/base.py | VM.execute_bytecode | def execute_bytecode(self,
origin: Address,
gas_price: int,
gas: int,
to: Address,
sender: Address,
value: int,
data: bytes,
code: bytes,
code_address: Address=None,
) -> BaseComputation:
"""
Execute raw bytecode in the context of the current state of
the virtual machine.
"""
if origin is None:
origin = sender
# Construct a message
message = Message(
gas=gas,
to=to,
sender=sender,
value=value,
data=data,
code=code,
code_address=code_address,
)
# Construction a tx context
transaction_context = self.state.get_transaction_context_class()(
gas_price=gas_price,
origin=origin,
)
# Execute it in the VM
return self.state.get_computation(message, transaction_context).apply_computation(
self.state,
message,
transaction_context,
) | python | def execute_bytecode(self,
origin: Address,
gas_price: int,
gas: int,
to: Address,
sender: Address,
value: int,
data: bytes,
code: bytes,
code_address: Address=None,
) -> BaseComputation:
"""
Execute raw bytecode in the context of the current state of
the virtual machine.
"""
if origin is None:
origin = sender
# Construct a message
message = Message(
gas=gas,
to=to,
sender=sender,
value=value,
data=data,
code=code,
code_address=code_address,
)
# Construction a tx context
transaction_context = self.state.get_transaction_context_class()(
gas_price=gas_price,
origin=origin,
)
# Execute it in the VM
return self.state.get_computation(message, transaction_context).apply_computation(
self.state,
message,
transaction_context,
) | [
"def",
"execute_bytecode",
"(",
"self",
",",
"origin",
":",
"Address",
",",
"gas_price",
":",
"int",
",",
"gas",
":",
"int",
",",
"to",
":",
"Address",
",",
"sender",
":",
"Address",
",",
"value",
":",
"int",
",",
"data",
":",
"bytes",
",",
"code",
... | Execute raw bytecode in the context of the current state of
the virtual machine. | [
"Execute",
"raw",
"bytecode",
"in",
"the",
"context",
"of",
"the",
"current",
"state",
"of",
"the",
"virtual",
"machine",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L470-L510 | train | 225,021 |
ethereum/py-evm | eth/vm/base.py | VM.import_block | def import_block(self, block: BaseBlock) -> BaseBlock:
"""
Import the given block to the chain.
"""
if self.block.number != block.number:
raise ValidationError(
"This VM can only import blocks at number #{}, the attempted block was #{}".format(
self.block.number,
block.number,
)
)
self.block = self.block.copy(
header=self.configure_header(
coinbase=block.header.coinbase,
gas_limit=block.header.gas_limit,
timestamp=block.header.timestamp,
extra_data=block.header.extra_data,
mix_hash=block.header.mix_hash,
nonce=block.header.nonce,
uncles_hash=keccak(rlp.encode(block.uncles)),
),
uncles=block.uncles,
)
# we need to re-initialize the `state` to update the execution context.
self._state = self.build_state(self.chaindb.db, self.header, self.previous_hashes)
# run all of the transactions.
new_header, receipts, _ = self.apply_all_transactions(block.transactions, self.header)
self.block = self.set_block_transactions(
self.block,
new_header,
block.transactions,
receipts,
)
return self.mine_block() | python | def import_block(self, block: BaseBlock) -> BaseBlock:
"""
Import the given block to the chain.
"""
if self.block.number != block.number:
raise ValidationError(
"This VM can only import blocks at number #{}, the attempted block was #{}".format(
self.block.number,
block.number,
)
)
self.block = self.block.copy(
header=self.configure_header(
coinbase=block.header.coinbase,
gas_limit=block.header.gas_limit,
timestamp=block.header.timestamp,
extra_data=block.header.extra_data,
mix_hash=block.header.mix_hash,
nonce=block.header.nonce,
uncles_hash=keccak(rlp.encode(block.uncles)),
),
uncles=block.uncles,
)
# we need to re-initialize the `state` to update the execution context.
self._state = self.build_state(self.chaindb.db, self.header, self.previous_hashes)
# run all of the transactions.
new_header, receipts, _ = self.apply_all_transactions(block.transactions, self.header)
self.block = self.set_block_transactions(
self.block,
new_header,
block.transactions,
receipts,
)
return self.mine_block() | [
"def",
"import_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
")",
"->",
"BaseBlock",
":",
"if",
"self",
".",
"block",
".",
"number",
"!=",
"block",
".",
"number",
":",
"raise",
"ValidationError",
"(",
"\"This VM can only import blocks at number #{}, the att... | Import the given block to the chain. | [
"Import",
"the",
"given",
"block",
"to",
"the",
"chain",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L557-L594 | train | 225,022 |
ethereum/py-evm | eth/vm/base.py | VM.mine_block | def mine_block(self, *args: Any, **kwargs: Any) -> BaseBlock:
"""
Mine the current block. Proxies to self.pack_block method.
"""
packed_block = self.pack_block(self.block, *args, **kwargs)
final_block = self.finalize_block(packed_block)
# Perform validation
self.validate_block(final_block)
return final_block | python | def mine_block(self, *args: Any, **kwargs: Any) -> BaseBlock:
"""
Mine the current block. Proxies to self.pack_block method.
"""
packed_block = self.pack_block(self.block, *args, **kwargs)
final_block = self.finalize_block(packed_block)
# Perform validation
self.validate_block(final_block)
return final_block | [
"def",
"mine_block",
"(",
"self",
",",
"*",
"args",
":",
"Any",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"BaseBlock",
":",
"packed_block",
"=",
"self",
".",
"pack_block",
"(",
"self",
".",
"block",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
... | Mine the current block. Proxies to self.pack_block method. | [
"Mine",
"the",
"current",
"block",
".",
"Proxies",
"to",
"self",
".",
"pack_block",
"method",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L596-L607 | train | 225,023 |
ethereum/py-evm | eth/vm/base.py | VM.finalize_block | def finalize_block(self, block: BaseBlock) -> BaseBlock:
"""
Perform any finalization steps like awarding the block mining reward,
and persisting the final state root.
"""
if block.number > 0:
self._assign_block_rewards(block)
# We need to call `persist` here since the state db batches
# all writes until we tell it to write to the underlying db
self.state.persist()
return block.copy(header=block.header.copy(state_root=self.state.state_root)) | python | def finalize_block(self, block: BaseBlock) -> BaseBlock:
"""
Perform any finalization steps like awarding the block mining reward,
and persisting the final state root.
"""
if block.number > 0:
self._assign_block_rewards(block)
# We need to call `persist` here since the state db batches
# all writes until we tell it to write to the underlying db
self.state.persist()
return block.copy(header=block.header.copy(state_root=self.state.state_root)) | [
"def",
"finalize_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
")",
"->",
"BaseBlock",
":",
"if",
"block",
".",
"number",
">",
"0",
":",
"self",
".",
"_assign_block_rewards",
"(",
"block",
")",
"# We need to call `persist` here since the state db batches",
... | Perform any finalization steps like awarding the block mining reward,
and persisting the final state root. | [
"Perform",
"any",
"finalization",
"steps",
"like",
"awarding",
"the",
"block",
"mining",
"reward",
"and",
"persisting",
"the",
"final",
"state",
"root",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L653-L665 | train | 225,024 |
ethereum/py-evm | eth/vm/base.py | VM.pack_block | def pack_block(self, block: BaseBlock, *args: Any, **kwargs: Any) -> BaseBlock:
"""
Pack block for mining.
:param bytes coinbase: 20-byte public address to receive block reward
:param bytes uncles_hash: 32 bytes
:param bytes state_root: 32 bytes
:param bytes transaction_root: 32 bytes
:param bytes receipt_root: 32 bytes
:param int bloom:
:param int gas_used:
:param bytes extra_data: 32 bytes
:param bytes mix_hash: 32 bytes
:param bytes nonce: 8 bytes
"""
if 'uncles' in kwargs:
uncles = kwargs.pop('uncles')
kwargs.setdefault('uncles_hash', keccak(rlp.encode(uncles)))
else:
uncles = block.uncles
provided_fields = set(kwargs.keys())
known_fields = set(BlockHeader._meta.field_names)
unknown_fields = provided_fields.difference(known_fields)
if unknown_fields:
raise AttributeError(
"Unable to set the field(s) {0} on the `BlockHeader` class. "
"Received the following unexpected fields: {1}.".format(
", ".join(known_fields),
", ".join(unknown_fields),
)
)
header = block.header.copy(**kwargs)
packed_block = block.copy(uncles=uncles, header=header)
return packed_block | python | def pack_block(self, block: BaseBlock, *args: Any, **kwargs: Any) -> BaseBlock:
"""
Pack block for mining.
:param bytes coinbase: 20-byte public address to receive block reward
:param bytes uncles_hash: 32 bytes
:param bytes state_root: 32 bytes
:param bytes transaction_root: 32 bytes
:param bytes receipt_root: 32 bytes
:param int bloom:
:param int gas_used:
:param bytes extra_data: 32 bytes
:param bytes mix_hash: 32 bytes
:param bytes nonce: 8 bytes
"""
if 'uncles' in kwargs:
uncles = kwargs.pop('uncles')
kwargs.setdefault('uncles_hash', keccak(rlp.encode(uncles)))
else:
uncles = block.uncles
provided_fields = set(kwargs.keys())
known_fields = set(BlockHeader._meta.field_names)
unknown_fields = provided_fields.difference(known_fields)
if unknown_fields:
raise AttributeError(
"Unable to set the field(s) {0} on the `BlockHeader` class. "
"Received the following unexpected fields: {1}.".format(
", ".join(known_fields),
", ".join(unknown_fields),
)
)
header = block.header.copy(**kwargs)
packed_block = block.copy(uncles=uncles, header=header)
return packed_block | [
"def",
"pack_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
",",
"*",
"args",
":",
"Any",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"BaseBlock",
":",
"if",
"'uncles'",
"in",
"kwargs",
":",
"uncles",
"=",
"kwargs",
".",
"pop",
"(",
"'unc... | Pack block for mining.
:param bytes coinbase: 20-byte public address to receive block reward
:param bytes uncles_hash: 32 bytes
:param bytes state_root: 32 bytes
:param bytes transaction_root: 32 bytes
:param bytes receipt_root: 32 bytes
:param int bloom:
:param int gas_used:
:param bytes extra_data: 32 bytes
:param bytes mix_hash: 32 bytes
:param bytes nonce: 8 bytes | [
"Pack",
"block",
"for",
"mining",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L667-L704 | train | 225,025 |
ethereum/py-evm | eth/vm/base.py | VM.generate_block_from_parent_header_and_coinbase | def generate_block_from_parent_header_and_coinbase(cls,
parent_header: BlockHeader,
coinbase: Address) -> BaseBlock:
"""
Generate block from parent header and coinbase.
"""
block_header = generate_header_from_parent_header(
cls.compute_difficulty,
parent_header,
coinbase,
timestamp=parent_header.timestamp + 1,
)
block = cls.get_block_class()(
block_header,
transactions=[],
uncles=[],
)
return block | python | def generate_block_from_parent_header_and_coinbase(cls,
parent_header: BlockHeader,
coinbase: Address) -> BaseBlock:
"""
Generate block from parent header and coinbase.
"""
block_header = generate_header_from_parent_header(
cls.compute_difficulty,
parent_header,
coinbase,
timestamp=parent_header.timestamp + 1,
)
block = cls.get_block_class()(
block_header,
transactions=[],
uncles=[],
)
return block | [
"def",
"generate_block_from_parent_header_and_coinbase",
"(",
"cls",
",",
"parent_header",
":",
"BlockHeader",
",",
"coinbase",
":",
"Address",
")",
"->",
"BaseBlock",
":",
"block_header",
"=",
"generate_header_from_parent_header",
"(",
"cls",
".",
"compute_difficulty",
... | Generate block from parent header and coinbase. | [
"Generate",
"block",
"from",
"parent",
"header",
"and",
"coinbase",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L710-L727 | train | 225,026 |
ethereum/py-evm | eth/vm/base.py | VM.previous_hashes | def previous_hashes(self) -> Optional[Iterable[Hash32]]:
"""
Convenience API for accessing the previous 255 block hashes.
"""
return self.get_prev_hashes(self.header.parent_hash, self.chaindb) | python | def previous_hashes(self) -> Optional[Iterable[Hash32]]:
"""
Convenience API for accessing the previous 255 block hashes.
"""
return self.get_prev_hashes(self.header.parent_hash, self.chaindb) | [
"def",
"previous_hashes",
"(",
"self",
")",
"->",
"Optional",
"[",
"Iterable",
"[",
"Hash32",
"]",
"]",
":",
"return",
"self",
".",
"get_prev_hashes",
"(",
"self",
".",
"header",
".",
"parent_hash",
",",
"self",
".",
"chaindb",
")"
] | Convenience API for accessing the previous 255 block hashes. | [
"Convenience",
"API",
"for",
"accessing",
"the",
"previous",
"255",
"block",
"hashes",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L756-L760 | train | 225,027 |
ethereum/py-evm | eth/vm/base.py | VM.create_transaction | def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
"""
Proxy for instantiating a signed transaction for this VM.
"""
return self.get_transaction_class()(*args, **kwargs) | python | def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
"""
Proxy for instantiating a signed transaction for this VM.
"""
return self.get_transaction_class()(*args, **kwargs) | [
"def",
"create_transaction",
"(",
"self",
",",
"*",
"args",
":",
"Any",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"BaseTransaction",
":",
"return",
"self",
".",
"get_transaction_class",
"(",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Proxy for instantiating a signed transaction for this VM. | [
"Proxy",
"for",
"instantiating",
"a",
"signed",
"transaction",
"for",
"this",
"VM",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L765-L769 | train | 225,028 |
ethereum/py-evm | eth/vm/base.py | VM.create_unsigned_transaction | def create_unsigned_transaction(cls,
*,
nonce: int,
gas_price: int,
gas: int,
to: Address,
value: int,
data: bytes) -> 'BaseUnsignedTransaction':
"""
Proxy for instantiating an unsigned transaction for this VM.
"""
return cls.get_transaction_class().create_unsigned_transaction(
nonce=nonce,
gas_price=gas_price,
gas=gas,
to=to,
value=value,
data=data
) | python | def create_unsigned_transaction(cls,
*,
nonce: int,
gas_price: int,
gas: int,
to: Address,
value: int,
data: bytes) -> 'BaseUnsignedTransaction':
"""
Proxy for instantiating an unsigned transaction for this VM.
"""
return cls.get_transaction_class().create_unsigned_transaction(
nonce=nonce,
gas_price=gas_price,
gas=gas,
to=to,
value=value,
data=data
) | [
"def",
"create_unsigned_transaction",
"(",
"cls",
",",
"*",
",",
"nonce",
":",
"int",
",",
"gas_price",
":",
"int",
",",
"gas",
":",
"int",
",",
"to",
":",
"Address",
",",
"value",
":",
"int",
",",
"data",
":",
"bytes",
")",
"->",
"'BaseUnsignedTransac... | Proxy for instantiating an unsigned transaction for this VM. | [
"Proxy",
"for",
"instantiating",
"an",
"unsigned",
"transaction",
"for",
"this",
"VM",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L772-L790 | train | 225,029 |
ethereum/py-evm | eth/vm/base.py | VM.validate_block | def validate_block(self, block: BaseBlock) -> None:
"""
Validate the the given block.
"""
if not isinstance(block, self.get_block_class()):
raise ValidationError(
"This vm ({0!r}) is not equipped to validate a block of type {1!r}".format(
self,
block,
)
)
if block.is_genesis:
validate_length_lte(block.header.extra_data, 32, title="BlockHeader.extra_data")
else:
parent_header = get_parent_header(block.header, self.chaindb)
self.validate_header(block.header, parent_header)
tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
if tx_root_hash != block.header.transaction_root:
raise ValidationError(
"Block's transaction_root ({0}) does not match expected value: {1}".format(
block.header.transaction_root, tx_root_hash))
if len(block.uncles) > MAX_UNCLES:
raise ValidationError(
"Blocks may have a maximum of {0} uncles. Found "
"{1}.".format(MAX_UNCLES, len(block.uncles))
)
if not self.chaindb.exists(block.header.state_root):
raise ValidationError(
"`state_root` was not found in the db.\n"
"- state_root: {0}".format(
block.header.state_root,
)
)
local_uncle_hash = keccak(rlp.encode(block.uncles))
if local_uncle_hash != block.header.uncles_hash:
raise ValidationError(
"`uncles_hash` and block `uncles` do not match.\n"
" - num_uncles : {0}\n"
" - block uncle_hash : {1}\n"
" - header uncle_hash: {2}".format(
len(block.uncles),
local_uncle_hash,
block.header.uncles_hash,
)
) | python | def validate_block(self, block: BaseBlock) -> None:
"""
Validate the the given block.
"""
if not isinstance(block, self.get_block_class()):
raise ValidationError(
"This vm ({0!r}) is not equipped to validate a block of type {1!r}".format(
self,
block,
)
)
if block.is_genesis:
validate_length_lte(block.header.extra_data, 32, title="BlockHeader.extra_data")
else:
parent_header = get_parent_header(block.header, self.chaindb)
self.validate_header(block.header, parent_header)
tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
if tx_root_hash != block.header.transaction_root:
raise ValidationError(
"Block's transaction_root ({0}) does not match expected value: {1}".format(
block.header.transaction_root, tx_root_hash))
if len(block.uncles) > MAX_UNCLES:
raise ValidationError(
"Blocks may have a maximum of {0} uncles. Found "
"{1}.".format(MAX_UNCLES, len(block.uncles))
)
if not self.chaindb.exists(block.header.state_root):
raise ValidationError(
"`state_root` was not found in the db.\n"
"- state_root: {0}".format(
block.header.state_root,
)
)
local_uncle_hash = keccak(rlp.encode(block.uncles))
if local_uncle_hash != block.header.uncles_hash:
raise ValidationError(
"`uncles_hash` and block `uncles` do not match.\n"
" - num_uncles : {0}\n"
" - block uncle_hash : {1}\n"
" - header uncle_hash: {2}".format(
len(block.uncles),
local_uncle_hash,
block.header.uncles_hash,
)
) | [
"def",
"validate_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"block",
",",
"self",
".",
"get_block_class",
"(",
")",
")",
":",
"raise",
"ValidationError",
"(",
"\"This vm ({0!r}) is not equipped ... | Validate the the given block. | [
"Validate",
"the",
"the",
"given",
"block",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L828-L876 | train | 225,030 |
ethereum/py-evm | eth/vm/base.py | VM.validate_uncle | def validate_uncle(cls, block: BaseBlock, uncle: BaseBlock, uncle_parent: BaseBlock) -> None:
"""
Validate the given uncle in the context of the given block.
"""
if uncle.block_number >= block.number:
raise ValidationError(
"Uncle number ({0}) is higher than block number ({1})".format(
uncle.block_number, block.number))
if uncle.block_number != uncle_parent.block_number + 1:
raise ValidationError(
"Uncle number ({0}) is not one above ancestor's number ({1})".format(
uncle.block_number, uncle_parent.block_number))
if uncle.timestamp < uncle_parent.timestamp:
raise ValidationError(
"Uncle timestamp ({0}) is before ancestor's timestamp ({1})".format(
uncle.timestamp, uncle_parent.timestamp))
if uncle.gas_used > uncle.gas_limit:
raise ValidationError(
"Uncle's gas usage ({0}) is above the limit ({1})".format(
uncle.gas_used, uncle.gas_limit)) | python | def validate_uncle(cls, block: BaseBlock, uncle: BaseBlock, uncle_parent: BaseBlock) -> None:
"""
Validate the given uncle in the context of the given block.
"""
if uncle.block_number >= block.number:
raise ValidationError(
"Uncle number ({0}) is higher than block number ({1})".format(
uncle.block_number, block.number))
if uncle.block_number != uncle_parent.block_number + 1:
raise ValidationError(
"Uncle number ({0}) is not one above ancestor's number ({1})".format(
uncle.block_number, uncle_parent.block_number))
if uncle.timestamp < uncle_parent.timestamp:
raise ValidationError(
"Uncle timestamp ({0}) is before ancestor's timestamp ({1})".format(
uncle.timestamp, uncle_parent.timestamp))
if uncle.gas_used > uncle.gas_limit:
raise ValidationError(
"Uncle's gas usage ({0}) is above the limit ({1})".format(
uncle.gas_used, uncle.gas_limit)) | [
"def",
"validate_uncle",
"(",
"cls",
",",
"block",
":",
"BaseBlock",
",",
"uncle",
":",
"BaseBlock",
",",
"uncle_parent",
":",
"BaseBlock",
")",
"->",
"None",
":",
"if",
"uncle",
".",
"block_number",
">=",
"block",
".",
"number",
":",
"raise",
"ValidationE... | Validate the given uncle in the context of the given block. | [
"Validate",
"the",
"given",
"uncle",
"in",
"the",
"context",
"of",
"the",
"given",
"block",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L927-L947 | train | 225,031 |
ethereum/py-evm | eth/tools/_utils/mappings.py | is_cleanly_mergable | def is_cleanly_mergable(*dicts: Dict[Any, Any]) -> bool:
"""Check that nothing will be overwritten when dictionaries are merged using `deep_merge`.
Examples:
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3})
True
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, c": 3})
False
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, {"b": {"bb": 4}})
True
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}})
False
"""
if len(dicts) <= 1:
return True
elif len(dicts) == 2:
if not all(isinstance(d, Mapping) for d in dicts):
return False
else:
shared_keys = set(dicts[0].keys()) & set(dicts[1].keys())
return all(is_cleanly_mergable(dicts[0][key], dicts[1][key]) for key in shared_keys)
else:
dict_combinations = itertools.combinations(dicts, 2)
return all(is_cleanly_mergable(*combination) for combination in dict_combinations) | python | def is_cleanly_mergable(*dicts: Dict[Any, Any]) -> bool:
"""Check that nothing will be overwritten when dictionaries are merged using `deep_merge`.
Examples:
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3})
True
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, c": 3})
False
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, {"b": {"bb": 4}})
True
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}})
False
"""
if len(dicts) <= 1:
return True
elif len(dicts) == 2:
if not all(isinstance(d, Mapping) for d in dicts):
return False
else:
shared_keys = set(dicts[0].keys()) & set(dicts[1].keys())
return all(is_cleanly_mergable(dicts[0][key], dicts[1][key]) for key in shared_keys)
else:
dict_combinations = itertools.combinations(dicts, 2)
return all(is_cleanly_mergable(*combination) for combination in dict_combinations) | [
"def",
"is_cleanly_mergable",
"(",
"*",
"dicts",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
")",
"->",
"bool",
":",
"if",
"len",
"(",
"dicts",
")",
"<=",
"1",
":",
"return",
"True",
"elif",
"len",
"(",
"dicts",
")",
"==",
"2",
":",
"if",
"not",
... | Check that nothing will be overwritten when dictionaries are merged using `deep_merge`.
Examples:
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3})
True
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, c": 3})
False
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, {"b": {"bb": 4}})
True
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}})
False | [
"Check",
"that",
"nothing",
"will",
"be",
"overwritten",
"when",
"dictionaries",
"are",
"merged",
"using",
"deep_merge",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/tools/_utils/mappings.py#L24-L49 | train | 225,032 |
ethereum/py-evm | eth/db/diff.py | DBDiff.deleted_keys | def deleted_keys(self) -> Iterable[bytes]:
"""
List all the keys that have been deleted.
"""
for key, value in self._changes.items():
if value is DELETED:
yield key | python | def deleted_keys(self) -> Iterable[bytes]:
"""
List all the keys that have been deleted.
"""
for key, value in self._changes.items():
if value is DELETED:
yield key | [
"def",
"deleted_keys",
"(",
"self",
")",
"->",
"Iterable",
"[",
"bytes",
"]",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_changes",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"DELETED",
":",
"yield",
"key"
] | List all the keys that have been deleted. | [
"List",
"all",
"the",
"keys",
"that",
"have",
"been",
"deleted",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/diff.py#L160-L166 | train | 225,033 |
ethereum/py-evm | eth/db/diff.py | DBDiff.apply_to | def apply_to(self,
db: Union[BaseDB, ABC_Mutable_Mapping],
apply_deletes: bool = True) -> None:
"""
Apply the changes in this diff to the given database.
You may choose to opt out of deleting any underlying keys.
:param apply_deletes: whether the pending deletes should be
applied to the database
"""
for key, value in self._changes.items():
if value is DELETED:
if apply_deletes:
try:
del db[key]
except KeyError:
pass
else:
pass
else:
db[key] = value | python | def apply_to(self,
db: Union[BaseDB, ABC_Mutable_Mapping],
apply_deletes: bool = True) -> None:
"""
Apply the changes in this diff to the given database.
You may choose to opt out of deleting any underlying keys.
:param apply_deletes: whether the pending deletes should be
applied to the database
"""
for key, value in self._changes.items():
if value is DELETED:
if apply_deletes:
try:
del db[key]
except KeyError:
pass
else:
pass
else:
db[key] = value | [
"def",
"apply_to",
"(",
"self",
",",
"db",
":",
"Union",
"[",
"BaseDB",
",",
"ABC_Mutable_Mapping",
"]",
",",
"apply_deletes",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_changes",
".",
"items",
... | Apply the changes in this diff to the given database.
You may choose to opt out of deleting any underlying keys.
:param apply_deletes: whether the pending deletes should be
applied to the database | [
"Apply",
"the",
"changes",
"in",
"this",
"diff",
"to",
"the",
"given",
"database",
".",
"You",
"may",
"choose",
"to",
"opt",
"out",
"of",
"deleting",
"any",
"underlying",
"keys",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/diff.py#L188-L208 | train | 225,034 |
ethereum/py-evm | eth/db/diff.py | DBDiff.join | def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff':
"""
Join several DBDiff objects into a single DBDiff object.
In case of a conflict, changes in diffs that come later
in ``diffs`` will overwrite changes from earlier changes.
"""
tracker = DBDiffTracker()
for diff in diffs:
diff.apply_to(tracker)
return tracker.diff() | python | def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff':
"""
Join several DBDiff objects into a single DBDiff object.
In case of a conflict, changes in diffs that come later
in ``diffs`` will overwrite changes from earlier changes.
"""
tracker = DBDiffTracker()
for diff in diffs:
diff.apply_to(tracker)
return tracker.diff() | [
"def",
"join",
"(",
"cls",
",",
"diffs",
":",
"Iterable",
"[",
"'DBDiff'",
"]",
")",
"->",
"'DBDiff'",
":",
"tracker",
"=",
"DBDiffTracker",
"(",
")",
"for",
"diff",
"in",
"diffs",
":",
"diff",
".",
"apply_to",
"(",
"tracker",
")",
"return",
"tracker",... | Join several DBDiff objects into a single DBDiff object.
In case of a conflict, changes in diffs that come later
in ``diffs`` will overwrite changes from earlier changes. | [
"Join",
"several",
"DBDiff",
"objects",
"into",
"a",
"single",
"DBDiff",
"object",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/diff.py#L211-L221 | train | 225,035 |
ethereum/py-evm | eth/tools/_utils/hashing.py | hash_log_entries | def hash_log_entries(log_entries: Iterable[Tuple[bytes, List[int], bytes]]) -> Hash32:
"""
Helper function for computing the RLP hash of the logs from transaction
execution.
"""
logs = [Log(*entry) for entry in log_entries]
encoded_logs = rlp.encode(logs)
logs_hash = keccak(encoded_logs)
return logs_hash | python | def hash_log_entries(log_entries: Iterable[Tuple[bytes, List[int], bytes]]) -> Hash32:
"""
Helper function for computing the RLP hash of the logs from transaction
execution.
"""
logs = [Log(*entry) for entry in log_entries]
encoded_logs = rlp.encode(logs)
logs_hash = keccak(encoded_logs)
return logs_hash | [
"def",
"hash_log_entries",
"(",
"log_entries",
":",
"Iterable",
"[",
"Tuple",
"[",
"bytes",
",",
"List",
"[",
"int",
"]",
",",
"bytes",
"]",
"]",
")",
"->",
"Hash32",
":",
"logs",
"=",
"[",
"Log",
"(",
"*",
"entry",
")",
"for",
"entry",
"in",
"log_... | Helper function for computing the RLP hash of the logs from transaction
execution. | [
"Helper",
"function",
"for",
"computing",
"the",
"RLP",
"hash",
"of",
"the",
"logs",
"from",
"transaction",
"execution",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/tools/_utils/hashing.py#L18-L26 | train | 225,036 |
ethereum/py-evm | eth/chains/base.py | BaseChain.get_vm_class_for_block_number | def get_vm_class_for_block_number(cls, block_number: BlockNumber) -> Type['BaseVM']:
"""
Returns the VM class for the given block number.
"""
if cls.vm_configuration is None:
raise AttributeError("Chain classes must define the VMs in vm_configuration")
validate_block_number(block_number)
for start_block, vm_class in reversed(cls.vm_configuration):
if block_number >= start_block:
return vm_class
else:
raise VMNotFound("No vm available for block #{0}".format(block_number)) | python | def get_vm_class_for_block_number(cls, block_number: BlockNumber) -> Type['BaseVM']:
"""
Returns the VM class for the given block number.
"""
if cls.vm_configuration is None:
raise AttributeError("Chain classes must define the VMs in vm_configuration")
validate_block_number(block_number)
for start_block, vm_class in reversed(cls.vm_configuration):
if block_number >= start_block:
return vm_class
else:
raise VMNotFound("No vm available for block #{0}".format(block_number)) | [
"def",
"get_vm_class_for_block_number",
"(",
"cls",
",",
"block_number",
":",
"BlockNumber",
")",
"->",
"Type",
"[",
"'BaseVM'",
"]",
":",
"if",
"cls",
".",
"vm_configuration",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Chain classes must define the VMs i... | Returns the VM class for the given block number. | [
"Returns",
"the",
"VM",
"class",
"for",
"the",
"given",
"block",
"number",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L182-L194 | train | 225,037 |
ethereum/py-evm | eth/chains/base.py | BaseChain.validate_chain | def validate_chain(
cls,
root: BlockHeader,
descendants: Tuple[BlockHeader, ...],
seal_check_random_sample_rate: int = 1) -> None:
"""
Validate that all of the descendents are valid, given that the root header is valid.
By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers.
This can be expensive. Instead, check a random sample of seals using
seal_check_random_sample_rate.
"""
all_indices = range(len(descendants))
if seal_check_random_sample_rate == 1:
indices_to_check_seal = set(all_indices)
else:
sample_size = len(all_indices) // seal_check_random_sample_rate
indices_to_check_seal = set(random.sample(all_indices, sample_size))
header_pairs = sliding_window(2, concatv([root], descendants))
for index, (parent, child) in enumerate(header_pairs):
if child.parent_hash != parent.hash:
raise ValidationError(
"Invalid header chain; {} has parent {}, but expected {}".format(
child, child.parent_hash, parent.hash))
should_check_seal = index in indices_to_check_seal
vm_class = cls.get_vm_class_for_block_number(child.block_number)
try:
vm_class.validate_header(child, parent, check_seal=should_check_seal)
except ValidationError as exc:
raise ValidationError(
"%s is not a valid child of %s: %s" % (
child,
parent,
exc,
)
) from exc | python | def validate_chain(
cls,
root: BlockHeader,
descendants: Tuple[BlockHeader, ...],
seal_check_random_sample_rate: int = 1) -> None:
"""
Validate that all of the descendents are valid, given that the root header is valid.
By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers.
This can be expensive. Instead, check a random sample of seals using
seal_check_random_sample_rate.
"""
all_indices = range(len(descendants))
if seal_check_random_sample_rate == 1:
indices_to_check_seal = set(all_indices)
else:
sample_size = len(all_indices) // seal_check_random_sample_rate
indices_to_check_seal = set(random.sample(all_indices, sample_size))
header_pairs = sliding_window(2, concatv([root], descendants))
for index, (parent, child) in enumerate(header_pairs):
if child.parent_hash != parent.hash:
raise ValidationError(
"Invalid header chain; {} has parent {}, but expected {}".format(
child, child.parent_hash, parent.hash))
should_check_seal = index in indices_to_check_seal
vm_class = cls.get_vm_class_for_block_number(child.block_number)
try:
vm_class.validate_header(child, parent, check_seal=should_check_seal)
except ValidationError as exc:
raise ValidationError(
"%s is not a valid child of %s: %s" % (
child,
parent,
exc,
)
) from exc | [
"def",
"validate_chain",
"(",
"cls",
",",
"root",
":",
"BlockHeader",
",",
"descendants",
":",
"Tuple",
"[",
"BlockHeader",
",",
"...",
"]",
",",
"seal_check_random_sample_rate",
":",
"int",
"=",
"1",
")",
"->",
"None",
":",
"all_indices",
"=",
"range",
"(... | Validate that all of the descendents are valid, given that the root header is valid.
By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers.
This can be expensive. Instead, check a random sample of seals using
seal_check_random_sample_rate. | [
"Validate",
"that",
"all",
"of",
"the",
"descendents",
"are",
"valid",
"given",
"that",
"the",
"root",
"header",
"is",
"valid",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L326-L364 | train | 225,038 |
ethereum/py-evm | eth/chains/base.py | Chain.from_genesis | def from_genesis(cls,
base_db: BaseAtomicDB,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None) -> 'BaseChain':
"""
Initializes the Chain from a genesis state.
"""
genesis_vm_class = cls.get_vm_class_for_block_number(BlockNumber(0))
pre_genesis_header = BlockHeader(difficulty=0, block_number=-1, gas_limit=0)
state = genesis_vm_class.build_state(base_db, pre_genesis_header)
if genesis_state is None:
genesis_state = {}
# mutation
apply_state_dict(state, genesis_state)
state.persist()
if 'state_root' not in genesis_params:
# If the genesis state_root was not specified, use the value
# computed from the initialized state database.
genesis_params = assoc(genesis_params, 'state_root', state.state_root)
elif genesis_params['state_root'] != state.state_root:
# If the genesis state_root was specified, validate that it matches
# the computed state from the initialized state database.
raise ValidationError(
"The provided genesis state root does not match the computed "
"genesis state root. Got {0}. Expected {1}".format(
state.state_root,
genesis_params['state_root'],
)
)
genesis_header = BlockHeader(**genesis_params)
return cls.from_genesis_header(base_db, genesis_header) | python | def from_genesis(cls,
base_db: BaseAtomicDB,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None) -> 'BaseChain':
"""
Initializes the Chain from a genesis state.
"""
genesis_vm_class = cls.get_vm_class_for_block_number(BlockNumber(0))
pre_genesis_header = BlockHeader(difficulty=0, block_number=-1, gas_limit=0)
state = genesis_vm_class.build_state(base_db, pre_genesis_header)
if genesis_state is None:
genesis_state = {}
# mutation
apply_state_dict(state, genesis_state)
state.persist()
if 'state_root' not in genesis_params:
# If the genesis state_root was not specified, use the value
# computed from the initialized state database.
genesis_params = assoc(genesis_params, 'state_root', state.state_root)
elif genesis_params['state_root'] != state.state_root:
# If the genesis state_root was specified, validate that it matches
# the computed state from the initialized state database.
raise ValidationError(
"The provided genesis state root does not match the computed "
"genesis state root. Got {0}. Expected {1}".format(
state.state_root,
genesis_params['state_root'],
)
)
genesis_header = BlockHeader(**genesis_params)
return cls.from_genesis_header(base_db, genesis_header) | [
"def",
"from_genesis",
"(",
"cls",
",",
"base_db",
":",
"BaseAtomicDB",
",",
"genesis_params",
":",
"Dict",
"[",
"str",
",",
"HeaderParams",
"]",
",",
"genesis_state",
":",
"AccountState",
"=",
"None",
")",
"->",
"'BaseChain'",
":",
"genesis_vm_class",
"=",
... | Initializes the Chain from a genesis state. | [
"Initializes",
"the",
"Chain",
"from",
"a",
"genesis",
"state",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L405-L440 | train | 225,039 |
ethereum/py-evm | eth/chains/base.py | Chain.get_vm | def get_vm(self, at_header: BlockHeader=None) -> 'BaseVM':
"""
Returns the VM instance for the given block number.
"""
header = self.ensure_header(at_header)
vm_class = self.get_vm_class_for_block_number(header.block_number)
return vm_class(header=header, chaindb=self.chaindb) | python | def get_vm(self, at_header: BlockHeader=None) -> 'BaseVM':
"""
Returns the VM instance for the given block number.
"""
header = self.ensure_header(at_header)
vm_class = self.get_vm_class_for_block_number(header.block_number)
return vm_class(header=header, chaindb=self.chaindb) | [
"def",
"get_vm",
"(",
"self",
",",
"at_header",
":",
"BlockHeader",
"=",
"None",
")",
"->",
"'BaseVM'",
":",
"header",
"=",
"self",
".",
"ensure_header",
"(",
"at_header",
")",
"vm_class",
"=",
"self",
".",
"get_vm_class_for_block_number",
"(",
"header",
"."... | Returns the VM instance for the given block number. | [
"Returns",
"the",
"VM",
"instance",
"for",
"the",
"given",
"block",
"number",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L456-L462 | train | 225,040 |
ethereum/py-evm | eth/chains/base.py | Chain.create_header_from_parent | def create_header_from_parent(self,
parent_header: BlockHeader,
**header_params: HeaderParams) -> BlockHeader:
"""
Passthrough helper to the VM class of the block descending from the
given header.
"""
return self.get_vm_class_for_block_number(
block_number=parent_header.block_number + 1,
).create_header_from_parent(parent_header, **header_params) | python | def create_header_from_parent(self,
parent_header: BlockHeader,
**header_params: HeaderParams) -> BlockHeader:
"""
Passthrough helper to the VM class of the block descending from the
given header.
"""
return self.get_vm_class_for_block_number(
block_number=parent_header.block_number + 1,
).create_header_from_parent(parent_header, **header_params) | [
"def",
"create_header_from_parent",
"(",
"self",
",",
"parent_header",
":",
"BlockHeader",
",",
"*",
"*",
"header_params",
":",
"HeaderParams",
")",
"->",
"BlockHeader",
":",
"return",
"self",
".",
"get_vm_class_for_block_number",
"(",
"block_number",
"=",
"parent_h... | Passthrough helper to the VM class of the block descending from the
given header. | [
"Passthrough",
"helper",
"to",
"the",
"VM",
"class",
"of",
"the",
"block",
"descending",
"from",
"the",
"given",
"header",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L467-L476 | train | 225,041 |
ethereum/py-evm | eth/chains/base.py | Chain.ensure_header | def ensure_header(self, header: BlockHeader=None) -> BlockHeader:
"""
Return ``header`` if it is not ``None``, otherwise return the header
of the canonical head.
"""
if header is None:
head = self.get_canonical_head()
return self.create_header_from_parent(head)
else:
return header | python | def ensure_header(self, header: BlockHeader=None) -> BlockHeader:
"""
Return ``header`` if it is not ``None``, otherwise return the header
of the canonical head.
"""
if header is None:
head = self.get_canonical_head()
return self.create_header_from_parent(head)
else:
return header | [
"def",
"ensure_header",
"(",
"self",
",",
"header",
":",
"BlockHeader",
"=",
"None",
")",
"->",
"BlockHeader",
":",
"if",
"header",
"is",
"None",
":",
"head",
"=",
"self",
".",
"get_canonical_head",
"(",
")",
"return",
"self",
".",
"create_header_from_parent... | Return ``header`` if it is not ``None``, otherwise return the header
of the canonical head. | [
"Return",
"header",
"if",
"it",
"is",
"not",
"None",
"otherwise",
"return",
"the",
"header",
"of",
"the",
"canonical",
"head",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L503-L512 | train | 225,042 |
ethereum/py-evm | eth/chains/base.py | Chain.get_ancestors | def get_ancestors(self, limit: int, header: BlockHeader) -> Tuple[BaseBlock, ...]:
"""
Return `limit` number of ancestor blocks from the current canonical head.
"""
ancestor_count = min(header.block_number, limit)
# We construct a temporary block object
vm_class = self.get_vm_class_for_block_number(header.block_number)
block_class = vm_class.get_block_class()
block = block_class(header=header, uncles=[])
ancestor_generator = iterate(compose(
self.get_block_by_hash,
operator.attrgetter('parent_hash'),
operator.attrgetter('header'),
), block)
# we peel off the first element from the iterator which will be the
# temporary block object we constructed.
next(ancestor_generator)
return tuple(take(ancestor_count, ancestor_generator)) | python | def get_ancestors(self, limit: int, header: BlockHeader) -> Tuple[BaseBlock, ...]:
"""
Return `limit` number of ancestor blocks from the current canonical head.
"""
ancestor_count = min(header.block_number, limit)
# We construct a temporary block object
vm_class = self.get_vm_class_for_block_number(header.block_number)
block_class = vm_class.get_block_class()
block = block_class(header=header, uncles=[])
ancestor_generator = iterate(compose(
self.get_block_by_hash,
operator.attrgetter('parent_hash'),
operator.attrgetter('header'),
), block)
# we peel off the first element from the iterator which will be the
# temporary block object we constructed.
next(ancestor_generator)
return tuple(take(ancestor_count, ancestor_generator)) | [
"def",
"get_ancestors",
"(",
"self",
",",
"limit",
":",
"int",
",",
"header",
":",
"BlockHeader",
")",
"->",
"Tuple",
"[",
"BaseBlock",
",",
"...",
"]",
":",
"ancestor_count",
"=",
"min",
"(",
"header",
".",
"block_number",
",",
"limit",
")",
"# We const... | Return `limit` number of ancestor blocks from the current canonical head. | [
"Return",
"limit",
"number",
"of",
"ancestor",
"blocks",
"from",
"the",
"current",
"canonical",
"head",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L517-L537 | train | 225,043 |
ethereum/py-evm | eth/chains/base.py | Chain.get_block_by_hash | def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
"""
Returns the requested block as specified by block hash.
"""
validate_word(block_hash, title="Block Hash")
block_header = self.get_block_header_by_hash(block_hash)
return self.get_block_by_header(block_header) | python | def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
"""
Returns the requested block as specified by block hash.
"""
validate_word(block_hash, title="Block Hash")
block_header = self.get_block_header_by_hash(block_hash)
return self.get_block_by_header(block_header) | [
"def",
"get_block_by_hash",
"(",
"self",
",",
"block_hash",
":",
"Hash32",
")",
"->",
"BaseBlock",
":",
"validate_word",
"(",
"block_hash",
",",
"title",
"=",
"\"Block Hash\"",
")",
"block_header",
"=",
"self",
".",
"get_block_header_by_hash",
"(",
"block_hash",
... | Returns the requested block as specified by block hash. | [
"Returns",
"the",
"requested",
"block",
"as",
"specified",
"by",
"block",
"hash",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L545-L551 | train | 225,044 |
ethereum/py-evm | eth/chains/base.py | Chain.get_block_by_header | def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
vm = self.get_vm(block_header)
return vm.block | python | def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
vm = self.get_vm(block_header)
return vm.block | [
"def",
"get_block_by_header",
"(",
"self",
",",
"block_header",
":",
"BlockHeader",
")",
"->",
"BaseBlock",
":",
"vm",
"=",
"self",
".",
"get_vm",
"(",
"block_header",
")",
"return",
"vm",
".",
"block"
] | Returns the requested block as specified by the block header. | [
"Returns",
"the",
"requested",
"block",
"as",
"specified",
"by",
"the",
"block",
"header",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L553-L558 | train | 225,045 |
ethereum/py-evm | eth/chains/base.py | Chain.get_canonical_block_by_number | def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
"""
Returns the block with the given number in the canonical chain.
Raises BlockNotFound if there's no block with the given number in the
canonical chain.
"""
validate_uint256(block_number, title="Block Number")
return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number)) | python | def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
"""
Returns the block with the given number in the canonical chain.
Raises BlockNotFound if there's no block with the given number in the
canonical chain.
"""
validate_uint256(block_number, title="Block Number")
return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number)) | [
"def",
"get_canonical_block_by_number",
"(",
"self",
",",
"block_number",
":",
"BlockNumber",
")",
"->",
"BaseBlock",
":",
"validate_uint256",
"(",
"block_number",
",",
"title",
"=",
"\"Block Number\"",
")",
"return",
"self",
".",
"get_block_by_hash",
"(",
"self",
... | Returns the block with the given number in the canonical chain.
Raises BlockNotFound if there's no block with the given number in the
canonical chain. | [
"Returns",
"the",
"block",
"with",
"the",
"given",
"number",
"in",
"the",
"canonical",
"chain",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L560-L568 | train | 225,046 |
ethereum/py-evm | eth/chains/base.py | Chain.get_canonical_transaction | def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
"""
Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain.
"""
(block_num, index) = self.chaindb.get_transaction_index(transaction_hash)
VM_class = self.get_vm_class_for_block_number(block_num)
transaction = self.chaindb.get_transaction_by_index(
block_num,
index,
VM_class.get_transaction_class(),
)
if transaction.hash == transaction_hash:
return transaction
else:
raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format(
encode_hex(transaction.hash),
encode_hex(transaction_hash),
block_num,
index,
)) | python | def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
"""
Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain.
"""
(block_num, index) = self.chaindb.get_transaction_index(transaction_hash)
VM_class = self.get_vm_class_for_block_number(block_num)
transaction = self.chaindb.get_transaction_by_index(
block_num,
index,
VM_class.get_transaction_class(),
)
if transaction.hash == transaction_hash:
return transaction
else:
raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format(
encode_hex(transaction.hash),
encode_hex(transaction_hash),
block_num,
index,
)) | [
"def",
"get_canonical_transaction",
"(",
"self",
",",
"transaction_hash",
":",
"Hash32",
")",
"->",
"BaseTransaction",
":",
"(",
"block_num",
",",
"index",
")",
"=",
"self",
".",
"chaindb",
".",
"get_transaction_index",
"(",
"transaction_hash",
")",
"VM_class",
... | Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain. | [
"Returns",
"the",
"requested",
"transaction",
"as",
"specified",
"by",
"the",
"transaction",
"hash",
"from",
"the",
"canonical",
"chain",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L604-L629 | train | 225,047 |
ethereum/py-evm | eth/chains/base.py | Chain.estimate_gas | def estimate_gas(
self,
transaction: BaseOrSpoofTransaction,
at_header: BlockHeader=None) -> int:
"""
Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header.
"""
if at_header is None:
at_header = self.get_canonical_head()
with self.get_vm(at_header).state_in_temp_block() as state:
return self.gas_estimator(state, transaction) | python | def estimate_gas(
self,
transaction: BaseOrSpoofTransaction,
at_header: BlockHeader=None) -> int:
"""
Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header.
"""
if at_header is None:
at_header = self.get_canonical_head()
with self.get_vm(at_header).state_in_temp_block() as state:
return self.gas_estimator(state, transaction) | [
"def",
"estimate_gas",
"(",
"self",
",",
"transaction",
":",
"BaseOrSpoofTransaction",
",",
"at_header",
":",
"BlockHeader",
"=",
"None",
")",
"->",
"int",
":",
"if",
"at_header",
"is",
"None",
":",
"at_header",
"=",
"self",
".",
"get_canonical_head",
"(",
"... | Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header. | [
"Returns",
"an",
"estimation",
"of",
"the",
"amount",
"of",
"gas",
"the",
"given",
"transaction",
"will",
"use",
"if",
"executed",
"on",
"top",
"of",
"the",
"block",
"specified",
"by",
"the",
"given",
"header",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L685-L696 | train | 225,048 |
ethereum/py-evm | eth/chains/base.py | Chain.import_block | def import_block(self,
block: BaseBlock,
perform_validation: bool=True
) -> Tuple[BaseBlock, Tuple[BaseBlock, ...], Tuple[BaseBlock, ...]]:
"""
Imports a complete block and returns a 3-tuple
- the imported block
- a tuple of blocks which are now part of the canonical chain.
- a tuple of blocks which were canonical and now are no longer canonical.
"""
try:
parent_header = self.get_block_header_by_hash(block.header.parent_hash)
except HeaderNotFound:
raise ValidationError(
"Attempt to import block #{}. Cannot import block {} before importing "
"its parent block at {}".format(
block.number,
block.hash,
block.header.parent_hash,
)
)
base_header_for_import = self.create_header_from_parent(parent_header)
imported_block = self.get_vm(base_header_for_import).import_block(block)
# Validate the imported block.
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
self.validate_block(imported_block)
(
new_canonical_hashes,
old_canonical_hashes,
) = self.chaindb.persist_block(imported_block)
self.logger.debug(
'IMPORTED_BLOCK: number %s | hash %s',
imported_block.number,
encode_hex(imported_block.hash),
)
new_canonical_blocks = tuple(
self.get_block_by_hash(header_hash)
for header_hash
in new_canonical_hashes
)
old_canonical_blocks = tuple(
self.get_block_by_hash(header_hash)
for header_hash
in old_canonical_hashes
)
return imported_block, new_canonical_blocks, old_canonical_blocks | python | def import_block(self,
block: BaseBlock,
perform_validation: bool=True
) -> Tuple[BaseBlock, Tuple[BaseBlock, ...], Tuple[BaseBlock, ...]]:
"""
Imports a complete block and returns a 3-tuple
- the imported block
- a tuple of blocks which are now part of the canonical chain.
- a tuple of blocks which were canonical and now are no longer canonical.
"""
try:
parent_header = self.get_block_header_by_hash(block.header.parent_hash)
except HeaderNotFound:
raise ValidationError(
"Attempt to import block #{}. Cannot import block {} before importing "
"its parent block at {}".format(
block.number,
block.hash,
block.header.parent_hash,
)
)
base_header_for_import = self.create_header_from_parent(parent_header)
imported_block = self.get_vm(base_header_for_import).import_block(block)
# Validate the imported block.
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
self.validate_block(imported_block)
(
new_canonical_hashes,
old_canonical_hashes,
) = self.chaindb.persist_block(imported_block)
self.logger.debug(
'IMPORTED_BLOCK: number %s | hash %s',
imported_block.number,
encode_hex(imported_block.hash),
)
new_canonical_blocks = tuple(
self.get_block_by_hash(header_hash)
for header_hash
in new_canonical_hashes
)
old_canonical_blocks = tuple(
self.get_block_by_hash(header_hash)
for header_hash
in old_canonical_hashes
)
return imported_block, new_canonical_blocks, old_canonical_blocks | [
"def",
"import_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
",",
"perform_validation",
":",
"bool",
"=",
"True",
")",
"->",
"Tuple",
"[",
"BaseBlock",
",",
"Tuple",
"[",
"BaseBlock",
",",
"...",
"]",
",",
"Tuple",
"[",
"BaseBlock",
",",
"...",
... | Imports a complete block and returns a 3-tuple
- the imported block
- a tuple of blocks which are now part of the canonical chain.
- a tuple of blocks which were canonical and now are no longer canonical. | [
"Imports",
"a",
"complete",
"block",
"and",
"returns",
"a",
"3",
"-",
"tuple"
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L698-L752 | train | 225,049 |
ethereum/py-evm | eth/chains/base.py | Chain.validate_block | def validate_block(self, block: BaseBlock) -> None:
"""
Performs validation on a block that is either being mined or imported.
Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
Cannot be used to validate genesis block.
"""
if block.is_genesis:
raise ValidationError("Cannot validate genesis block this way")
VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number))
parent_block = self.get_block_by_hash(block.header.parent_hash)
VM_class.validate_header(block.header, parent_block.header, check_seal=True)
self.validate_uncles(block)
self.validate_gaslimit(block.header) | python | def validate_block(self, block: BaseBlock) -> None:
"""
Performs validation on a block that is either being mined or imported.
Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
Cannot be used to validate genesis block.
"""
if block.is_genesis:
raise ValidationError("Cannot validate genesis block this way")
VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number))
parent_block = self.get_block_by_hash(block.header.parent_hash)
VM_class.validate_header(block.header, parent_block.header, check_seal=True)
self.validate_uncles(block)
self.validate_gaslimit(block.header) | [
"def",
"validate_block",
"(",
"self",
",",
"block",
":",
"BaseBlock",
")",
"->",
"None",
":",
"if",
"block",
".",
"is_genesis",
":",
"raise",
"ValidationError",
"(",
"\"Cannot validate genesis block this way\"",
")",
"VM_class",
"=",
"self",
".",
"get_vm_class_for... | Performs validation on a block that is either being mined or imported.
Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
Cannot be used to validate genesis block. | [
"Performs",
"validation",
"on",
"a",
"block",
"that",
"is",
"either",
"being",
"mined",
"or",
"imported",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L761-L777 | train | 225,050 |
ethereum/py-evm | eth/chains/base.py | Chain.validate_gaslimit | def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
parent_header = self.get_block_header_by_hash(header.parent_hash)
low_bound, high_bound = compute_gas_limit_bounds(parent_header)
if header.gas_limit < low_bound:
raise ValidationError(
"The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
encode_hex(header.hash), header.gas_limit, low_bound))
elif header.gas_limit > high_bound:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, high_bound)) | python | def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
parent_header = self.get_block_header_by_hash(header.parent_hash)
low_bound, high_bound = compute_gas_limit_bounds(parent_header)
if header.gas_limit < low_bound:
raise ValidationError(
"The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
encode_hex(header.hash), header.gas_limit, low_bound))
elif header.gas_limit > high_bound:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, high_bound)) | [
"def",
"validate_gaslimit",
"(",
"self",
",",
"header",
":",
"BlockHeader",
")",
"->",
"None",
":",
"parent_header",
"=",
"self",
".",
"get_block_header_by_hash",
"(",
"header",
".",
"parent_hash",
")",
"low_bound",
",",
"high_bound",
"=",
"compute_gas_limit_bound... | Validate the gas limit on the given header. | [
"Validate",
"the",
"gas",
"limit",
"on",
"the",
"given",
"header",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L786-L799 | train | 225,051 |
ethereum/py-evm | eth/chains/base.py | Chain.validate_uncles | def validate_uncles(self, block: BaseBlock) -> None:
"""
Validate the uncles for the given block.
"""
has_uncles = len(block.uncles) > 0
should_have_uncles = block.header.uncles_hash != EMPTY_UNCLE_HASH
if not has_uncles and not should_have_uncles:
# optimization to avoid loading ancestors from DB, since the block has no uncles
return
elif has_uncles and not should_have_uncles:
raise ValidationError("Block has uncles but header suggests uncles should be empty")
elif should_have_uncles and not has_uncles:
raise ValidationError("Header suggests block should have uncles but block has none")
# Check for duplicates
uncle_groups = groupby(operator.attrgetter('hash'), block.uncles)
duplicate_uncles = tuple(sorted(
hash for hash, twins in uncle_groups.items() if len(twins) > 1
))
if duplicate_uncles:
raise ValidationError(
"Block contains duplicate uncles:\n"
" - {0}".format(' - '.join(duplicate_uncles))
)
recent_ancestors = tuple(
ancestor
for ancestor
in self.get_ancestors(MAX_UNCLE_DEPTH + 1, header=block.header)
)
recent_ancestor_hashes = {ancestor.hash for ancestor in recent_ancestors}
recent_uncle_hashes = _extract_uncle_hashes(recent_ancestors)
for uncle in block.uncles:
if uncle.hash == block.hash:
raise ValidationError("Uncle has same hash as block")
# ensure the uncle has not already been included.
if uncle.hash in recent_uncle_hashes:
raise ValidationError(
"Duplicate uncle: {0}".format(encode_hex(uncle.hash))
)
# ensure that the uncle is not one of the canonical chain blocks.
if uncle.hash in recent_ancestor_hashes:
raise ValidationError(
"Uncle {0} cannot be an ancestor of {1}".format(
encode_hex(uncle.hash), encode_hex(block.hash)))
# ensure that the uncle was built off of one of the canonical chain
# blocks.
if uncle.parent_hash not in recent_ancestor_hashes or (
uncle.parent_hash == block.header.parent_hash):
raise ValidationError(
"Uncle's parent {0} is not an ancestor of {1}".format(
encode_hex(uncle.parent_hash), encode_hex(block.hash)))
# Now perform VM level validation of the uncle
self.validate_seal(uncle)
try:
uncle_parent = self.get_block_header_by_hash(uncle.parent_hash)
except HeaderNotFound:
raise ValidationError(
"Uncle ancestor not found: {0}".format(uncle.parent_hash)
)
uncle_vm_class = self.get_vm_class_for_block_number(uncle.block_number)
uncle_vm_class.validate_uncle(block, uncle, uncle_parent) | python | def validate_uncles(self, block: BaseBlock) -> None:
"""
Validate the uncles for the given block.
"""
has_uncles = len(block.uncles) > 0
should_have_uncles = block.header.uncles_hash != EMPTY_UNCLE_HASH
if not has_uncles and not should_have_uncles:
# optimization to avoid loading ancestors from DB, since the block has no uncles
return
elif has_uncles and not should_have_uncles:
raise ValidationError("Block has uncles but header suggests uncles should be empty")
elif should_have_uncles and not has_uncles:
raise ValidationError("Header suggests block should have uncles but block has none")
# Check for duplicates
uncle_groups = groupby(operator.attrgetter('hash'), block.uncles)
duplicate_uncles = tuple(sorted(
hash for hash, twins in uncle_groups.items() if len(twins) > 1
))
if duplicate_uncles:
raise ValidationError(
"Block contains duplicate uncles:\n"
" - {0}".format(' - '.join(duplicate_uncles))
)
recent_ancestors = tuple(
ancestor
for ancestor
in self.get_ancestors(MAX_UNCLE_DEPTH + 1, header=block.header)
)
recent_ancestor_hashes = {ancestor.hash for ancestor in recent_ancestors}
recent_uncle_hashes = _extract_uncle_hashes(recent_ancestors)
for uncle in block.uncles:
if uncle.hash == block.hash:
raise ValidationError("Uncle has same hash as block")
# ensure the uncle has not already been included.
if uncle.hash in recent_uncle_hashes:
raise ValidationError(
"Duplicate uncle: {0}".format(encode_hex(uncle.hash))
)
# ensure that the uncle is not one of the canonical chain blocks.
if uncle.hash in recent_ancestor_hashes:
raise ValidationError(
"Uncle {0} cannot be an ancestor of {1}".format(
encode_hex(uncle.hash), encode_hex(block.hash)))
# ensure that the uncle was built off of one of the canonical chain
# blocks.
if uncle.parent_hash not in recent_ancestor_hashes or (
uncle.parent_hash == block.header.parent_hash):
raise ValidationError(
"Uncle's parent {0} is not an ancestor of {1}".format(
encode_hex(uncle.parent_hash), encode_hex(block.hash)))
# Now perform VM level validation of the uncle
self.validate_seal(uncle)
try:
uncle_parent = self.get_block_header_by_hash(uncle.parent_hash)
except HeaderNotFound:
raise ValidationError(
"Uncle ancestor not found: {0}".format(uncle.parent_hash)
)
uncle_vm_class = self.get_vm_class_for_block_number(uncle.block_number)
uncle_vm_class.validate_uncle(block, uncle, uncle_parent) | [
"def",
"validate_uncles",
"(",
"self",
",",
"block",
":",
"BaseBlock",
")",
"->",
"None",
":",
"has_uncles",
"=",
"len",
"(",
"block",
".",
"uncles",
")",
">",
"0",
"should_have_uncles",
"=",
"block",
".",
"header",
".",
"uncles_hash",
"!=",
"EMPTY_UNCLE_H... | Validate the uncles for the given block. | [
"Validate",
"the",
"uncles",
"for",
"the",
"given",
"block",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L801-L870 | train | 225,052 |
ethereum/py-evm | eth/chains/base.py | MiningChain.apply_transaction | def apply_transaction(self,
transaction: BaseTransaction
) -> Tuple[BaseBlock, Receipt, BaseComputation]:
"""
Applies the transaction to the current tip block.
WARNING: Receipt and Transaction trie generation is computationally
heavy and incurs significant performance overhead.
"""
vm = self.get_vm(self.header)
base_block = vm.block
receipt, computation = vm.apply_transaction(base_block.header, transaction)
header_with_receipt = vm.add_receipt_to_header(base_block.header, receipt)
# since we are building the block locally, we have to persist all the incremental state
vm.state.persist()
new_header = header_with_receipt.copy(state_root=vm.state.state_root)
transactions = base_block.transactions + (transaction, )
receipts = base_block.get_receipts(self.chaindb) + (receipt, )
new_block = vm.set_block_transactions(base_block, new_header, transactions, receipts)
self.header = new_block.header
return new_block, receipt, computation | python | def apply_transaction(self,
transaction: BaseTransaction
) -> Tuple[BaseBlock, Receipt, BaseComputation]:
"""
Applies the transaction to the current tip block.
WARNING: Receipt and Transaction trie generation is computationally
heavy and incurs significant performance overhead.
"""
vm = self.get_vm(self.header)
base_block = vm.block
receipt, computation = vm.apply_transaction(base_block.header, transaction)
header_with_receipt = vm.add_receipt_to_header(base_block.header, receipt)
# since we are building the block locally, we have to persist all the incremental state
vm.state.persist()
new_header = header_with_receipt.copy(state_root=vm.state.state_root)
transactions = base_block.transactions + (transaction, )
receipts = base_block.get_receipts(self.chaindb) + (receipt, )
new_block = vm.set_block_transactions(base_block, new_header, transactions, receipts)
self.header = new_block.header
return new_block, receipt, computation | [
"def",
"apply_transaction",
"(",
"self",
",",
"transaction",
":",
"BaseTransaction",
")",
"->",
"Tuple",
"[",
"BaseBlock",
",",
"Receipt",
",",
"BaseComputation",
"]",
":",
"vm",
"=",
"self",
".",
"get_vm",
"(",
"self",
".",
"header",
")",
"base_block",
"=... | Applies the transaction to the current tip block.
WARNING: Receipt and Transaction trie generation is computationally
heavy and incurs significant performance overhead. | [
"Applies",
"the",
"transaction",
"to",
"the",
"current",
"tip",
"block",
"."
] | 58346848f076116381d3274bbcea96b9e2cfcbdf | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L887-L913 | train | 225,053 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | wait_for_host | def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None,
ssl_pymongo_options=None):
"""
Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly.
"""
host = 'localhost:%i' % port
start_time = time.time()
while True:
if (time.time() - start_time) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host, **(ssl_pymongo_options or {}))
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True | python | def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None,
ssl_pymongo_options=None):
"""
Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly.
"""
host = 'localhost:%i' % port
start_time = time.time()
while True:
if (time.time() - start_time) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host, **(ssl_pymongo_options or {}))
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True | [
"def",
"wait_for_host",
"(",
"port",
",",
"interval",
"=",
"1",
",",
"timeout",
"=",
"30",
",",
"to_start",
"=",
"True",
",",
"queue",
"=",
"None",
",",
"ssl_pymongo_options",
"=",
"None",
")",
":",
"host",
"=",
"'localhost:%i'",
"%",
"port",
"start_time... | Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly. | [
"Ping",
"server",
"and",
"wait",
"for",
"response",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L73-L110 | train | 225,054 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | shutdown_host | def shutdown_host(port, username=None, password=None, authdb=None):
"""
Send the shutdown command to a mongod or mongos on given port.
This function can be called as a separate thread.
"""
host = 'localhost:%i' % port
try:
mc = MongoConnection(host)
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for "
"admin database")
else:
try:
mc.admin.authenticate(name=username, password=password)
except OperationFailure:
# perhaps auth is not required
pass
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print("Error: cannot authenticate to shut down %s." % host)
return
except ConnectionFailure:
pass
else:
mc.close() | python | def shutdown_host(port, username=None, password=None, authdb=None):
"""
Send the shutdown command to a mongod or mongos on given port.
This function can be called as a separate thread.
"""
host = 'localhost:%i' % port
try:
mc = MongoConnection(host)
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for "
"admin database")
else:
try:
mc.admin.authenticate(name=username, password=password)
except OperationFailure:
# perhaps auth is not required
pass
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print("Error: cannot authenticate to shut down %s." % host)
return
except ConnectionFailure:
pass
else:
mc.close() | [
"def",
"shutdown_host",
"(",
"port",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"authdb",
"=",
"None",
")",
":",
"host",
"=",
"'localhost:%i'",
"%",
"port",
"try",
":",
"mc",
"=",
"MongoConnection",
"(",
"host",
")",
"try",
":",
... | Send the shutdown command to a mongod or mongos on given port.
This function can be called as a separate thread. | [
"Send",
"the",
"shutdown",
"command",
"to",
"a",
"mongod",
"or",
"mongos",
"on",
"given",
"port",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L113-L144 | train | 225,055 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool.start | def start(self):
"""Sub-command start."""
self.discover()
# startup_info only gets loaded from protocol version 2 on,
# check if it's loaded
if not self.startup_info:
# hack to make environment startable with older protocol
# versions < 2: try to start nodes via init if all nodes are down
if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])):
self.args = self.loaded_args
print("upgrading mlaunch environment meta-data.")
return self.init()
else:
raise SystemExit("These nodes were created with an older "
"version of mlaunch (v1.1.1 or below). To "
"upgrade this environment and make use of "
"the start/stop/list commands, stop all "
"nodes manually, then run 'mlaunch start' "
"again. You only have to do this once.")
# if new unknown_args are present, compare them with loaded ones
# (here we can be certain of protocol v2+)
if (self.args['binarypath'] is not None or
(self.unknown_args and
set(self.unknown_args) != set(self.loaded_unknown_args))):
# store current args, use self.args from file (self.loaded_args)
start_args = self.args
self.args = self.loaded_args
self.args['binarypath'] = start_args['binarypath']
# construct new startup strings with updated unknown args.
# They are for this start only and will not be persisted in
# the .mlaunch_startup file
self._construct_cmdlines()
# reset to original args for this start command
self.args = start_args
matches = self._get_ports_from_args(self.args, 'down')
if len(matches) == 0:
raise SystemExit('no nodes started.')
# start config servers first
config_matches = self.get_tagged(['config']).intersection(matches)
self._start_on_ports(config_matches, wait=True)
# start shards next
mongod_matches = (self.get_tagged(['mongod']) -
self.get_tagged(['config']))
mongod_matches = mongod_matches.intersection(matches)
self._start_on_ports(mongod_matches, wait=True)
# now start mongos
mongos_matches = self.get_tagged(['mongos']).intersection(matches)
self._start_on_ports(mongos_matches)
# wait for all matched nodes to be running
self.wait_for(matches)
# refresh discover
self.discover() | python | def start(self):
"""Sub-command start."""
self.discover()
# startup_info only gets loaded from protocol version 2 on,
# check if it's loaded
if not self.startup_info:
# hack to make environment startable with older protocol
# versions < 2: try to start nodes via init if all nodes are down
if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])):
self.args = self.loaded_args
print("upgrading mlaunch environment meta-data.")
return self.init()
else:
raise SystemExit("These nodes were created with an older "
"version of mlaunch (v1.1.1 or below). To "
"upgrade this environment and make use of "
"the start/stop/list commands, stop all "
"nodes manually, then run 'mlaunch start' "
"again. You only have to do this once.")
# if new unknown_args are present, compare them with loaded ones
# (here we can be certain of protocol v2+)
if (self.args['binarypath'] is not None or
(self.unknown_args and
set(self.unknown_args) != set(self.loaded_unknown_args))):
# store current args, use self.args from file (self.loaded_args)
start_args = self.args
self.args = self.loaded_args
self.args['binarypath'] = start_args['binarypath']
# construct new startup strings with updated unknown args.
# They are for this start only and will not be persisted in
# the .mlaunch_startup file
self._construct_cmdlines()
# reset to original args for this start command
self.args = start_args
matches = self._get_ports_from_args(self.args, 'down')
if len(matches) == 0:
raise SystemExit('no nodes started.')
# start config servers first
config_matches = self.get_tagged(['config']).intersection(matches)
self._start_on_ports(config_matches, wait=True)
# start shards next
mongod_matches = (self.get_tagged(['mongod']) -
self.get_tagged(['config']))
mongod_matches = mongod_matches.intersection(matches)
self._start_on_ports(mongod_matches, wait=True)
# now start mongos
mongos_matches = self.get_tagged(['mongos']).intersection(matches)
self._start_on_ports(mongos_matches)
# wait for all matched nodes to be running
self.wait_for(matches)
# refresh discover
self.discover() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"discover",
"(",
")",
"# startup_info only gets loaded from protocol version 2 on,",
"# check if it's loaded",
"if",
"not",
"self",
".",
"startup_info",
":",
"# hack to make environment startable with older protocol",
"# ver... | Sub-command start. | [
"Sub",
"-",
"command",
"start",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L861-L923 | train | 225,056 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool.is_running | def is_running(self, port):
"""Return True if a host on a specific port is running."""
try:
con = self.client('localhost:%s' % port)
con.admin.command('ping')
return True
except (AutoReconnect, ConnectionFailure, OperationFailure):
# Catch OperationFailure to work around SERVER-31916.
return False | python | def is_running(self, port):
"""Return True if a host on a specific port is running."""
try:
con = self.client('localhost:%s' % port)
con.admin.command('ping')
return True
except (AutoReconnect, ConnectionFailure, OperationFailure):
# Catch OperationFailure to work around SERVER-31916.
return False | [
"def",
"is_running",
"(",
"self",
",",
"port",
")",
":",
"try",
":",
"con",
"=",
"self",
".",
"client",
"(",
"'localhost:%s'",
"%",
"port",
")",
"con",
".",
"admin",
".",
"command",
"(",
"'ping'",
")",
"return",
"True",
"except",
"(",
"AutoReconnect",
... | Return True if a host on a specific port is running. | [
"Return",
"True",
"if",
"a",
"host",
"on",
"a",
"specific",
"port",
"is",
"running",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1290-L1298 | train | 225,057 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool.get_tagged | def get_tagged(self, tags):
"""
Tag format.
The format for the tags list is tuples for tags: mongos, config, shard,
secondary tags of the form (tag, number), e.g. ('mongos', 2) which
references the second mongos in the list. For all other tags, it is
simply the string, e.g. 'primary'.
"""
# if tags is a simple string, make it a list (note: tuples like
# ('mongos', 2) must be in a surrounding list)
if not hasattr(tags, '__iter__') and type(tags) == str:
tags = [tags]
nodes = set(self.cluster_tags['all'])
for tag in tags:
if re.match(r"\w+ \d{1,2}", tag):
# special case for tuple tags: mongos, config, shard,
# secondary. These can contain a number
tag, number = tag.split()
try:
branch = self.cluster_tree[tag][int(number) - 1]
except (IndexError, KeyError):
continue
if hasattr(branch, '__iter__'):
subset = set(branch)
else:
subset = set([branch])
else:
# otherwise use tags dict to get the subset
subset = set(self.cluster_tags[tag])
nodes = nodes.intersection(subset)
return nodes | python | def get_tagged(self, tags):
"""
Tag format.
The format for the tags list is tuples for tags: mongos, config, shard,
secondary tags of the form (tag, number), e.g. ('mongos', 2) which
references the second mongos in the list. For all other tags, it is
simply the string, e.g. 'primary'.
"""
# if tags is a simple string, make it a list (note: tuples like
# ('mongos', 2) must be in a surrounding list)
if not hasattr(tags, '__iter__') and type(tags) == str:
tags = [tags]
nodes = set(self.cluster_tags['all'])
for tag in tags:
if re.match(r"\w+ \d{1,2}", tag):
# special case for tuple tags: mongos, config, shard,
# secondary. These can contain a number
tag, number = tag.split()
try:
branch = self.cluster_tree[tag][int(number) - 1]
except (IndexError, KeyError):
continue
if hasattr(branch, '__iter__'):
subset = set(branch)
else:
subset = set([branch])
else:
# otherwise use tags dict to get the subset
subset = set(self.cluster_tags[tag])
nodes = nodes.intersection(subset)
return nodes | [
"def",
"get_tagged",
"(",
"self",
",",
"tags",
")",
":",
"# if tags is a simple string, make it a list (note: tuples like",
"# ('mongos', 2) must be in a surrounding list)",
"if",
"not",
"hasattr",
"(",
"tags",
",",
"'__iter__'",
")",
"and",
"type",
"(",
"tags",
")",
"=... | Tag format.
The format for the tags list is tuples for tags: mongos, config, shard,
secondary tags of the form (tag, number), e.g. ('mongos', 2) which
references the second mongos in the list. For all other tags, it is
simply the string, e.g. 'primary'. | [
"Tag",
"format",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1300-L1337 | train | 225,058 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool.get_tags_of_port | def get_tags_of_port(self, port):
"""
Get all tags related to a given port.
This is the inverse of what is stored in self.cluster_tags).
"""
return(sorted([tag for tag in self.cluster_tags
if port in self.cluster_tags[tag]])) | python | def get_tags_of_port(self, port):
"""
Get all tags related to a given port.
This is the inverse of what is stored in self.cluster_tags).
"""
return(sorted([tag for tag in self.cluster_tags
if port in self.cluster_tags[tag]])) | [
"def",
"get_tags_of_port",
"(",
"self",
",",
"port",
")",
":",
"return",
"(",
"sorted",
"(",
"[",
"tag",
"for",
"tag",
"in",
"self",
".",
"cluster_tags",
"if",
"port",
"in",
"self",
".",
"cluster_tags",
"[",
"tag",
"]",
"]",
")",
")"
] | Get all tags related to a given port.
This is the inverse of what is stored in self.cluster_tags). | [
"Get",
"all",
"tags",
"related",
"to",
"a",
"given",
"port",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1339-L1346 | train | 225,059 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool.wait_for | def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
"""
Spawn threads to ping host using a list of ports.
Returns when all hosts are running (if to_start=True) / shut down (if
to_start=False).
"""
threads = []
queue = Queue.Queue()
for port in ports:
threads.append(threading.Thread(target=wait_for_host, args=(
port, interval, timeout, to_start, queue,
self.ssl_pymongo_options)))
if self.args and 'verbose' in self.args and self.args['verbose']:
print("waiting for nodes %s..."
% ('to start' if to_start else 'to shutdown'))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# get all results back and return tuple
return tuple(queue.get_nowait() for _ in ports) | python | def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
"""
Spawn threads to ping host using a list of ports.
Returns when all hosts are running (if to_start=True) / shut down (if
to_start=False).
"""
threads = []
queue = Queue.Queue()
for port in ports:
threads.append(threading.Thread(target=wait_for_host, args=(
port, interval, timeout, to_start, queue,
self.ssl_pymongo_options)))
if self.args and 'verbose' in self.args and self.args['verbose']:
print("waiting for nodes %s..."
% ('to start' if to_start else 'to shutdown'))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# get all results back and return tuple
return tuple(queue.get_nowait() for _ in ports) | [
"def",
"wait_for",
"(",
"self",
",",
"ports",
",",
"interval",
"=",
"1.0",
",",
"timeout",
"=",
"30",
",",
"to_start",
"=",
"True",
")",
":",
"threads",
"=",
"[",
"]",
"queue",
"=",
"Queue",
".",
"Queue",
"(",
")",
"for",
"port",
"in",
"ports",
"... | Spawn threads to ping host using a list of ports.
Returns when all hosts are running (if to_start=True) / shut down (if
to_start=False). | [
"Spawn",
"threads",
"to",
"ping",
"host",
"using",
"a",
"list",
"of",
"ports",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1348-L1374 | train | 225,060 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._load_parameters | def _load_parameters(self):
"""
Load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = json.load(open(startup_file, 'rb'))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# changed 'authentication' to 'auth', if present (from old env) rename
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True | python | def _load_parameters(self):
"""
Load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = json.load(open(startup_file, 'rb'))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# changed 'authentication' to 'auth', if present (from old env) rename
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True | [
"def",
"_load_parameters",
"(",
"self",
")",
":",
"datapath",
"=",
"self",
".",
"dir",
"startup_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datapath",
",",
"'.mlaunch_startup'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"startup_file... | Load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions. | [
"Load",
"the",
".",
"mlaunch_startup",
"file",
"that",
"exists",
"in",
"each",
"datadir",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1378-L1410 | train | 225,061 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._create_paths | def _create_paths(self, basedir, name=None):
"""Create datadir and subdir paths."""
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print('creating directory: %s' % dbpath)
return datapath | python | def _create_paths(self, basedir, name=None):
"""Create datadir and subdir paths."""
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print('creating directory: %s' % dbpath)
return datapath | [
"def",
"_create_paths",
"(",
"self",
",",
"basedir",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"datapath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"name",
")",
"else",
":",
"datapath",
"=",
"basedir",
"dbpath",
"=",
"... | Create datadir and subdir paths. | [
"Create",
"datadir",
"and",
"subdir",
"paths",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1433-L1446 | train | 225,062 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._filter_valid_arguments | def _filter_valid_arguments(self, arguments, binary="mongod",
config=False):
"""
Return a list of accepted arguments.
Check which arguments in list are accepted by the specified binary
(mongod, mongos). If an argument does not start with '-' but its
preceding argument was accepted, then it is accepted as well. Example
['--slowms', '1000'] both arguments would be accepted for a mongod.
"""
# get the help list of the binary
if self.args and self.args['binarypath']:
binary = os.path.join(self.args['binarypath'], binary)
ret = (subprocess.Popen(['%s' % binary, '--help'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False))
out, err = ret.communicate()
accepted_arguments = []
# extract all arguments starting with a '-'
for line in [option for option in out.decode('utf-8').split('\n')]:
line = line.lstrip()
if line.startswith('-'):
argument = line.split()[0]
# exception: don't allow unsupported config server arguments
if config and argument in ['--oplogSize', '--storageEngine',
'--smallfiles', '--nojournal']:
continue
accepted_arguments.append(argument)
# add undocumented options
accepted_arguments.append('--setParameter')
if binary.endswith('mongod'):
accepted_arguments.append('--wiredTigerEngineConfigString')
# filter valid arguments
result = []
for i, arg in enumerate(arguments):
if arg.startswith('-'):
# check if the binary accepts this argument
# or special case -vvv for any number of v
argname = arg.split('=', 1)[0]
if argname in accepted_arguments or re.match(r'-v+', arg):
result.append(arg)
elif (binary.endswith('mongod') and
argname in self.UNDOCUMENTED_MONGOD_ARGS):
result.append(arg)
elif self.ignored_arguments.get(binary + argname) is None:
# warn once for each combination of binary and unknown arg
self.ignored_arguments[binary + argname] = True
if not (binary.endswith("mongos") and
arg in self.UNSUPPORTED_MONGOS_ARGS):
print("warning: ignoring unknown argument %s for %s" %
(arg, binary))
elif i > 0 and arguments[i - 1] in result:
# if it doesn't start with a '-', it could be the value of
# the last argument, e.g. `--slowms 1000`
result.append(arg)
# return valid arguments as joined string
return ' '.join(result) | python | def _filter_valid_arguments(self, arguments, binary="mongod",
config=False):
"""
Return a list of accepted arguments.
Check which arguments in list are accepted by the specified binary
(mongod, mongos). If an argument does not start with '-' but its
preceding argument was accepted, then it is accepted as well. Example
['--slowms', '1000'] both arguments would be accepted for a mongod.
"""
# get the help list of the binary
if self.args and self.args['binarypath']:
binary = os.path.join(self.args['binarypath'], binary)
ret = (subprocess.Popen(['%s' % binary, '--help'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False))
out, err = ret.communicate()
accepted_arguments = []
# extract all arguments starting with a '-'
for line in [option for option in out.decode('utf-8').split('\n')]:
line = line.lstrip()
if line.startswith('-'):
argument = line.split()[0]
# exception: don't allow unsupported config server arguments
if config and argument in ['--oplogSize', '--storageEngine',
'--smallfiles', '--nojournal']:
continue
accepted_arguments.append(argument)
# add undocumented options
accepted_arguments.append('--setParameter')
if binary.endswith('mongod'):
accepted_arguments.append('--wiredTigerEngineConfigString')
# filter valid arguments
result = []
for i, arg in enumerate(arguments):
if arg.startswith('-'):
# check if the binary accepts this argument
# or special case -vvv for any number of v
argname = arg.split('=', 1)[0]
if argname in accepted_arguments or re.match(r'-v+', arg):
result.append(arg)
elif (binary.endswith('mongod') and
argname in self.UNDOCUMENTED_MONGOD_ARGS):
result.append(arg)
elif self.ignored_arguments.get(binary + argname) is None:
# warn once for each combination of binary and unknown arg
self.ignored_arguments[binary + argname] = True
if not (binary.endswith("mongos") and
arg in self.UNSUPPORTED_MONGOS_ARGS):
print("warning: ignoring unknown argument %s for %s" %
(arg, binary))
elif i > 0 and arguments[i - 1] in result:
# if it doesn't start with a '-', it could be the value of
# the last argument, e.g. `--slowms 1000`
result.append(arg)
# return valid arguments as joined string
return ' '.join(result) | [
"def",
"_filter_valid_arguments",
"(",
"self",
",",
"arguments",
",",
"binary",
"=",
"\"mongod\"",
",",
"config",
"=",
"False",
")",
":",
"# get the help list of the binary",
"if",
"self",
".",
"args",
"and",
"self",
".",
"args",
"[",
"'binarypath'",
"]",
":",... | Return a list of accepted arguments.
Check which arguments in list are accepted by the specified binary
(mongod, mongos). If an argument does not start with '-' but its
preceding argument was accepted, then it is accepted as well. Example
['--slowms', '1000'] both arguments would be accepted for a mongod. | [
"Return",
"a",
"list",
"of",
"accepted",
"arguments",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1479-L1538 | train | 225,063 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._initiate_replset | def _initiate_replset(self, port, name, maxwait=30):
"""Initiate replica set."""
if not self.args['replicaset'] and name != 'configRepl':
if self.args['verbose']:
print('Skipping replica set initialization for %s' % name)
return
con = self.client('localhost:%i' % port)
try:
rs_status = con['admin'].command({'replSetGetStatus': 1})
return rs_status
except OperationFailure as e:
# not initiated yet
for i in range(maxwait):
try:
con['admin'].command({'replSetInitiate':
self.config_docs[name]})
break
except OperationFailure as e:
print(e.message + " - will retry")
time.sleep(1)
if self.args['verbose']:
print("initializing replica set '%s' with configuration: %s"
% (name, self.config_docs[name]))
print("replica set '%s' initialized." % name) | python | def _initiate_replset(self, port, name, maxwait=30):
"""Initiate replica set."""
if not self.args['replicaset'] and name != 'configRepl':
if self.args['verbose']:
print('Skipping replica set initialization for %s' % name)
return
con = self.client('localhost:%i' % port)
try:
rs_status = con['admin'].command({'replSetGetStatus': 1})
return rs_status
except OperationFailure as e:
# not initiated yet
for i in range(maxwait):
try:
con['admin'].command({'replSetInitiate':
self.config_docs[name]})
break
except OperationFailure as e:
print(e.message + " - will retry")
time.sleep(1)
if self.args['verbose']:
print("initializing replica set '%s' with configuration: %s"
% (name, self.config_docs[name]))
print("replica set '%s' initialized." % name) | [
"def",
"_initiate_replset",
"(",
"self",
",",
"port",
",",
"name",
",",
"maxwait",
"=",
"30",
")",
":",
"if",
"not",
"self",
".",
"args",
"[",
"'replicaset'",
"]",
"and",
"name",
"!=",
"'configRepl'",
":",
"if",
"self",
".",
"args",
"[",
"'verbose'",
... | Initiate replica set. | [
"Initiate",
"replica",
"set",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1667-L1692 | train | 225,064 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_sharded | def _construct_sharded(self):
"""Construct command line strings for a sharded cluster."""
current_version = self.getMongoDVersion()
num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1
shard_names = self._get_shard_names(self.args)
# create shards as stand-alones or replica sets
nextport = self.args['port'] + num_mongos
for shard in shard_names:
if (self.args['single'] and
LooseVersion(current_version) >= LooseVersion("3.6.0")):
errmsg = " \n * In MongoDB 3.6 and above a Shard must be " \
"made up of a replica set. Please use --replicaset " \
"option when starting a sharded cluster.*"
raise SystemExit(errmsg)
elif (self.args['single'] and
LooseVersion(current_version) < LooseVersion("3.6.0")):
self.shard_connection_str.append(
self._construct_single(
self.dir, nextport, name=shard, extra='--shardsvr'))
nextport += 1
elif self.args['replicaset']:
self.shard_connection_str.append(
self._construct_replset(
self.dir, nextport, shard,
num_nodes=list(range(self.args['nodes'])),
arbiter=self.args['arbiter'], extra='--shardsvr'))
nextport += self.args['nodes']
if self.args['arbiter']:
nextport += 1
# start up config server(s)
config_string = []
# SCCC config servers (MongoDB <3.3.0)
if not self.args['csrs'] and self.args['config'] >= 3:
config_names = ['config1', 'config2', 'config3']
else:
config_names = ['config']
# CSRS config servers (MongoDB >=3.1.0)
if self.args['csrs']:
config_string.append(self._construct_config(self.dir, nextport,
"configRepl", True))
else:
for name in config_names:
self._construct_config(self.dir, nextport, name)
config_string.append('%s:%i' % (self.args['hostname'],
nextport))
nextport += 1
# multiple mongos use <datadir>/mongos/ as subdir for log files
if num_mongos > 1:
mongosdir = os.path.join(self.dir, 'mongos')
if not os.path.exists(mongosdir):
if self.args['verbose']:
print("creating directory: %s" % mongosdir)
os.makedirs(mongosdir)
# start up mongos, but put them to the front of the port range
nextport = self.args['port']
for i in range(num_mongos):
if num_mongos > 1:
mongos_logfile = 'mongos/mongos_%i.log' % nextport
else:
mongos_logfile = 'mongos.log'
self._construct_mongos(os.path.join(self.dir, mongos_logfile),
nextport, ','.join(config_string))
nextport += 1 | python | def _construct_sharded(self):
"""Construct command line strings for a sharded cluster."""
current_version = self.getMongoDVersion()
num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1
shard_names = self._get_shard_names(self.args)
# create shards as stand-alones or replica sets
nextport = self.args['port'] + num_mongos
for shard in shard_names:
if (self.args['single'] and
LooseVersion(current_version) >= LooseVersion("3.6.0")):
errmsg = " \n * In MongoDB 3.6 and above a Shard must be " \
"made up of a replica set. Please use --replicaset " \
"option when starting a sharded cluster.*"
raise SystemExit(errmsg)
elif (self.args['single'] and
LooseVersion(current_version) < LooseVersion("3.6.0")):
self.shard_connection_str.append(
self._construct_single(
self.dir, nextport, name=shard, extra='--shardsvr'))
nextport += 1
elif self.args['replicaset']:
self.shard_connection_str.append(
self._construct_replset(
self.dir, nextport, shard,
num_nodes=list(range(self.args['nodes'])),
arbiter=self.args['arbiter'], extra='--shardsvr'))
nextport += self.args['nodes']
if self.args['arbiter']:
nextport += 1
# start up config server(s)
config_string = []
# SCCC config servers (MongoDB <3.3.0)
if not self.args['csrs'] and self.args['config'] >= 3:
config_names = ['config1', 'config2', 'config3']
else:
config_names = ['config']
# CSRS config servers (MongoDB >=3.1.0)
if self.args['csrs']:
config_string.append(self._construct_config(self.dir, nextport,
"configRepl", True))
else:
for name in config_names:
self._construct_config(self.dir, nextport, name)
config_string.append('%s:%i' % (self.args['hostname'],
nextport))
nextport += 1
# multiple mongos use <datadir>/mongos/ as subdir for log files
if num_mongos > 1:
mongosdir = os.path.join(self.dir, 'mongos')
if not os.path.exists(mongosdir):
if self.args['verbose']:
print("creating directory: %s" % mongosdir)
os.makedirs(mongosdir)
# start up mongos, but put them to the front of the port range
nextport = self.args['port']
for i in range(num_mongos):
if num_mongos > 1:
mongos_logfile = 'mongos/mongos_%i.log' % nextport
else:
mongos_logfile = 'mongos.log'
self._construct_mongos(os.path.join(self.dir, mongos_logfile),
nextport, ','.join(config_string))
nextport += 1 | [
"def",
"_construct_sharded",
"(",
"self",
")",
":",
"current_version",
"=",
"self",
".",
"getMongoDVersion",
"(",
")",
"num_mongos",
"=",
"self",
".",
"args",
"[",
"'mongos'",
"]",
"if",
"self",
".",
"args",
"[",
"'mongos'",
"]",
">",
"0",
"else",
"1",
... | Construct command line strings for a sharded cluster. | [
"Construct",
"command",
"line",
"strings",
"for",
"a",
"sharded",
"cluster",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1820-L1892 | train | 225,065 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_replset | def _construct_replset(self, basedir, portstart, name, num_nodes,
arbiter, extra=''):
"""
Construct command line strings for a replicaset.
Handles single set or sharded cluster.
"""
self.config_docs[name] = {'_id': name, 'members': []}
# Construct individual replica set nodes
for i in num_nodes:
datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + i, replset=name, extra=extra)
host = '%s:%i' % (self.args['hostname'], portstart + i)
member_config = {
'_id': len(self.config_docs[name]['members']),
'host': host,
}
# First node gets increased priority.
if i == 0 and self.args['priority']:
member_config['priority'] = 10
if i >= 7:
member_config['votes'] = 0
member_config['priority'] = 0
self.config_docs[name]['members'].append(member_config)
# launch arbiter if True
if arbiter:
datapath = self._create_paths(basedir, '%s/arb' % (name))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + self.args['nodes'],
replset=name)
host = '%s:%i' % (self.args['hostname'],
portstart + self.args['nodes'])
(self.config_docs[name]['members']
.append({'_id': len(self.config_docs[name]['members']),
'host': host,
'arbiterOnly': True}))
return(name + '/' +
','.join([c['host']
for c in self.config_docs[name]['members']])) | python | def _construct_replset(self, basedir, portstart, name, num_nodes,
arbiter, extra=''):
"""
Construct command line strings for a replicaset.
Handles single set or sharded cluster.
"""
self.config_docs[name] = {'_id': name, 'members': []}
# Construct individual replica set nodes
for i in num_nodes:
datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + i, replset=name, extra=extra)
host = '%s:%i' % (self.args['hostname'], portstart + i)
member_config = {
'_id': len(self.config_docs[name]['members']),
'host': host,
}
# First node gets increased priority.
if i == 0 and self.args['priority']:
member_config['priority'] = 10
if i >= 7:
member_config['votes'] = 0
member_config['priority'] = 0
self.config_docs[name]['members'].append(member_config)
# launch arbiter if True
if arbiter:
datapath = self._create_paths(basedir, '%s/arb' % (name))
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
portstart + self.args['nodes'],
replset=name)
host = '%s:%i' % (self.args['hostname'],
portstart + self.args['nodes'])
(self.config_docs[name]['members']
.append({'_id': len(self.config_docs[name]['members']),
'host': host,
'arbiterOnly': True}))
return(name + '/' +
','.join([c['host']
for c in self.config_docs[name]['members']])) | [
"def",
"_construct_replset",
"(",
"self",
",",
"basedir",
",",
"portstart",
",",
"name",
",",
"num_nodes",
",",
"arbiter",
",",
"extra",
"=",
"''",
")",
":",
"self",
".",
"config_docs",
"[",
"name",
"]",
"=",
"{",
"'_id'",
":",
"name",
",",
"'members'"... | Construct command line strings for a replicaset.
Handles single set or sharded cluster. | [
"Construct",
"command",
"line",
"strings",
"for",
"a",
"replicaset",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1894-L1943 | train | 225,066 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_config | def _construct_config(self, basedir, port, name=None, isreplset=False):
"""Construct command line strings for a config server."""
if isreplset:
return self._construct_replset(basedir=basedir, portstart=port,
name=name,
num_nodes=list(range(
self.args['config'])),
arbiter=False, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
port, replset=None, extra='--configsvr') | python | def _construct_config(self, basedir, port, name=None, isreplset=False):
"""Construct command line strings for a config server."""
if isreplset:
return self._construct_replset(basedir=basedir, portstart=port,
name=name,
num_nodes=list(range(
self.args['config'])),
arbiter=False, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
port, replset=None, extra='--configsvr') | [
"def",
"_construct_config",
"(",
"self",
",",
"basedir",
",",
"port",
",",
"name",
"=",
"None",
",",
"isreplset",
"=",
"False",
")",
":",
"if",
"isreplset",
":",
"return",
"self",
".",
"_construct_replset",
"(",
"basedir",
"=",
"basedir",
",",
"portstart",... | Construct command line strings for a config server. | [
"Construct",
"command",
"line",
"strings",
"for",
"a",
"config",
"server",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1945-L1957 | train | 225,067 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_single | def _construct_single(self, basedir, port, name=None, extra=''):
"""
Construct command line strings for a single node.
Handles shards and stand-alones.
"""
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'), port,
replset=None, extra=extra)
host = '%s:%i' % (self.args['hostname'], port)
return host | python | def _construct_single(self, basedir, port, name=None, extra=''):
"""
Construct command line strings for a single node.
Handles shards and stand-alones.
"""
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'), port,
replset=None, extra=extra)
host = '%s:%i' % (self.args['hostname'], port)
return host | [
"def",
"_construct_single",
"(",
"self",
",",
"basedir",
",",
"port",
",",
"name",
"=",
"None",
",",
"extra",
"=",
"''",
")",
":",
"datapath",
"=",
"self",
".",
"_create_paths",
"(",
"basedir",
",",
"name",
")",
"self",
".",
"_construct_mongod",
"(",
"... | Construct command line strings for a single node.
Handles shards and stand-alones. | [
"Construct",
"command",
"line",
"strings",
"for",
"a",
"single",
"node",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1959-L1972 | train | 225,068 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_mongod | def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''):
"""Construct command line strings for mongod process."""
rs_param = ''
if replset:
rs_param = '--replSet %s' % replset
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s' % key_path
if self.unknown_args:
config = '--configsvr' in extra
extra = self._filter_valid_arguments(self.unknown_args, "mongod",
config=config) + ' ' + extra
# set WiredTiger cache size to 1 GB by default
if ('--wiredTigerCacheSizeGB' not in extra and
self._filter_valid_arguments(['--wiredTigerCacheSizeGB'],
'mongod')):
extra += ' --wiredTigerCacheSizeGB 1 '
current_version = self.getMongoDVersion()
# Exit with error if hostname is specified but not bind_ip options
if (self.args['hostname'] != 'localhost'
and LooseVersion(current_version) >= LooseVersion("3.6.0")
and (self.args['sharded'] or self.args['replicaset'])
and '--bind_ip' not in extra):
os.removedirs(dbpath)
errmsg = " \n * If hostname is specified, please include "\
"'--bind_ip_all' or '--bind_ip' options when deploying "\
"replica sets or sharded cluster with MongoDB version 3.6.0 "\
"or greater"
raise SystemExit(errmsg)
extra += self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newdbpath = dbpath.replace('\\', '\\\\')
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b \"\" \"%s\" %s --dbpath \"%s\" "
" --logpath \"%s\" --port %i "
"%s %s" % (os.path.join(path, 'mongod.exe'),
rs_param, newdbpath, newlogpath, port,
auth_param, extra))
else:
command_str = ("\"%s\" %s --dbpath \"%s\" --logpath \"%s\" "
"--port %i --fork "
"%s %s" % (os.path.join(path, 'mongod'), rs_param,
dbpath, logpath, port, auth_param,
extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str | python | def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''):
"""Construct command line strings for mongod process."""
rs_param = ''
if replset:
rs_param = '--replSet %s' % replset
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s' % key_path
if self.unknown_args:
config = '--configsvr' in extra
extra = self._filter_valid_arguments(self.unknown_args, "mongod",
config=config) + ' ' + extra
# set WiredTiger cache size to 1 GB by default
if ('--wiredTigerCacheSizeGB' not in extra and
self._filter_valid_arguments(['--wiredTigerCacheSizeGB'],
'mongod')):
extra += ' --wiredTigerCacheSizeGB 1 '
current_version = self.getMongoDVersion()
# Exit with error if hostname is specified but not bind_ip options
if (self.args['hostname'] != 'localhost'
and LooseVersion(current_version) >= LooseVersion("3.6.0")
and (self.args['sharded'] or self.args['replicaset'])
and '--bind_ip' not in extra):
os.removedirs(dbpath)
errmsg = " \n * If hostname is specified, please include "\
"'--bind_ip_all' or '--bind_ip' options when deploying "\
"replica sets or sharded cluster with MongoDB version 3.6.0 "\
"or greater"
raise SystemExit(errmsg)
extra += self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newdbpath = dbpath.replace('\\', '\\\\')
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b \"\" \"%s\" %s --dbpath \"%s\" "
" --logpath \"%s\" --port %i "
"%s %s" % (os.path.join(path, 'mongod.exe'),
rs_param, newdbpath, newlogpath, port,
auth_param, extra))
else:
command_str = ("\"%s\" %s --dbpath \"%s\" --logpath \"%s\" "
"--port %i --fork "
"%s %s" % (os.path.join(path, 'mongod'), rs_param,
dbpath, logpath, port, auth_param,
extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str | [
"def",
"_construct_mongod",
"(",
"self",
",",
"dbpath",
",",
"logpath",
",",
"port",
",",
"replset",
"=",
"None",
",",
"extra",
"=",
"''",
")",
":",
"rs_param",
"=",
"''",
"if",
"replset",
":",
"rs_param",
"=",
"'--replSet %s'",
"%",
"replset",
"auth_par... | Construct command line strings for mongod process. | [
"Construct",
"command",
"line",
"strings",
"for",
"mongod",
"process",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1974-L2029 | train | 225,069 |
rueckstiess/mtools | mtools/mlaunch/mlaunch.py | MLaunchTool._construct_mongos | def _construct_mongos(self, logpath, port, configdb):
"""Construct command line strings for a mongos process."""
extra = ''
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s' % key_path
if self.unknown_args:
extra = self._filter_valid_arguments(self.unknown_args,
"mongos") + extra
extra += ' ' + self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s "
"%s %s " % (os.path.join(path, 'mongos'),
newlogpath, port, configdb,
auth_param, extra))
else:
command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s "
"--fork" % (os.path.join(path, 'mongos'), logpath,
port, configdb, auth_param, extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str | python | def _construct_mongos(self, logpath, port, configdb):
"""Construct command line strings for a mongos process."""
extra = ''
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s' % key_path
if self.unknown_args:
extra = self._filter_valid_arguments(self.unknown_args,
"mongos") + extra
extra += ' ' + self._get_ssl_server_args()
path = self.args['binarypath'] or ''
if os.name == 'nt':
newlogpath = logpath.replace('\\', '\\\\')
command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s "
"%s %s " % (os.path.join(path, 'mongos'),
newlogpath, port, configdb,
auth_param, extra))
else:
command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s "
"--fork" % (os.path.join(path, 'mongos'), logpath,
port, configdb, auth_param, extra))
# store parameters in startup_info
self.startup_info[str(port)] = command_str | [
"def",
"_construct_mongos",
"(",
"self",
",",
"logpath",
",",
"port",
",",
"configdb",
")",
":",
"extra",
"=",
"''",
"auth_param",
"=",
"''",
"if",
"self",
".",
"args",
"[",
"'auth'",
"]",
":",
"key_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",... | Construct command line strings for a mongos process. | [
"Construct",
"command",
"line",
"strings",
"for",
"a",
"mongos",
"process",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L2031-L2059 | train | 225,070 |
rueckstiess/mtools | mtools/util/logcodeline.py | LogCodeLine.addMatch | def addMatch(self, version, filename, lineno, loglevel, trigger):
"""
Add a match to the LogCodeLine.
Include the version, filename of the source file, the line number, and
the loglevel.
"""
self.versions.add(version)
self.matches[version].append((filename, lineno, loglevel, trigger)) | python | def addMatch(self, version, filename, lineno, loglevel, trigger):
"""
Add a match to the LogCodeLine.
Include the version, filename of the source file, the line number, and
the loglevel.
"""
self.versions.add(version)
self.matches[version].append((filename, lineno, loglevel, trigger)) | [
"def",
"addMatch",
"(",
"self",
",",
"version",
",",
"filename",
",",
"lineno",
",",
"loglevel",
",",
"trigger",
")",
":",
"self",
".",
"versions",
".",
"add",
"(",
"version",
")",
"self",
".",
"matches",
"[",
"version",
"]",
".",
"append",
"(",
"(",... | Add a match to the LogCodeLine.
Include the version, filename of the source file, the line number, and
the loglevel. | [
"Add",
"a",
"match",
"to",
"the",
"LogCodeLine",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logcodeline.py#L29-L37 | train | 225,071 |
rueckstiess/mtools | mtools/mplotqueries/plottypes/event_type.py | RSStatePlotType.accept_line | def accept_line(self, logevent):
"""
Return True on match.
Only match log lines containing 'is now in state' (reflects other
node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects
own state changes).
"""
if ("is now in state" in logevent.line_str and
logevent.split_tokens[-1] in self.states):
return True
if ("replSet" in logevent.line_str and
logevent.thread == "rsMgr" and
logevent.split_tokens[-1] in self.states):
return True
return False | python | def accept_line(self, logevent):
"""
Return True on match.
Only match log lines containing 'is now in state' (reflects other
node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects
own state changes).
"""
if ("is now in state" in logevent.line_str and
logevent.split_tokens[-1] in self.states):
return True
if ("replSet" in logevent.line_str and
logevent.thread == "rsMgr" and
logevent.split_tokens[-1] in self.states):
return True
return False | [
"def",
"accept_line",
"(",
"self",
",",
"logevent",
")",
":",
"if",
"(",
"\"is now in state\"",
"in",
"logevent",
".",
"line_str",
"and",
"logevent",
".",
"split_tokens",
"[",
"-",
"1",
"]",
"in",
"self",
".",
"states",
")",
":",
"return",
"True",
"if",
... | Return True on match.
Only match log lines containing 'is now in state' (reflects other
node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects
own state changes). | [
"Return",
"True",
"on",
"match",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/event_type.py#L73-L90 | train | 225,072 |
rueckstiess/mtools | mtools/mplotqueries/plottypes/event_type.py | RSStatePlotType.color_map | def color_map(cls, group):
print("Group %s" % group)
"""
Change default color behavior.
Map certain states always to the same colors (similar to MMS).
"""
try:
state_idx = cls.states.index(group)
except ValueError:
# on any unexpected state, return black
state_idx = 5
return cls.colors[state_idx], cls.markers[0] | python | def color_map(cls, group):
print("Group %s" % group)
"""
Change default color behavior.
Map certain states always to the same colors (similar to MMS).
"""
try:
state_idx = cls.states.index(group)
except ValueError:
# on any unexpected state, return black
state_idx = 5
return cls.colors[state_idx], cls.markers[0] | [
"def",
"color_map",
"(",
"cls",
",",
"group",
")",
":",
"print",
"(",
"\"Group %s\"",
"%",
"group",
")",
"try",
":",
"state_idx",
"=",
"cls",
".",
"states",
".",
"index",
"(",
"group",
")",
"except",
"ValueError",
":",
"# on any unexpected state, return blac... | Change default color behavior.
Map certain states always to the same colors (similar to MMS). | [
"Change",
"default",
"color",
"behavior",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/event_type.py#L97-L109 | train | 225,073 |
rueckstiess/mtools | mtools/mplotqueries/plottypes/base_type.py | BasePlotType.add_line | def add_line(self, logevent):
"""Append log line to this plot type."""
key = None
self.empty = False
self.groups.setdefault(key, list()).append(logevent) | python | def add_line(self, logevent):
"""Append log line to this plot type."""
key = None
self.empty = False
self.groups.setdefault(key, list()).append(logevent) | [
"def",
"add_line",
"(",
"self",
",",
"logevent",
")",
":",
"key",
"=",
"None",
"self",
".",
"empty",
"=",
"False",
"self",
".",
"groups",
".",
"setdefault",
"(",
"key",
",",
"list",
"(",
")",
")",
".",
"append",
"(",
"logevent",
")"
] | Append log line to this plot type. | [
"Append",
"log",
"line",
"to",
"this",
"plot",
"type",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L53-L57 | train | 225,074 |
rueckstiess/mtools | mtools/mplotqueries/plottypes/base_type.py | BasePlotType.logevents | def logevents(self):
"""Iterator yielding all logevents from groups dictionary."""
for key in self.groups:
for logevent in self.groups[key]:
yield logevent | python | def logevents(self):
"""Iterator yielding all logevents from groups dictionary."""
for key in self.groups:
for logevent in self.groups[key]:
yield logevent | [
"def",
"logevents",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"groups",
":",
"for",
"logevent",
"in",
"self",
".",
"groups",
"[",
"key",
"]",
":",
"yield",
"logevent"
] | Iterator yielding all logevents from groups dictionary. | [
"Iterator",
"yielding",
"all",
"logevents",
"from",
"groups",
"dictionary",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L60-L64 | train | 225,075 |
rueckstiess/mtools | mtools/mplotqueries/plottypes/histogram_type.py | HistogramPlotType.clicked | def clicked(self, event):
"""Print group name and number of items in bin."""
group = event.artist._mt_group
n = event.artist._mt_n
dt = num2date(event.artist._mt_bin)
print("%4i %s events in %s sec beginning at %s"
% (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S"))) | python | def clicked(self, event):
"""Print group name and number of items in bin."""
group = event.artist._mt_group
n = event.artist._mt_n
dt = num2date(event.artist._mt_bin)
print("%4i %s events in %s sec beginning at %s"
% (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S"))) | [
"def",
"clicked",
"(",
"self",
",",
"event",
")",
":",
"group",
"=",
"event",
".",
"artist",
".",
"_mt_group",
"n",
"=",
"event",
".",
"artist",
".",
"_mt_n",
"dt",
"=",
"num2date",
"(",
"event",
".",
"artist",
".",
"_mt_bin",
")",
"print",
"(",
"\... | Print group name and number of items in bin. | [
"Print",
"group",
"name",
"and",
"number",
"of",
"items",
"in",
"bin",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/histogram_type.py#L153-L159 | train | 225,076 |
rueckstiess/mtools | mtools/util/grouping.py | Grouping.add | def add(self, item, group_by=None):
"""General purpose class to group items by certain criteria."""
key = None
if not group_by:
group_by = self.group_by
if group_by:
# if group_by is a function, use it with item as argument
if hasattr(group_by, '__call__'):
key = group_by(item)
# if the item has attribute of group_by as string, use that as key
elif isinstance(group_by, str) and hasattr(item, group_by):
key = getattr(item, group_by)
else:
key = None
# try to match str(item) with regular expression
if isinstance(group_by, str):
match = re.search(group_by, str(item))
if match:
if len(match.groups()) > 0:
key = match.group(1)
else:
key = match.group()
self.groups.setdefault(key, list()).append(item) | python | def add(self, item, group_by=None):
"""General purpose class to group items by certain criteria."""
key = None
if not group_by:
group_by = self.group_by
if group_by:
# if group_by is a function, use it with item as argument
if hasattr(group_by, '__call__'):
key = group_by(item)
# if the item has attribute of group_by as string, use that as key
elif isinstance(group_by, str) and hasattr(item, group_by):
key = getattr(item, group_by)
else:
key = None
# try to match str(item) with regular expression
if isinstance(group_by, str):
match = re.search(group_by, str(item))
if match:
if len(match.groups()) > 0:
key = match.group(1)
else:
key = match.group()
self.groups.setdefault(key, list()).append(item) | [
"def",
"add",
"(",
"self",
",",
"item",
",",
"group_by",
"=",
"None",
")",
":",
"key",
"=",
"None",
"if",
"not",
"group_by",
":",
"group_by",
"=",
"self",
".",
"group_by",
"if",
"group_by",
":",
"# if group_by is a function, use it with item as argument",
"if"... | General purpose class to group items by certain criteria. | [
"General",
"purpose",
"class",
"to",
"group",
"items",
"by",
"certain",
"criteria",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L23-L50 | train | 225,077 |
rueckstiess/mtools | mtools/util/grouping.py | Grouping.regroup | def regroup(self, group_by=None):
"""Regroup items."""
if not group_by:
group_by = self.group_by
groups = self.groups
self.groups = {}
for g in groups:
for item in groups[g]:
self.add(item, group_by) | python | def regroup(self, group_by=None):
"""Regroup items."""
if not group_by:
group_by = self.group_by
groups = self.groups
self.groups = {}
for g in groups:
for item in groups[g]:
self.add(item, group_by) | [
"def",
"regroup",
"(",
"self",
",",
"group_by",
"=",
"None",
")",
":",
"if",
"not",
"group_by",
":",
"group_by",
"=",
"self",
".",
"group_by",
"groups",
"=",
"self",
".",
"groups",
"self",
".",
"groups",
"=",
"{",
"}",
"for",
"g",
"in",
"groups",
"... | Regroup items. | [
"Regroup",
"items",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L78-L88 | train | 225,078 |
rueckstiess/mtools | mtools/util/grouping.py | Grouping.move_items | def move_items(self, from_group, to_group):
"""Take all elements from the from_group and add it to the to_group."""
if from_group not in self.keys() or len(self.groups[from_group]) == 0:
return
self.groups.setdefault(to_group, list()).extend(self.groups.get
(from_group, list()))
if from_group in self.groups:
del self.groups[from_group] | python | def move_items(self, from_group, to_group):
"""Take all elements from the from_group and add it to the to_group."""
if from_group not in self.keys() or len(self.groups[from_group]) == 0:
return
self.groups.setdefault(to_group, list()).extend(self.groups.get
(from_group, list()))
if from_group in self.groups:
del self.groups[from_group] | [
"def",
"move_items",
"(",
"self",
",",
"from_group",
",",
"to_group",
")",
":",
"if",
"from_group",
"not",
"in",
"self",
".",
"keys",
"(",
")",
"or",
"len",
"(",
"self",
".",
"groups",
"[",
"from_group",
"]",
")",
"==",
"0",
":",
"return",
"self",
... | Take all elements from the from_group and add it to the to_group. | [
"Take",
"all",
"elements",
"from",
"the",
"from_group",
"and",
"add",
"it",
"to",
"the",
"to_group",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L90-L98 | train | 225,079 |
def sort_by_size(self, group_limit=None, discard_others=False,
                 others_label='others'):
    """
    Sort the groups by the number of elements they contain, descending.

    Also has option to limit the number of groups. If this option is
    chosen, the remaining elements are placed into another group with the
    name specified with others_label. If discard_others is True, the others
    group is removed instead.

    :param group_limit: max number of groups to keep (including "others"),
        or None for no limit.
    :param discard_others: drop overflow elements instead of collecting
        them under others_label.
    :param others_label: name of the catch-all group.
    """
    # sort groups by number of elements, largest first
    self.groups = OrderedDict(sorted(self.groups.items(),
                                     key=lambda x: len(x[1]),
                                     reverse=True))

    # if group-limit is provided, combine remaining groups
    if group_limit is not None:
        # Fix: dict views are not subscriptable on Python 3, so the key
        # order must be materialized as a list before slicing.
        if not discard_others:
            # reserve one slot for the "others" group itself
            group_keys = list(self.groups.keys())[group_limit - 1:]
            self.groups.setdefault(others_label, list())
        else:
            group_keys = list(self.groups.keys())[group_limit:]

        # merge (or drop) all groups beyond the limit
        for g in group_keys:
            if not discard_others:
                self.groups[others_label].extend(self.groups[g])
            del self.groups[g]

        # remove the "others" group if it ended up empty
        if (others_label in self.groups and
                len(self.groups[others_label]) == 0):
            del self.groups[others_label]

    # remove others group regardless of limit if requested
    if discard_others and others_label in self.groups:
        del self.groups[others_label]
others_label='others'):
"""
Sort the groups by the number of elements they contain, descending.
Also has option to limit the number of groups. If this option is
chosen, the remaining elements are placed into another group with the
name specified with others_label. if discard_others is True, the others
group is removed instead.
"""
# sort groups by number of elements
self.groups = OrderedDict(sorted(six.iteritems(self.groups),
key=lambda x: len(x[1]),
reverse=True))
# if group-limit is provided, combine remaining groups
if group_limit is not None:
# now group together all groups that did not make the limit
if not discard_others:
group_keys = self.groups.keys()[group_limit - 1:]
self.groups.setdefault(others_label, list())
else:
group_keys = self.groups.keys()[group_limit:]
# only go to second last (-1), since the 'others' group is now last
for g in group_keys:
if not discard_others:
self.groups[others_label].extend(self.groups[g])
del self.groups[g]
# remove if empty
if (others_label in self.groups and
len(self.groups[others_label]) == 0):
del self.groups[others_label]
# remove others group regardless of limit if requested
if discard_others and others_label in self.groups:
del self.groups[others_label] | [
"def",
"sort_by_size",
"(",
"self",
",",
"group_limit",
"=",
"None",
",",
"discard_others",
"=",
"False",
",",
"others_label",
"=",
"'others'",
")",
":",
"# sort groups by number of elements",
"self",
".",
"groups",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"six",... | Sort the groups by the number of elements they contain, descending.
Also has option to limit the number of groups. If this option is
chosen, the remaining elements are placed into another group with the
name specified with others_label. if discard_others is True, the others
group is removed instead. | [
"Sort",
"the",
"groups",
"by",
"the",
"number",
"of",
"elements",
"they",
"contain",
"descending",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L100-L138 | train | 225,080 |
def import_l2c_db():
    """
    Static import helper function.

    Checks if the log2code.pickle exists first, otherwise raises ImportError.
    """
    data_path = os.path.join(os.path.dirname(mtools.__file__), 'data')
    pickle_file = os.path.join(data_path, 'log2code.pickle')

    # guard clause: fail fast when the pickled database is missing
    if not os.path.exists(pickle_file):
        raise ImportError('log2code.pickle not found in %s.' % data_path)

    av, lv, lbw, lcl = cPickle.load(open(pickle_file, 'rb'))
    return av, lv, lbw, lcl
"""
Static import helper function.
Checks if the log2code.pickle exists first, otherwise raises ImportError.
"""
data_path = os.path.join(os.path.dirname(mtools.__file__), 'data')
if os.path.exists(os.path.join(data_path, 'log2code.pickle')):
av, lv, lbw, lcl = cPickle.load(open(os.path.join(data_path,
'log2code.pickle'),
'rb'))
return av, lv, lbw, lcl
else:
raise ImportError('log2code.pickle not found in %s.' % data_path) | [
"def",
"import_l2c_db",
"(",
")",
":",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"mtools",
".",
"__file__",
")",
",",
"'data'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"pat... | Static import helper function.
Checks if the log2code.pickle exists first, otherwise raises ImportError. | [
"Static",
"import",
"helper",
"function",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L15-L29 | train | 225,081 |
def _strip_counters(self, sub_line):
    """Find the codeline end by taking out the counters and durations."""
    try:
        closing_brace = sub_line.rindex('}')
    except ValueError:
        # no closing brace at all -- nothing to strip
        return sub_line
    return sub_line[:closing_brace + 1]
"""Find the codeline end by taking out the counters and durations."""
try:
end = sub_line.rindex('}')
except ValueError:
return sub_line
else:
return sub_line[:(end + 1)] | [
"def",
"_strip_counters",
"(",
"self",
",",
"sub_line",
")",
":",
"try",
":",
"end",
"=",
"sub_line",
".",
"rindex",
"(",
"'}'",
")",
"except",
"ValueError",
":",
"return",
"sub_line",
"else",
":",
"return",
"sub_line",
"[",
":",
"(",
"end",
"+",
"1",
... | Find the codeline end by taking out the counters and durations. | [
"Find",
"the",
"codeline",
"end",
"by",
"taking",
"out",
"the",
"counters",
"and",
"durations",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L78-L85 | train | 225,082 |
def _strip_datetime(self, sub_line):
    """Strip datetime and other parts so that there is no redundancy."""
    try:
        bracket_pos = sub_line.index(']')
    except ValueError:
        return sub_line
    # keep everything after the closing bracket; an empty remainder acts
    # as an in-place placeholder when the lists are interleaved later
    return sub_line[bracket_pos + 1:]
"""Strip datetime and other parts so that there is no redundancy."""
try:
begin = sub_line.index(']')
except ValueError:
return sub_line
else:
# create a "" in place character for the beginnings..
# needed when interleaving the lists
sub = sub_line[begin + 1:]
return sub | [
"def",
"_strip_datetime",
"(",
"self",
",",
"sub_line",
")",
":",
"try",
":",
"begin",
"=",
"sub_line",
".",
"index",
"(",
"']'",
")",
"except",
"ValueError",
":",
"return",
"sub_line",
"else",
":",
"# create a \"\" in place character for the beginnings..",
"# nee... | Strip datetime and other parts so that there is no redundancy. | [
"Strip",
"datetime",
"and",
"other",
"parts",
"so",
"that",
"there",
"is",
"no",
"redundancy",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L87-L97 | train | 225,083 |
def _find_variable(self, pattern, logline):
    """
    Return the variable parts of the code given a tuple of strings pattern.

    Example: (this, is, a, pattern) -> 'this is a good pattern' -> [good]
    """
    variable_parts = []

    # everything before the first static piece is variable (minus datetime)
    prefix_end = logline.index(pattern[0])
    variable_parts.append(self._strip_datetime(logline[:prefix_end]))

    # whatever sits between two consecutive static pieces is variable
    for current, following in zip(pattern[:-1], pattern[1:]):
        middle = re.search(re.escape(current) + '(.*)' +
                           re.escape(following), logline)
        try:
            variable_parts.append(middle.group(1))
        except Exception:
            # no match for this pair of static pieces -- skip it
            pass

    # everything after the last static piece is variable, minus the
    # trailing counters and durations
    suffix_start = logline.rindex(pattern[-1]) + len(pattern[-1])
    variable_parts.append(self._strip_counters(logline[suffix_start:]))

    return variable_parts
"""
Return the variable parts of the code given a tuple of strings pattern.
Example: (this, is, a, pattern) -> 'this is a good pattern' -> [good]
"""
var_subs = []
# find the beginning of the pattern
first_index = logline.index(pattern[0])
beg_str = logline[:first_index]
# strip the beginning substring
var_subs.append(self._strip_datetime(beg_str))
for patt, patt_next in zip(pattern[:-1], pattern[1:]):
# regular expression pattern that finds what's in the middle of
# two substrings
pat = re.escape(patt) + '(.*)' + re.escape(patt_next)
# extract whats in the middle of the two substrings
between = re.search(pat, logline)
try:
# add what's in between if the search isn't none
var_subs.append(between.group(1))
except Exception:
pass
rest_of_string = logline.rindex(pattern[-1]) + len(pattern[-1])
# add the rest of the string to end minus the counters and durations
end_str = logline[rest_of_string:]
var_subs.append(self._strip_counters(end_str))
# strip whitespace from each string, but keep the strings themselves
# var_subs = [v.strip() for v in var_subs]
return var_subs | [
"def",
"_find_variable",
"(",
"self",
",",
"pattern",
",",
"logline",
")",
":",
"var_subs",
"=",
"[",
"]",
"# find the beginning of the pattern",
"first_index",
"=",
"logline",
".",
"index",
"(",
"pattern",
"[",
"0",
"]",
")",
"beg_str",
"=",
"logline",
"[",... | Return the variable parts of the code given a tuple of strings pattern.
Example: (this, is, a, pattern) -> 'this is a good pattern' -> [good] | [
"Return",
"the",
"variable",
"parts",
"of",
"the",
"code",
"given",
"a",
"tuple",
"of",
"strings",
"pattern",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L99-L132 | train | 225,084 |
def _variable_parts(self, line, codeline):
    """Return variable parts of the codeline, given the static parts."""
    if codeline:
        # the codeline knows its static pattern; extract what varies
        return self._find_variable(codeline.pattern, line)
    # unknown codeline: treat the whole cleaned-up line as one variable part
    stripped = self._strip_datetime(self._strip_counters(line))
    return [stripped.strip()]
"""Return variable parts of the codeline, given the static parts."""
var_subs = []
# codeline has pattern and then has the outputs in different versions
if codeline:
var_subs = self._find_variable(codeline.pattern, line)
else:
# make variable part of the line string without all the other stuff
line_str = self._strip_datetime(self._strip_counters(line))
var_subs = [line_str.strip()]
return var_subs | [
"def",
"_variable_parts",
"(",
"self",
",",
"line",
",",
"codeline",
")",
":",
"var_subs",
"=",
"[",
"]",
"# codeline has pattern and then has the outputs in different versions",
"if",
"codeline",
":",
"var_subs",
"=",
"self",
".",
"_find_variable",
"(",
"codeline",
... | Return variable parts of the codeline, given the static parts. | [
"Return",
"variable",
"parts",
"of",
"the",
"codeline",
"given",
"the",
"static",
"parts",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L134-L144 | train | 225,085 |
def combine(self, pattern, variable):
    """Combine a pattern and variable parts to be a line string again."""
    pieces = []
    # variable parts come first: var[0] + pat[0] + var[1] + pat[1] + ...
    for var_part, static_part in izip_longest(variable, pattern,
                                              fillvalue=''):
        pieces.append(var_part)
        pieces.append(static_part)
    return ''.join(pieces)
"""Combine a pattern and variable parts to be a line string again."""
inter_zip = izip_longest(variable, pattern, fillvalue='')
interleaved = [elt for pair in inter_zip for elt in pair]
return ''.join(interleaved) | [
"def",
"combine",
"(",
"self",
",",
"pattern",
",",
"variable",
")",
":",
"inter_zip",
"=",
"izip_longest",
"(",
"variable",
",",
"pattern",
",",
"fillvalue",
"=",
"''",
")",
"interleaved",
"=",
"[",
"elt",
"for",
"pair",
"in",
"inter_zip",
"for",
"elt",... | Combine a pattern and variable parts to be a line string again. | [
"Combine",
"a",
"pattern",
"and",
"variable",
"parts",
"to",
"be",
"a",
"line",
"string",
"again",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L154-L158 | train | 225,086 |
def run(self, arguments=None, get_unknowns=False):
    """
    Init point to execute the script.

    If `arguments` string is given, will evaluate the arguments, else
    evaluates sys.argv. Any inheriting class should extend the run method
    (but first calling BaseCmdLineTool.run(self)).

    :param arguments: optional command-line string; split on whitespace
        and parsed instead of sys.argv when given.
    :param get_unknowns: if True, parse with parse_known_args() and keep
        unrecognized options in self.unknown_args instead of erroring.
    """
    # redirect PIPE signal to quiet kill script, if not on Windows
    if os.name != 'nt':
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    if get_unknowns:
        # tolerant parsing: collect unrecognized options separately
        if arguments:
            self.args, self.unknown_args = (self.argparser.parse_known_args
                                            (args=arguments.split()))
        else:
            (self.args,
             self.unknown_args) = self.argparser.parse_known_args()
        # store parsed namespace as a plain dict
        self.args = vars(self.args)
    else:
        # strict parsing: unknown options make argparse exit with an error
        if arguments:
            myargs = arguments.split()
            self.args = vars(self.argparser.parse_args
                             (args=myargs))
        else:
            self.args = vars(self.argparser.parse_args())

    # progress bar is suppressed when explicitly disabled or when reading
    # from stdin (no known size to measure progress against)
    self.progress_bar_enabled = (not (self.args['no_progressbar'] or
                                      self.is_stdin))
"""
Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)).
"""
# redirect PIPE signal to quiet kill script, if not on Windows
if os.name != 'nt':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if get_unknowns:
if arguments:
self.args, self.unknown_args = (self.argparser.parse_known_args
(args=arguments.split()))
else:
(self.args,
self.unknown_args) = self.argparser.parse_known_args()
self.args = vars(self.args)
else:
if arguments:
myargs = arguments.split()
self.args = vars(self.argparser.parse_args
(args=myargs))
else:
self.args = vars(self.argparser.parse_args())
self.progress_bar_enabled = (not (self.args['no_progressbar'] or
self.is_stdin)) | [
"def",
"run",
"(",
"self",
",",
"arguments",
"=",
"None",
",",
"get_unknowns",
"=",
"False",
")",
":",
"# redirect PIPE signal to quiet kill script, if not on Windows",
"if",
"os",
".",
"name",
"!=",
"'nt'",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"S... | Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)). | [
"Init",
"point",
"to",
"execute",
"the",
"script",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/cmdlinetool.py#L110-L139 | train | 225,087 |
def update_progress(self, progress, prefix=''):
    """
    Print a progress bar for longer-running scripts.

    The progress value is a value between 0.0 and 1.0. If a prefix is
    present, it will be printed before the progress bar.
    """
    total_length = 40

    if progress == 1.:
        # done: blank out the bar and move to the next line
        sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50))
        sys.stderr.write('\n')
        sys.stderr.flush()
        return

    filled = int(round(total_length * progress))
    bar = '=' * filled + ' ' * (total_length - filled)
    sys.stderr.write('\r%s [%s] %.1f %% ' % (prefix, bar, progress * 100))
    sys.stderr.flush()
"""
Print a progress bar for longer-running scripts.
The progress value is a value between 0.0 and 1.0. If a prefix is
present, it will be printed before the progress bar.
"""
total_length = 40
if progress == 1.:
sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50))
sys.stderr.write('\n')
sys.stderr.flush()
else:
bar_length = int(round(total_length * progress))
sys.stderr.write('\r%s [%s%s] %.1f %% '
% (prefix, '=' * bar_length,
' ' * (total_length - bar_length),
progress * 100))
sys.stderr.flush() | [
"def",
"update_progress",
"(",
"self",
",",
"progress",
",",
"prefix",
"=",
"''",
")",
":",
"total_length",
"=",
"40",
"if",
"progress",
"==",
"1.",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'\\r'",
"+",
"' '",
"*",
"(",
"total_length",
"+",
"le... | Print a progress bar for longer-running scripts.
The progress value is a value between 0.0 and 1.0. If a prefix is
present, it will be printed before the progress bar. | [
"Print",
"a",
"progress",
"bar",
"for",
"longer",
"-",
"running",
"scripts",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/cmdlinetool.py#L153-L172 | train | 225,088 |
def accept_line(self, logevent):
    """Return True if the log line has the nominated yaxis field."""
    if self.regex_mode:
        # regex mode: the field holds a pattern matched on the raw line
        return re.search(self.field, logevent.line_str) is not None
    # attribute mode: the field names a logevent attribute to test
    return getattr(logevent, self.field) is not None
"""Return True if the log line has the nominated yaxis field."""
if self.regex_mode:
return bool(re.search(self.field, logevent.line_str))
else:
return getattr(logevent, self.field) is not None | [
"def",
"accept_line",
"(",
"self",
",",
"logevent",
")",
":",
"if",
"self",
".",
"regex_mode",
":",
"return",
"bool",
"(",
"re",
".",
"search",
"(",
"self",
".",
"field",
",",
"logevent",
".",
"line_str",
")",
")",
"else",
":",
"return",
"getattr",
"... | Return True if the log line has the nominated yaxis field. | [
"Return",
"True",
"if",
"the",
"log",
"line",
"has",
"the",
"nominated",
"yaxis",
"field",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/scatter_type.py#L54-L59 | train | 225,089 |
def clicked(self, event):
    """
    Call if an element of this plottype is clicked.

    Implement in sub class.

    Single click prints the matched log line(s); double click (matplotlib
    >= 1.2 only) toggles a triangular overlay visualizing the clicked
    operation's duration.
    """
    # group/index of the picked artist, attached when the plot was drawn
    group = event.artist._mt_group
    indices = event.ind

    # double click only supported on 1.2 or later
    major, minor, _ = mpl_version.split('.')
    if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick:
        # single click (or old matplotlib): print the raw log lines
        for i in indices:
            print(self.groups[group][i].line_str)

    else:
        # toggle durline for the first matched event
        first = indices[0]
        logevent = self.groups[group][first]

        try:
            # remove triangle for this event if one is already shown.
            # Fix: map() returns an iterator without .index() on
            # Python 3, so build a list of the logevents explicitly.
            idx = [le for le, _ in self.durlines].index(logevent)
            _, poly = self.durlines[idx]
            poly.remove()
            plt.gcf().canvas.draw()
            del self.durlines[idx]

        except ValueError:
            # construct triangle and add to list of durlines
            if self.args['optime_start']:
                # timestamp marks the operation start: triangle extends
                # forward in time by the duration
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime +
                                 timedelta(milliseconds=logevent.duration)
                                 ), 0]]
            else:
                # timestamp marks the operation end: triangle extends
                # backward in time
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime -
                                 timedelta(milliseconds=logevent.duration)
                                 ), 0]]

            poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0,
                           facecolor=event.artist.get_markerfacecolor(),
                           edgecolor=None, zorder=-10000)

            ax = plt.gca()
            ax.add_patch(poly)
            plt.gcf().canvas.draw()

            self.durlines.append((logevent, poly))
"""
Call if an element of this plottype is clicked.
Implement in sub class.
"""
group = event.artist._mt_group
indices = event.ind
# double click only supported on 1.2 or later
major, minor, _ = mpl_version.split('.')
if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick:
for i in indices:
print(self.groups[group][i].line_str)
else:
# toggle durline
first = indices[0]
logevent = self.groups[group][first]
try:
# remove triangle for this event
idx = map(itemgetter(0), self.durlines).index(logevent)
_, poly = self.durlines[idx]
poly.remove()
plt.gcf().canvas.draw()
del self.durlines[idx]
except ValueError:
# construct triangle and add to list of durlines
if self.args['optime_start']:
pts = [[date2num(logevent.datetime), 0],
[date2num(logevent.datetime), logevent.duration],
[date2num(logevent.datetime +
timedelta(milliseconds=logevent.duration)
), 0]]
else:
pts = [[date2num(logevent.datetime), 0],
[date2num(logevent.datetime), logevent.duration],
[date2num(logevent.datetime -
timedelta(milliseconds=logevent.duration)
), 0]]
poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0,
facecolor=event.artist.get_markerfacecolor(),
edgecolor=None, zorder=-10000)
ax = plt.gca()
ax.add_patch(poly)
plt.gcf().canvas.draw()
self.durlines.append((logevent, poly)) | [
"def",
"clicked",
"(",
"self",
",",
"event",
")",
":",
"group",
"=",
"event",
".",
"artist",
".",
"_mt_group",
"indices",
"=",
"event",
".",
"ind",
"# double click only supported on 1.2 or later",
"major",
",",
"minor",
",",
"_",
"=",
"mpl_version",
".",
"sp... | Call if an element of this plottype is clicked.
Implement in sub class. | [
"Call",
"if",
"an",
"element",
"of",
"this",
"plottype",
"is",
"clicked",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/scatter_type.py#L88-L140 | train | 225,090 |
def run(self, arguments=None):
    """Print useful information about the log file."""
    # parse arguments and set up self.args / progress bar state
    LogFileTool.run(self, arguments)

    for i, self.logfile in enumerate(self.args['logfile']):
        # visual separator between multiple log files
        if i > 0:
            print("\n ------------------------------------------\n")

        if self.logfile.datetime_format == 'ctime-pre2.4':
            # no milliseconds when datetime format doesn't support it
            start_time = (self.logfile.start.strftime("%Y %b %d %H:%M:%S")
                          if self.logfile.start else "unknown")
            end_time = (self.logfile.end.strftime("%Y %b %d %H:%M:%S")
                        if self.logfile.start else "unknown")
        else:
            # include milliseconds (strftime gives microseconds, trim to ms)
            start_time = (self.logfile.start.strftime("%Y %b %d "
                                                      "%H:%M:%S.%f")[:-3]
                          if self.logfile.start else "unknown")
            end_time = (self.logfile.end.strftime("%Y %b %d "
                                                  "%H:%M:%S.%f")[:-3]
                        if self.logfile.start else "unknown")

        # summary header for this log file
        print(" source: %s" % self.logfile.name)
        print(" host: %s"
              % (self.logfile.hostname + ':' + str(self.logfile.port)
                 if self.logfile.hostname else "unknown"))
        print(" start: %s" % (start_time))
        print(" end: %s" % (end_time))

        # TODO: add timezone if iso8601 format
        print("date format: %s" % self.logfile.datetime_format)
        print(" length: %s" % len(self.logfile))
        print(" binary: %s" % (self.logfile.binary or "unknown"))

        version = (' -> '.join(self.logfile.versions) or "unknown")

        # if version is unknown, go by date
        if version == 'unknown':
            # infer a version range from timestamp format and log fields
            if self.logfile.datetime_format == 'ctime-pre2.4':
                version = '< 2.4 (no milliseconds)'
            elif self.logfile.datetime_format == 'ctime':
                version = '>= 2.4.x ctime (milliseconds present)'
            elif (self.logfile.datetime_format == "iso8601-utc" or
                  self.logfile.datetime_format == "iso8601-local"):
                if self.logfile.has_level:
                    version = '>= 3.0 (iso8601 format, level, component)'
                else:
                    version = '= 2.6.x (iso8601 format)'

        print(" version: %s" % version)
        print(" storage: %s"
              % (self.logfile.storage_engine or 'unknown'))

        # now run all sections
        for section in self.sections:
            if section.active:
                print("\n%s" % section.name.upper())
                section.run()
"""Print useful information about the log file."""
LogFileTool.run(self, arguments)
for i, self.logfile in enumerate(self.args['logfile']):
if i > 0:
print("\n ------------------------------------------\n")
if self.logfile.datetime_format == 'ctime-pre2.4':
# no milliseconds when datetime format doesn't support it
start_time = (self.logfile.start.strftime("%Y %b %d %H:%M:%S")
if self.logfile.start else "unknown")
end_time = (self.logfile.end.strftime("%Y %b %d %H:%M:%S")
if self.logfile.start else "unknown")
else:
# include milliseconds
start_time = (self.logfile.start.strftime("%Y %b %d "
"%H:%M:%S.%f")[:-3]
if self.logfile.start else "unknown")
end_time = (self.logfile.end.strftime("%Y %b %d "
"%H:%M:%S.%f")[:-3]
if self.logfile.start else "unknown")
print(" source: %s" % self.logfile.name)
print(" host: %s"
% (self.logfile.hostname + ':' + str(self.logfile.port)
if self.logfile.hostname else "unknown"))
print(" start: %s" % (start_time))
print(" end: %s" % (end_time))
# TODO: add timezone if iso8601 format
print("date format: %s" % self.logfile.datetime_format)
print(" length: %s" % len(self.logfile))
print(" binary: %s" % (self.logfile.binary or "unknown"))
version = (' -> '.join(self.logfile.versions) or "unknown")
# if version is unknown, go by date
if version == 'unknown':
if self.logfile.datetime_format == 'ctime-pre2.4':
version = '< 2.4 (no milliseconds)'
elif self.logfile.datetime_format == 'ctime':
version = '>= 2.4.x ctime (milliseconds present)'
elif (self.logfile.datetime_format == "iso8601-utc" or
self.logfile.datetime_format == "iso8601-local"):
if self.logfile.has_level:
version = '>= 3.0 (iso8601 format, level, component)'
else:
version = '= 2.6.x (iso8601 format)'
print(" version: %s" % version)
print(" storage: %s"
% (self.logfile.storage_engine or 'unknown'))
# now run all sections
for section in self.sections:
if section.active:
print("\n%s" % section.name.upper())
section.run() | [
"def",
"run",
"(",
"self",
",",
"arguments",
"=",
"None",
")",
":",
"LogFileTool",
".",
"run",
"(",
"self",
",",
"arguments",
")",
"for",
"i",
",",
"self",
".",
"logfile",
"in",
"enumerate",
"(",
"self",
".",
"args",
"[",
"'logfile'",
"]",
")",
":"... | Print useful information about the log file. | [
"Print",
"useful",
"information",
"about",
"the",
"log",
"file",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mloginfo/mloginfo.py#L32-L90 | train | 225,091 |
def filesize(self):
    """
    Lazy evaluation of start and end of logfile.

    Returns None for stdin input currently.
    """
    if self.from_stdin:
        # file size is meaningless when reading from a pipe
        return None
    if self._filesize:
        return self._filesize
    # compute and cache on first access
    self._calculate_bounds()
    return self._filesize
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._filesize:
self._calculate_bounds()
return self._filesize | [
"def",
"filesize",
"(",
"self",
")",
":",
"if",
"self",
".",
"from_stdin",
":",
"return",
"None",
"if",
"not",
"self",
".",
"_filesize",
":",
"self",
".",
"_calculate_bounds",
"(",
")",
"return",
"self",
".",
"_filesize"
] | Lazy evaluation of start and end of logfile.
Returns None for stdin input currently. | [
"Lazy",
"evaluation",
"of",
"start",
"and",
"end",
"of",
"logfile",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L84-L94 | train | 225,092 |
def num_lines(self):
    """
    Lazy evaluation of the number of lines.

    Returns None for stdin input currently.
    """
    if self.from_stdin:
        # line count is unavailable when reading from a pipe
        return None
    if self._num_lines:
        return self._num_lines
    # compute and cache on first access
    self._iterate_lines()
    return self._num_lines
"""
Lazy evaluation of the number of lines.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._num_lines:
self._iterate_lines()
return self._num_lines | [
"def",
"num_lines",
"(",
"self",
")",
":",
"if",
"self",
".",
"from_stdin",
":",
"return",
"None",
"if",
"not",
"self",
".",
"_num_lines",
":",
"self",
".",
"_iterate_lines",
"(",
")",
"return",
"self",
".",
"_num_lines"
] | Lazy evaluation of the number of lines.
Returns None for stdin input currently. | [
"Lazy",
"evaluation",
"of",
"the",
"number",
"of",
"lines",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L118-L128 | train | 225,093 |
def versions(self):
    """Return all version changes."""
    seen = []
    # collapse consecutive duplicate versions across restarts
    for version, _ in self.restarts:
        if not seen or version != seen[-1]:
            seen.append(version)
    return seen
"""Return all version changes."""
versions = []
for v, _ in self.restarts:
if len(versions) == 0 or v != versions[-1]:
versions.append(v)
return versions | [
"def",
"versions",
"(",
"self",
")",
":",
"versions",
"=",
"[",
"]",
"for",
"v",
",",
"_",
"in",
"self",
".",
"restarts",
":",
"if",
"len",
"(",
"versions",
")",
"==",
"0",
"or",
"v",
"!=",
"versions",
"[",
"-",
"1",
"]",
":",
"versions",
".",
... | Return all version changes. | [
"Return",
"all",
"version",
"changes",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L166-L172 | train | 225,094 |
def next(self):
    """Get next line, adjust for year rollover and hint datetime format."""
    # use readline here because next() iterator uses internal readahead
    # buffer so seek position is wrong
    line = self.filehandle.readline()
    # decode permissively: invalid bytes become replacement characters
    line = line.decode('utf-8', 'replace')

    if line == '':
        # empty string from readline means end of file
        raise StopIteration
    line = line.rstrip('\n')

    le = LogEvent(line)

    # hint format and nextpos from previous line
    if self._datetime_format and self._datetime_nextpos is not None:
        # fast path: reuse the datetime format/offset detected earlier
        ret = le.set_datetime_hint(self._datetime_format,
                                   self._datetime_nextpos,
                                   self.year_rollover)
        if not ret:
            # logevent indicates timestamp format has changed,
            # invalidate hint info
            self._datetime_format = None
            self._datetime_nextpos = None
    elif le.datetime:
        # gather new hint info from another logevent
        self._datetime_format = le.datetime_format
        self._datetime_nextpos = le._datetime_nextpos

    return le
"""Get next line, adjust for year rollover and hint datetime format."""
# use readline here because next() iterator uses internal readahead
# buffer so seek position is wrong
line = self.filehandle.readline()
line = line.decode('utf-8', 'replace')
if line == '':
raise StopIteration
line = line.rstrip('\n')
le = LogEvent(line)
# hint format and nextpos from previous line
if self._datetime_format and self._datetime_nextpos is not None:
ret = le.set_datetime_hint(self._datetime_format,
self._datetime_nextpos,
self.year_rollover)
if not ret:
# logevent indicates timestamp format has changed,
# invalidate hint info
self._datetime_format = None
self._datetime_nextpos = None
elif le.datetime:
# gather new hint info from another logevent
self._datetime_format = le.datetime_format
self._datetime_nextpos = le._datetime_nextpos
return le | [
"def",
"next",
"(",
"self",
")",
":",
"# use readline here because next() iterator uses internal readahead",
"# buffer so seek position is wrong",
"line",
"=",
"self",
".",
"filehandle",
".",
"readline",
"(",
")",
"line",
"=",
"line",
".",
"decode",
"(",
"'utf-8'",
",... | Get next line, adjust for year rollover and hint datetime format. | [
"Get",
"next",
"line",
"adjust",
"for",
"year",
"rollover",
"and",
"hint",
"datetime",
"format",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L210-L236 | train | 225,095 |
rueckstiess/mtools | mtools/util/logfile.py | LogFile._calculate_bounds | def _calculate_bounds(self):
"""Calculate beginning and end of logfile."""
if self._bounds_calculated:
# Assume no need to recalc bounds for lifetime of a Logfile object
return
if self.from_stdin:
return False
# we should be able to find a valid log line within max_start_lines
max_start_lines = 10
lines_checked = 0
# get start datetime
for line in self.filehandle:
logevent = LogEvent(line)
lines_checked += 1
if logevent.datetime:
self._start = logevent.datetime
self._timezone = logevent.datetime.tzinfo
self._datetime_format = logevent.datetime_format
self._datetime_nextpos = logevent._datetime_nextpos
break
if lines_checked > max_start_lines:
break
# sanity check before attempting to find end date
if (self._start is None):
raise SystemExit("Error: <%s> does not appear to be a supported "
"MongoDB log file format" % self.filehandle.name)
# get end datetime (lines are at most 10k,
# go back 30k at most to make sure we catch one)
self.filehandle.seek(0, 2)
self._filesize = self.filehandle.tell()
self.filehandle.seek(-min(self._filesize, 30000), 2)
for line in reversed(self.filehandle.readlines()):
logevent = LogEvent(line)
if logevent.datetime:
self._end = logevent.datetime
break
# if there was a roll-over, subtract 1 year from start time
if self._end < self._start:
self._start = self._start.replace(year=self._start.year - 1)
self._year_rollover = self._end
else:
self._year_rollover = False
# reset logfile
self.filehandle.seek(0)
self._bounds_calculated = True
return True | python | def _calculate_bounds(self):
"""Calculate beginning and end of logfile."""
if self._bounds_calculated:
# Assume no need to recalc bounds for lifetime of a Logfile object
return
if self.from_stdin:
return False
# we should be able to find a valid log line within max_start_lines
max_start_lines = 10
lines_checked = 0
# get start datetime
for line in self.filehandle:
logevent = LogEvent(line)
lines_checked += 1
if logevent.datetime:
self._start = logevent.datetime
self._timezone = logevent.datetime.tzinfo
self._datetime_format = logevent.datetime_format
self._datetime_nextpos = logevent._datetime_nextpos
break
if lines_checked > max_start_lines:
break
# sanity check before attempting to find end date
if (self._start is None):
raise SystemExit("Error: <%s> does not appear to be a supported "
"MongoDB log file format" % self.filehandle.name)
# get end datetime (lines are at most 10k,
# go back 30k at most to make sure we catch one)
self.filehandle.seek(0, 2)
self._filesize = self.filehandle.tell()
self.filehandle.seek(-min(self._filesize, 30000), 2)
for line in reversed(self.filehandle.readlines()):
logevent = LogEvent(line)
if logevent.datetime:
self._end = logevent.datetime
break
# if there was a roll-over, subtract 1 year from start time
if self._end < self._start:
self._start = self._start.replace(year=self._start.year - 1)
self._year_rollover = self._end
else:
self._year_rollover = False
# reset logfile
self.filehandle.seek(0)
self._bounds_calculated = True
return True | [
"def",
"_calculate_bounds",
"(",
"self",
")",
":",
"if",
"self",
".",
"_bounds_calculated",
":",
"# Assume no need to recalc bounds for lifetime of a Logfile object",
"return",
"if",
"self",
".",
"from_stdin",
":",
"return",
"False",
"# we should be able to find a valid log l... | Calculate beginning and end of logfile. | [
"Calculate",
"beginning",
"and",
"end",
"of",
"logfile",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L407-L461 | train | 225,096 |
rueckstiess/mtools | mtools/util/logfile.py | LogFile._find_curr_line | def _find_curr_line(self, prev=False):
"""
Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position.
"""
curr_pos = self.filehandle.tell()
# jump back 15k characters (at most) and find last newline char
jump_back = min(self.filehandle.tell(), 15000)
self.filehandle.seek(-jump_back, 1)
buff = self.filehandle.read(jump_back)
self.filehandle.seek(curr_pos, 0)
if prev and self.prev_pos is not None and self.prev_pos == curr_pos:
# Number of characters to show before/after the log offset
error_context = 300
self.filehandle.seek(-error_context, 1)
buff = self.filehandle.read(curr_pos)
hr = "-" * 60
print("Fatal log parsing loop detected trying to find previous "
"log line near offset %s in %s:\n\n%s\n%s\n"
"<--- (current log parsing offset) \n%s\n%s\n"
% (curr_pos, self.name, hr, buff[:error_context],
buff[error_context:error_context + 1], hr),
file=sys.stderr)
raise SystemExit("Cannot parse %s with requested options"
% self.filehandle.name)
else:
self.prev_pos = curr_pos
buff = buff.decode("utf-8", "replace")
newline_pos = buff.rfind('\n')
if prev:
newline_pos = buff[:newline_pos].rfind('\n')
# move back to last newline char
if newline_pos == -1:
self.filehandle.seek(0)
return self.next()
self.filehandle.seek(newline_pos - jump_back + 1, 1)
# roll forward until we found a line with a datetime
try:
logevent = self.next()
while not logevent.datetime:
logevent = self.next()
return logevent
except StopIteration:
# reached end of file
return None | python | def _find_curr_line(self, prev=False):
"""
Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position.
"""
curr_pos = self.filehandle.tell()
# jump back 15k characters (at most) and find last newline char
jump_back = min(self.filehandle.tell(), 15000)
self.filehandle.seek(-jump_back, 1)
buff = self.filehandle.read(jump_back)
self.filehandle.seek(curr_pos, 0)
if prev and self.prev_pos is not None and self.prev_pos == curr_pos:
# Number of characters to show before/after the log offset
error_context = 300
self.filehandle.seek(-error_context, 1)
buff = self.filehandle.read(curr_pos)
hr = "-" * 60
print("Fatal log parsing loop detected trying to find previous "
"log line near offset %s in %s:\n\n%s\n%s\n"
"<--- (current log parsing offset) \n%s\n%s\n"
% (curr_pos, self.name, hr, buff[:error_context],
buff[error_context:error_context + 1], hr),
file=sys.stderr)
raise SystemExit("Cannot parse %s with requested options"
% self.filehandle.name)
else:
self.prev_pos = curr_pos
buff = buff.decode("utf-8", "replace")
newline_pos = buff.rfind('\n')
if prev:
newline_pos = buff[:newline_pos].rfind('\n')
# move back to last newline char
if newline_pos == -1:
self.filehandle.seek(0)
return self.next()
self.filehandle.seek(newline_pos - jump_back + 1, 1)
# roll forward until we found a line with a datetime
try:
logevent = self.next()
while not logevent.datetime:
logevent = self.next()
return logevent
except StopIteration:
# reached end of file
return None | [
"def",
"_find_curr_line",
"(",
"self",
",",
"prev",
"=",
"False",
")",
":",
"curr_pos",
"=",
"self",
".",
"filehandle",
".",
"tell",
"(",
")",
"# jump back 15k characters (at most) and find last newline char",
"jump_back",
"=",
"min",
"(",
"self",
".",
"filehandle... | Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position. | [
"Internal",
"helper",
"function",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L463-L515 | train | 225,097 |
rueckstiess/mtools | mtools/util/logfile.py | LogFile.fast_forward | def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
# skip lines until start_dt is reached
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
# now walk backwards until we found a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True) | python | def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
# skip lines until start_dt is reached
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
# now walk backwards until we found a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True) | [
"def",
"fast_forward",
"(",
"self",
",",
"start_dt",
")",
":",
"if",
"self",
".",
"from_stdin",
":",
"# skip lines until start_dt is reached",
"return",
"else",
":",
"# fast bisection path",
"max_mark",
"=",
"self",
".",
"filesize",
"step_size",
"=",
"max_mark",
"... | Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line). | [
"Fast",
"-",
"forward",
"file",
"to",
"given",
"start_dt",
"datetime",
"obj",
"using",
"binary",
"search",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L517-L566 | train | 225,098 |
rueckstiess/mtools | mtools/mlogfilter/filters/datetime_filter.py | DateTimeFilter.setup | def setup(self):
"""Get start end end date of logfile before starting to parse."""
if self.mlogfilter.is_stdin:
# assume this year (we have no other info)
now = datetime.now()
self.startDateTime = datetime(now.year, 1, 1, tzinfo=tzutc())
self.endDateTime = datetime(MAXYEAR, 12, 31, tzinfo=tzutc())
else:
logfiles = self.mlogfilter.args['logfile']
self.startDateTime = min([lf.start +
timedelta(hours=self
.mlogfilter
.args['timezone'][i])
for i, lf in enumerate(logfiles)])
self.endDateTime = max([lf.end +
timedelta(hours=self
.mlogfilter.args['timezone'][i])
for i, lf in enumerate(logfiles)])
# now parse for further changes to from and to datetimes
dtbound = DateTimeBoundaries(self.startDateTime, self.endDateTime)
self.fromDateTime, self.toDateTime = dtbound(self.mlogfilter
.args['from'] or None,
self.mlogfilter
.args['to'] or None)
# define start_limit for mlogfilter's fast_forward method
self.start_limit = self.fromDateTime
# for single logfile, get file seek position of `to` datetime
if (len(self.mlogfilter.args['logfile']) == 1 and not
self.mlogfilter.is_stdin):
if self.mlogfilter.args['to'] != "end":
# fast forward, get seek value, then reset file
logfile = self.mlogfilter.args['logfile'][0]
logfile.fast_forward(self.toDateTime)
self.seek_to = logfile.filehandle.tell()
logfile.filehandle.seek(0)
else:
self.seek_to = -1
else:
self.seek_to = False | python | def setup(self):
"""Get start end end date of logfile before starting to parse."""
if self.mlogfilter.is_stdin:
# assume this year (we have no other info)
now = datetime.now()
self.startDateTime = datetime(now.year, 1, 1, tzinfo=tzutc())
self.endDateTime = datetime(MAXYEAR, 12, 31, tzinfo=tzutc())
else:
logfiles = self.mlogfilter.args['logfile']
self.startDateTime = min([lf.start +
timedelta(hours=self
.mlogfilter
.args['timezone'][i])
for i, lf in enumerate(logfiles)])
self.endDateTime = max([lf.end +
timedelta(hours=self
.mlogfilter.args['timezone'][i])
for i, lf in enumerate(logfiles)])
# now parse for further changes to from and to datetimes
dtbound = DateTimeBoundaries(self.startDateTime, self.endDateTime)
self.fromDateTime, self.toDateTime = dtbound(self.mlogfilter
.args['from'] or None,
self.mlogfilter
.args['to'] or None)
# define start_limit for mlogfilter's fast_forward method
self.start_limit = self.fromDateTime
# for single logfile, get file seek position of `to` datetime
if (len(self.mlogfilter.args['logfile']) == 1 and not
self.mlogfilter.is_stdin):
if self.mlogfilter.args['to'] != "end":
# fast forward, get seek value, then reset file
logfile = self.mlogfilter.args['logfile'][0]
logfile.fast_forward(self.toDateTime)
self.seek_to = logfile.filehandle.tell()
logfile.filehandle.seek(0)
else:
self.seek_to = -1
else:
self.seek_to = False | [
"def",
"setup",
"(",
"self",
")",
":",
"if",
"self",
".",
"mlogfilter",
".",
"is_stdin",
":",
"# assume this year (we have no other info)",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"startDateTime",
"=",
"datetime",
"(",
"now",
".",
"year",
... | Get start end end date of logfile before starting to parse. | [
"Get",
"start",
"end",
"end",
"date",
"of",
"logfile",
"before",
"starting",
"to",
"parse",
"."
] | a6a22910c3569c0c8a3908660ca218a4557e4249 | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/filters/datetime_filter.py#L108-L151 | train | 225,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.