id: int32 (values 0 to 252k)
repo: string (7 to 55 chars)
path: string (4 to 127 chars)
func_name: string (1 to 88 chars)
original_string: string (75 to 19.8k chars)
language: string (1 class)
code: string (51 to 19.8k chars)
code_tokens: list
docstring: string (3 to 17.3k chars)
docstring_tokens: list
sha: string (40 chars)
url: string (87 to 242 chars)
244,000
ethereum/py-evm
eth/chains/base.py
BaseChain.validate_chain
def validate_chain( cls, root: BlockHeader, descendants: Tuple[BlockHeader, ...], seal_check_random_sample_rate: int = 1) -> None: """ Validate that all of the descendents are valid, given that the root header is valid. By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers. This can be expensive. Instead, check a random sample of seals using seal_check_random_sample_rate. """ all_indices = range(len(descendants)) if seal_check_random_sample_rate == 1: indices_to_check_seal = set(all_indices) else: sample_size = len(all_indices) // seal_check_random_sample_rate indices_to_check_seal = set(random.sample(all_indices, sample_size)) header_pairs = sliding_window(2, concatv([root], descendants)) for index, (parent, child) in enumerate(header_pairs): if child.parent_hash != parent.hash: raise ValidationError( "Invalid header chain; {} has parent {}, but expected {}".format( child, child.parent_hash, parent.hash)) should_check_seal = index in indices_to_check_seal vm_class = cls.get_vm_class_for_block_number(child.block_number) try: vm_class.validate_header(child, parent, check_seal=should_check_seal) except ValidationError as exc: raise ValidationError( "%s is not a valid child of %s: %s" % ( child, parent, exc, ) ) from exc
python
def validate_chain( cls, root: BlockHeader, descendants: Tuple[BlockHeader, ...], seal_check_random_sample_rate: int = 1) -> None: all_indices = range(len(descendants)) if seal_check_random_sample_rate == 1: indices_to_check_seal = set(all_indices) else: sample_size = len(all_indices) // seal_check_random_sample_rate indices_to_check_seal = set(random.sample(all_indices, sample_size)) header_pairs = sliding_window(2, concatv([root], descendants)) for index, (parent, child) in enumerate(header_pairs): if child.parent_hash != parent.hash: raise ValidationError( "Invalid header chain; {} has parent {}, but expected {}".format( child, child.parent_hash, parent.hash)) should_check_seal = index in indices_to_check_seal vm_class = cls.get_vm_class_for_block_number(child.block_number) try: vm_class.validate_header(child, parent, check_seal=should_check_seal) except ValidationError as exc: raise ValidationError( "%s is not a valid child of %s: %s" % ( child, parent, exc, ) ) from exc
[ "def", "validate_chain", "(", "cls", ",", "root", ":", "BlockHeader", ",", "descendants", ":", "Tuple", "[", "BlockHeader", ",", "...", "]", ",", "seal_check_random_sample_rate", ":", "int", "=", "1", ")", "->", "None", ":", "all_indices", "=", "range", "(...
Validate that all of the descendents are valid, given that the root header is valid. By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers. This can be expensive. Instead, check a random sample of seals using seal_check_random_sample_rate.
[ "Validate", "that", "all", "of", "the", "descendents", "are", "valid", "given", "that", "the", "root", "header", "is", "valid", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L326-L364
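A note on the sampling logic in validate_chain above: when seal_check_random_sample_rate is greater than 1, only len(descendants) // rate randomly chosen headers receive the expensive seal check, while parent linkage is still verified for every pair. A minimal stand-alone sketch of that behaviour, assuming a toy Header namedtuple in place of py-evm's BlockHeader and a no-op where the seal check would run:

import random
from collections import namedtuple

# Toy stand-in for BlockHeader; only the fields the linkage check needs.
Header = namedtuple('Header', ['hash', 'parent_hash'])

def check_chain(root, descendants, seal_check_random_sample_rate=1):
    all_indices = range(len(descendants))
    if seal_check_random_sample_rate == 1:
        indices_to_check_seal = set(all_indices)
    else:
        sample_size = len(all_indices) // seal_check_random_sample_rate
        indices_to_check_seal = set(random.sample(all_indices, sample_size))

    parent = root
    for index, child in enumerate(descendants):
        # Parent linkage is always verified ...
        if child.parent_hash != parent.hash:
            raise ValueError("%s has parent %s, but expected %s"
                             % (child, child.parent_hash, parent.hash))
        # ... but the costly seal check only runs for the sampled indices.
        if index in indices_to_check_seal:
            pass  # seal validation would go here
        parent = child

root = Header(hash='a', parent_hash=None)
check_chain(root, [Header('b', 'a'), Header('c', 'b')], seal_check_random_sample_rate=2)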
244,001
ethereum/py-evm
eth/chains/base.py
Chain.from_genesis
def from_genesis(cls, base_db: BaseAtomicDB, genesis_params: Dict[str, HeaderParams], genesis_state: AccountState=None) -> 'BaseChain': """ Initializes the Chain from a genesis state. """ genesis_vm_class = cls.get_vm_class_for_block_number(BlockNumber(0)) pre_genesis_header = BlockHeader(difficulty=0, block_number=-1, gas_limit=0) state = genesis_vm_class.build_state(base_db, pre_genesis_header) if genesis_state is None: genesis_state = {} # mutation apply_state_dict(state, genesis_state) state.persist() if 'state_root' not in genesis_params: # If the genesis state_root was not specified, use the value # computed from the initialized state database. genesis_params = assoc(genesis_params, 'state_root', state.state_root) elif genesis_params['state_root'] != state.state_root: # If the genesis state_root was specified, validate that it matches # the computed state from the initialized state database. raise ValidationError( "The provided genesis state root does not match the computed " "genesis state root. Got {0}. Expected {1}".format( state.state_root, genesis_params['state_root'], ) ) genesis_header = BlockHeader(**genesis_params) return cls.from_genesis_header(base_db, genesis_header)
python
def from_genesis(cls, base_db: BaseAtomicDB, genesis_params: Dict[str, HeaderParams], genesis_state: AccountState=None) -> 'BaseChain': genesis_vm_class = cls.get_vm_class_for_block_number(BlockNumber(0)) pre_genesis_header = BlockHeader(difficulty=0, block_number=-1, gas_limit=0) state = genesis_vm_class.build_state(base_db, pre_genesis_header) if genesis_state is None: genesis_state = {} # mutation apply_state_dict(state, genesis_state) state.persist() if 'state_root' not in genesis_params: # If the genesis state_root was not specified, use the value # computed from the initialized state database. genesis_params = assoc(genesis_params, 'state_root', state.state_root) elif genesis_params['state_root'] != state.state_root: # If the genesis state_root was specified, validate that it matches # the computed state from the initialized state database. raise ValidationError( "The provided genesis state root does not match the computed " "genesis state root. Got {0}. Expected {1}".format( state.state_root, genesis_params['state_root'], ) ) genesis_header = BlockHeader(**genesis_params) return cls.from_genesis_header(base_db, genesis_header)
[ "def", "from_genesis", "(", "cls", ",", "base_db", ":", "BaseAtomicDB", ",", "genesis_params", ":", "Dict", "[", "str", ",", "HeaderParams", "]", ",", "genesis_state", ":", "AccountState", "=", "None", ")", "->", "'BaseChain'", ":", "genesis_vm_class", "=", ...
Initializes the Chain from a genesis state.
[ "Initializes", "the", "Chain", "from", "a", "genesis", "state", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L405-L440
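The state_root handling in from_genesis above (compute it when absent, validate it when supplied) relies on the assoc helper (available in toolz/cytoolz), which returns a new dict rather than mutating genesis_params. A tiny illustration with made-up values:

from toolz import assoc

genesis_params = {'difficulty': 1, 'gas_limit': 3141592}
computed_state_root = b'\x00' * 32  # placeholder for state.state_root

if 'state_root' not in genesis_params:
    # fill in the computed root without mutating the original dict
    genesis_params = assoc(genesis_params, 'state_root', computed_state_root)
elif genesis_params['state_root'] != computed_state_root:
    raise ValueError("provided genesis state root does not match the computed one")

assert genesis_params['state_root'] == computed_state_root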
244,002
ethereum/py-evm
eth/chains/base.py
Chain.get_vm
def get_vm(self, at_header: BlockHeader=None) -> 'BaseVM': """ Returns the VM instance for the given block number. """ header = self.ensure_header(at_header) vm_class = self.get_vm_class_for_block_number(header.block_number) return vm_class(header=header, chaindb=self.chaindb)
python
def get_vm(self, at_header: BlockHeader=None) -> 'BaseVM': header = self.ensure_header(at_header) vm_class = self.get_vm_class_for_block_number(header.block_number) return vm_class(header=header, chaindb=self.chaindb)
[ "def", "get_vm", "(", "self", ",", "at_header", ":", "BlockHeader", "=", "None", ")", "->", "'BaseVM'", ":", "header", "=", "self", ".", "ensure_header", "(", "at_header", ")", "vm_class", "=", "self", ".", "get_vm_class_for_block_number", "(", "header", "."...
Returns the VM instance for the given block number.
[ "Returns", "the", "VM", "instance", "for", "the", "given", "block", "number", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L456-L462
244,003
ethereum/py-evm
eth/chains/base.py
Chain.create_header_from_parent
def create_header_from_parent(self, parent_header: BlockHeader, **header_params: HeaderParams) -> BlockHeader: """ Passthrough helper to the VM class of the block descending from the given header. """ return self.get_vm_class_for_block_number( block_number=parent_header.block_number + 1, ).create_header_from_parent(parent_header, **header_params)
python
def create_header_from_parent(self, parent_header: BlockHeader, **header_params: HeaderParams) -> BlockHeader: return self.get_vm_class_for_block_number( block_number=parent_header.block_number + 1, ).create_header_from_parent(parent_header, **header_params)
[ "def", "create_header_from_parent", "(", "self", ",", "parent_header", ":", "BlockHeader", ",", "*", "*", "header_params", ":", "HeaderParams", ")", "->", "BlockHeader", ":", "return", "self", ".", "get_vm_class_for_block_number", "(", "block_number", "=", "parent_h...
Passthrough helper to the VM class of the block descending from the given header.
[ "Passthrough", "helper", "to", "the", "VM", "class", "of", "the", "block", "descending", "from", "the", "given", "header", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L467-L476
244,004
ethereum/py-evm
eth/chains/base.py
Chain.ensure_header
def ensure_header(self, header: BlockHeader=None) -> BlockHeader: """ Return ``header`` if it is not ``None``, otherwise return the header of the canonical head. """ if header is None: head = self.get_canonical_head() return self.create_header_from_parent(head) else: return header
python
def ensure_header(self, header: BlockHeader=None) -> BlockHeader: if header is None: head = self.get_canonical_head() return self.create_header_from_parent(head) else: return header
[ "def", "ensure_header", "(", "self", ",", "header", ":", "BlockHeader", "=", "None", ")", "->", "BlockHeader", ":", "if", "header", "is", "None", ":", "head", "=", "self", ".", "get_canonical_head", "(", ")", "return", "self", ".", "create_header_from_parent...
Return ``header`` if it is not ``None``, otherwise return the header of the canonical head.
[ "Return", "header", "if", "it", "is", "not", "None", "otherwise", "return", "the", "header", "of", "the", "canonical", "head", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L503-L512
244,005
ethereum/py-evm
eth/chains/base.py
Chain.get_ancestors
def get_ancestors(self, limit: int, header: BlockHeader) -> Tuple[BaseBlock, ...]: """ Return `limit` number of ancestor blocks from the current canonical head. """ ancestor_count = min(header.block_number, limit) # We construct a temporary block object vm_class = self.get_vm_class_for_block_number(header.block_number) block_class = vm_class.get_block_class() block = block_class(header=header, uncles=[]) ancestor_generator = iterate(compose( self.get_block_by_hash, operator.attrgetter('parent_hash'), operator.attrgetter('header'), ), block) # we peel off the first element from the iterator which will be the # temporary block object we constructed. next(ancestor_generator) return tuple(take(ancestor_count, ancestor_generator))
python
def get_ancestors(self, limit: int, header: BlockHeader) -> Tuple[BaseBlock, ...]: ancestor_count = min(header.block_number, limit) # We construct a temporary block object vm_class = self.get_vm_class_for_block_number(header.block_number) block_class = vm_class.get_block_class() block = block_class(header=header, uncles=[]) ancestor_generator = iterate(compose( self.get_block_by_hash, operator.attrgetter('parent_hash'), operator.attrgetter('header'), ), block) # we peel off the first element from the iterator which will be the # temporary block object we constructed. next(ancestor_generator) return tuple(take(ancestor_count, ancestor_generator))
[ "def", "get_ancestors", "(", "self", ",", "limit", ":", "int", ",", "header", ":", "BlockHeader", ")", "->", "Tuple", "[", "BaseBlock", ",", "...", "]", ":", "ancestor_count", "=", "min", "(", "header", ".", "block_number", ",", "limit", ")", "# We const...
Return `limit` number of ancestor blocks from the current canonical head.
[ "Return", "limit", "number", "of", "ancestor", "blocks", "from", "the", "current", "canonical", "head", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L517-L537
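The ancestor walk in get_ancestors above is built from iterate, compose, and take (the toolz/cytoolz helpers). A self-contained sketch of the same pattern, substituting a small in-memory dict for get_block_by_hash:

import operator
from collections import namedtuple

from toolz import compose, iterate, take

Header = namedtuple('Header', ['hash', 'parent_hash'])
Block = namedtuple('Block', ['hash', 'header'])

blocks_by_hash = {
    'a': Block('a', Header('a', None)),
    'b': Block('b', Header('b', 'a')),
    'c': Block('c', Header('c', 'b')),
}

def get_ancestors(limit, block):
    # each step: block -> header -> parent_hash -> parent block
    ancestor_generator = iterate(compose(
        blocks_by_hash.__getitem__,
        operator.attrgetter('parent_hash'),
        operator.attrgetter('header'),
    ), block)
    next(ancestor_generator)  # peel off the starting block itself
    return tuple(take(limit, ancestor_generator))

print(get_ancestors(2, blocks_by_hash['c']))  # blocks 'b' and 'a'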
244,006
ethereum/py-evm
eth/chains/base.py
Chain.get_block_by_hash
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock: """ Returns the requested block as specified by block hash. """ validate_word(block_hash, title="Block Hash") block_header = self.get_block_header_by_hash(block_hash) return self.get_block_by_header(block_header)
python
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock: validate_word(block_hash, title="Block Hash") block_header = self.get_block_header_by_hash(block_hash) return self.get_block_by_header(block_header)
[ "def", "get_block_by_hash", "(", "self", ",", "block_hash", ":", "Hash32", ")", "->", "BaseBlock", ":", "validate_word", "(", "block_hash", ",", "title", "=", "\"Block Hash\"", ")", "block_header", "=", "self", ".", "get_block_header_by_hash", "(", "block_hash", ...
Returns the requested block as specified by block hash.
[ "Returns", "the", "requested", "block", "as", "specified", "by", "block", "hash", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L545-L551
244,007
ethereum/py-evm
eth/chains/base.py
Chain.get_block_by_header
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock: """ Returns the requested block as specified by the block header. """ vm = self.get_vm(block_header) return vm.block
python
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock: vm = self.get_vm(block_header) return vm.block
[ "def", "get_block_by_header", "(", "self", ",", "block_header", ":", "BlockHeader", ")", "->", "BaseBlock", ":", "vm", "=", "self", ".", "get_vm", "(", "block_header", ")", "return", "vm", ".", "block" ]
Returns the requested block as specified by the block header.
[ "Returns", "the", "requested", "block", "as", "specified", "by", "the", "block", "header", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L553-L558
244,008
ethereum/py-evm
eth/chains/base.py
Chain.get_canonical_block_by_number
def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock: """ Returns the block with the given number in the canonical chain. Raises BlockNotFound if there's no block with the given number in the canonical chain. """ validate_uint256(block_number, title="Block Number") return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number))
python
def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock: validate_uint256(block_number, title="Block Number") return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number))
[ "def", "get_canonical_block_by_number", "(", "self", ",", "block_number", ":", "BlockNumber", ")", "->", "BaseBlock", ":", "validate_uint256", "(", "block_number", ",", "title", "=", "\"Block Number\"", ")", "return", "self", ".", "get_block_by_hash", "(", "self", ...
Returns the block with the given number in the canonical chain. Raises BlockNotFound if there's no block with the given number in the canonical chain.
[ "Returns", "the", "block", "with", "the", "given", "number", "in", "the", "canonical", "chain", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L560-L568
244,009
ethereum/py-evm
eth/chains/base.py
Chain.get_canonical_transaction
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction: """ Returns the requested transaction as specified by the transaction hash from the canonical chain. Raises TransactionNotFound if no transaction with the specified hash is found in the main chain. """ (block_num, index) = self.chaindb.get_transaction_index(transaction_hash) VM_class = self.get_vm_class_for_block_number(block_num) transaction = self.chaindb.get_transaction_by_index( block_num, index, VM_class.get_transaction_class(), ) if transaction.hash == transaction_hash: return transaction else: raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format( encode_hex(transaction.hash), encode_hex(transaction_hash), block_num, index, ))
python
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction: (block_num, index) = self.chaindb.get_transaction_index(transaction_hash) VM_class = self.get_vm_class_for_block_number(block_num) transaction = self.chaindb.get_transaction_by_index( block_num, index, VM_class.get_transaction_class(), ) if transaction.hash == transaction_hash: return transaction else: raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format( encode_hex(transaction.hash), encode_hex(transaction_hash), block_num, index, ))
[ "def", "get_canonical_transaction", "(", "self", ",", "transaction_hash", ":", "Hash32", ")", "->", "BaseTransaction", ":", "(", "block_num", ",", "index", ")", "=", "self", ".", "chaindb", ".", "get_transaction_index", "(", "transaction_hash", ")", "VM_class", ...
Returns the requested transaction as specified by the transaction hash from the canonical chain. Raises TransactionNotFound if no transaction with the specified hash is found in the main chain.
[ "Returns", "the", "requested", "transaction", "as", "specified", "by", "the", "transaction", "hash", "from", "the", "canonical", "chain", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L604-L629
244,010
ethereum/py-evm
eth/chains/base.py
Chain.estimate_gas
def estimate_gas( self, transaction: BaseOrSpoofTransaction, at_header: BlockHeader=None) -> int: """ Returns an estimation of the amount of gas the given transaction will use if executed on top of the block specified by the given header. """ if at_header is None: at_header = self.get_canonical_head() with self.get_vm(at_header).state_in_temp_block() as state: return self.gas_estimator(state, transaction)
python
def estimate_gas( self, transaction: BaseOrSpoofTransaction, at_header: BlockHeader=None) -> int: if at_header is None: at_header = self.get_canonical_head() with self.get_vm(at_header).state_in_temp_block() as state: return self.gas_estimator(state, transaction)
[ "def", "estimate_gas", "(", "self", ",", "transaction", ":", "BaseOrSpoofTransaction", ",", "at_header", ":", "BlockHeader", "=", "None", ")", "->", "int", ":", "if", "at_header", "is", "None", ":", "at_header", "=", "self", ".", "get_canonical_head", "(", "...
Returns an estimation of the amount of gas the given transaction will use if executed on top of the block specified by the given header.
[ "Returns", "an", "estimation", "of", "the", "amount", "of", "gas", "the", "given", "transaction", "will", "use", "if", "executed", "on", "top", "of", "the", "block", "specified", "by", "the", "given", "header", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L685-L696
244,011
ethereum/py-evm
eth/chains/base.py
Chain.import_block
def import_block(self, block: BaseBlock, perform_validation: bool=True ) -> Tuple[BaseBlock, Tuple[BaseBlock, ...], Tuple[BaseBlock, ...]]: """ Imports a complete block and returns a 3-tuple - the imported block - a tuple of blocks which are now part of the canonical chain. - a tuple of blocks which were canonical and now are no longer canonical. """ try: parent_header = self.get_block_header_by_hash(block.header.parent_hash) except HeaderNotFound: raise ValidationError( "Attempt to import block #{}. Cannot import block {} before importing " "its parent block at {}".format( block.number, block.hash, block.header.parent_hash, ) ) base_header_for_import = self.create_header_from_parent(parent_header) imported_block = self.get_vm(base_header_for_import).import_block(block) # Validate the imported block. if perform_validation: validate_imported_block_unchanged(imported_block, block) self.validate_block(imported_block) ( new_canonical_hashes, old_canonical_hashes, ) = self.chaindb.persist_block(imported_block) self.logger.debug( 'IMPORTED_BLOCK: number %s | hash %s', imported_block.number, encode_hex(imported_block.hash), ) new_canonical_blocks = tuple( self.get_block_by_hash(header_hash) for header_hash in new_canonical_hashes ) old_canonical_blocks = tuple( self.get_block_by_hash(header_hash) for header_hash in old_canonical_hashes ) return imported_block, new_canonical_blocks, old_canonical_blocks
python
def import_block(self, block: BaseBlock, perform_validation: bool=True ) -> Tuple[BaseBlock, Tuple[BaseBlock, ...], Tuple[BaseBlock, ...]]: try: parent_header = self.get_block_header_by_hash(block.header.parent_hash) except HeaderNotFound: raise ValidationError( "Attempt to import block #{}. Cannot import block {} before importing " "its parent block at {}".format( block.number, block.hash, block.header.parent_hash, ) ) base_header_for_import = self.create_header_from_parent(parent_header) imported_block = self.get_vm(base_header_for_import).import_block(block) # Validate the imported block. if perform_validation: validate_imported_block_unchanged(imported_block, block) self.validate_block(imported_block) ( new_canonical_hashes, old_canonical_hashes, ) = self.chaindb.persist_block(imported_block) self.logger.debug( 'IMPORTED_BLOCK: number %s | hash %s', imported_block.number, encode_hex(imported_block.hash), ) new_canonical_blocks = tuple( self.get_block_by_hash(header_hash) for header_hash in new_canonical_hashes ) old_canonical_blocks = tuple( self.get_block_by_hash(header_hash) for header_hash in old_canonical_hashes ) return imported_block, new_canonical_blocks, old_canonical_blocks
[ "def", "import_block", "(", "self", ",", "block", ":", "BaseBlock", ",", "perform_validation", ":", "bool", "=", "True", ")", "->", "Tuple", "[", "BaseBlock", ",", "Tuple", "[", "BaseBlock", ",", "...", "]", ",", "Tuple", "[", "BaseBlock", ",", "...", ...
Imports a complete block and returns a 3-tuple - the imported block - a tuple of blocks which are now part of the canonical chain. - a tuple of blocks which were canonical and now are no longer canonical.
[ "Imports", "a", "complete", "block", "and", "returns", "a", "3", "-", "tuple" ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L698-L752
244,012
ethereum/py-evm
eth/chains/base.py
Chain.validate_block
def validate_block(self, block: BaseBlock) -> None: """ Performs validation on a block that is either being mined or imported. Since block validation (specifically the uncle validation) must have access to the ancestor blocks, this validation must occur at the Chain level. Cannot be used to validate genesis block. """ if block.is_genesis: raise ValidationError("Cannot validate genesis block this way") VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number)) parent_block = self.get_block_by_hash(block.header.parent_hash) VM_class.validate_header(block.header, parent_block.header, check_seal=True) self.validate_uncles(block) self.validate_gaslimit(block.header)
python
def validate_block(self, block: BaseBlock) -> None: if block.is_genesis: raise ValidationError("Cannot validate genesis block this way") VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number)) parent_block = self.get_block_by_hash(block.header.parent_hash) VM_class.validate_header(block.header, parent_block.header, check_seal=True) self.validate_uncles(block) self.validate_gaslimit(block.header)
[ "def", "validate_block", "(", "self", ",", "block", ":", "BaseBlock", ")", "->", "None", ":", "if", "block", ".", "is_genesis", ":", "raise", "ValidationError", "(", "\"Cannot validate genesis block this way\"", ")", "VM_class", "=", "self", ".", "get_vm_class_for...
Performs validation on a block that is either being mined or imported. Since block validation (specifically the uncle validation) must have access to the ancestor blocks, this validation must occur at the Chain level. Cannot be used to validate genesis block.
[ "Performs", "validation", "on", "a", "block", "that", "is", "either", "being", "mined", "or", "imported", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L761-L777
244,013
ethereum/py-evm
eth/chains/base.py
Chain.validate_gaslimit
def validate_gaslimit(self, header: BlockHeader) -> None: """ Validate the gas limit on the given header. """ parent_header = self.get_block_header_by_hash(header.parent_hash) low_bound, high_bound = compute_gas_limit_bounds(parent_header) if header.gas_limit < low_bound: raise ValidationError( "The gas limit on block {0} is too low: {1}. It must be at least {2}".format( encode_hex(header.hash), header.gas_limit, low_bound)) elif header.gas_limit > high_bound: raise ValidationError( "The gas limit on block {0} is too high: {1}. It must be at most {2}".format( encode_hex(header.hash), header.gas_limit, high_bound))
python
def validate_gaslimit(self, header: BlockHeader) -> None: parent_header = self.get_block_header_by_hash(header.parent_hash) low_bound, high_bound = compute_gas_limit_bounds(parent_header) if header.gas_limit < low_bound: raise ValidationError( "The gas limit on block {0} is too low: {1}. It must be at least {2}".format( encode_hex(header.hash), header.gas_limit, low_bound)) elif header.gas_limit > high_bound: raise ValidationError( "The gas limit on block {0} is too high: {1}. It must be at most {2}".format( encode_hex(header.hash), header.gas_limit, high_bound))
[ "def", "validate_gaslimit", "(", "self", ",", "header", ":", "BlockHeader", ")", "->", "None", ":", "parent_header", "=", "self", ".", "get_block_header_by_hash", "(", "header", ".", "parent_hash", ")", "low_bound", ",", "high_bound", "=", "compute_gas_limit_bound...
Validate the gas limit on the given header.
[ "Validate", "the", "gas", "limit", "on", "the", "given", "header", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L786-L799
244,014
ethereum/py-evm
eth/chains/base.py
Chain.validate_uncles
def validate_uncles(self, block: BaseBlock) -> None: """ Validate the uncles for the given block. """ has_uncles = len(block.uncles) > 0 should_have_uncles = block.header.uncles_hash != EMPTY_UNCLE_HASH if not has_uncles and not should_have_uncles: # optimization to avoid loading ancestors from DB, since the block has no uncles return elif has_uncles and not should_have_uncles: raise ValidationError("Block has uncles but header suggests uncles should be empty") elif should_have_uncles and not has_uncles: raise ValidationError("Header suggests block should have uncles but block has none") # Check for duplicates uncle_groups = groupby(operator.attrgetter('hash'), block.uncles) duplicate_uncles = tuple(sorted( hash for hash, twins in uncle_groups.items() if len(twins) > 1 )) if duplicate_uncles: raise ValidationError( "Block contains duplicate uncles:\n" " - {0}".format(' - '.join(duplicate_uncles)) ) recent_ancestors = tuple( ancestor for ancestor in self.get_ancestors(MAX_UNCLE_DEPTH + 1, header=block.header) ) recent_ancestor_hashes = {ancestor.hash for ancestor in recent_ancestors} recent_uncle_hashes = _extract_uncle_hashes(recent_ancestors) for uncle in block.uncles: if uncle.hash == block.hash: raise ValidationError("Uncle has same hash as block") # ensure the uncle has not already been included. if uncle.hash in recent_uncle_hashes: raise ValidationError( "Duplicate uncle: {0}".format(encode_hex(uncle.hash)) ) # ensure that the uncle is not one of the canonical chain blocks. if uncle.hash in recent_ancestor_hashes: raise ValidationError( "Uncle {0} cannot be an ancestor of {1}".format( encode_hex(uncle.hash), encode_hex(block.hash))) # ensure that the uncle was built off of one of the canonical chain # blocks. if uncle.parent_hash not in recent_ancestor_hashes or ( uncle.parent_hash == block.header.parent_hash): raise ValidationError( "Uncle's parent {0} is not an ancestor of {1}".format( encode_hex(uncle.parent_hash), encode_hex(block.hash))) # Now perform VM level validation of the uncle self.validate_seal(uncle) try: uncle_parent = self.get_block_header_by_hash(uncle.parent_hash) except HeaderNotFound: raise ValidationError( "Uncle ancestor not found: {0}".format(uncle.parent_hash) ) uncle_vm_class = self.get_vm_class_for_block_number(uncle.block_number) uncle_vm_class.validate_uncle(block, uncle, uncle_parent)
python
def validate_uncles(self, block: BaseBlock) -> None: has_uncles = len(block.uncles) > 0 should_have_uncles = block.header.uncles_hash != EMPTY_UNCLE_HASH if not has_uncles and not should_have_uncles: # optimization to avoid loading ancestors from DB, since the block has no uncles return elif has_uncles and not should_have_uncles: raise ValidationError("Block has uncles but header suggests uncles should be empty") elif should_have_uncles and not has_uncles: raise ValidationError("Header suggests block should have uncles but block has none") # Check for duplicates uncle_groups = groupby(operator.attrgetter('hash'), block.uncles) duplicate_uncles = tuple(sorted( hash for hash, twins in uncle_groups.items() if len(twins) > 1 )) if duplicate_uncles: raise ValidationError( "Block contains duplicate uncles:\n" " - {0}".format(' - '.join(duplicate_uncles)) ) recent_ancestors = tuple( ancestor for ancestor in self.get_ancestors(MAX_UNCLE_DEPTH + 1, header=block.header) ) recent_ancestor_hashes = {ancestor.hash for ancestor in recent_ancestors} recent_uncle_hashes = _extract_uncle_hashes(recent_ancestors) for uncle in block.uncles: if uncle.hash == block.hash: raise ValidationError("Uncle has same hash as block") # ensure the uncle has not already been included. if uncle.hash in recent_uncle_hashes: raise ValidationError( "Duplicate uncle: {0}".format(encode_hex(uncle.hash)) ) # ensure that the uncle is not one of the canonical chain blocks. if uncle.hash in recent_ancestor_hashes: raise ValidationError( "Uncle {0} cannot be an ancestor of {1}".format( encode_hex(uncle.hash), encode_hex(block.hash))) # ensure that the uncle was built off of one of the canonical chain # blocks. if uncle.parent_hash not in recent_ancestor_hashes or ( uncle.parent_hash == block.header.parent_hash): raise ValidationError( "Uncle's parent {0} is not an ancestor of {1}".format( encode_hex(uncle.parent_hash), encode_hex(block.hash))) # Now perform VM level validation of the uncle self.validate_seal(uncle) try: uncle_parent = self.get_block_header_by_hash(uncle.parent_hash) except HeaderNotFound: raise ValidationError( "Uncle ancestor not found: {0}".format(uncle.parent_hash) ) uncle_vm_class = self.get_vm_class_for_block_number(uncle.block_number) uncle_vm_class.validate_uncle(block, uncle, uncle_parent)
[ "def", "validate_uncles", "(", "self", ",", "block", ":", "BaseBlock", ")", "->", "None", ":", "has_uncles", "=", "len", "(", "block", ".", "uncles", ")", ">", "0", "should_have_uncles", "=", "block", ".", "header", ".", "uncles_hash", "!=", "EMPTY_UNCLE_H...
Validate the uncles for the given block.
[ "Validate", "the", "uncles", "for", "the", "given", "block", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L801-L870
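One detail worth pulling out of validate_uncles above is the duplicate detection: groupby (from toolz/cytoolz) buckets the uncles by hash, and any bucket with more than one member is a duplicate. A minimal sketch with toy uncle objects:

import operator
from collections import namedtuple

from toolz import groupby

Uncle = namedtuple('Uncle', ['hash'])
uncles = [Uncle('u1'), Uncle('u2'), Uncle('u1')]

# group uncles by hash; a group with more than one member means a duplicate
uncle_groups = groupby(operator.attrgetter('hash'), uncles)
duplicate_uncles = tuple(sorted(
    h for h, twins in uncle_groups.items() if len(twins) > 1
))
print(duplicate_uncles)  # ('u1',)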
244,015
ethereum/py-evm
eth/chains/base.py
MiningChain.apply_transaction
def apply_transaction(self, transaction: BaseTransaction ) -> Tuple[BaseBlock, Receipt, BaseComputation]: """ Applies the transaction to the current tip block. WARNING: Receipt and Transaction trie generation is computationally heavy and incurs significant performance overhead. """ vm = self.get_vm(self.header) base_block = vm.block receipt, computation = vm.apply_transaction(base_block.header, transaction) header_with_receipt = vm.add_receipt_to_header(base_block.header, receipt) # since we are building the block locally, we have to persist all the incremental state vm.state.persist() new_header = header_with_receipt.copy(state_root=vm.state.state_root) transactions = base_block.transactions + (transaction, ) receipts = base_block.get_receipts(self.chaindb) + (receipt, ) new_block = vm.set_block_transactions(base_block, new_header, transactions, receipts) self.header = new_block.header return new_block, receipt, computation
python
def apply_transaction(self, transaction: BaseTransaction ) -> Tuple[BaseBlock, Receipt, BaseComputation]: vm = self.get_vm(self.header) base_block = vm.block receipt, computation = vm.apply_transaction(base_block.header, transaction) header_with_receipt = vm.add_receipt_to_header(base_block.header, receipt) # since we are building the block locally, we have to persist all the incremental state vm.state.persist() new_header = header_with_receipt.copy(state_root=vm.state.state_root) transactions = base_block.transactions + (transaction, ) receipts = base_block.get_receipts(self.chaindb) + (receipt, ) new_block = vm.set_block_transactions(base_block, new_header, transactions, receipts) self.header = new_block.header return new_block, receipt, computation
[ "def", "apply_transaction", "(", "self", ",", "transaction", ":", "BaseTransaction", ")", "->", "Tuple", "[", "BaseBlock", ",", "Receipt", ",", "BaseComputation", "]", ":", "vm", "=", "self", ".", "get_vm", "(", "self", ".", "header", ")", "base_block", "=...
Applies the transaction to the current tip block. WARNING: Receipt and Transaction trie generation is computationally heavy and incurs significant performance overhead.
[ "Applies", "the", "transaction", "to", "the", "current", "tip", "block", "." ]
58346848f076116381d3274bbcea96b9e2cfcbdf
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L887-L913
244,016
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
wait_for_host
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None, ssl_pymongo_options=None): """ Ping server and wait for response. Ping a mongod or mongos every `interval` seconds until it responds, or `timeout` seconds have passed. If `to_start` is set to False, will wait for the node to shut down instead. This function can be called as a separate thread. If queue is provided, it will place the results in the message queue and return, otherwise it will just return the result directly. """ host = 'localhost:%i' % port start_time = time.time() while True: if (time.time() - start_time) > timeout: if queue: queue.put_nowait((port, False)) return False try: # make connection and ping host con = MongoConnection(host, **(ssl_pymongo_options or {})) con.admin.command('ping') if to_start: if queue: queue.put_nowait((port, True)) return True else: time.sleep(interval) except Exception: if to_start: time.sleep(interval) else: if queue: queue.put_nowait((port, True)) return True
python
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None, ssl_pymongo_options=None): host = 'localhost:%i' % port start_time = time.time() while True: if (time.time() - start_time) > timeout: if queue: queue.put_nowait((port, False)) return False try: # make connection and ping host con = MongoConnection(host, **(ssl_pymongo_options or {})) con.admin.command('ping') if to_start: if queue: queue.put_nowait((port, True)) return True else: time.sleep(interval) except Exception: if to_start: time.sleep(interval) else: if queue: queue.put_nowait((port, True)) return True
[ "def", "wait_for_host", "(", "port", ",", "interval", "=", "1", ",", "timeout", "=", "30", ",", "to_start", "=", "True", ",", "queue", "=", "None", ",", "ssl_pymongo_options", "=", "None", ")", ":", "host", "=", "'localhost:%i'", "%", "port", "start_time...
Ping server and wait for response. Ping a mongod or mongos every `interval` seconds until it responds, or `timeout` seconds have passed. If `to_start` is set to False, will wait for the node to shut down instead. This function can be called as a separate thread. If queue is provided, it will place the results in the message queue and return, otherwise it will just return the result directly.
[ "Ping", "server", "and", "wait", "for", "response", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L73-L110
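The structure of wait_for_host above is a generic poll-until-deadline loop. The sketch below shows the same shape against a plain TCP socket instead of a MongoDB ping; probe_port is a hypothetical helper, not part of mtools, and it only checks that something accepts connections on the port:

import socket
import time

def probe_port(port, interval=1, timeout=30, to_start=True):
    start_time = time.time()
    while time.time() - start_time <= timeout:
        try:
            # a successful connect counts as "up"
            with socket.create_connection(('localhost', port), timeout=interval):
                up = True
        except OSError:
            up = False
        if up == to_start:
            return True
        time.sleep(interval)
    return False

print(probe_port(27017, timeout=5))  # True once a server listens on 27017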
244,017
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
shutdown_host
def shutdown_host(port, username=None, password=None, authdb=None): """ Send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread. """ host = 'localhost:%i' % port try: mc = MongoConnection(host) try: if username and password and authdb: if authdb != "admin": raise RuntimeError("given username/password is not for " "admin database") else: try: mc.admin.authenticate(name=username, password=password) except OperationFailure: # perhaps auth is not required pass mc.admin.command('shutdown', force=True) except AutoReconnect: pass except OperationFailure: print("Error: cannot authenticate to shut down %s." % host) return except ConnectionFailure: pass else: mc.close()
python
def shutdown_host(port, username=None, password=None, authdb=None): host = 'localhost:%i' % port try: mc = MongoConnection(host) try: if username and password and authdb: if authdb != "admin": raise RuntimeError("given username/password is not for " "admin database") else: try: mc.admin.authenticate(name=username, password=password) except OperationFailure: # perhaps auth is not required pass mc.admin.command('shutdown', force=True) except AutoReconnect: pass except OperationFailure: print("Error: cannot authenticate to shut down %s." % host) return except ConnectionFailure: pass else: mc.close()
[ "def", "shutdown_host", "(", "port", ",", "username", "=", "None", ",", "password", "=", "None", ",", "authdb", "=", "None", ")", ":", "host", "=", "'localhost:%i'", "%", "port", "try", ":", "mc", "=", "MongoConnection", "(", "host", ")", "try", ":", ...
Send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread.
[ "Send", "the", "shutdown", "command", "to", "a", "mongod", "or", "mongos", "on", "given", "port", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L113-L144
244,018
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.start
def start(self): """Sub-command start.""" self.discover() # startup_info only gets loaded from protocol version 2 on, # check if it's loaded if not self.startup_info: # hack to make environment startable with older protocol # versions < 2: try to start nodes via init if all nodes are down if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])): self.args = self.loaded_args print("upgrading mlaunch environment meta-data.") return self.init() else: raise SystemExit("These nodes were created with an older " "version of mlaunch (v1.1.1 or below). To " "upgrade this environment and make use of " "the start/stop/list commands, stop all " "nodes manually, then run 'mlaunch start' " "again. You only have to do this once.") # if new unknown_args are present, compare them with loaded ones # (here we can be certain of protocol v2+) if (self.args['binarypath'] is not None or (self.unknown_args and set(self.unknown_args) != set(self.loaded_unknown_args))): # store current args, use self.args from file (self.loaded_args) start_args = self.args self.args = self.loaded_args self.args['binarypath'] = start_args['binarypath'] # construct new startup strings with updated unknown args. # They are for this start only and will not be persisted in # the .mlaunch_startup file self._construct_cmdlines() # reset to original args for this start command self.args = start_args matches = self._get_ports_from_args(self.args, 'down') if len(matches) == 0: raise SystemExit('no nodes started.') # start config servers first config_matches = self.get_tagged(['config']).intersection(matches) self._start_on_ports(config_matches, wait=True) # start shards next mongod_matches = (self.get_tagged(['mongod']) - self.get_tagged(['config'])) mongod_matches = mongod_matches.intersection(matches) self._start_on_ports(mongod_matches, wait=True) # now start mongos mongos_matches = self.get_tagged(['mongos']).intersection(matches) self._start_on_ports(mongos_matches) # wait for all matched nodes to be running self.wait_for(matches) # refresh discover self.discover()
python
def start(self): self.discover() # startup_info only gets loaded from protocol version 2 on, # check if it's loaded if not self.startup_info: # hack to make environment startable with older protocol # versions < 2: try to start nodes via init if all nodes are down if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])): self.args = self.loaded_args print("upgrading mlaunch environment meta-data.") return self.init() else: raise SystemExit("These nodes were created with an older " "version of mlaunch (v1.1.1 or below). To " "upgrade this environment and make use of " "the start/stop/list commands, stop all " "nodes manually, then run 'mlaunch start' " "again. You only have to do this once.") # if new unknown_args are present, compare them with loaded ones # (here we can be certain of protocol v2+) if (self.args['binarypath'] is not None or (self.unknown_args and set(self.unknown_args) != set(self.loaded_unknown_args))): # store current args, use self.args from file (self.loaded_args) start_args = self.args self.args = self.loaded_args self.args['binarypath'] = start_args['binarypath'] # construct new startup strings with updated unknown args. # They are for this start only and will not be persisted in # the .mlaunch_startup file self._construct_cmdlines() # reset to original args for this start command self.args = start_args matches = self._get_ports_from_args(self.args, 'down') if len(matches) == 0: raise SystemExit('no nodes started.') # start config servers first config_matches = self.get_tagged(['config']).intersection(matches) self._start_on_ports(config_matches, wait=True) # start shards next mongod_matches = (self.get_tagged(['mongod']) - self.get_tagged(['config'])) mongod_matches = mongod_matches.intersection(matches) self._start_on_ports(mongod_matches, wait=True) # now start mongos mongos_matches = self.get_tagged(['mongos']).intersection(matches) self._start_on_ports(mongos_matches) # wait for all matched nodes to be running self.wait_for(matches) # refresh discover self.discover()
[ "def", "start", "(", "self", ")", ":", "self", ".", "discover", "(", ")", "# startup_info only gets loaded from protocol version 2 on,", "# check if it's loaded", "if", "not", "self", ".", "startup_info", ":", "# hack to make environment startable with older protocol", "# ver...
Sub-command start.
[ "Sub", "-", "command", "start", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L861-L923
244,019
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.is_running
def is_running(self, port): """Return True if a host on a specific port is running.""" try: con = self.client('localhost:%s' % port) con.admin.command('ping') return True except (AutoReconnect, ConnectionFailure, OperationFailure): # Catch OperationFailure to work around SERVER-31916. return False
python
def is_running(self, port): try: con = self.client('localhost:%s' % port) con.admin.command('ping') return True except (AutoReconnect, ConnectionFailure, OperationFailure): # Catch OperationFailure to work around SERVER-31916. return False
[ "def", "is_running", "(", "self", ",", "port", ")", ":", "try", ":", "con", "=", "self", ".", "client", "(", "'localhost:%s'", "%", "port", ")", "con", ".", "admin", ".", "command", "(", "'ping'", ")", "return", "True", "except", "(", "AutoReconnect", ...
Return True if a host on a specific port is running.
[ "Return", "True", "if", "a", "host", "on", "a", "specific", "port", "is", "running", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1290-L1298
244,020
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.get_tagged
def get_tagged(self, tags): """ Tag format. The format for the tags list is tuples for tags: mongos, config, shard, secondary tags of the form (tag, number), e.g. ('mongos', 2) which references the second mongos in the list. For all other tags, it is simply the string, e.g. 'primary'. """ # if tags is a simple string, make it a list (note: tuples like # ('mongos', 2) must be in a surrounding list) if not hasattr(tags, '__iter__') and type(tags) == str: tags = [tags] nodes = set(self.cluster_tags['all']) for tag in tags: if re.match(r"\w+ \d{1,2}", tag): # special case for tuple tags: mongos, config, shard, # secondary. These can contain a number tag, number = tag.split() try: branch = self.cluster_tree[tag][int(number) - 1] except (IndexError, KeyError): continue if hasattr(branch, '__iter__'): subset = set(branch) else: subset = set([branch]) else: # otherwise use tags dict to get the subset subset = set(self.cluster_tags[tag]) nodes = nodes.intersection(subset) return nodes
python
def get_tagged(self, tags): # if tags is a simple string, make it a list (note: tuples like # ('mongos', 2) must be in a surrounding list) if not hasattr(tags, '__iter__') and type(tags) == str: tags = [tags] nodes = set(self.cluster_tags['all']) for tag in tags: if re.match(r"\w+ \d{1,2}", tag): # special case for tuple tags: mongos, config, shard, # secondary. These can contain a number tag, number = tag.split() try: branch = self.cluster_tree[tag][int(number) - 1] except (IndexError, KeyError): continue if hasattr(branch, '__iter__'): subset = set(branch) else: subset = set([branch]) else: # otherwise use tags dict to get the subset subset = set(self.cluster_tags[tag]) nodes = nodes.intersection(subset) return nodes
[ "def", "get_tagged", "(", "self", ",", "tags", ")", ":", "# if tags is a simple string, make it a list (note: tuples like", "# ('mongos', 2) must be in a surrounding list)", "if", "not", "hasattr", "(", "tags", ",", "'__iter__'", ")", "and", "type", "(", "tags", ")", "=...
Tag format. The format for the tags list is tuples for tags: mongos, config, shard, secondary tags of the form (tag, number), e.g. ('mongos', 2) which references the second mongos in the list. For all other tags, it is simply the string, e.g. 'primary'.
[ "Tag", "format", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1300-L1337
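Two usage notes on get_tagged above: tags in the list are intersected, so a node must match all of them, and numbered tags are passed as space-separated strings such as 'mongos 2' (the regex branch splits them into a name and a 1-based index), even though the docstring describes them as tuples. A hypothetical call against an already-discovered MLaunchTool instance named tool:

# every mongod that is currently down (intersection of both tag sets)
down_mongods = tool.get_tagged(['mongod', 'down'])

# the second mongos in the cluster, addressed with a numbered string tag
second_mongos = tool.get_tagged(['mongos 2'])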
244,021
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.get_tags_of_port
def get_tags_of_port(self, port): """ Get all tags related to a given port. This is the inverse of what is stored in self.cluster_tags). """ return(sorted([tag for tag in self.cluster_tags if port in self.cluster_tags[tag]]))
python
def get_tags_of_port(self, port): return(sorted([tag for tag in self.cluster_tags if port in self.cluster_tags[tag]]))
[ "def", "get_tags_of_port", "(", "self", ",", "port", ")", ":", "return", "(", "sorted", "(", "[", "tag", "for", "tag", "in", "self", ".", "cluster_tags", "if", "port", "in", "self", ".", "cluster_tags", "[", "tag", "]", "]", ")", ")" ]
Get all tags related to a given port. This is the inverse of what is stored in self.cluster_tags).
[ "Get", "all", "tags", "related", "to", "a", "given", "port", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1339-L1346
244,022
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.wait_for
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True): """ Spawn threads to ping host using a list of ports. Returns when all hosts are running (if to_start=True) / shut down (if to_start=False). """ threads = [] queue = Queue.Queue() for port in ports: threads.append(threading.Thread(target=wait_for_host, args=( port, interval, timeout, to_start, queue, self.ssl_pymongo_options))) if self.args and 'verbose' in self.args and self.args['verbose']: print("waiting for nodes %s..." % ('to start' if to_start else 'to shutdown')) for thread in threads: thread.start() for thread in threads: thread.join() # get all results back and return tuple return tuple(queue.get_nowait() for _ in ports)
python
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True): threads = [] queue = Queue.Queue() for port in ports: threads.append(threading.Thread(target=wait_for_host, args=( port, interval, timeout, to_start, queue, self.ssl_pymongo_options))) if self.args and 'verbose' in self.args and self.args['verbose']: print("waiting for nodes %s..." % ('to start' if to_start else 'to shutdown')) for thread in threads: thread.start() for thread in threads: thread.join() # get all results back and return tuple return tuple(queue.get_nowait() for _ in ports)
[ "def", "wait_for", "(", "self", ",", "ports", ",", "interval", "=", "1.0", ",", "timeout", "=", "30", ",", "to_start", "=", "True", ")", ":", "threads", "=", "[", "]", "queue", "=", "Queue", ".", "Queue", "(", ")", "for", "port", "in", "ports", "...
Spawn threads to ping host using a list of ports. Returns when all hosts are running (if to_start=True) / shut down (if to_start=False).
[ "Spawn", "threads", "to", "ping", "host", "using", "a", "list", "of", "ports", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1348-L1374
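wait_for above fans work out to one thread per port and collects results over a queue; because every thread is joined before the queue is drained, get_nowait is safe. A stand-alone sketch of the pattern (Python 3 queue module, with a dummy worker in place of wait_for_host):

import queue
import threading

def worker(port, results):
    # stand-in for wait_for_host: report (port, success) on the shared queue
    results.put_nowait((port, True))

def wait_for_all(ports):
    results = queue.Queue()
    threads = [threading.Thread(target=worker, args=(port, results))
               for port in ports]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    # all workers have finished, so exactly len(ports) results are queued
    return tuple(results.get_nowait() for _ in ports)

print(wait_for_all([27017, 27018, 27019]))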
244,023
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._load_parameters
def _load_parameters(self): """ Load the .mlaunch_startup file that exists in each datadir. Handles different protocol versions. """ datapath = self.dir startup_file = os.path.join(datapath, '.mlaunch_startup') if not os.path.exists(startup_file): return False in_dict = json.load(open(startup_file, 'rb')) # handle legacy version without versioned protocol if 'protocol_version' not in in_dict: in_dict['protocol_version'] = 1 self.loaded_args = in_dict self.startup_info = {} # hostname was added recently self.loaded_args['hostname'] = socket.gethostname() elif in_dict['protocol_version'] == 2: self.startup_info = in_dict['startup_info'] self.loaded_unknown_args = in_dict['unknown_args'] self.loaded_args = in_dict['parsed_args'] # changed 'authentication' to 'auth', if present (from old env) rename if 'authentication' in self.loaded_args: self.loaded_args['auth'] = self.loaded_args['authentication'] del self.loaded_args['authentication'] return True
python
def _load_parameters(self): datapath = self.dir startup_file = os.path.join(datapath, '.mlaunch_startup') if not os.path.exists(startup_file): return False in_dict = json.load(open(startup_file, 'rb')) # handle legacy version without versioned protocol if 'protocol_version' not in in_dict: in_dict['protocol_version'] = 1 self.loaded_args = in_dict self.startup_info = {} # hostname was added recently self.loaded_args['hostname'] = socket.gethostname() elif in_dict['protocol_version'] == 2: self.startup_info = in_dict['startup_info'] self.loaded_unknown_args = in_dict['unknown_args'] self.loaded_args = in_dict['parsed_args'] # changed 'authentication' to 'auth', if present (from old env) rename if 'authentication' in self.loaded_args: self.loaded_args['auth'] = self.loaded_args['authentication'] del self.loaded_args['authentication'] return True
[ "def", "_load_parameters", "(", "self", ")", ":", "datapath", "=", "self", ".", "dir", "startup_file", "=", "os", ".", "path", ".", "join", "(", "datapath", ",", "'.mlaunch_startup'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "startup_file...
Load the .mlaunch_startup file that exists in each datadir. Handles different protocol versions.
[ "Load", "the", ".", "mlaunch_startup", "file", "that", "exists", "in", "each", "datadir", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1378-L1410
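_load_parameters above treats a startup file without a protocol_version key as legacy version 1 and reads the richer layout for version 2. A reduced sketch of that normalization, using a with block (the original leaves the handle from open() unclosed) and field names taken from the code above:

import json
import os
import socket

def load_startup(datapath):
    startup_file = os.path.join(datapath, '.mlaunch_startup')
    if not os.path.exists(startup_file):
        return None
    with open(startup_file) as f:
        in_dict = json.load(f)
    if 'protocol_version' not in in_dict:
        # legacy file: the whole document is the parsed args
        return {
            'protocol_version': 1,
            'parsed_args': dict(in_dict, hostname=socket.gethostname()),
            'startup_info': {},
            'unknown_args': [],
        }
    return in_dict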
244,024
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._create_paths
def _create_paths(self, basedir, name=None): """Create datadir and subdir paths.""" if name: datapath = os.path.join(basedir, name) else: datapath = basedir dbpath = os.path.join(datapath, 'db') if not os.path.exists(dbpath): os.makedirs(dbpath) if self.args['verbose']: print('creating directory: %s' % dbpath) return datapath
python
def _create_paths(self, basedir, name=None): if name: datapath = os.path.join(basedir, name) else: datapath = basedir dbpath = os.path.join(datapath, 'db') if not os.path.exists(dbpath): os.makedirs(dbpath) if self.args['verbose']: print('creating directory: %s' % dbpath) return datapath
[ "def", "_create_paths", "(", "self", ",", "basedir", ",", "name", "=", "None", ")", ":", "if", "name", ":", "datapath", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "name", ")", "else", ":", "datapath", "=", "basedir", "dbpath", "=", "...
Create datadir and subdir paths.
[ "Create", "datadir", "and", "subdir", "paths", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1433-L1446
244,025
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._filter_valid_arguments
def _filter_valid_arguments(self, arguments, binary="mongod", config=False): """ Return a list of accepted arguments. Check which arguments in list are accepted by the specified binary (mongod, mongos). If an argument does not start with '-' but its preceding argument was accepted, then it is accepted as well. Example ['--slowms', '1000'] both arguments would be accepted for a mongod. """ # get the help list of the binary if self.args and self.args['binarypath']: binary = os.path.join(self.args['binarypath'], binary) ret = (subprocess.Popen(['%s' % binary, '--help'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False)) out, err = ret.communicate() accepted_arguments = [] # extract all arguments starting with a '-' for line in [option for option in out.decode('utf-8').split('\n')]: line = line.lstrip() if line.startswith('-'): argument = line.split()[0] # exception: don't allow unsupported config server arguments if config and argument in ['--oplogSize', '--storageEngine', '--smallfiles', '--nojournal']: continue accepted_arguments.append(argument) # add undocumented options accepted_arguments.append('--setParameter') if binary.endswith('mongod'): accepted_arguments.append('--wiredTigerEngineConfigString') # filter valid arguments result = [] for i, arg in enumerate(arguments): if arg.startswith('-'): # check if the binary accepts this argument # or special case -vvv for any number of v argname = arg.split('=', 1)[0] if argname in accepted_arguments or re.match(r'-v+', arg): result.append(arg) elif (binary.endswith('mongod') and argname in self.UNDOCUMENTED_MONGOD_ARGS): result.append(arg) elif self.ignored_arguments.get(binary + argname) is None: # warn once for each combination of binary and unknown arg self.ignored_arguments[binary + argname] = True if not (binary.endswith("mongos") and arg in self.UNSUPPORTED_MONGOS_ARGS): print("warning: ignoring unknown argument %s for %s" % (arg, binary)) elif i > 0 and arguments[i - 1] in result: # if it doesn't start with a '-', it could be the value of # the last argument, e.g. `--slowms 1000` result.append(arg) # return valid arguments as joined string return ' '.join(result)
python
def _filter_valid_arguments(self, arguments, binary="mongod", config=False): # get the help list of the binary if self.args and self.args['binarypath']: binary = os.path.join(self.args['binarypath'], binary) ret = (subprocess.Popen(['%s' % binary, '--help'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False)) out, err = ret.communicate() accepted_arguments = [] # extract all arguments starting with a '-' for line in [option for option in out.decode('utf-8').split('\n')]: line = line.lstrip() if line.startswith('-'): argument = line.split()[0] # exception: don't allow unsupported config server arguments if config and argument in ['--oplogSize', '--storageEngine', '--smallfiles', '--nojournal']: continue accepted_arguments.append(argument) # add undocumented options accepted_arguments.append('--setParameter') if binary.endswith('mongod'): accepted_arguments.append('--wiredTigerEngineConfigString') # filter valid arguments result = [] for i, arg in enumerate(arguments): if arg.startswith('-'): # check if the binary accepts this argument # or special case -vvv for any number of v argname = arg.split('=', 1)[0] if argname in accepted_arguments or re.match(r'-v+', arg): result.append(arg) elif (binary.endswith('mongod') and argname in self.UNDOCUMENTED_MONGOD_ARGS): result.append(arg) elif self.ignored_arguments.get(binary + argname) is None: # warn once for each combination of binary and unknown arg self.ignored_arguments[binary + argname] = True if not (binary.endswith("mongos") and arg in self.UNSUPPORTED_MONGOS_ARGS): print("warning: ignoring unknown argument %s for %s" % (arg, binary)) elif i > 0 and arguments[i - 1] in result: # if it doesn't start with a '-', it could be the value of # the last argument, e.g. `--slowms 1000` result.append(arg) # return valid arguments as joined string return ' '.join(result)
[ "def", "_filter_valid_arguments", "(", "self", ",", "arguments", ",", "binary", "=", "\"mongod\"", ",", "config", "=", "False", ")", ":", "# get the help list of the binary", "if", "self", ".", "args", "and", "self", ".", "args", "[", "'binarypath'", "]", ":",...
Return a list of accepted arguments. Check which arguments in the list are accepted by the specified binary (mongod, mongos). If an argument does not start with '-' but its preceding argument was accepted, then it is accepted as well. Example: for ['--slowms', '1000'], both arguments would be accepted for a mongod.
[ "Return", "a", "list", "of", "accepted", "arguments", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1479-L1538
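The record above derives the set of acceptable flags by parsing the binary's --help output and then keeps only arguments that appear there (plus the bare values that follow an accepted flag). The following standalone sketch, which is not mtools code, illustrates the same technique in miniature; it assumes a mongod-like binary is available on the PATH, and the helper names are invented for illustration.

import re
import subprocess

def accepted_flags(binary="mongod"):
    # Run "<binary> --help" and collect every token that looks like a flag.
    proc = subprocess.Popen([binary, "--help"],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    flags = set()
    for line in out.decode("utf-8", "replace").split("\n"):
        line = line.lstrip()
        if line.startswith("-"):
            flags.add(line.split()[0])   # e.g. "--port", "--dbpath"
    return flags

def filter_args(arguments, known_flags):
    # Keep flags the binary understands (plus -v/-vv/-vvv) and the bare
    # values that follow an accepted flag, e.g. ['--slowms', '1000'].
    result = []
    for i, arg in enumerate(arguments):
        if arg.startswith("-"):
            name = arg.split("=", 1)[0]
            if name in known_flags or re.match(r"-v+$", arg):
                result.append(arg)
        elif i > 0 and arguments[i - 1] in result:
            result.append(arg)
    return " ".join(result)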
244,026
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._initiate_replset
def _initiate_replset(self, port, name, maxwait=30): """Initiate replica set.""" if not self.args['replicaset'] and name != 'configRepl': if self.args['verbose']: print('Skipping replica set initialization for %s' % name) return con = self.client('localhost:%i' % port) try: rs_status = con['admin'].command({'replSetGetStatus': 1}) return rs_status except OperationFailure as e: # not initiated yet for i in range(maxwait): try: con['admin'].command({'replSetInitiate': self.config_docs[name]}) break except OperationFailure as e: print(e.message + " - will retry") time.sleep(1) if self.args['verbose']: print("initializing replica set '%s' with configuration: %s" % (name, self.config_docs[name])) print("replica set '%s' initialized." % name)
python
def _initiate_replset(self, port, name, maxwait=30): if not self.args['replicaset'] and name != 'configRepl': if self.args['verbose']: print('Skipping replica set initialization for %s' % name) return con = self.client('localhost:%i' % port) try: rs_status = con['admin'].command({'replSetGetStatus': 1}) return rs_status except OperationFailure as e: # not initiated yet for i in range(maxwait): try: con['admin'].command({'replSetInitiate': self.config_docs[name]}) break except OperationFailure as e: print(e.message + " - will retry") time.sleep(1) if self.args['verbose']: print("initializing replica set '%s' with configuration: %s" % (name, self.config_docs[name])) print("replica set '%s' initialized." % name)
[ "def", "_initiate_replset", "(", "self", ",", "port", ",", "name", ",", "maxwait", "=", "30", ")", ":", "if", "not", "self", ".", "args", "[", "'replicaset'", "]", "and", "name", "!=", "'configRepl'", ":", "if", "self", ".", "args", "[", "'verbose'", ...
Initiate replica set.
[ "Initiate", "replica", "set", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1667-L1692
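A hedged sketch of the initiate-and-retry pattern used above, written against pymongo (the same driver calls the source appears to use). It assumes a mongod is already running locally and was started with --replSet rs0; the port and set name are placeholders, and this is an illustration rather than mlaunch's actual code.

import time
from pymongo import MongoClient
from pymongo.errors import OperationFailure

def initiate_single_node_replset(port=27017, name="rs0", maxwait=30):
    client = MongoClient("localhost", port)
    config = {"_id": name,
              "members": [{"_id": 0, "host": "localhost:%i" % port}]}
    for _ in range(maxwait):
        try:
            # succeeds once the node is ready to accept the configuration
            client.admin.command({"replSetInitiate": config})
            return True
        except OperationFailure:
            time.sleep(1)   # node not ready yet; retry, as mlaunch does
    return False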
244,027
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_sharded
def _construct_sharded(self): """Construct command line strings for a sharded cluster.""" current_version = self.getMongoDVersion() num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1 shard_names = self._get_shard_names(self.args) # create shards as stand-alones or replica sets nextport = self.args['port'] + num_mongos for shard in shard_names: if (self.args['single'] and LooseVersion(current_version) >= LooseVersion("3.6.0")): errmsg = " \n * In MongoDB 3.6 and above a Shard must be " \ "made up of a replica set. Please use --replicaset " \ "option when starting a sharded cluster.*" raise SystemExit(errmsg) elif (self.args['single'] and LooseVersion(current_version) < LooseVersion("3.6.0")): self.shard_connection_str.append( self._construct_single( self.dir, nextport, name=shard, extra='--shardsvr')) nextport += 1 elif self.args['replicaset']: self.shard_connection_str.append( self._construct_replset( self.dir, nextport, shard, num_nodes=list(range(self.args['nodes'])), arbiter=self.args['arbiter'], extra='--shardsvr')) nextport += self.args['nodes'] if self.args['arbiter']: nextport += 1 # start up config server(s) config_string = [] # SCCC config servers (MongoDB <3.3.0) if not self.args['csrs'] and self.args['config'] >= 3: config_names = ['config1', 'config2', 'config3'] else: config_names = ['config'] # CSRS config servers (MongoDB >=3.1.0) if self.args['csrs']: config_string.append(self._construct_config(self.dir, nextport, "configRepl", True)) else: for name in config_names: self._construct_config(self.dir, nextport, name) config_string.append('%s:%i' % (self.args['hostname'], nextport)) nextport += 1 # multiple mongos use <datadir>/mongos/ as subdir for log files if num_mongos > 1: mongosdir = os.path.join(self.dir, 'mongos') if not os.path.exists(mongosdir): if self.args['verbose']: print("creating directory: %s" % mongosdir) os.makedirs(mongosdir) # start up mongos, but put them to the front of the port range nextport = self.args['port'] for i in range(num_mongos): if num_mongos > 1: mongos_logfile = 'mongos/mongos_%i.log' % nextport else: mongos_logfile = 'mongos.log' self._construct_mongos(os.path.join(self.dir, mongos_logfile), nextport, ','.join(config_string)) nextport += 1
python
def _construct_sharded(self): current_version = self.getMongoDVersion() num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1 shard_names = self._get_shard_names(self.args) # create shards as stand-alones or replica sets nextport = self.args['port'] + num_mongos for shard in shard_names: if (self.args['single'] and LooseVersion(current_version) >= LooseVersion("3.6.0")): errmsg = " \n * In MongoDB 3.6 and above a Shard must be " \ "made up of a replica set. Please use --replicaset " \ "option when starting a sharded cluster.*" raise SystemExit(errmsg) elif (self.args['single'] and LooseVersion(current_version) < LooseVersion("3.6.0")): self.shard_connection_str.append( self._construct_single( self.dir, nextport, name=shard, extra='--shardsvr')) nextport += 1 elif self.args['replicaset']: self.shard_connection_str.append( self._construct_replset( self.dir, nextport, shard, num_nodes=list(range(self.args['nodes'])), arbiter=self.args['arbiter'], extra='--shardsvr')) nextport += self.args['nodes'] if self.args['arbiter']: nextport += 1 # start up config server(s) config_string = [] # SCCC config servers (MongoDB <3.3.0) if not self.args['csrs'] and self.args['config'] >= 3: config_names = ['config1', 'config2', 'config3'] else: config_names = ['config'] # CSRS config servers (MongoDB >=3.1.0) if self.args['csrs']: config_string.append(self._construct_config(self.dir, nextport, "configRepl", True)) else: for name in config_names: self._construct_config(self.dir, nextport, name) config_string.append('%s:%i' % (self.args['hostname'], nextport)) nextport += 1 # multiple mongos use <datadir>/mongos/ as subdir for log files if num_mongos > 1: mongosdir = os.path.join(self.dir, 'mongos') if not os.path.exists(mongosdir): if self.args['verbose']: print("creating directory: %s" % mongosdir) os.makedirs(mongosdir) # start up mongos, but put them to the front of the port range nextport = self.args['port'] for i in range(num_mongos): if num_mongos > 1: mongos_logfile = 'mongos/mongos_%i.log' % nextport else: mongos_logfile = 'mongos.log' self._construct_mongos(os.path.join(self.dir, mongos_logfile), nextport, ','.join(config_string)) nextport += 1
[ "def", "_construct_sharded", "(", "self", ")", ":", "current_version", "=", "self", ".", "getMongoDVersion", "(", ")", "num_mongos", "=", "self", ".", "args", "[", "'mongos'", "]", "if", "self", ".", "args", "[", "'mongos'", "]", ">", "0", "else", "1", ...
Construct command line strings for a sharded cluster.
[ "Construct", "command", "line", "strings", "for", "a", "sharded", "cluster", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1820-L1892
244,028
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_replset
def _construct_replset(self, basedir, portstart, name, num_nodes, arbiter, extra=''): """ Construct command line strings for a replicaset. Handles single set or sharded cluster. """ self.config_docs[name] = {'_id': name, 'members': []} # Construct individual replica set nodes for i in num_nodes: datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart + i, replset=name, extra=extra) host = '%s:%i' % (self.args['hostname'], portstart + i) member_config = { '_id': len(self.config_docs[name]['members']), 'host': host, } # First node gets increased priority. if i == 0 and self.args['priority']: member_config['priority'] = 10 if i >= 7: member_config['votes'] = 0 member_config['priority'] = 0 self.config_docs[name]['members'].append(member_config) # launch arbiter if True if arbiter: datapath = self._create_paths(basedir, '%s/arb' % (name)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart + self.args['nodes'], replset=name) host = '%s:%i' % (self.args['hostname'], portstart + self.args['nodes']) (self.config_docs[name]['members'] .append({'_id': len(self.config_docs[name]['members']), 'host': host, 'arbiterOnly': True})) return(name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']]))
python
def _construct_replset(self, basedir, portstart, name, num_nodes, arbiter, extra=''): self.config_docs[name] = {'_id': name, 'members': []} # Construct individual replica set nodes for i in num_nodes: datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart + i, replset=name, extra=extra) host = '%s:%i' % (self.args['hostname'], portstart + i) member_config = { '_id': len(self.config_docs[name]['members']), 'host': host, } # First node gets increased priority. if i == 0 and self.args['priority']: member_config['priority'] = 10 if i >= 7: member_config['votes'] = 0 member_config['priority'] = 0 self.config_docs[name]['members'].append(member_config) # launch arbiter if True if arbiter: datapath = self._create_paths(basedir, '%s/arb' % (name)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart + self.args['nodes'], replset=name) host = '%s:%i' % (self.args['hostname'], portstart + self.args['nodes']) (self.config_docs[name]['members'] .append({'_id': len(self.config_docs[name]['members']), 'host': host, 'arbiterOnly': True})) return(name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']]))
[ "def", "_construct_replset", "(", "self", ",", "basedir", ",", "portstart", ",", "name", ",", "num_nodes", ",", "arbiter", ",", "extra", "=", "''", ")", ":", "self", ".", "config_docs", "[", "name", "]", "=", "{", "'_id'", ":", "name", ",", "'members'"...
Construct command line strings for a replica set. Handles a single set or a sharded cluster.
[ "Construct", "command", "line", "strings", "for", "a", "replicaset", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1894-L1943
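As a small illustration of the configuration document _construct_replset assembles, the sketch below builds the same shape of replSetInitiate document for a list of hosts. It is a simplified stand-in, and the host strings in the usage comment are invented.

def build_rs_config(name, hosts, priority_first=True):
    members = []
    for i, host in enumerate(hosts):
        member = {"_id": i, "host": host}
        if i == 0 and priority_first:
            member["priority"] = 10   # favour the first node as primary
        if i >= 7:
            member["votes"] = 0       # replica sets allow at most 7 voting members
            member["priority"] = 0
        members.append(member)
    return {"_id": name, "members": members}

# build_rs_config("replset", ["localhost:27018", "localhost:27019", "localhost:27020"])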
244,029
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_config
def _construct_config(self, basedir, port, name=None, isreplset=False): """Construct command line strings for a config server.""" if isreplset: return self._construct_replset(basedir=basedir, portstart=port, name=name, num_nodes=list(range( self.args['config'])), arbiter=False, extra='--configsvr') else: datapath = self._create_paths(basedir, name) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra='--configsvr')
python
def _construct_config(self, basedir, port, name=None, isreplset=False): if isreplset: return self._construct_replset(basedir=basedir, portstart=port, name=name, num_nodes=list(range( self.args['config'])), arbiter=False, extra='--configsvr') else: datapath = self._create_paths(basedir, name) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra='--configsvr')
[ "def", "_construct_config", "(", "self", ",", "basedir", ",", "port", ",", "name", "=", "None", ",", "isreplset", "=", "False", ")", ":", "if", "isreplset", ":", "return", "self", ".", "_construct_replset", "(", "basedir", "=", "basedir", ",", "portstart",...
Construct command line strings for a config server.
[ "Construct", "command", "line", "strings", "for", "a", "config", "server", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1945-L1957
244,030
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_single
def _construct_single(self, basedir, port, name=None, extra=''): """ Construct command line strings for a single node. Handles shards and stand-alones. """ datapath = self._create_paths(basedir, name) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra=extra) host = '%s:%i' % (self.args['hostname'], port) return host
python
def _construct_single(self, basedir, port, name=None, extra=''): datapath = self._create_paths(basedir, name) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra=extra) host = '%s:%i' % (self.args['hostname'], port) return host
[ "def", "_construct_single", "(", "self", ",", "basedir", ",", "port", ",", "name", "=", "None", ",", "extra", "=", "''", ")", ":", "datapath", "=", "self", ".", "_create_paths", "(", "basedir", ",", "name", ")", "self", ".", "_construct_mongod", "(", "...
Construct command line strings for a single node. Handles shards and stand-alones.
[ "Construct", "command", "line", "strings", "for", "a", "single", "node", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1959-L1972
244,031
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_mongod
def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''): """Construct command line strings for mongod process.""" rs_param = '' if replset: rs_param = '--replSet %s' % replset auth_param = '' if self.args['auth']: key_path = os.path.abspath(os.path.join(self.dir, 'keyfile')) auth_param = '--keyFile %s' % key_path if self.unknown_args: config = '--configsvr' in extra extra = self._filter_valid_arguments(self.unknown_args, "mongod", config=config) + ' ' + extra # set WiredTiger cache size to 1 GB by default if ('--wiredTigerCacheSizeGB' not in extra and self._filter_valid_arguments(['--wiredTigerCacheSizeGB'], 'mongod')): extra += ' --wiredTigerCacheSizeGB 1 ' current_version = self.getMongoDVersion() # Exit with error if hostname is specified but not bind_ip options if (self.args['hostname'] != 'localhost' and LooseVersion(current_version) >= LooseVersion("3.6.0") and (self.args['sharded'] or self.args['replicaset']) and '--bind_ip' not in extra): os.removedirs(dbpath) errmsg = " \n * If hostname is specified, please include "\ "'--bind_ip_all' or '--bind_ip' options when deploying "\ "replica sets or sharded cluster with MongoDB version 3.6.0 "\ "or greater" raise SystemExit(errmsg) extra += self._get_ssl_server_args() path = self.args['binarypath'] or '' if os.name == 'nt': newdbpath = dbpath.replace('\\', '\\\\') newlogpath = logpath.replace('\\', '\\\\') command_str = ("start /b \"\" \"%s\" %s --dbpath \"%s\" " " --logpath \"%s\" --port %i " "%s %s" % (os.path.join(path, 'mongod.exe'), rs_param, newdbpath, newlogpath, port, auth_param, extra)) else: command_str = ("\"%s\" %s --dbpath \"%s\" --logpath \"%s\" " "--port %i --fork " "%s %s" % (os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra)) # store parameters in startup_info self.startup_info[str(port)] = command_str
python
def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''): rs_param = '' if replset: rs_param = '--replSet %s' % replset auth_param = '' if self.args['auth']: key_path = os.path.abspath(os.path.join(self.dir, 'keyfile')) auth_param = '--keyFile %s' % key_path if self.unknown_args: config = '--configsvr' in extra extra = self._filter_valid_arguments(self.unknown_args, "mongod", config=config) + ' ' + extra # set WiredTiger cache size to 1 GB by default if ('--wiredTigerCacheSizeGB' not in extra and self._filter_valid_arguments(['--wiredTigerCacheSizeGB'], 'mongod')): extra += ' --wiredTigerCacheSizeGB 1 ' current_version = self.getMongoDVersion() # Exit with error if hostname is specified but not bind_ip options if (self.args['hostname'] != 'localhost' and LooseVersion(current_version) >= LooseVersion("3.6.0") and (self.args['sharded'] or self.args['replicaset']) and '--bind_ip' not in extra): os.removedirs(dbpath) errmsg = " \n * If hostname is specified, please include "\ "'--bind_ip_all' or '--bind_ip' options when deploying "\ "replica sets or sharded cluster with MongoDB version 3.6.0 "\ "or greater" raise SystemExit(errmsg) extra += self._get_ssl_server_args() path = self.args['binarypath'] or '' if os.name == 'nt': newdbpath = dbpath.replace('\\', '\\\\') newlogpath = logpath.replace('\\', '\\\\') command_str = ("start /b \"\" \"%s\" %s --dbpath \"%s\" " " --logpath \"%s\" --port %i " "%s %s" % (os.path.join(path, 'mongod.exe'), rs_param, newdbpath, newlogpath, port, auth_param, extra)) else: command_str = ("\"%s\" %s --dbpath \"%s\" --logpath \"%s\" " "--port %i --fork " "%s %s" % (os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra)) # store parameters in startup_info self.startup_info[str(port)] = command_str
[ "def", "_construct_mongod", "(", "self", ",", "dbpath", ",", "logpath", ",", "port", ",", "replset", "=", "None", ",", "extra", "=", "''", ")", ":", "rs_param", "=", "''", "if", "replset", ":", "rs_param", "=", "'--replSet %s'", "%", "replset", "auth_par...
Construct command line strings for mongod process.
[ "Construct", "command", "line", "strings", "for", "mongod", "process", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1974-L2029
244,032
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._construct_mongos
def _construct_mongos(self, logpath, port, configdb): """Construct command line strings for a mongos process.""" extra = '' auth_param = '' if self.args['auth']: key_path = os.path.abspath(os.path.join(self.dir, 'keyfile')) auth_param = '--keyFile %s' % key_path if self.unknown_args: extra = self._filter_valid_arguments(self.unknown_args, "mongos") + extra extra += ' ' + self._get_ssl_server_args() path = self.args['binarypath'] or '' if os.name == 'nt': newlogpath = logpath.replace('\\', '\\\\') command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s " "%s %s " % (os.path.join(path, 'mongos'), newlogpath, port, configdb, auth_param, extra)) else: command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s " "--fork" % (os.path.join(path, 'mongos'), logpath, port, configdb, auth_param, extra)) # store parameters in startup_info self.startup_info[str(port)] = command_str
python
def _construct_mongos(self, logpath, port, configdb): extra = '' auth_param = '' if self.args['auth']: key_path = os.path.abspath(os.path.join(self.dir, 'keyfile')) auth_param = '--keyFile %s' % key_path if self.unknown_args: extra = self._filter_valid_arguments(self.unknown_args, "mongos") + extra extra += ' ' + self._get_ssl_server_args() path = self.args['binarypath'] or '' if os.name == 'nt': newlogpath = logpath.replace('\\', '\\\\') command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s " "%s %s " % (os.path.join(path, 'mongos'), newlogpath, port, configdb, auth_param, extra)) else: command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s " "--fork" % (os.path.join(path, 'mongos'), logpath, port, configdb, auth_param, extra)) # store parameters in startup_info self.startup_info[str(port)] = command_str
[ "def", "_construct_mongos", "(", "self", ",", "logpath", ",", "port", ",", "configdb", ")", ":", "extra", "=", "''", "auth_param", "=", "''", "if", "self", ".", "args", "[", "'auth'", "]", ":", "key_path", "=", "os", ".", "path", ".", "abspath", "(",...
Construct command line strings for a mongos process.
[ "Construct", "command", "line", "strings", "for", "a", "mongos", "process", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L2031-L2059
244,033
rueckstiess/mtools
mtools/util/logcodeline.py
LogCodeLine.addMatch
def addMatch(self, version, filename, lineno, loglevel, trigger): """ Add a match to the LogCodeLine. Include the version, filename of the source file, the line number, and the loglevel. """ self.versions.add(version) self.matches[version].append((filename, lineno, loglevel, trigger))
python
def addMatch(self, version, filename, lineno, loglevel, trigger): self.versions.add(version) self.matches[version].append((filename, lineno, loglevel, trigger))
[ "def", "addMatch", "(", "self", ",", "version", ",", "filename", ",", "lineno", ",", "loglevel", ",", "trigger", ")", ":", "self", ".", "versions", ".", "add", "(", "version", ")", "self", ".", "matches", "[", "version", "]", ".", "append", "(", "(",...
Add a match to the LogCodeLine. Include the version, filename of the source file, the line number, and the loglevel.
[ "Add", "a", "match", "to", "the", "LogCodeLine", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logcodeline.py#L29-L37
244,034
rueckstiess/mtools
mtools/mplotqueries/plottypes/event_type.py
RSStatePlotType.accept_line
def accept_line(self, logevent): """ Return True on match. Only match log lines containing 'is now in state' (reflects other node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects own state changes). """ if ("is now in state" in logevent.line_str and logevent.split_tokens[-1] in self.states): return True if ("replSet" in logevent.line_str and logevent.thread == "rsMgr" and logevent.split_tokens[-1] in self.states): return True return False
python
def accept_line(self, logevent): if ("is now in state" in logevent.line_str and logevent.split_tokens[-1] in self.states): return True if ("replSet" in logevent.line_str and logevent.thread == "rsMgr" and logevent.split_tokens[-1] in self.states): return True return False
[ "def", "accept_line", "(", "self", ",", "logevent", ")", ":", "if", "(", "\"is now in state\"", "in", "logevent", ".", "line_str", "and", "logevent", ".", "split_tokens", "[", "-", "1", "]", "in", "self", ".", "states", ")", ":", "return", "True", "if", ...
Return True on match. Only match log lines containing 'is now in state' (reflects other node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects own state changes).
[ "Return", "True", "on", "match", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/event_type.py#L73-L90
244,035
rueckstiess/mtools
mtools/mplotqueries/plottypes/event_type.py
RSStatePlotType.color_map
def color_map(cls, group): print("Group %s" % group) """ Change default color behavior. Map certain states always to the same colors (similar to MMS). """ try: state_idx = cls.states.index(group) except ValueError: # on any unexpected state, return black state_idx = 5 return cls.colors[state_idx], cls.markers[0]
python
def color_map(cls, group): print("Group %s" % group) try: state_idx = cls.states.index(group) except ValueError: # on any unexpected state, return black state_idx = 5 return cls.colors[state_idx], cls.markers[0]
[ "def", "color_map", "(", "cls", ",", "group", ")", ":", "print", "(", "\"Group %s\"", "%", "group", ")", "try", ":", "state_idx", "=", "cls", ".", "states", ".", "index", "(", "group", ")", "except", "ValueError", ":", "# on any unexpected state, return blac...
Change default color behavior. Map certain states always to the same colors (similar to MMS).
[ "Change", "default", "color", "behavior", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/event_type.py#L97-L109
244,036
rueckstiess/mtools
mtools/mplotqueries/plottypes/base_type.py
BasePlotType.add_line
def add_line(self, logevent): """Append log line to this plot type.""" key = None self.empty = False self.groups.setdefault(key, list()).append(logevent)
python
def add_line(self, logevent): key = None self.empty = False self.groups.setdefault(key, list()).append(logevent)
[ "def", "add_line", "(", "self", ",", "logevent", ")", ":", "key", "=", "None", "self", ".", "empty", "=", "False", "self", ".", "groups", ".", "setdefault", "(", "key", ",", "list", "(", ")", ")", ".", "append", "(", "logevent", ")" ]
Append log line to this plot type.
[ "Append", "log", "line", "to", "this", "plot", "type", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L53-L57
244,037
rueckstiess/mtools
mtools/mplotqueries/plottypes/base_type.py
BasePlotType.logevents
def logevents(self): """Iterator yielding all logevents from groups dictionary.""" for key in self.groups: for logevent in self.groups[key]: yield logevent
python
def logevents(self): for key in self.groups: for logevent in self.groups[key]: yield logevent
[ "def", "logevents", "(", "self", ")", ":", "for", "key", "in", "self", ".", "groups", ":", "for", "logevent", "in", "self", ".", "groups", "[", "key", "]", ":", "yield", "logevent" ]
Iterator yielding all logevents from groups dictionary.
[ "Iterator", "yielding", "all", "logevents", "from", "groups", "dictionary", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L60-L64
244,038
rueckstiess/mtools
mtools/mplotqueries/plottypes/histogram_type.py
HistogramPlotType.clicked
def clicked(self, event): """Print group name and number of items in bin.""" group = event.artist._mt_group n = event.artist._mt_n dt = num2date(event.artist._mt_bin) print("%4i %s events in %s sec beginning at %s" % (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S")))
python
def clicked(self, event): group = event.artist._mt_group n = event.artist._mt_n dt = num2date(event.artist._mt_bin) print("%4i %s events in %s sec beginning at %s" % (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S")))
[ "def", "clicked", "(", "self", ",", "event", ")", ":", "group", "=", "event", ".", "artist", ".", "_mt_group", "n", "=", "event", ".", "artist", ".", "_mt_n", "dt", "=", "num2date", "(", "event", ".", "artist", ".", "_mt_bin", ")", "print", "(", "\...
Print group name and number of items in bin.
[ "Print", "group", "name", "and", "number", "of", "items", "in", "bin", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/histogram_type.py#L153-L159
244,039
rueckstiess/mtools
mtools/util/grouping.py
Grouping.add
def add(self, item, group_by=None): """General purpose class to group items by certain criteria.""" key = None if not group_by: group_by = self.group_by if group_by: # if group_by is a function, use it with item as argument if hasattr(group_by, '__call__'): key = group_by(item) # if the item has attribute of group_by as string, use that as key elif isinstance(group_by, str) and hasattr(item, group_by): key = getattr(item, group_by) else: key = None # try to match str(item) with regular expression if isinstance(group_by, str): match = re.search(group_by, str(item)) if match: if len(match.groups()) > 0: key = match.group(1) else: key = match.group() self.groups.setdefault(key, list()).append(item)
python
def add(self, item, group_by=None): key = None if not group_by: group_by = self.group_by if group_by: # if group_by is a function, use it with item as argument if hasattr(group_by, '__call__'): key = group_by(item) # if the item has attribute of group_by as string, use that as key elif isinstance(group_by, str) and hasattr(item, group_by): key = getattr(item, group_by) else: key = None # try to match str(item) with regular expression if isinstance(group_by, str): match = re.search(group_by, str(item)) if match: if len(match.groups()) > 0: key = match.group(1) else: key = match.group() self.groups.setdefault(key, list()).append(item)
[ "def", "add", "(", "self", ",", "item", ",", "group_by", "=", "None", ")", ":", "key", "=", "None", "if", "not", "group_by", ":", "group_by", "=", "self", ".", "group_by", "if", "group_by", ":", "# if group_by is a function, use it with item as argument", "if"...
General purpose class to group items by certain criteria.
[ "General", "purpose", "class", "to", "group", "items", "by", "certain", "criteria", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L23-L50
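Grouping.add supports three kinds of group_by criteria: a callable applied to the item, an attribute name looked up on the item, or a regular expression matched against str(item). The standalone sketch below reimplements just that dispatch so the behaviour is easy to try in isolation; it is not mtools code.

import re
from collections import defaultdict

def group_items(items, group_by):
    groups = defaultdict(list)
    for item in items:
        if callable(group_by):
            key = group_by(item)                     # function of the item
        elif isinstance(group_by, str) and hasattr(item, group_by):
            key = getattr(item, group_by)            # attribute lookup
        else:
            key = None
            if isinstance(group_by, str):
                match = re.search(group_by, str(item))   # regex on str(item)
                if match:
                    key = match.group(1) if match.groups() else match.group()
        groups[key].append(item)
    return dict(groups)

# group_items(["apple", "avocado", "banana"], r"^(.)") groups the strings by first letter.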
244,040
rueckstiess/mtools
mtools/util/grouping.py
Grouping.regroup
def regroup(self, group_by=None): """Regroup items.""" if not group_by: group_by = self.group_by groups = self.groups self.groups = {} for g in groups: for item in groups[g]: self.add(item, group_by)
python
def regroup(self, group_by=None): if not group_by: group_by = self.group_by groups = self.groups self.groups = {} for g in groups: for item in groups[g]: self.add(item, group_by)
[ "def", "regroup", "(", "self", ",", "group_by", "=", "None", ")", ":", "if", "not", "group_by", ":", "group_by", "=", "self", ".", "group_by", "groups", "=", "self", ".", "groups", "self", ".", "groups", "=", "{", "}", "for", "g", "in", "groups", "...
Regroup items.
[ "Regroup", "items", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L78-L88
244,041
rueckstiess/mtools
mtools/util/grouping.py
Grouping.move_items
def move_items(self, from_group, to_group): """Take all elements from the from_group and add it to the to_group.""" if from_group not in self.keys() or len(self.groups[from_group]) == 0: return self.groups.setdefault(to_group, list()).extend(self.groups.get (from_group, list())) if from_group in self.groups: del self.groups[from_group]
python
def move_items(self, from_group, to_group): if from_group not in self.keys() or len(self.groups[from_group]) == 0: return self.groups.setdefault(to_group, list()).extend(self.groups.get (from_group, list())) if from_group in self.groups: del self.groups[from_group]
[ "def", "move_items", "(", "self", ",", "from_group", ",", "to_group", ")", ":", "if", "from_group", "not", "in", "self", ".", "keys", "(", ")", "or", "len", "(", "self", ".", "groups", "[", "from_group", "]", ")", "==", "0", ":", "return", "self", ...
Take all elements from the from_group and add them to the to_group.
[ "Take", "all", "elements", "from", "the", "from_group", "and", "add", "them", "to", "the", "to_group", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L90-L98
244,042
rueckstiess/mtools
mtools/util/grouping.py
Grouping.sort_by_size
def sort_by_size(self, group_limit=None, discard_others=False, others_label='others'): """ Sort the groups by the number of elements they contain, descending. Also has option to limit the number of groups. If this option is chosen, the remaining elements are placed into another group with the name specified with others_label. if discard_others is True, the others group is removed instead. """ # sort groups by number of elements self.groups = OrderedDict(sorted(six.iteritems(self.groups), key=lambda x: len(x[1]), reverse=True)) # if group-limit is provided, combine remaining groups if group_limit is not None: # now group together all groups that did not make the limit if not discard_others: group_keys = self.groups.keys()[group_limit - 1:] self.groups.setdefault(others_label, list()) else: group_keys = self.groups.keys()[group_limit:] # only go to second last (-1), since the 'others' group is now last for g in group_keys: if not discard_others: self.groups[others_label].extend(self.groups[g]) del self.groups[g] # remove if empty if (others_label in self.groups and len(self.groups[others_label]) == 0): del self.groups[others_label] # remove others group regardless of limit if requested if discard_others and others_label in self.groups: del self.groups[others_label]
python
def sort_by_size(self, group_limit=None, discard_others=False, others_label='others'): # sort groups by number of elements self.groups = OrderedDict(sorted(six.iteritems(self.groups), key=lambda x: len(x[1]), reverse=True)) # if group-limit is provided, combine remaining groups if group_limit is not None: # now group together all groups that did not make the limit if not discard_others: group_keys = self.groups.keys()[group_limit - 1:] self.groups.setdefault(others_label, list()) else: group_keys = self.groups.keys()[group_limit:] # only go to second last (-1), since the 'others' group is now last for g in group_keys: if not discard_others: self.groups[others_label].extend(self.groups[g]) del self.groups[g] # remove if empty if (others_label in self.groups and len(self.groups[others_label]) == 0): del self.groups[others_label] # remove others group regardless of limit if requested if discard_others and others_label in self.groups: del self.groups[others_label]
[ "def", "sort_by_size", "(", "self", ",", "group_limit", "=", "None", ",", "discard_others", "=", "False", ",", "others_label", "=", "'others'", ")", ":", "# sort groups by number of elements", "self", ".", "groups", "=", "OrderedDict", "(", "sorted", "(", "six",...
Sort the groups by the number of elements they contain, descending. Also has an option to limit the number of groups. If this option is chosen, the remaining elements are placed into another group with the name specified by others_label. If discard_others is True, the others group is removed instead.
[ "Sort", "the", "groups", "by", "the", "number", "of", "elements", "they", "contain", "descending", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L100-L138
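Note that slicing self.groups.keys() as done above is a Python 2 idiom; under Python 3 the keys view must be converted to a list before it can be sliced. A simplified, Python 3 friendly sketch of the same sort-then-fold-into-others idea (it omits the discard_others handling of the original):

from collections import OrderedDict

def sort_groups_by_size(groups, group_limit=None, others_label="others"):
    # largest groups first
    ordered = OrderedDict(sorted(groups.items(),
                                 key=lambda kv: len(kv[1]), reverse=True))
    if group_limit is not None:
        tail_keys = list(ordered.keys())[group_limit:]
        others = []
        for key in tail_keys:
            others.extend(ordered.pop(key))
        if others:
            ordered[others_label] = others
    return ordered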
244,043
rueckstiess/mtools
mtools/util/log2code.py
import_l2c_db
def import_l2c_db(): """ Static import helper function. Checks if the log2code.pickle exists first, otherwise raises ImportError. """ data_path = os.path.join(os.path.dirname(mtools.__file__), 'data') if os.path.exists(os.path.join(data_path, 'log2code.pickle')): av, lv, lbw, lcl = cPickle.load(open(os.path.join(data_path, 'log2code.pickle'), 'rb')) return av, lv, lbw, lcl else: raise ImportError('log2code.pickle not found in %s.' % data_path)
python
def import_l2c_db(): data_path = os.path.join(os.path.dirname(mtools.__file__), 'data') if os.path.exists(os.path.join(data_path, 'log2code.pickle')): av, lv, lbw, lcl = cPickle.load(open(os.path.join(data_path, 'log2code.pickle'), 'rb')) return av, lv, lbw, lcl else: raise ImportError('log2code.pickle not found in %s.' % data_path)
[ "def", "import_l2c_db", "(", ")", ":", "data_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "mtools", ".", "__file__", ")", ",", "'data'", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "pat...
Static import helper function. Checks if the log2code.pickle exists first, otherwise raises ImportError.
[ "Static", "import", "helper", "function", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L15-L29
244,044
rueckstiess/mtools
mtools/util/log2code.py
Log2CodeConverter._strip_counters
def _strip_counters(self, sub_line): """Find the codeline end by taking out the counters and durations.""" try: end = sub_line.rindex('}') except ValueError: return sub_line else: return sub_line[:(end + 1)]
python
def _strip_counters(self, sub_line): try: end = sub_line.rindex('}') except ValueError: return sub_line else: return sub_line[:(end + 1)]
[ "def", "_strip_counters", "(", "self", ",", "sub_line", ")", ":", "try", ":", "end", "=", "sub_line", ".", "rindex", "(", "'}'", ")", "except", "ValueError", ":", "return", "sub_line", "else", ":", "return", "sub_line", "[", ":", "(", "end", "+", "1", ...
Find the codeline end by taking out the counters and durations.
[ "Find", "the", "codeline", "end", "by", "taking", "out", "the", "counters", "and", "durations", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L78-L85
244,045
rueckstiess/mtools
mtools/util/log2code.py
Log2CodeConverter._strip_datetime
def _strip_datetime(self, sub_line): """Strip datetime and other parts so that there is no redundancy.""" try: begin = sub_line.index(']') except ValueError: return sub_line else: # create a "" in place character for the beginnings.. # needed when interleaving the lists sub = sub_line[begin + 1:] return sub
python
def _strip_datetime(self, sub_line): try: begin = sub_line.index(']') except ValueError: return sub_line else: # create a "" in place character for the beginnings.. # needed when interleaving the lists sub = sub_line[begin + 1:] return sub
[ "def", "_strip_datetime", "(", "self", ",", "sub_line", ")", ":", "try", ":", "begin", "=", "sub_line", ".", "index", "(", "']'", ")", "except", "ValueError", ":", "return", "sub_line", "else", ":", "# create a \"\" in place character for the beginnings..", "# nee...
Strip datetime and other parts so that there is no redundancy.
[ "Strip", "datetime", "and", "other", "parts", "so", "that", "there", "is", "no", "redundancy", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L87-L97
244,046
rueckstiess/mtools
mtools/util/log2code.py
Log2CodeConverter._find_variable
def _find_variable(self, pattern, logline): """ Return the variable parts of the code given a tuple of strings pattern. Example: (this, is, a, pattern) -> 'this is a good pattern' -> [good] """ var_subs = [] # find the beginning of the pattern first_index = logline.index(pattern[0]) beg_str = logline[:first_index] # strip the beginning substring var_subs.append(self._strip_datetime(beg_str)) for patt, patt_next in zip(pattern[:-1], pattern[1:]): # regular expression pattern that finds what's in the middle of # two substrings pat = re.escape(patt) + '(.*)' + re.escape(patt_next) # extract whats in the middle of the two substrings between = re.search(pat, logline) try: # add what's in between if the search isn't none var_subs.append(between.group(1)) except Exception: pass rest_of_string = logline.rindex(pattern[-1]) + len(pattern[-1]) # add the rest of the string to end minus the counters and durations end_str = logline[rest_of_string:] var_subs.append(self._strip_counters(end_str)) # strip whitespace from each string, but keep the strings themselves # var_subs = [v.strip() for v in var_subs] return var_subs
python
def _find_variable(self, pattern, logline): var_subs = [] # find the beginning of the pattern first_index = logline.index(pattern[0]) beg_str = logline[:first_index] # strip the beginning substring var_subs.append(self._strip_datetime(beg_str)) for patt, patt_next in zip(pattern[:-1], pattern[1:]): # regular expression pattern that finds what's in the middle of # two substrings pat = re.escape(patt) + '(.*)' + re.escape(patt_next) # extract whats in the middle of the two substrings between = re.search(pat, logline) try: # add what's in between if the search isn't none var_subs.append(between.group(1)) except Exception: pass rest_of_string = logline.rindex(pattern[-1]) + len(pattern[-1]) # add the rest of the string to end minus the counters and durations end_str = logline[rest_of_string:] var_subs.append(self._strip_counters(end_str)) # strip whitespace from each string, but keep the strings themselves # var_subs = [v.strip() for v in var_subs] return var_subs
[ "def", "_find_variable", "(", "self", ",", "pattern", ",", "logline", ")", ":", "var_subs", "=", "[", "]", "# find the beginning of the pattern", "first_index", "=", "logline", ".", "index", "(", "pattern", "[", "0", "]", ")", "beg_str", "=", "logline", "[",...
Return the variable parts of the code given a tuple of strings pattern. Example: (this, is, a, pattern) -> 'this is a good pattern' -> [good]
[ "Return", "the", "variable", "parts", "of", "the", "code", "given", "a", "tuple", "of", "strings", "pattern", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L99-L132
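The technique above recovers the variable fragments of a log line by searching for whatever sits between consecutive static substrings of its pattern. A toy, self-contained version follows (not mtools code; the pattern and log line in the usage comment are invented):

import re

def variable_parts(pattern, line):
    parts = []
    for left, right in zip(pattern[:-1], pattern[1:]):
        # text sitting between two consecutive static fragments
        match = re.search(re.escape(left) + "(.*)" + re.escape(right), line)
        if match:
            parts.append(match.group(1))
    return parts

# variable_parts(("query ", " planSummary: ", " docsExamined:"),
#                "query test.coll planSummary: COLLSCAN docsExamined:100")
# -> ['test.coll', 'COLLSCAN']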
244,047
rueckstiess/mtools
mtools/util/log2code.py
Log2CodeConverter._variable_parts
def _variable_parts(self, line, codeline): """Return variable parts of the codeline, given the static parts.""" var_subs = [] # codeline has pattern and then has the outputs in different versions if codeline: var_subs = self._find_variable(codeline.pattern, line) else: # make variable part of the line string without all the other stuff line_str = self._strip_datetime(self._strip_counters(line)) var_subs = [line_str.strip()] return var_subs
python
def _variable_parts(self, line, codeline): var_subs = [] # codeline has pattern and then has the outputs in different versions if codeline: var_subs = self._find_variable(codeline.pattern, line) else: # make variable part of the line string without all the other stuff line_str = self._strip_datetime(self._strip_counters(line)) var_subs = [line_str.strip()] return var_subs
[ "def", "_variable_parts", "(", "self", ",", "line", ",", "codeline", ")", ":", "var_subs", "=", "[", "]", "# codeline has pattern and then has the outputs in different versions", "if", "codeline", ":", "var_subs", "=", "self", ".", "_find_variable", "(", "codeline", ...
Return variable parts of the codeline, given the static parts.
[ "Return", "variable", "parts", "of", "the", "codeline", "given", "the", "static", "parts", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L134-L144
244,048
rueckstiess/mtools
mtools/util/log2code.py
Log2CodeConverter.combine
def combine(self, pattern, variable): """Combine a pattern and variable parts to be a line string again.""" inter_zip = izip_longest(variable, pattern, fillvalue='') interleaved = [elt for pair in inter_zip for elt in pair] return ''.join(interleaved)
python
def combine(self, pattern, variable): inter_zip = izip_longest(variable, pattern, fillvalue='') interleaved = [elt for pair in inter_zip for elt in pair] return ''.join(interleaved)
[ "def", "combine", "(", "self", ",", "pattern", ",", "variable", ")", ":", "inter_zip", "=", "izip_longest", "(", "variable", ",", "pattern", ",", "fillvalue", "=", "''", ")", "interleaved", "=", "[", "elt", "for", "pair", "in", "inter_zip", "for", "elt",...
Combine a pattern and variable parts to be a line string again.
[ "Combine", "a", "pattern", "and", "variable", "parts", "to", "be", "a", "line", "string", "again", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L154-L158
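combine() interleaves the variable and static parts back into a single line. The Python 2 izip_longest call maps directly onto itertools.zip_longest in Python 3, as in this minimal sketch:

from itertools import zip_longest

def combine(pattern, variable):
    # alternate variable and pattern fragments, padding the shorter side
    pairs = zip_longest(variable, pattern, fillvalue="")
    return "".join(part for pair in pairs for part in pair)

# combine(("is", "pattern"), ("this ", " a good ")) -> "this is a good pattern"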
244,049
rueckstiess/mtools
mtools/util/cmdlinetool.py
BaseCmdLineTool.run
def run(self, arguments=None, get_unknowns=False): """ Init point to execute the script. If `arguments` string is given, will evaluate the arguments, else evaluates sys.argv. Any inheriting class should extend the run method (but first calling BaseCmdLineTool.run(self)). """ # redirect PIPE signal to quiet kill script, if not on Windows if os.name != 'nt': signal.signal(signal.SIGPIPE, signal.SIG_DFL) if get_unknowns: if arguments: self.args, self.unknown_args = (self.argparser.parse_known_args (args=arguments.split())) else: (self.args, self.unknown_args) = self.argparser.parse_known_args() self.args = vars(self.args) else: if arguments: myargs = arguments.split() self.args = vars(self.argparser.parse_args (args=myargs)) else: self.args = vars(self.argparser.parse_args()) self.progress_bar_enabled = (not (self.args['no_progressbar'] or self.is_stdin))
python
def run(self, arguments=None, get_unknowns=False): # redirect PIPE signal to quiet kill script, if not on Windows if os.name != 'nt': signal.signal(signal.SIGPIPE, signal.SIG_DFL) if get_unknowns: if arguments: self.args, self.unknown_args = (self.argparser.parse_known_args (args=arguments.split())) else: (self.args, self.unknown_args) = self.argparser.parse_known_args() self.args = vars(self.args) else: if arguments: myargs = arguments.split() self.args = vars(self.argparser.parse_args (args=myargs)) else: self.args = vars(self.argparser.parse_args()) self.progress_bar_enabled = (not (self.args['no_progressbar'] or self.is_stdin))
[ "def", "run", "(", "self", ",", "arguments", "=", "None", ",", "get_unknowns", "=", "False", ")", ":", "# redirect PIPE signal to quiet kill script, if not on Windows", "if", "os", ".", "name", "!=", "'nt'", ":", "signal", ".", "signal", "(", "signal", ".", "S...
Init point to execute the script. If `arguments` string is given, will evaluate the arguments, else evaluates sys.argv. Any inheriting class should extend the run method (but first calling BaseCmdLineTool.run(self)).
[ "Init", "point", "to", "execute", "the", "script", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/cmdlinetool.py#L110-L139
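The get_unknowns branch above relies on argparse's parse_known_args, which splits recognised options from everything else so the leftovers can be forwarded later (the mlaunch records earlier in this file pass such unknown arguments through to mongod/mongos). A minimal, self-contained illustration with a made-up option set:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")
args, unknown = parser.parse_known_args(["--verbose", "--wiredTigerCacheSizeGB", "1"])
# args.verbose is True; unknown == ["--wiredTigerCacheSizeGB", "1"]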
244,050
rueckstiess/mtools
mtools/util/cmdlinetool.py
BaseCmdLineTool.update_progress
def update_progress(self, progress, prefix=''): """ Print a progress bar for longer-running scripts. The progress value is a value between 0.0 and 1.0. If a prefix is present, it will be printed before the progress bar. """ total_length = 40 if progress == 1.: sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50)) sys.stderr.write('\n') sys.stderr.flush() else: bar_length = int(round(total_length * progress)) sys.stderr.write('\r%s [%s%s] %.1f %% ' % (prefix, '=' * bar_length, ' ' * (total_length - bar_length), progress * 100)) sys.stderr.flush()
python
def update_progress(self, progress, prefix=''): total_length = 40 if progress == 1.: sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50)) sys.stderr.write('\n') sys.stderr.flush() else: bar_length = int(round(total_length * progress)) sys.stderr.write('\r%s [%s%s] %.1f %% ' % (prefix, '=' * bar_length, ' ' * (total_length - bar_length), progress * 100)) sys.stderr.flush()
[ "def", "update_progress", "(", "self", ",", "progress", ",", "prefix", "=", "''", ")", ":", "total_length", "=", "40", "if", "progress", "==", "1.", ":", "sys", ".", "stderr", ".", "write", "(", "'\\r'", "+", "' '", "*", "(", "total_length", "+", "le...
Print a progress bar for longer-running scripts. The progress value is a number between 0.0 and 1.0. If a prefix is present, it will be printed before the progress bar.
[ "Print", "a", "progress", "bar", "for", "longer", "-", "running", "scripts", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/cmdlinetool.py#L153-L172
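update_progress overwrites the same terminal line with a carriage return instead of printing a new line per update. A self-contained sketch of that technique (the demo loop, timing, and prefix are arbitrary):

import sys
import time

def show_progress(progress, prefix="", width=40):
    filled = int(round(width * progress))
    sys.stderr.write("\r%s [%s%s] %.1f %%" %
                     (prefix, "=" * filled, " " * (width - filled), progress * 100))
    sys.stderr.flush()

if __name__ == "__main__":
    for i in range(101):
        show_progress(i / 100.0, prefix="demo")
        time.sleep(0.01)
    sys.stderr.write("\n")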
244,051
rueckstiess/mtools
mtools/mplotqueries/plottypes/scatter_type.py
ScatterPlotType.accept_line
def accept_line(self, logevent): """Return True if the log line has the nominated yaxis field.""" if self.regex_mode: return bool(re.search(self.field, logevent.line_str)) else: return getattr(logevent, self.field) is not None
python
def accept_line(self, logevent): if self.regex_mode: return bool(re.search(self.field, logevent.line_str)) else: return getattr(logevent, self.field) is not None
[ "def", "accept_line", "(", "self", ",", "logevent", ")", ":", "if", "self", ".", "regex_mode", ":", "return", "bool", "(", "re", ".", "search", "(", "self", ".", "field", ",", "logevent", ".", "line_str", ")", ")", "else", ":", "return", "getattr", "...
Return True if the log line has the nominated yaxis field.
[ "Return", "True", "if", "the", "log", "line", "has", "the", "nominated", "yaxis", "field", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/scatter_type.py#L54-L59
244,052
rueckstiess/mtools
mtools/mplotqueries/plottypes/scatter_type.py
ScatterPlotType.clicked
def clicked(self, event): """ Call if an element of this plottype is clicked. Implement in sub class. """ group = event.artist._mt_group indices = event.ind # double click only supported on 1.2 or later major, minor, _ = mpl_version.split('.') if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick: for i in indices: print(self.groups[group][i].line_str) else: # toggle durline first = indices[0] logevent = self.groups[group][first] try: # remove triangle for this event idx = map(itemgetter(0), self.durlines).index(logevent) _, poly = self.durlines[idx] poly.remove() plt.gcf().canvas.draw() del self.durlines[idx] except ValueError: # construct triangle and add to list of durlines if self.args['optime_start']: pts = [[date2num(logevent.datetime), 0], [date2num(logevent.datetime), logevent.duration], [date2num(logevent.datetime + timedelta(milliseconds=logevent.duration) ), 0]] else: pts = [[date2num(logevent.datetime), 0], [date2num(logevent.datetime), logevent.duration], [date2num(logevent.datetime - timedelta(milliseconds=logevent.duration) ), 0]] poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0, facecolor=event.artist.get_markerfacecolor(), edgecolor=None, zorder=-10000) ax = plt.gca() ax.add_patch(poly) plt.gcf().canvas.draw() self.durlines.append((logevent, poly))
python
def clicked(self, event): group = event.artist._mt_group indices = event.ind # double click only supported on 1.2 or later major, minor, _ = mpl_version.split('.') if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick: for i in indices: print(self.groups[group][i].line_str) else: # toggle durline first = indices[0] logevent = self.groups[group][first] try: # remove triangle for this event idx = map(itemgetter(0), self.durlines).index(logevent) _, poly = self.durlines[idx] poly.remove() plt.gcf().canvas.draw() del self.durlines[idx] except ValueError: # construct triangle and add to list of durlines if self.args['optime_start']: pts = [[date2num(logevent.datetime), 0], [date2num(logevent.datetime), logevent.duration], [date2num(logevent.datetime + timedelta(milliseconds=logevent.duration) ), 0]] else: pts = [[date2num(logevent.datetime), 0], [date2num(logevent.datetime), logevent.duration], [date2num(logevent.datetime - timedelta(milliseconds=logevent.duration) ), 0]] poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0, facecolor=event.artist.get_markerfacecolor(), edgecolor=None, zorder=-10000) ax = plt.gca() ax.add_patch(poly) plt.gcf().canvas.draw() self.durlines.append((logevent, poly))
[ "def", "clicked", "(", "self", ",", "event", ")", ":", "group", "=", "event", ".", "artist", ".", "_mt_group", "indices", "=", "event", ".", "ind", "# double click only supported on 1.2 or later", "major", ",", "minor", ",", "_", "=", "mpl_version", ".", "sp...
Call if an element of this plottype is clicked. Implement in sub class.
[ "Call", "if", "an", "element", "of", "this", "plottype", "is", "clicked", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/scatter_type.py#L88-L140
244,053
rueckstiess/mtools
mtools/mloginfo/mloginfo.py
MLogInfoTool.run
def run(self, arguments=None): """Print useful information about the log file.""" LogFileTool.run(self, arguments) for i, self.logfile in enumerate(self.args['logfile']): if i > 0: print("\n ------------------------------------------\n") if self.logfile.datetime_format == 'ctime-pre2.4': # no milliseconds when datetime format doesn't support it start_time = (self.logfile.start.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") end_time = (self.logfile.end.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") else: # include milliseconds start_time = (self.logfile.start.strftime("%Y %b %d " "%H:%M:%S.%f")[:-3] if self.logfile.start else "unknown") end_time = (self.logfile.end.strftime("%Y %b %d " "%H:%M:%S.%f")[:-3] if self.logfile.start else "unknown") print(" source: %s" % self.logfile.name) print(" host: %s" % (self.logfile.hostname + ':' + str(self.logfile.port) if self.logfile.hostname else "unknown")) print(" start: %s" % (start_time)) print(" end: %s" % (end_time)) # TODO: add timezone if iso8601 format print("date format: %s" % self.logfile.datetime_format) print(" length: %s" % len(self.logfile)) print(" binary: %s" % (self.logfile.binary or "unknown")) version = (' -> '.join(self.logfile.versions) or "unknown") # if version is unknown, go by date if version == 'unknown': if self.logfile.datetime_format == 'ctime-pre2.4': version = '< 2.4 (no milliseconds)' elif self.logfile.datetime_format == 'ctime': version = '>= 2.4.x ctime (milliseconds present)' elif (self.logfile.datetime_format == "iso8601-utc" or self.logfile.datetime_format == "iso8601-local"): if self.logfile.has_level: version = '>= 3.0 (iso8601 format, level, component)' else: version = '= 2.6.x (iso8601 format)' print(" version: %s" % version) print(" storage: %s" % (self.logfile.storage_engine or 'unknown')) # now run all sections for section in self.sections: if section.active: print("\n%s" % section.name.upper()) section.run()
python
def run(self, arguments=None): LogFileTool.run(self, arguments) for i, self.logfile in enumerate(self.args['logfile']): if i > 0: print("\n ------------------------------------------\n") if self.logfile.datetime_format == 'ctime-pre2.4': # no milliseconds when datetime format doesn't support it start_time = (self.logfile.start.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") end_time = (self.logfile.end.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") else: # include milliseconds start_time = (self.logfile.start.strftime("%Y %b %d " "%H:%M:%S.%f")[:-3] if self.logfile.start else "unknown") end_time = (self.logfile.end.strftime("%Y %b %d " "%H:%M:%S.%f")[:-3] if self.logfile.start else "unknown") print(" source: %s" % self.logfile.name) print(" host: %s" % (self.logfile.hostname + ':' + str(self.logfile.port) if self.logfile.hostname else "unknown")) print(" start: %s" % (start_time)) print(" end: %s" % (end_time)) # TODO: add timezone if iso8601 format print("date format: %s" % self.logfile.datetime_format) print(" length: %s" % len(self.logfile)) print(" binary: %s" % (self.logfile.binary or "unknown")) version = (' -> '.join(self.logfile.versions) or "unknown") # if version is unknown, go by date if version == 'unknown': if self.logfile.datetime_format == 'ctime-pre2.4': version = '< 2.4 (no milliseconds)' elif self.logfile.datetime_format == 'ctime': version = '>= 2.4.x ctime (milliseconds present)' elif (self.logfile.datetime_format == "iso8601-utc" or self.logfile.datetime_format == "iso8601-local"): if self.logfile.has_level: version = '>= 3.0 (iso8601 format, level, component)' else: version = '= 2.6.x (iso8601 format)' print(" version: %s" % version) print(" storage: %s" % (self.logfile.storage_engine or 'unknown')) # now run all sections for section in self.sections: if section.active: print("\n%s" % section.name.upper()) section.run()
[ "def", "run", "(", "self", ",", "arguments", "=", "None", ")", ":", "LogFileTool", ".", "run", "(", "self", ",", "arguments", ")", "for", "i", ",", "self", ".", "logfile", "in", "enumerate", "(", "self", ".", "args", "[", "'logfile'", "]", ")", ":"...
Print useful information about the log file.
[ "Print", "useful", "information", "about", "the", "log", "file", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mloginfo/mloginfo.py#L32-L90
244,054
rueckstiess/mtools
mtools/util/logfile.py
LogFile.filesize
def filesize(self): """ Lazy evaluation of start and end of logfile. Returns None for stdin input currently. """ if self.from_stdin: return None if not self._filesize: self._calculate_bounds() return self._filesize
python
def filesize(self): if self.from_stdin: return None if not self._filesize: self._calculate_bounds() return self._filesize
[ "def", "filesize", "(", "self", ")", ":", "if", "self", ".", "from_stdin", ":", "return", "None", "if", "not", "self", ".", "_filesize", ":", "self", ".", "_calculate_bounds", "(", ")", "return", "self", ".", "_filesize" ]
Lazy evaluation of the file size. Returns None for stdin input currently.
[ "Lazy", "evaluation", "of", "the", "file", "size", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L84-L94
244,055
rueckstiess/mtools
mtools/util/logfile.py
LogFile.num_lines
def num_lines(self): """ Lazy evaluation of the number of lines. Returns None for stdin input currently. """ if self.from_stdin: return None if not self._num_lines: self._iterate_lines() return self._num_lines
python
def num_lines(self): if self.from_stdin: return None if not self._num_lines: self._iterate_lines() return self._num_lines
[ "def", "num_lines", "(", "self", ")", ":", "if", "self", ".", "from_stdin", ":", "return", "None", "if", "not", "self", ".", "_num_lines", ":", "self", ".", "_iterate_lines", "(", ")", "return", "self", ".", "_num_lines" ]
Lazy evaluation of the number of lines. Returns None for stdin input currently.
[ "Lazy", "evaluation", "of", "the", "number", "of", "lines", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L118-L128
244,056
rueckstiess/mtools
mtools/util/logfile.py
LogFile.versions
def versions(self): """Return all version changes.""" versions = [] for v, _ in self.restarts: if len(versions) == 0 or v != versions[-1]: versions.append(v) return versions
python
def versions(self): versions = [] for v, _ in self.restarts: if len(versions) == 0 or v != versions[-1]: versions.append(v) return versions
[ "def", "versions", "(", "self", ")", ":", "versions", "=", "[", "]", "for", "v", ",", "_", "in", "self", ".", "restarts", ":", "if", "len", "(", "versions", ")", "==", "0", "or", "v", "!=", "versions", "[", "-", "1", "]", ":", "versions", ".", ...
Return all version changes.
[ "Return", "all", "version", "changes", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L166-L172
244,057
rueckstiess/mtools
mtools/util/logfile.py
LogFile.next
def next(self): """Get next line, adjust for year rollover and hint datetime format.""" # use readline here because next() iterator uses internal readahead # buffer so seek position is wrong line = self.filehandle.readline() line = line.decode('utf-8', 'replace') if line == '': raise StopIteration line = line.rstrip('\n') le = LogEvent(line) # hint format and nextpos from previous line if self._datetime_format and self._datetime_nextpos is not None: ret = le.set_datetime_hint(self._datetime_format, self._datetime_nextpos, self.year_rollover) if not ret: # logevent indicates timestamp format has changed, # invalidate hint info self._datetime_format = None self._datetime_nextpos = None elif le.datetime: # gather new hint info from another logevent self._datetime_format = le.datetime_format self._datetime_nextpos = le._datetime_nextpos return le
python
def next(self): # use readline here because next() iterator uses internal readahead # buffer so seek position is wrong line = self.filehandle.readline() line = line.decode('utf-8', 'replace') if line == '': raise StopIteration line = line.rstrip('\n') le = LogEvent(line) # hint format and nextpos from previous line if self._datetime_format and self._datetime_nextpos is not None: ret = le.set_datetime_hint(self._datetime_format, self._datetime_nextpos, self.year_rollover) if not ret: # logevent indicates timestamp format has changed, # invalidate hint info self._datetime_format = None self._datetime_nextpos = None elif le.datetime: # gather new hint info from another logevent self._datetime_format = le.datetime_format self._datetime_nextpos = le._datetime_nextpos return le
[ "def", "next", "(", "self", ")", ":", "# use readline here because next() iterator uses internal readahead", "# buffer so seek position is wrong", "line", "=", "self", ".", "filehandle", ".", "readline", "(", ")", "line", "=", "line", ".", "decode", "(", "'utf-8'", ",...
Get next line, adjust for year rollover and hint datetime format.
[ "Get", "next", "line", "adjust", "for", "year", "rollover", "and", "hint", "datetime", "format", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L210-L236
244,058
rueckstiess/mtools
mtools/util/logfile.py
LogFile._calculate_bounds
def _calculate_bounds(self): """Calculate beginning and end of logfile.""" if self._bounds_calculated: # Assume no need to recalc bounds for lifetime of a Logfile object return if self.from_stdin: return False # we should be able to find a valid log line within max_start_lines max_start_lines = 10 lines_checked = 0 # get start datetime for line in self.filehandle: logevent = LogEvent(line) lines_checked += 1 if logevent.datetime: self._start = logevent.datetime self._timezone = logevent.datetime.tzinfo self._datetime_format = logevent.datetime_format self._datetime_nextpos = logevent._datetime_nextpos break if lines_checked > max_start_lines: break # sanity check before attempting to find end date if (self._start is None): raise SystemExit("Error: <%s> does not appear to be a supported " "MongoDB log file format" % self.filehandle.name) # get end datetime (lines are at most 10k, # go back 30k at most to make sure we catch one) self.filehandle.seek(0, 2) self._filesize = self.filehandle.tell() self.filehandle.seek(-min(self._filesize, 30000), 2) for line in reversed(self.filehandle.readlines()): logevent = LogEvent(line) if logevent.datetime: self._end = logevent.datetime break # if there was a roll-over, subtract 1 year from start time if self._end < self._start: self._start = self._start.replace(year=self._start.year - 1) self._year_rollover = self._end else: self._year_rollover = False # reset logfile self.filehandle.seek(0) self._bounds_calculated = True return True
python
def _calculate_bounds(self): if self._bounds_calculated: # Assume no need to recalc bounds for lifetime of a Logfile object return if self.from_stdin: return False # we should be able to find a valid log line within max_start_lines max_start_lines = 10 lines_checked = 0 # get start datetime for line in self.filehandle: logevent = LogEvent(line) lines_checked += 1 if logevent.datetime: self._start = logevent.datetime self._timezone = logevent.datetime.tzinfo self._datetime_format = logevent.datetime_format self._datetime_nextpos = logevent._datetime_nextpos break if lines_checked > max_start_lines: break # sanity check before attempting to find end date if (self._start is None): raise SystemExit("Error: <%s> does not appear to be a supported " "MongoDB log file format" % self.filehandle.name) # get end datetime (lines are at most 10k, # go back 30k at most to make sure we catch one) self.filehandle.seek(0, 2) self._filesize = self.filehandle.tell() self.filehandle.seek(-min(self._filesize, 30000), 2) for line in reversed(self.filehandle.readlines()): logevent = LogEvent(line) if logevent.datetime: self._end = logevent.datetime break # if there was a roll-over, subtract 1 year from start time if self._end < self._start: self._start = self._start.replace(year=self._start.year - 1) self._year_rollover = self._end else: self._year_rollover = False # reset logfile self.filehandle.seek(0) self._bounds_calculated = True return True
[ "def", "_calculate_bounds", "(", "self", ")", ":", "if", "self", ".", "_bounds_calculated", ":", "# Assume no need to recalc bounds for lifetime of a Logfile object", "return", "if", "self", ".", "from_stdin", ":", "return", "False", "# we should be able to find a valid log l...
Calculate beginning and end of logfile.
[ "Calculate", "beginning", "and", "end", "of", "logfile", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L407-L461
244,059
rueckstiess/mtools
mtools/util/logfile.py
LogFile._find_curr_line
def _find_curr_line(self, prev=False): """ Internal helper function. Find the current (or previous if prev=True) line in a log file based on the current seek position. """ curr_pos = self.filehandle.tell() # jump back 15k characters (at most) and find last newline char jump_back = min(self.filehandle.tell(), 15000) self.filehandle.seek(-jump_back, 1) buff = self.filehandle.read(jump_back) self.filehandle.seek(curr_pos, 0) if prev and self.prev_pos is not None and self.prev_pos == curr_pos: # Number of characters to show before/after the log offset error_context = 300 self.filehandle.seek(-error_context, 1) buff = self.filehandle.read(curr_pos) hr = "-" * 60 print("Fatal log parsing loop detected trying to find previous " "log line near offset %s in %s:\n\n%s\n%s\n" "<--- (current log parsing offset) \n%s\n%s\n" % (curr_pos, self.name, hr, buff[:error_context], buff[error_context:error_context + 1], hr), file=sys.stderr) raise SystemExit("Cannot parse %s with requested options" % self.filehandle.name) else: self.prev_pos = curr_pos buff = buff.decode("utf-8", "replace") newline_pos = buff.rfind('\n') if prev: newline_pos = buff[:newline_pos].rfind('\n') # move back to last newline char if newline_pos == -1: self.filehandle.seek(0) return self.next() self.filehandle.seek(newline_pos - jump_back + 1, 1) # roll forward until we found a line with a datetime try: logevent = self.next() while not logevent.datetime: logevent = self.next() return logevent except StopIteration: # reached end of file return None
python
def _find_curr_line(self, prev=False): curr_pos = self.filehandle.tell() # jump back 15k characters (at most) and find last newline char jump_back = min(self.filehandle.tell(), 15000) self.filehandle.seek(-jump_back, 1) buff = self.filehandle.read(jump_back) self.filehandle.seek(curr_pos, 0) if prev and self.prev_pos is not None and self.prev_pos == curr_pos: # Number of characters to show before/after the log offset error_context = 300 self.filehandle.seek(-error_context, 1) buff = self.filehandle.read(curr_pos) hr = "-" * 60 print("Fatal log parsing loop detected trying to find previous " "log line near offset %s in %s:\n\n%s\n%s\n" "<--- (current log parsing offset) \n%s\n%s\n" % (curr_pos, self.name, hr, buff[:error_context], buff[error_context:error_context + 1], hr), file=sys.stderr) raise SystemExit("Cannot parse %s with requested options" % self.filehandle.name) else: self.prev_pos = curr_pos buff = buff.decode("utf-8", "replace") newline_pos = buff.rfind('\n') if prev: newline_pos = buff[:newline_pos].rfind('\n') # move back to last newline char if newline_pos == -1: self.filehandle.seek(0) return self.next() self.filehandle.seek(newline_pos - jump_back + 1, 1) # roll forward until we found a line with a datetime try: logevent = self.next() while not logevent.datetime: logevent = self.next() return logevent except StopIteration: # reached end of file return None
[ "def", "_find_curr_line", "(", "self", ",", "prev", "=", "False", ")", ":", "curr_pos", "=", "self", ".", "filehandle", ".", "tell", "(", ")", "# jump back 15k characters (at most) and find last newline char", "jump_back", "=", "min", "(", "self", ".", "filehandle...
Internal helper function. Find the current (or previous if prev=True) line in a log file based on the current seek position.
[ "Internal", "helper", "function", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L463-L515
244,060
rueckstiess/mtools
mtools/util/logfile.py
LogFile.fast_forward
def fast_forward(self, start_dt): """ Fast-forward file to given start_dt datetime obj using binary search. Only fast for files. Streams need to be forwarded manually, and it will miss the first line that would otherwise match (as it consumes the log line). """ if self.from_stdin: # skip lines until start_dt is reached return else: # fast bisection path max_mark = self.filesize step_size = max_mark # check if start_dt is already smaller than first datetime self.filehandle.seek(0) le = self.next() if le.datetime and le.datetime >= start_dt: self.filehandle.seek(0) return le = None self.filehandle.seek(0) # search for lower bound while abs(step_size) > 100: step_size = ceil(step_size / 2.) self.filehandle.seek(step_size, 1) le = self._find_curr_line() if not le: break if le.datetime >= start_dt: step_size = -abs(step_size) else: step_size = abs(step_size) if not le: return # now walk backwards until we found a truly smaller line while self.filehandle.tell() >= 2 and (le.datetime is None or le.datetime >= start_dt): self.filehandle.seek(-2, 1) le = self._find_curr_line(prev=True)
python
def fast_forward(self, start_dt): if self.from_stdin: # skip lines until start_dt is reached return else: # fast bisection path max_mark = self.filesize step_size = max_mark # check if start_dt is already smaller than first datetime self.filehandle.seek(0) le = self.next() if le.datetime and le.datetime >= start_dt: self.filehandle.seek(0) return le = None self.filehandle.seek(0) # search for lower bound while abs(step_size) > 100: step_size = ceil(step_size / 2.) self.filehandle.seek(step_size, 1) le = self._find_curr_line() if not le: break if le.datetime >= start_dt: step_size = -abs(step_size) else: step_size = abs(step_size) if not le: return # now walk backwards until we found a truly smaller line while self.filehandle.tell() >= 2 and (le.datetime is None or le.datetime >= start_dt): self.filehandle.seek(-2, 1) le = self._find_curr_line(prev=True)
[ "def", "fast_forward", "(", "self", ",", "start_dt", ")", ":", "if", "self", ".", "from_stdin", ":", "# skip lines until start_dt is reached", "return", "else", ":", "# fast bisection path", "max_mark", "=", "self", ".", "filesize", "step_size", "=", "max_mark", "...
Fast-forward file to given start_dt datetime obj using binary search. Only fast for files. Streams need to be forwarded manually, and it will miss the first line that would otherwise match (as it consumes the log line).
[ "Fast", "-", "forward", "file", "to", "given", "start_dt", "datetime", "obj", "using", "binary", "search", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L517-L566
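The lower-bound search that fast_forward performs with file seeks can be illustrated on an in-memory list of event datetimes. This is a standalone sketch of the same idea (using bisect on made-up sample timestamps), not the file-offset implementation recorded above.

import bisect
from datetime import datetime

# Sorted event datetimes standing in for log lines (made-up sample data).
event_times = [
    datetime(2017, 6, 1, 12, 0, 0),
    datetime(2017, 6, 1, 12, 5, 0),
    datetime(2017, 6, 1, 12, 10, 0),
    datetime(2017, 6, 1, 12, 15, 0),
]

start_dt = datetime(2017, 6, 1, 12, 7, 0)

# bisect_left finds the first index whose timestamp is >= start_dt,
# the same lower bound that fast_forward approximates with seeks and halving steps.
idx = bisect.bisect_left(event_times, start_dt)
print(idx, event_times[idx])  # 2 2017-06-01 12:10:00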
244,061
rueckstiess/mtools
mtools/mlogfilter/filters/datetime_filter.py
DateTimeFilter.setup
def setup(self): """Get start end end date of logfile before starting to parse.""" if self.mlogfilter.is_stdin: # assume this year (we have no other info) now = datetime.now() self.startDateTime = datetime(now.year, 1, 1, tzinfo=tzutc()) self.endDateTime = datetime(MAXYEAR, 12, 31, tzinfo=tzutc()) else: logfiles = self.mlogfilter.args['logfile'] self.startDateTime = min([lf.start + timedelta(hours=self .mlogfilter .args['timezone'][i]) for i, lf in enumerate(logfiles)]) self.endDateTime = max([lf.end + timedelta(hours=self .mlogfilter.args['timezone'][i]) for i, lf in enumerate(logfiles)]) # now parse for further changes to from and to datetimes dtbound = DateTimeBoundaries(self.startDateTime, self.endDateTime) self.fromDateTime, self.toDateTime = dtbound(self.mlogfilter .args['from'] or None, self.mlogfilter .args['to'] or None) # define start_limit for mlogfilter's fast_forward method self.start_limit = self.fromDateTime # for single logfile, get file seek position of `to` datetime if (len(self.mlogfilter.args['logfile']) == 1 and not self.mlogfilter.is_stdin): if self.mlogfilter.args['to'] != "end": # fast forward, get seek value, then reset file logfile = self.mlogfilter.args['logfile'][0] logfile.fast_forward(self.toDateTime) self.seek_to = logfile.filehandle.tell() logfile.filehandle.seek(0) else: self.seek_to = -1 else: self.seek_to = False
python
def setup(self): if self.mlogfilter.is_stdin: # assume this year (we have no other info) now = datetime.now() self.startDateTime = datetime(now.year, 1, 1, tzinfo=tzutc()) self.endDateTime = datetime(MAXYEAR, 12, 31, tzinfo=tzutc()) else: logfiles = self.mlogfilter.args['logfile'] self.startDateTime = min([lf.start + timedelta(hours=self .mlogfilter .args['timezone'][i]) for i, lf in enumerate(logfiles)]) self.endDateTime = max([lf.end + timedelta(hours=self .mlogfilter.args['timezone'][i]) for i, lf in enumerate(logfiles)]) # now parse for further changes to from and to datetimes dtbound = DateTimeBoundaries(self.startDateTime, self.endDateTime) self.fromDateTime, self.toDateTime = dtbound(self.mlogfilter .args['from'] or None, self.mlogfilter .args['to'] or None) # define start_limit for mlogfilter's fast_forward method self.start_limit = self.fromDateTime # for single logfile, get file seek position of `to` datetime if (len(self.mlogfilter.args['logfile']) == 1 and not self.mlogfilter.is_stdin): if self.mlogfilter.args['to'] != "end": # fast forward, get seek value, then reset file logfile = self.mlogfilter.args['logfile'][0] logfile.fast_forward(self.toDateTime) self.seek_to = logfile.filehandle.tell() logfile.filehandle.seek(0) else: self.seek_to = -1 else: self.seek_to = False
[ "def", "setup", "(", "self", ")", ":", "if", "self", ".", "mlogfilter", ".", "is_stdin", ":", "# assume this year (we have no other info)", "now", "=", "datetime", ".", "now", "(", ")", "self", ".", "startDateTime", "=", "datetime", "(", "now", ".", "year", ...
Get start and end date of logfile before starting to parse.
[ "Get", "start", "and", "end", "date", "of", "logfile", "before", "starting", "to", "parse", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/filters/datetime_filter.py#L108-L151
244,062
rueckstiess/mtools
mtools/util/profile_collection.py
ProfileCollection.num_events
def num_events(self): """Lazy evaluation of the number of events.""" if not self._num_events: self._num_events = self.coll_handle.count() return self._num_events
python
def num_events(self): if not self._num_events: self._num_events = self.coll_handle.count() return self._num_events
[ "def", "num_events", "(", "self", ")", ":", "if", "not", "self", ".", "_num_events", ":", "self", ".", "_num_events", "=", "self", ".", "coll_handle", ".", "count", "(", ")", "return", "self", ".", "_num_events" ]
Lazy evaluation of the number of events.
[ "Lazy", "evaluation", "of", "the", "number", "of", "events", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/profile_collection.py#L88-L92
244,063
rueckstiess/mtools
mtools/util/profile_collection.py
ProfileCollection.next
def next(self): """Make iterators.""" if not self.cursor: self.cursor = self.coll_handle.find().sort([("ts", ASCENDING)]) doc = self.cursor.next() doc['thread'] = self.name le = LogEvent(doc) return le
python
def next(self): if not self.cursor: self.cursor = self.coll_handle.find().sort([("ts", ASCENDING)]) doc = self.cursor.next() doc['thread'] = self.name le = LogEvent(doc) return le
[ "def", "next", "(", "self", ")", ":", "if", "not", "self", ".", "cursor", ":", "self", ".", "cursor", "=", "self", ".", "coll_handle", ".", "find", "(", ")", ".", "sort", "(", "[", "(", "\"ts\"", ",", "ASCENDING", ")", "]", ")", "doc", "=", "se...
Make iterators.
[ "Make", "iterators", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/profile_collection.py#L94-L102
244,064
rueckstiess/mtools
mtools/util/profile_collection.py
ProfileCollection._calculate_bounds
def _calculate_bounds(self): """Calculate beginning and end of log events.""" # get start datetime first = self.coll_handle.find_one(None, sort=[("ts", ASCENDING)]) last = self.coll_handle.find_one(None, sort=[("ts", DESCENDING)]) self._start = first['ts'] if self._start.tzinfo is None: self._start = self._start.replace(tzinfo=tzutc()) self._end = last['ts'] if self._end.tzinfo is None: self._end = self._end.replace(tzinfo=tzutc()) return True
python
def _calculate_bounds(self): # get start datetime first = self.coll_handle.find_one(None, sort=[("ts", ASCENDING)]) last = self.coll_handle.find_one(None, sort=[("ts", DESCENDING)]) self._start = first['ts'] if self._start.tzinfo is None: self._start = self._start.replace(tzinfo=tzutc()) self._end = last['ts'] if self._end.tzinfo is None: self._end = self._end.replace(tzinfo=tzutc()) return True
[ "def", "_calculate_bounds", "(", "self", ")", ":", "# get start datetime", "first", "=", "self", ".", "coll_handle", ".", "find_one", "(", "None", ",", "sort", "=", "[", "(", "\"ts\"", ",", "ASCENDING", ")", "]", ")", "last", "=", "self", ".", "coll_hand...
Calculate beginning and end of log events.
[ "Calculate", "beginning", "and", "end", "of", "log", "events", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/profile_collection.py#L117-L131
244,065
rueckstiess/mtools
mtools/mloginfo/sections/distinct_section.py
DistinctSection.run
def run(self): """Run each line through log2code and group by matched pattern.""" if ProfileCollection and isinstance(self.mloginfo.logfile, ProfileCollection): print("\n not available for system.profile collections\n") return codelines = defaultdict(lambda: 0) non_matches = 0 # get log file information logfile = self.mloginfo.logfile if logfile.start and logfile.end and not self.mloginfo.args['verbose']: progress_start = self.mloginfo._datetime_to_epoch(logfile.start) progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) - progress_start) else: self.mloginfo.progress_bar_enabled = False for i, logevent in enumerate(self.mloginfo.logfile): cl, _ = self.log2code(logevent.line_str) # update progress bar every 1000 lines if self.mloginfo.progress_bar_enabled and (i % 1000 == 0): if logevent.datetime: progress_curr = self.mloginfo._datetime_to_epoch(logevent .datetime) (self.mloginfo .update_progress(float(progress_curr - progress_start) / progress_total)) if cl: codelines[cl.pattern] += 1 else: if logevent.operation: # skip operations (command, insert, update, delete, # query, getmore) continue if not logevent.thread: # skip the lines that don't have a thread name # (usually map/reduce or assertions) continue if len(logevent.split_tokens) - logevent.datetime_nextpos <= 1: # skip empty log messages (after thread name) continue if ("warning: log line attempted" in logevent.line_str and "over max size" in logevent.line_str): # skip lines that are too long continue # everything else is a real non-match non_matches += 1 if self.mloginfo.args['verbose']: print("couldn't match:" + logevent) # clear progress bar again if self.mloginfo.progress_bar_enabled: self.mloginfo.update_progress(1.0) if self.mloginfo.args['verbose']: print('') for cl in sorted(codelines, key=lambda x: codelines[x], reverse=True): print("%8i %s" % (codelines[cl], " ... ".join(cl))) print('') if non_matches > 0: print("distinct couldn't match %i lines" % non_matches) if not self.mloginfo.args['verbose']: print("to show non-matched lines, run with --verbose.")
python
def run(self): if ProfileCollection and isinstance(self.mloginfo.logfile, ProfileCollection): print("\n not available for system.profile collections\n") return codelines = defaultdict(lambda: 0) non_matches = 0 # get log file information logfile = self.mloginfo.logfile if logfile.start and logfile.end and not self.mloginfo.args['verbose']: progress_start = self.mloginfo._datetime_to_epoch(logfile.start) progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) - progress_start) else: self.mloginfo.progress_bar_enabled = False for i, logevent in enumerate(self.mloginfo.logfile): cl, _ = self.log2code(logevent.line_str) # update progress bar every 1000 lines if self.mloginfo.progress_bar_enabled and (i % 1000 == 0): if logevent.datetime: progress_curr = self.mloginfo._datetime_to_epoch(logevent .datetime) (self.mloginfo .update_progress(float(progress_curr - progress_start) / progress_total)) if cl: codelines[cl.pattern] += 1 else: if logevent.operation: # skip operations (command, insert, update, delete, # query, getmore) continue if not logevent.thread: # skip the lines that don't have a thread name # (usually map/reduce or assertions) continue if len(logevent.split_tokens) - logevent.datetime_nextpos <= 1: # skip empty log messages (after thread name) continue if ("warning: log line attempted" in logevent.line_str and "over max size" in logevent.line_str): # skip lines that are too long continue # everything else is a real non-match non_matches += 1 if self.mloginfo.args['verbose']: print("couldn't match:" + logevent) # clear progress bar again if self.mloginfo.progress_bar_enabled: self.mloginfo.update_progress(1.0) if self.mloginfo.args['verbose']: print('') for cl in sorted(codelines, key=lambda x: codelines[x], reverse=True): print("%8i %s" % (codelines[cl], " ... ".join(cl))) print('') if non_matches > 0: print("distinct couldn't match %i lines" % non_matches) if not self.mloginfo.args['verbose']: print("to show non-matched lines, run with --verbose.")
[ "def", "run", "(", "self", ")", ":", "if", "ProfileCollection", "and", "isinstance", "(", "self", ".", "mloginfo", ".", "logfile", ",", "ProfileCollection", ")", ":", "print", "(", "\"\\n not available for system.profile collections\\n\"", ")", "return", "codelin...
Run each line through log2code and group by matched pattern.
[ "Run", "each", "line", "through", "log2code", "and", "group", "by", "matched", "pattern", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mloginfo/sections/distinct_section.py#L39-L108
244,066
rueckstiess/mtools
mtools/util/pattern.py
shell2json
def shell2json(s): """Convert shell syntax to json.""" replace = { r'BinData\(.+?\)': '1', r'(new )?Date\(.+?\)': '1', r'Timestamp\(.+?\)': '1', r'ObjectId\(.+?\)': '1', r'DBRef\(.+?\)': '1', r'undefined': '1', r'MinKey': '1', r'MaxKey': '1', r'NumberLong\(.+?\)': '1', r'/.+?/\w*': '1' } for key, value in replace.items(): s = re.sub(key, value, s) return s
python
def shell2json(s): replace = { r'BinData\(.+?\)': '1', r'(new )?Date\(.+?\)': '1', r'Timestamp\(.+?\)': '1', r'ObjectId\(.+?\)': '1', r'DBRef\(.+?\)': '1', r'undefined': '1', r'MinKey': '1', r'MaxKey': '1', r'NumberLong\(.+?\)': '1', r'/.+?/\w*': '1' } for key, value in replace.items(): s = re.sub(key, value, s) return s
[ "def", "shell2json", "(", "s", ")", ":", "replace", "=", "{", "r'BinData\\(.+?\\)'", ":", "'1'", ",", "r'(new )?Date\\(.+?\\)'", ":", "'1'", ",", "r'Timestamp\\(.+?\\)'", ":", "'1'", ",", "r'ObjectId\\(.+?\\)'", ":", "'1'", ",", "r'DBRef\\(.+?\\)'", ":", "'1'", ...
Convert shell syntax to json.
[ "Convert", "shell", "syntax", "to", "json", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/pattern.py#L52-L70
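A small usage sketch for shell2json. The import path is inferred from the path field above (mtools/util/pattern.py) and the printed result is indicative, not an exact guarantee.

# Assumes mtools is installed; module path follows the "path" field above.
from mtools.util.pattern import shell2json

line = '{ _id: ObjectId("507f1f77bcf86cd799439011"), ts: Timestamp(1, 2), when: new Date(1490000000000) }'

# Shell-only constructs are replaced with the literal 1 so the string can later
# be fed to a JSON parser; key names are left untouched at this stage.
print(shell2json(line))
# expected (roughly): { _id: 1, ts: 1, when: 1 }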
244,067
rueckstiess/mtools
mtools/util/pattern.py
json2pattern
def json2pattern(s): """ Convert JSON format to a query pattern. Includes even mongo shell notation without quoted key names. """ # make valid JSON by wrapping field names in quotes s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s) # handle shell values that are not valid JSON s = shell2json(s) # convert to 1 where possible, to get rid of things like new Date(...) s, n = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s) # now convert to dictionary, converting unicode to ascii try: doc = json.loads(s, object_hook=_decode_pattern_dict) return json.dumps(doc, sort_keys=True, separators=(', ', ': ')) except ValueError as ex: return None
python
def json2pattern(s): # make valid JSON by wrapping field names in quotes s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s) # handle shell values that are not valid JSON s = shell2json(s) # convert to 1 where possible, to get rid of things like new Date(...) s, n = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s) # now convert to dictionary, converting unicode to ascii try: doc = json.loads(s, object_hook=_decode_pattern_dict) return json.dumps(doc, sort_keys=True, separators=(', ', ': ')) except ValueError as ex: return None
[ "def", "json2pattern", "(", "s", ")", ":", "# make valid JSON by wrapping field names in quotes", "s", ",", "_", "=", "re", ".", "subn", "(", "r'([{,])\\s*([^,{\\s\\'\"]+)\\s*:'", ",", "' \\\\1 \"\\\\2\" : '", ",", "s", ")", "# handle shell values that are not valid JSON", ...
Convert JSON format to a query pattern. Also handles mongo shell notation without quoted key names.
[ "Convert", "JSON", "format", "to", "a", "query", "pattern", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/pattern.py#L73-L90
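A usage sketch for json2pattern. The final collapsing also depends on _decode_pattern_dict, which is not shown in these records, so the expected output is indicative only; the import path is inferred from the path field above.

from mtools.util.pattern import json2pattern

# Mongo-shell style query with unquoted keys and a shell construct.
query = '{ a: 1, b: { $gt: 5 }, ts: new Date(1490000000000) }'

# Values are collapsed to 1 so that structurally identical queries map to the
# same pattern string, which is what query grouping relies on.
print(json2pattern(query))
# expected (roughly): {"a": 1, "b": 1, "ts": 1}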
244,068
rueckstiess/mtools
mtools/util/print_table.py
print_table
def print_table(rows, override_headers=None, uppercase_headers=True): """All rows need to be a list of dictionaries, all with the same keys.""" if len(rows) == 0: return keys = list(rows[0].keys()) headers = override_headers or keys if uppercase_headers: rows = [dict(zip(keys, map(lambda x: x.upper(), headers))), None] + rows else: rows = [dict(zip(keys, headers)), None] + rows lengths = [max(len(str(row[k])) for row in rows if hasattr(row, '__iter__')) for k in keys] tmp = ['{%s:%i}' % (h, l) for h, l in zip(keys[: -1], lengths[: -1])] tmp.append('{%s}' % keys[-1]) template = (' ' * 4).join(tmp) for row in rows: if type(row) == str: print(row) elif row is None: print() elif isinstance(row, dict): row = {k: v if v is not None else 'None' for k, v in row.items()} print(template.format(**row)) else: print("Unhandled row type:", row)
python
def print_table(rows, override_headers=None, uppercase_headers=True): if len(rows) == 0: return keys = list(rows[0].keys()) headers = override_headers or keys if uppercase_headers: rows = [dict(zip(keys, map(lambda x: x.upper(), headers))), None] + rows else: rows = [dict(zip(keys, headers)), None] + rows lengths = [max(len(str(row[k])) for row in rows if hasattr(row, '__iter__')) for k in keys] tmp = ['{%s:%i}' % (h, l) for h, l in zip(keys[: -1], lengths[: -1])] tmp.append('{%s}' % keys[-1]) template = (' ' * 4).join(tmp) for row in rows: if type(row) == str: print(row) elif row is None: print() elif isinstance(row, dict): row = {k: v if v is not None else 'None' for k, v in row.items()} print(template.format(**row)) else: print("Unhandled row type:", row)
[ "def", "print_table", "(", "rows", ",", "override_headers", "=", "None", ",", "uppercase_headers", "=", "True", ")", ":", "if", "len", "(", "rows", ")", "==", "0", ":", "return", "keys", "=", "list", "(", "rows", "[", "0", "]", ".", "keys", "(", ")...
All rows need to be a list of dictionaries, all with the same keys.
[ "All", "rows", "need", "to", "be", "a", "list", "of", "dictionaries", "all", "with", "the", "same", "keys", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/print_table.py#L3-L30
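A minimal usage sketch for print_table based on the signature above; the rows are made-up sample data and the import path is inferred from the path field.

from mtools.util.print_table import print_table

# Every row is a dict with the same keys; the key order of the first row
# determines the column order.
rows = [
    {'namespace': 'test.users', 'operation': 'query', 'count': 120},
    {'namespace': 'test.orders', 'operation': 'update', 'count': 37},
]

# Prints an aligned table with uppercased headers and a blank separator line.
print_table(rows, uppercase_headers=True)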
244,069
rueckstiess/mtools
mtools/util/logevent.py
LogEvent.set_line_str
def set_line_str(self, line_str): """ Set line_str. Line_str is only writeable if LogEvent was created from a string, not from a system.profile documents. """ if not self.from_string: raise ValueError("can't set line_str for LogEvent created from " "system.profile documents.") if line_str != self._line_str: self._line_str = line_str.rstrip() self._reset()
python
def set_line_str(self, line_str): if not self.from_string: raise ValueError("can't set line_str for LogEvent created from " "system.profile documents.") if line_str != self._line_str: self._line_str = line_str.rstrip() self._reset()
[ "def", "set_line_str", "(", "self", ",", "line_str", ")", ":", "if", "not", "self", ".", "from_string", ":", "raise", "ValueError", "(", "\"can't set line_str for LogEvent created from \"", "\"system.profile documents.\"", ")", "if", "line_str", "!=", "self", ".", "...
Set line_str. Line_str is only writeable if LogEvent was created from a string, not from a system.profile document.
[ "Set", "line_str", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L141-L154
244,070
rueckstiess/mtools
mtools/util/logevent.py
LogEvent.get_line_str
def get_line_str(self): """Return line_str depending on source, logfile or system.profile.""" if self.from_string: return ' '.join([s for s in [self.merge_marker_str, self._datetime_str, self._line_str] if s]) else: return ' '.join([s for s in [self._datetime_str, self._line_str] if s])
python
def get_line_str(self): if self.from_string: return ' '.join([s for s in [self.merge_marker_str, self._datetime_str, self._line_str] if s]) else: return ' '.join([s for s in [self._datetime_str, self._line_str] if s])
[ "def", "get_line_str", "(", "self", ")", ":", "if", "self", ".", "from_string", ":", "return", "' '", ".", "join", "(", "[", "s", "for", "s", "in", "[", "self", ".", "merge_marker_str", ",", "self", ".", "_datetime_str", ",", "self", ".", "_line_str", ...
Return line_str depending on source, logfile or system.profile.
[ "Return", "line_str", "depending", "on", "source", "logfile", "or", "system", ".", "profile", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L156-L164
244,071
rueckstiess/mtools
mtools/util/logevent.py
LogEvent._match_datetime_pattern
def _match_datetime_pattern(self, tokens): """ Match the datetime pattern at the beginning of the token list. There are several formats that this method needs to understand and distinguish between (see MongoDB's SERVER-7965): ctime-pre2.4 Wed Dec 31 19:00:00 ctime Wed Dec 31 19:00:00.000 iso8601-utc 1970-01-01T00:00:00.000Z iso8601-local 1969-12-31T19:00:00.000+0500 """ # first check: less than 4 tokens can't be ctime assume_iso8601_format = len(tokens) < 4 # check for ctime-pre-2.4 or ctime format if not assume_iso8601_format: weekday, month, day, time = tokens[:4] if (len(tokens) < 4 or (weekday not in self.weekdays) or (month not in self.months) or not day.isdigit()): assume_iso8601_format = True if assume_iso8601_format: # sanity check, because the dateutil parser could interpret # any numbers as a valid date if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}', tokens[0]): return None # convinced that this is a ISO-8601 format, the dateutil parser # will do the rest dt = dateutil.parser.parse(tokens[0]) self._datetime_format = "iso8601-utc" \ if tokens[0].endswith('Z') else "iso8601-local" else: # assume current year unless self.year_rollover # is set (from LogFile) year = datetime.now().year dt = dateutil.parser.parse(' '.join(tokens[: 4]), default=datetime(year, 1, 1)) if dt.tzinfo is None: dt = dt.replace(tzinfo=tzutc()) if self._year_rollover and dt > self._year_rollover: dt = dt.replace(year=year - 1) self._datetime_format = "ctime" \ if '.' in tokens[3] else "ctime-pre2.4" return dt
python
def _match_datetime_pattern(self, tokens): # first check: less than 4 tokens can't be ctime assume_iso8601_format = len(tokens) < 4 # check for ctime-pre-2.4 or ctime format if not assume_iso8601_format: weekday, month, day, time = tokens[:4] if (len(tokens) < 4 or (weekday not in self.weekdays) or (month not in self.months) or not day.isdigit()): assume_iso8601_format = True if assume_iso8601_format: # sanity check, because the dateutil parser could interpret # any numbers as a valid date if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}', tokens[0]): return None # convinced that this is a ISO-8601 format, the dateutil parser # will do the rest dt = dateutil.parser.parse(tokens[0]) self._datetime_format = "iso8601-utc" \ if tokens[0].endswith('Z') else "iso8601-local" else: # assume current year unless self.year_rollover # is set (from LogFile) year = datetime.now().year dt = dateutil.parser.parse(' '.join(tokens[: 4]), default=datetime(year, 1, 1)) if dt.tzinfo is None: dt = dt.replace(tzinfo=tzutc()) if self._year_rollover and dt > self._year_rollover: dt = dt.replace(year=year - 1) self._datetime_format = "ctime" \ if '.' in tokens[3] else "ctime-pre2.4" return dt
[ "def", "_match_datetime_pattern", "(", "self", ",", "tokens", ")", ":", "# first check: less than 4 tokens can't be ctime", "assume_iso8601_format", "=", "len", "(", "tokens", ")", "<", "4", "# check for ctime-pre-2.4 or ctime format", "if", "not", "assume_iso8601_format", ...
Match the datetime pattern at the beginning of the token list. There are several formats that this method needs to understand and distinguish between (see MongoDB's SERVER-7965): ctime-pre2.4 Wed Dec 31 19:00:00 ctime Wed Dec 31 19:00:00.000 iso8601-utc 1970-01-01T00:00:00.000Z iso8601-local 1969-12-31T19:00:00.000+0500
[ "Match", "the", "datetime", "pattern", "at", "the", "beginning", "of", "the", "token", "list", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L282-L333
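The two timestamp families that _match_datetime_pattern distinguishes can be reproduced with dateutil directly. This is a standalone sketch, not the method itself; the sample timestamps are made up.

from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc

# iso8601-utc: a single token, parsed as-is (the format used from MongoDB 2.6 on).
iso = dateutil.parser.parse("2017-06-01T12:00:00.123Z")
print(iso)  # 2017-06-01 12:00:00.123000+00:00

# ctime: four tokens and no year, so a default year has to be assumed,
# which is why the method falls back to the current year.
year = datetime.now().year
ct = dateutil.parser.parse("Wed Dec 31 19:00:00.123", default=datetime(year, 1, 1))
print(ct.replace(tzinfo=tzutc()))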
244,072
rueckstiess/mtools
mtools/util/logevent.py
LogEvent._extract_operation_and_namespace
def _extract_operation_and_namespace(self): """ Helper method to extract both operation and namespace from a logevent. It doesn't make sense to only extract one as they appear back to back in the token list. """ split_tokens = self.split_tokens if not self._datetime_nextpos: # force evaluation of thread to get access to datetime_offset and # to protect from changes due to line truncation. _ = self.thread if not self._datetime_nextpos or (len(split_tokens) <= self._datetime_nextpos + 2): return op = split_tokens[self._datetime_nextpos + 1].lower() if op == 'warning:': # check if this log line got truncated if ("warning: log line attempted" in self._line_str and "over max size" in self._line_str): self._datetime_nextpos = split_tokens.index('...') op = split_tokens[self._datetime_nextpos + 1] else: # unknown warning, bail out return if op in self.log_operations: self._operation = op self._namespace = split_tokens[self._datetime_nextpos + 2]
python
def _extract_operation_and_namespace(self): split_tokens = self.split_tokens if not self._datetime_nextpos: # force evaluation of thread to get access to datetime_offset and # to protect from changes due to line truncation. _ = self.thread if not self._datetime_nextpos or (len(split_tokens) <= self._datetime_nextpos + 2): return op = split_tokens[self._datetime_nextpos + 1].lower() if op == 'warning:': # check if this log line got truncated if ("warning: log line attempted" in self._line_str and "over max size" in self._line_str): self._datetime_nextpos = split_tokens.index('...') op = split_tokens[self._datetime_nextpos + 1] else: # unknown warning, bail out return if op in self.log_operations: self._operation = op self._namespace = split_tokens[self._datetime_nextpos + 2]
[ "def", "_extract_operation_and_namespace", "(", "self", ")", ":", "split_tokens", "=", "self", ".", "split_tokens", "if", "not", "self", ".", "_datetime_nextpos", ":", "# force evaluation of thread to get access to datetime_offset and", "# to protect from changes due to line trun...
Helper method to extract both operation and namespace from a logevent. It doesn't make sense to only extract one as they appear back to back in the token list.
[ "Helper", "method", "to", "extract", "both", "operation", "and", "namespace", "from", "a", "logevent", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L395-L427
244,073
rueckstiess/mtools
mtools/util/logevent.py
LogEvent._extract_counters
def _extract_counters(self): """Extract counters like nscanned and nreturned from the logevent.""" # extract counters (if present) counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields', 'planSummary', 'writeConflicts', 'keyUpdates'] # TODO: refactor mtools to use current counter names throughout # Transitionary hack: mapping of current names into prior equivalents counter_equiv = { 'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned', 'nDeleted': 'ndeleted', 'nInserted': 'ninserted', 'nMatched': 'nreturned', 'nModified': 'nupdated' } counters.extend(counter_equiv.keys()) split_tokens = self.split_tokens # trigger operation evaluation to get access to offset if self.operation: for t, token in enumerate(split_tokens[self.datetime_nextpos + 2:]): for counter in counters: if token.startswith('%s:' % counter): try: # Remap counter to standard name, if applicable counter = counter_equiv.get(counter, counter) vars(self)['_' + counter] = int((token.split(':') [-1]).replace(',', '')) except ValueError: # see if this is a pre-2.5.2 numYields with space # in between (e.g. "numYields: 2") # https://jira.mongodb.org/browse/SERVER-10101 if (counter == 'numYields' and token.startswith('numYields')): try: self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', '')) except ValueError: pass if (counter == 'planSummary' and token.startswith('planSummary')): try: self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2] if self._planSummary: if split_tokens[t + 1 + self.datetime_nextpos + 3] != '{': self._actualPlanSummary = self._planSummary else: self._actualPlanSummary = '%s %s' % ( self._planSummary, self._find_pattern('planSummary: %s' % self._planSummary, actual=True) ) except ValueError: pass # token not parsable, skip break
python
def _extract_counters(self): # extract counters (if present) counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields', 'planSummary', 'writeConflicts', 'keyUpdates'] # TODO: refactor mtools to use current counter names throughout # Transitionary hack: mapping of current names into prior equivalents counter_equiv = { 'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned', 'nDeleted': 'ndeleted', 'nInserted': 'ninserted', 'nMatched': 'nreturned', 'nModified': 'nupdated' } counters.extend(counter_equiv.keys()) split_tokens = self.split_tokens # trigger operation evaluation to get access to offset if self.operation: for t, token in enumerate(split_tokens[self.datetime_nextpos + 2:]): for counter in counters: if token.startswith('%s:' % counter): try: # Remap counter to standard name, if applicable counter = counter_equiv.get(counter, counter) vars(self)['_' + counter] = int((token.split(':') [-1]).replace(',', '')) except ValueError: # see if this is a pre-2.5.2 numYields with space # in between (e.g. "numYields: 2") # https://jira.mongodb.org/browse/SERVER-10101 if (counter == 'numYields' and token.startswith('numYields')): try: self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', '')) except ValueError: pass if (counter == 'planSummary' and token.startswith('planSummary')): try: self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2] if self._planSummary: if split_tokens[t + 1 + self.datetime_nextpos + 3] != '{': self._actualPlanSummary = self._planSummary else: self._actualPlanSummary = '%s %s' % ( self._planSummary, self._find_pattern('planSummary: %s' % self._planSummary, actual=True) ) except ValueError: pass # token not parsable, skip break
[ "def", "_extract_counters", "(", "self", ")", ":", "# extract counters (if present)", "counters", "=", "[", "'nscanned'", ",", "'nscannedObjects'", ",", "'ntoreturn'", ",", "'nreturned'", ",", "'ninserted'", ",", "'nupdated'", ",", "'ndeleted'", ",", "'r'", ",", "...
Extract counters like nscanned and nreturned from the logevent.
[ "Extract", "counters", "like", "nscanned", "and", "nreturned", "from", "the", "logevent", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L626-L685
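The counter tokens that _extract_counters scans for have a simple name:value shape. A standalone sketch of that token parsing, independent of the LogEvent class, with made-up token values:

# Sample tokens as they might appear after the namespace in a slow-query line.
tokens = ['keysExamined:2000', 'docsExamined:2000', 'nreturned:101', 'planSummary:', 'IXSCAN']

# Map current counter names onto the legacy names, mirroring the
# counter_equiv table in the record above.
counter_equiv = {'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned'}

counters = {}
for token in tokens:
    name, _, value = token.partition(':')
    if not value:
        continue  # not a simple name:value pair (e.g. planSummary)
    name = counter_equiv.get(name, name)
    try:
        counters[name] = int(value.replace(',', ''))
    except ValueError:
        pass  # value is not numeric, skip

print(counters)  # {'nscanned': 2000, 'nscannedObjects': 2000, 'nreturned': 101}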
244,074
rueckstiess/mtools
mtools/util/logevent.py
LogEvent.parse_all
def parse_all(self): """ Trigger extraction of all information. These values are usually evaluated lazily. """ tokens = self.split_tokens duration = self.duration datetime = self.datetime thread = self.thread operation = self.operation namespace = self.namespace pattern = self.pattern nscanned = self.nscanned nscannedObjects = self.nscannedObjects ntoreturn = self.ntoreturn nreturned = self.nreturned ninserted = self.ninserted ndeleted = self.ndeleted nupdated = self.nupdated numYields = self.numYields w = self.w r = self.r
python
def parse_all(self): tokens = self.split_tokens duration = self.duration datetime = self.datetime thread = self.thread operation = self.operation namespace = self.namespace pattern = self.pattern nscanned = self.nscanned nscannedObjects = self.nscannedObjects ntoreturn = self.ntoreturn nreturned = self.nreturned ninserted = self.ninserted ndeleted = self.ndeleted nupdated = self.nupdated numYields = self.numYields w = self.w r = self.r
[ "def", "parse_all", "(", "self", ")", ":", "tokens", "=", "self", ".", "split_tokens", "duration", "=", "self", ".", "duration", "datetime", "=", "self", ".", "datetime", "thread", "=", "self", ".", "thread", "operation", "=", "self", ".", "operation", "...
Trigger extraction of all information. These values are usually evaluated lazily.
[ "Trigger", "extraction", "of", "all", "information", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L721-L743
244,075
rueckstiess/mtools
mtools/util/logevent.py
LogEvent.to_dict
def to_dict(self, labels=None): """Convert LogEvent object to a dictionary.""" output = {} if labels is None: labels = ['line_str', 'split_tokens', 'datetime', 'operation', 'thread', 'namespace', 'nscanned', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'duration', 'r', 'w', 'numYields'] for label in labels: value = getattr(self, label, None) if value is not None: output[label] = value return output
python
def to_dict(self, labels=None): output = {} if labels is None: labels = ['line_str', 'split_tokens', 'datetime', 'operation', 'thread', 'namespace', 'nscanned', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'duration', 'r', 'w', 'numYields'] for label in labels: value = getattr(self, label, None) if value is not None: output[label] = value return output
[ "def", "to_dict", "(", "self", ",", "labels", "=", "None", ")", ":", "output", "=", "{", "}", "if", "labels", "is", "None", ":", "labels", "=", "[", "'line_str'", ",", "'split_tokens'", ",", "'datetime'", ",", "'operation'", ",", "'thread'", ",", "'nam...
Convert LogEvent object to a dictionary.
[ "Convert", "LogEvent", "object", "to", "a", "dictionary", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L823-L837
244,076
rueckstiess/mtools
mtools/util/logevent.py
LogEvent.to_json
def to_json(self, labels=None): """Convert LogEvent object to valid JSON.""" output = self.to_dict(labels) return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
python
def to_json(self, labels=None): output = self.to_dict(labels) return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
[ "def", "to_json", "(", "self", ",", "labels", "=", "None", ")", ":", "output", "=", "self", ".", "to_dict", "(", "labels", ")", "return", "json", ".", "dumps", "(", "output", ",", "cls", "=", "DateTimeEncoder", ",", "ensure_ascii", "=", "False", ")" ]
Convert LogEvent object to valid JSON.
[ "Convert", "LogEvent", "object", "to", "valid", "JSON", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L839-L842
244,077
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool.addFilter
def addFilter(self, filterclass): """Add a filter class to the parser.""" if filterclass not in self.filters: self.filters.append(filterclass)
python
def addFilter(self, filterclass): if filterclass not in self.filters: self.filters.append(filterclass)
[ "def", "addFilter", "(", "self", ",", "filterclass", ")", ":", "if", "filterclass", "not", "in", "self", ".", "filters", ":", "self", ".", "filters", ".", "append", "(", "filterclass", ")" ]
Add a filter class to the parser.
[ "Add", "a", "filter", "class", "to", "the", "parser", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L71-L74
244,078
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._outputLine
def _outputLine(self, logevent, length=None, human=False): """ Print the final line. Provides various options (length, human, datetime changes, ...). """ # adapt timezone output if necessary if self.args['timestamp_format'] != 'none': logevent._reformat_timestamp(self.args['timestamp_format'], force=True) if any(self.args['timezone']): if self.args['timestamp_format'] == 'none': self.args['timestamp_format'] = logevent.datetime_format logevent._reformat_timestamp(self.args['timestamp_format'], force=True) if self.args['json']: print(logevent.to_json()) return line = logevent.line_str if length: if len(line) > length: line = (line[:int(length / 2 - 2)] + '...' + line[int(-length / 2 + 1):]) if human: line = self._changeMs(line) line = self._formatNumbers(line) print(line)
python
def _outputLine(self, logevent, length=None, human=False): # adapt timezone output if necessary if self.args['timestamp_format'] != 'none': logevent._reformat_timestamp(self.args['timestamp_format'], force=True) if any(self.args['timezone']): if self.args['timestamp_format'] == 'none': self.args['timestamp_format'] = logevent.datetime_format logevent._reformat_timestamp(self.args['timestamp_format'], force=True) if self.args['json']: print(logevent.to_json()) return line = logevent.line_str if length: if len(line) > length: line = (line[:int(length / 2 - 2)] + '...' + line[int(-length / 2 + 1):]) if human: line = self._changeMs(line) line = self._formatNumbers(line) print(line)
[ "def", "_outputLine", "(", "self", ",", "logevent", ",", "length", "=", "None", ",", "human", "=", "False", ")", ":", "# adapt timezone output if necessary", "if", "self", ".", "args", "[", "'timestamp_format'", "]", "!=", "'none'", ":", "logevent", ".", "_r...
Print the final line. Provides various options (length, human, datetime changes, ...).
[ "Print", "the", "final", "line", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L83-L112
244,079
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._msToString
def _msToString(self, ms): """Change milliseconds to hours min sec ms format.""" hr, ms = divmod(ms, 3600000) mins, ms = divmod(ms, 60000) secs, mill = divmod(ms, 1000) return "%ihr %imin %isecs %ims" % (hr, mins, secs, mill)
python
def _msToString(self, ms): hr, ms = divmod(ms, 3600000) mins, ms = divmod(ms, 60000) secs, mill = divmod(ms, 1000) return "%ihr %imin %isecs %ims" % (hr, mins, secs, mill)
[ "def", "_msToString", "(", "self", ",", "ms", ")", ":", "hr", ",", "ms", "=", "divmod", "(", "ms", ",", "3600000", ")", "mins", ",", "ms", "=", "divmod", "(", "ms", ",", "60000", ")", "secs", ",", "mill", "=", "divmod", "(", "ms", ",", "1000", ...
Change milliseconds to hours min sec ms format.
[ "Change", "milliseconds", "to", "hours", "min", "sec", "ms", "format", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L114-L119
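The same divmod chain as _msToString, shown standalone so the unit conversion is easy to verify:

def ms_to_string(ms):
    # Mirrors MLogFilterTool._msToString: milliseconds -> "Xhr Ymin Zsecs Wms".
    hr, ms = divmod(ms, 3600000)
    mins, ms = divmod(ms, 60000)
    secs, mill = divmod(ms, 1000)
    return "%ihr %imin %isecs %ims" % (hr, mins, secs, mill)

print(ms_to_string(3723456))  # 1hr 2min 3secs 456ms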
244,080
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._changeMs
def _changeMs(self, line): """Change the ms part in the string if needed.""" # use the position of the last space instead try: last_space_pos = line.rindex(' ') except ValueError: return line else: end_str = line[last_space_pos:] new_string = line if end_str[-2:] == 'ms' and int(end_str[:-2]) >= 1000: # isolate the number of milliseconds ms = int(end_str[:-2]) # create the new string with the beginning part of the # log with the new ms part added in new_string = (line[:last_space_pos] + ' (' + self._msToString(ms) + ')' + line[last_space_pos:]) return new_string
python
def _changeMs(self, line): # use the position of the last space instead try: last_space_pos = line.rindex(' ') except ValueError: return line else: end_str = line[last_space_pos:] new_string = line if end_str[-2:] == 'ms' and int(end_str[:-2]) >= 1000: # isolate the number of milliseconds ms = int(end_str[:-2]) # create the new string with the beginning part of the # log with the new ms part added in new_string = (line[:last_space_pos] + ' (' + self._msToString(ms) + ')' + line[last_space_pos:]) return new_string
[ "def", "_changeMs", "(", "self", ",", "line", ")", ":", "# use the position of the last space instead", "try", ":", "last_space_pos", "=", "line", ".", "rindex", "(", "' '", ")", "except", "ValueError", ":", "return", "line", "else", ":", "end_str", "=", "line...
Change the ms part in the string if needed.
[ "Change", "the", "ms", "part", "in", "the", "string", "if", "needed", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L121-L139
244,081
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._formatNumbers
def _formatNumbers(self, line): """ Format the numbers so that there are commas inserted. For example: 1200300 becomes 1,200,300. """ # below thousands separator syntax only works for # python 2.7, skip for 2.6 if sys.version_info < (2, 7): return line last_index = 0 try: # find the index of the last } character last_index = (line.rindex('}') + 1) end = line[last_index:] except ValueError: return line else: # split the string on numbers to isolate them splitted = re.split("(\d+)", end) for index, val in enumerate(splitted): converted = 0 try: converted = int(val) # if it's not an int pass and don't change the string except ValueError: pass else: if converted > 1000: splitted[index] = format(converted, ",d") return line[:last_index] + ("").join(splitted)
python
def _formatNumbers(self, line): # below thousands separator syntax only works for # python 2.7, skip for 2.6 if sys.version_info < (2, 7): return line last_index = 0 try: # find the index of the last } character last_index = (line.rindex('}') + 1) end = line[last_index:] except ValueError: return line else: # split the string on numbers to isolate them splitted = re.split("(\d+)", end) for index, val in enumerate(splitted): converted = 0 try: converted = int(val) # if it's not an int pass and don't change the string except ValueError: pass else: if converted > 1000: splitted[index] = format(converted, ",d") return line[:last_index] + ("").join(splitted)
[ "def", "_formatNumbers", "(", "self", ",", "line", ")", ":", "# below thousands separator syntax only works for", "# python 2.7, skip for 2.6", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "return", "line", "last_index", "=", "0", "try", ...
Format the numbers so that there are commas inserted. For example: 1200300 becomes 1,200,300.
[ "Format", "the", "numbers", "so", "that", "there", "are", "commas", "inserted", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L141-L172
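The thousands-separator formatting used by _formatNumbers relies on Python's ",d" format spec (available since Python 2.7, hence the version guard in the code above):

# format with the ",d" spec inserts thousands separators into integers,
# which is what _formatNumbers applies to numbers found after the last '}'.
print(format(1200300, ",d"))   # 1,200,300
print(format(999, ",d"))       # 999 (no separator needed)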
244,082
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._datetime_key_for_merge
def _datetime_key_for_merge(self, logevent): """Helper method for ordering log lines correctly during merge.""" if not logevent: # if logfile end is reached, return max datetime to never # pick this line return datetime(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzutc()) # if no datetime present (line doesn't have one) return mindate # to pick this line immediately return logevent.datetime or datetime(MINYEAR, 1, 1, 0, 0, 0, 0, tzutc())
python
def _datetime_key_for_merge(self, logevent): if not logevent: # if logfile end is reached, return max datetime to never # pick this line return datetime(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzutc()) # if no datetime present (line doesn't have one) return mindate # to pick this line immediately return logevent.datetime or datetime(MINYEAR, 1, 1, 0, 0, 0, 0, tzutc())
[ "def", "_datetime_key_for_merge", "(", "self", ",", "logevent", ")", ":", "if", "not", "logevent", ":", "# if logfile end is reached, return max datetime to never", "# pick this line", "return", "datetime", "(", "MAXYEAR", ",", "12", ",", "31", ",", "23", ",", "59",...
Helper method for ordering log lines correctly during merge.
[ "Helper", "method", "for", "ordering", "log", "lines", "correctly", "during", "merge", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L174-L184
244,083
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool._merge_logfiles
def _merge_logfiles(self): """Helper method to merge several files together by datetime.""" # open files, read first lines, extract first dates lines = [next(iter(logfile), None) for logfile in self.args['logfile']] # adjust lines by timezone for i in range(len(lines)): if lines[i] and lines[i].datetime: lines[i]._datetime = (lines[i].datetime + timedelta(hours=self.args['timezone'] [i])) while any(lines): min_line = min(lines, key=self._datetime_key_for_merge) min_idx = lines.index(min_line) if self.args['markers'][min_idx]: min_line.merge_marker_str = self.args['markers'][min_idx] yield min_line # update lines array with a new line from the min_idx'th logfile lines[min_idx] = next(iter(self.args['logfile'][min_idx]), None) if lines[min_idx] and lines[min_idx].datetime: lines[min_idx]._datetime = ( lines[min_idx].datetime + timedelta(hours=self.args['timezone'][min_idx]))
python
def _merge_logfiles(self): # open files, read first lines, extract first dates lines = [next(iter(logfile), None) for logfile in self.args['logfile']] # adjust lines by timezone for i in range(len(lines)): if lines[i] and lines[i].datetime: lines[i]._datetime = (lines[i].datetime + timedelta(hours=self.args['timezone'] [i])) while any(lines): min_line = min(lines, key=self._datetime_key_for_merge) min_idx = lines.index(min_line) if self.args['markers'][min_idx]: min_line.merge_marker_str = self.args['markers'][min_idx] yield min_line # update lines array with a new line from the min_idx'th logfile lines[min_idx] = next(iter(self.args['logfile'][min_idx]), None) if lines[min_idx] and lines[min_idx].datetime: lines[min_idx]._datetime = ( lines[min_idx].datetime + timedelta(hours=self.args['timezone'][min_idx]))
[ "def", "_merge_logfiles", "(", "self", ")", ":", "# open files, read first lines, extract first dates", "lines", "=", "[", "next", "(", "iter", "(", "logfile", ")", ",", "None", ")", "for", "logfile", "in", "self", ".", "args", "[", "'logfile'", "]", "]", "#...
Helper method to merge several files together by datetime.
[ "Helper", "method", "to", "merge", "several", "files", "together", "by", "datetime", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L186-L212
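The loop in _merge_logfiles repeatedly picks the earliest-dated head among the open files. For inputs that are already sorted by time, the same interleaving can be sketched with heapq.merge; this simplification assumes naive datetimes and an event object exposing .datetime, and it ignores the timezone shifting and merge markers handled above.

    import heapq
    from datetime import datetime, MINYEAR

    def merge_sorted_logs(*logfiles):
        # each logfile must yield events in ascending datetime order;
        # events without a datetime sort first, mirroring the key helper above
        key = lambda ev: ev.datetime or datetime(MINYEAR, 1, 1)
        return heapq.merge(*logfiles, key=key)

    # for event in merge_sorted_logs(primary_log, secondary_log):
    #     print(event)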
244,084
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
MLogFilterTool.logfile_generator
def logfile_generator(self): """Yield each line of the file, or the next line if several files.""" if not self.args['exclude']: # ask all filters for a start_limit and fast-forward to the maximum start_limits = [f.start_limit for f in self.filters if hasattr(f, 'start_limit')] if start_limits: for logfile in self.args['logfile']: logfile.fast_forward(max(start_limits)) if len(self.args['logfile']) > 1: # merge log files by time for logevent in self._merge_logfiles(): yield logevent else: # only one file for logevent in self.args['logfile'][0]: if self.args['timezone'][0] != 0 and logevent.datetime: logevent._datetime = (logevent.datetime + timedelta(hours=self .args['timezone'][0])) yield logevent
python
def logfile_generator(self): if not self.args['exclude']: # ask all filters for a start_limit and fast-forward to the maximum start_limits = [f.start_limit for f in self.filters if hasattr(f, 'start_limit')] if start_limits: for logfile in self.args['logfile']: logfile.fast_forward(max(start_limits)) if len(self.args['logfile']) > 1: # merge log files by time for logevent in self._merge_logfiles(): yield logevent else: # only one file for logevent in self.args['logfile'][0]: if self.args['timezone'][0] != 0 and logevent.datetime: logevent._datetime = (logevent.datetime + timedelta(hours=self .args['timezone'][0])) yield logevent
[ "def", "logfile_generator", "(", "self", ")", ":", "if", "not", "self", ".", "args", "[", "'exclude'", "]", ":", "# ask all filters for a start_limit and fast-forward to the maximum", "start_limits", "=", "[", "f", ".", "start_limit", "for", "f", "in", "self", "."...
Yield each line of the file, or the next line if several files.
[ "Yield", "each", "line", "of", "the", "file", "or", "the", "next", "line", "if", "several", "files", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L214-L236
244,085
rueckstiess/mtools
mtools/mlogfilter/filters/mask_filter.py
MaskFilter.setup
def setup(self): """ Create mask list. Consists of all tuples between which this filter accepts lines. """ # get start and end of the mask and set a start_limit if not self.mask_source.start: raise SystemExit("Can't parse format of %s. Is this a log file or " "system.profile collection?" % self.mlogfilter.args['mask']) self.mask_half_td = timedelta(seconds=self.mlogfilter.args ['mask_size'] / 2) # load filter mask file logevent_list = list(self.mask_source) # define start and end of total mask self.mask_start = self.mask_source.start - self.mask_half_td self.mask_end = self.mask_source.end + self.mask_half_td # consider --mask-center if self.mlogfilter.args['mask_center'] in ['start', 'both']: if logevent_list[0].duration: self.mask_start -= timedelta(milliseconds=logevent_list[0] .duration) if self.mlogfilter.args['mask_center'] == 'start': if logevent_list[-1].duration: self.mask_end -= timedelta(milliseconds=logevent_list[-1] .duration) self.start_limit = self.mask_start # different center points if 'mask_center' in self.mlogfilter.args: if self.mlogfilter.args['mask_center'] in ['start', 'both']: starts = ([(le.datetime - timedelta(milliseconds=le.duration)) if le.duration is not None else le.datetime for le in logevent_list if le.datetime]) if self.mlogfilter.args['mask_center'] in ['end', 'both']: ends = [le.datetime for le in logevent_list if le.datetime] if self.mlogfilter.args['mask_center'] == 'start': event_list = sorted(starts) elif self.mlogfilter.args['mask_center'] == 'end': event_list = sorted(ends) elif self.mlogfilter.args['mask_center'] == 'both': event_list = sorted(zip(starts, ends)) mask_list = [] if len(event_list) == 0: return start_point = end_point = None for e in event_list: if start_point is None: start_point, end_point = self._pad_event(e) continue next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td if next_start <= end_point: end_point = ((e[1] if type(e) == tuple else e) + self.mask_half_td) else: mask_list.append((start_point, end_point)) start_point, end_point = self._pad_event(e) if start_point: mask_list.append((start_point, end_point)) self.mask_list = mask_list
python
def setup(self): # get start and end of the mask and set a start_limit if not self.mask_source.start: raise SystemExit("Can't parse format of %s. Is this a log file or " "system.profile collection?" % self.mlogfilter.args['mask']) self.mask_half_td = timedelta(seconds=self.mlogfilter.args ['mask_size'] / 2) # load filter mask file logevent_list = list(self.mask_source) # define start and end of total mask self.mask_start = self.mask_source.start - self.mask_half_td self.mask_end = self.mask_source.end + self.mask_half_td # consider --mask-center if self.mlogfilter.args['mask_center'] in ['start', 'both']: if logevent_list[0].duration: self.mask_start -= timedelta(milliseconds=logevent_list[0] .duration) if self.mlogfilter.args['mask_center'] == 'start': if logevent_list[-1].duration: self.mask_end -= timedelta(milliseconds=logevent_list[-1] .duration) self.start_limit = self.mask_start # different center points if 'mask_center' in self.mlogfilter.args: if self.mlogfilter.args['mask_center'] in ['start', 'both']: starts = ([(le.datetime - timedelta(milliseconds=le.duration)) if le.duration is not None else le.datetime for le in logevent_list if le.datetime]) if self.mlogfilter.args['mask_center'] in ['end', 'both']: ends = [le.datetime for le in logevent_list if le.datetime] if self.mlogfilter.args['mask_center'] == 'start': event_list = sorted(starts) elif self.mlogfilter.args['mask_center'] == 'end': event_list = sorted(ends) elif self.mlogfilter.args['mask_center'] == 'both': event_list = sorted(zip(starts, ends)) mask_list = [] if len(event_list) == 0: return start_point = end_point = None for e in event_list: if start_point is None: start_point, end_point = self._pad_event(e) continue next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td if next_start <= end_point: end_point = ((e[1] if type(e) == tuple else e) + self.mask_half_td) else: mask_list.append((start_point, end_point)) start_point, end_point = self._pad_event(e) if start_point: mask_list.append((start_point, end_point)) self.mask_list = mask_list
[ "def", "setup", "(", "self", ")", ":", "# get start and end of the mask and set a start_limit", "if", "not", "self", ".", "mask_source", ".", "start", ":", "raise", "SystemExit", "(", "\"Can't parse format of %s. Is this a log file or \"", "\"system.profile collection?\"", "%...
Create mask list. Consists of all tuples between which this filter accepts lines.
[ "Create", "mask", "list", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/filters/mask_filter.py#L60-L135
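The second half of MaskFilter.setup is interval coalescing: pad each event time by half the mask size and merge windows that overlap or touch. A minimal sketch with plain datetimes, where half_td stands in for mask_half_td and the separate start/end padding for --mask-center is dropped.

    from datetime import datetime, timedelta

    def build_mask(event_times, half_td):
        mask = []
        start = end = None
        for t in sorted(event_times):
            if start is None:
                start, end = t - half_td, t + half_td
            elif t - half_td <= end:
                # overlaps the current window, so extend it
                end = t + half_td
            else:
                mask.append((start, end))
                start, end = t - half_td, t + half_td
        if start is not None:
            mask.append((start, end))
        return mask

    events = [datetime(2019, 1, 1, 12, 0, s) for s in (0, 10, 60)]
    print(build_mask(events, timedelta(seconds=15)))
    # two windows: 11:59:45-12:00:25 (first two events merge) and 12:00:45-12:01:15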
244,086
rueckstiess/mtools
mtools/util/parse_sourcecode.py
source_files
def source_files(mongodb_path): """Find source files.""" for root, dirs, files in os.walk(mongodb_path): for filename in files: # skip files in dbtests folder if 'dbtests' in root: continue if filename.endswith(('.cpp', '.c', '.h')): yield os.path.join(root, filename)
python
def source_files(mongodb_path): for root, dirs, files in os.walk(mongodb_path): for filename in files: # skip files in dbtests folder if 'dbtests' in root: continue if filename.endswith(('.cpp', '.c', '.h')): yield os.path.join(root, filename)
[ "def", "source_files", "(", "mongodb_path", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "mongodb_path", ")", ":", "for", "filename", "in", "files", ":", "# skip files in dbtests folder", "if", "'dbtests'", "in", "root", ...
Find source files.
[ "Find", "source", "files", "." ]
a6a22910c3569c0c8a3908660ca218a4557e4249
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/parse_sourcecode.py#L23-L31
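A tiny usage sketch for the source_files generator above; the checkout path is hypothetical and the generator itself is assumed to be in scope.

    import os
    from collections import Counter

    # tally the walked source files per extension
    counts = Counter(os.path.splitext(p)[1]
                     for p in source_files('/path/to/mongodb'))
    print(counts.most_common())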
244,087
ansible-community/ara
ara/views/result.py
index
def index(): """ This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with result.show_result directly and are instead dynamically generated through javascript for performance purposes. """ if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] results = (models.TaskResult.query .join(models.Task) .filter(models.Task.playbook_id.in_(override))) else: results = models.TaskResult.query.all() return render_template('task_result_index.html', results=results)
python
def index(): if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] results = (models.TaskResult.query .join(models.Task) .filter(models.Task.playbook_id.in_(override))) else: results = models.TaskResult.query.all() return render_template('task_result_index.html', results=results)
[ "def", "index", "(", ")", ":", "if", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "is", "not", "None", ":", "override", "=", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "results", "=", "(", "models", ".", "TaskRes...
This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with result.show_result directly and are instead dynamically generated through javascript for performance purposes.
[ "This", "is", "not", "served", "anywhere", "in", "the", "web", "application", ".", "It", "is", "used", "explicitly", "in", "the", "context", "of", "generating", "static", "files", "since", "flask", "-", "frozen", "requires", "url_for", "s", "to", "crawl", ...
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/result.py#L28-L44
244,088
ansible-community/ara
ara/models.py
content_sha1
def content_sha1(context): """ Used by the FileContent model to automatically compute the sha1 hash of content before storing it to the database. """ try: content = context.current_parameters['content'] except AttributeError: content = context return hashlib.sha1(encodeutils.to_utf8(content)).hexdigest()
python
def content_sha1(context): try: content = context.current_parameters['content'] except AttributeError: content = context return hashlib.sha1(encodeutils.to_utf8(content)).hexdigest()
[ "def", "content_sha1", "(", "context", ")", ":", "try", ":", "content", "=", "context", ".", "current_parameters", "[", "'content'", "]", "except", "AttributeError", ":", "content", "=", "context", "return", "hashlib", ".", "sha1", "(", "encodeutils", ".", "...
Used by the FileContent model to automatically compute the sha1 hash of content before storing it to the database.
[ "Used", "by", "the", "FileContent", "model", "to", "automatically", "compute", "the", "sha1", "hash", "of", "content", "before", "storing", "it", "to", "the", "database", "." ]
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/models.py#L53-L62
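content_sha1 accepts either an SQLAlchemy execution context (the normal insert path) or a plain string (the AttributeError fallback). A quick sanity check of the plain-string path, approximating oslo's encodeutils.to_utf8 with str.encode:

    import hashlib

    content = "ara_record content"   # hypothetical file content
    print(hashlib.sha1(content.encode('utf-8')).hexdigest())
    # the digest the FileContent model would store for this content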
244,089
ansible-community/ara
ara/views/about.py
main
def main(): """ Returns the about page """ files = models.File.query hosts = models.Host.query facts = models.HostFacts.query playbooks = models.Playbook.query records = models.Data.query tasks = models.Task.query results = models.TaskResult.query if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = files.filter(models.File.playbook_id.in_(override)) facts = (facts .join(models.Host) .filter(models.Host.playbook_id.in_(override))) hosts = hosts.filter(models.Host.playbook_id.in_(override)) playbooks = playbooks.filter(models.Playbook.id.in_(override)) records = records.filter(models.Data.playbook_id.in_(override)) tasks = tasks.filter(models.Task.playbook_id.in_(override)) results = (results .join(models.Task) .filter(models.Task.playbook_id.in_(override))) return render_template( 'about.html', active='about', files=fast_count(files), hosts=fast_count(hosts), facts=fast_count(facts), playbooks=fast_count(playbooks), records=fast_count(records), tasks=fast_count(tasks), results=fast_count(results) )
python
def main(): files = models.File.query hosts = models.Host.query facts = models.HostFacts.query playbooks = models.Playbook.query records = models.Data.query tasks = models.Task.query results = models.TaskResult.query if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = files.filter(models.File.playbook_id.in_(override)) facts = (facts .join(models.Host) .filter(models.Host.playbook_id.in_(override))) hosts = hosts.filter(models.Host.playbook_id.in_(override)) playbooks = playbooks.filter(models.Playbook.id.in_(override)) records = records.filter(models.Data.playbook_id.in_(override)) tasks = tasks.filter(models.Task.playbook_id.in_(override)) results = (results .join(models.Task) .filter(models.Task.playbook_id.in_(override))) return render_template( 'about.html', active='about', files=fast_count(files), hosts=fast_count(hosts), facts=fast_count(facts), playbooks=fast_count(playbooks), records=fast_count(records), tasks=fast_count(tasks), results=fast_count(results) )
[ "def", "main", "(", ")", ":", "files", "=", "models", ".", "File", ".", "query", "hosts", "=", "models", ".", "Host", ".", "query", "facts", "=", "models", ".", "HostFacts", ".", "query", "playbooks", "=", "models", ".", "Playbook", ".", "query", "re...
Returns the about page
[ "Returns", "the", "about", "page" ]
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/about.py#L29-L63
244,090
ansible-community/ara
ara/views/host.py
index
def index(): """ This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with host.show_host directly and are instead dynamically generated through javascript for performance purposes. """ if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] hosts = (models.Host.query .filter(models.Host.playbook_id.in_(override))) else: hosts = models.Host.query.all() return render_template('host_index.html', hosts=hosts)
python
def index(): if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] hosts = (models.Host.query .filter(models.Host.playbook_id.in_(override))) else: hosts = models.Host.query.all() return render_template('host_index.html', hosts=hosts)
[ "def", "index", "(", ")", ":", "if", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "is", "not", "None", ":", "override", "=", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "hosts", "=", "(", "models", ".", "Host", ...
This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with host.show_host directly and are instead dynamically generated through javascript for performance purposes.
[ "This", "is", "not", "served", "anywhere", "in", "the", "web", "application", ".", "It", "is", "used", "explicitly", "in", "the", "context", "of", "generating", "static", "files", "since", "flask", "-", "frozen", "requires", "url_for", "s", "to", "crawl", ...
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/host.py#L31-L46
244,091
ansible-community/ara
ara/config/webapp.py
WebAppConfig.config
def config(self): """ Returns a dictionary for the loaded configuration """ return { key: self.__dict__[key] for key in dir(self) if key.isupper() }
python
def config(self): return { key: self.__dict__[key] for key in dir(self) if key.isupper() }
[ "def", "config", "(", "self", ")", ":", "return", "{", "key", ":", "self", ".", "__dict__", "[", "key", "]", "for", "key", "in", "dir", "(", "self", ")", "if", "key", ".", "isupper", "(", ")", "}" ]
Returns a dictionary for the loaded configuration
[ "Returns", "a", "dictionary", "for", "the", "loaded", "configuration" ]
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/config/webapp.py#L58-L64
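The pattern in WebAppConfig.config (collect every UPPERCASE attribute into a dict) is easy to try in isolation; the setting names below are hypothetical.

    class DummyConfig(object):
        def __init__(self):
            self.ARA_DATABASE = 'sqlite://'
            self.ARA_LOG_LEVEL = 'INFO'
            self.not_exported = True

        @property
        def config(self):
            # same comprehension as above: only upper-case attributes survive
            return {key: self.__dict__[key] for key in dir(self) if key.isupper()}

    print(DummyConfig().config)
    # {'ARA_DATABASE': 'sqlite://', 'ARA_LOG_LEVEL': 'INFO'}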
244,092
ansible-community/ara
ara/views/file.py
index
def index(): """ This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes. """ if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = (models.File.query .filter(models.File.playbook_id.in_(override))) else: files = models.File.query.all() return render_template('file_index.html', files=files)
python
def index(): if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = (models.File.query .filter(models.File.playbook_id.in_(override))) else: files = models.File.query.all() return render_template('file_index.html', files=files)
[ "def", "index", "(", ")", ":", "if", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "is", "not", "None", ":", "override", "=", "current_app", ".", "config", "[", "'ARA_PLAYBOOK_OVERRIDE'", "]", "files", "=", "(", "models", ".", "File", ...
This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes.
[ "This", "is", "not", "served", "anywhere", "in", "the", "web", "application", ".", "It", "is", "used", "explicitly", "in", "the", "context", "of", "generating", "static", "files", "since", "flask", "-", "frozen", "requires", "url_for", "s", "to", "crawl", ...
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/file.py#L28-L43
244,093
ansible-community/ara
ara/views/file.py
show_file
def show_file(file_): """ Returns details of a file """ file_ = (models.File.query.get(file_)) if file_ is None: abort(404) return render_template('file.html', file_=file_)
python
def show_file(file_): file_ = (models.File.query.get(file_)) if file_ is None: abort(404) return render_template('file.html', file_=file_)
[ "def", "show_file", "(", "file_", ")", ":", "file_", "=", "(", "models", ".", "File", ".", "query", ".", "get", "(", "file_", ")", ")", "if", "file_", "is", "None", ":", "abort", "(", "404", ")", "return", "render_template", "(", "'file.html'", ",", ...
Returns details of a file
[ "Returns", "details", "of", "a", "file" ]
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/file.py#L47-L55
244,094
ansible-community/ara
ara/webapp.py
configure_db
def configure_db(app): """ 0.10 is the first version of ARA that ships with a stable database schema. We can identify a database that originates from before this by checking if there is an alembic revision available. If there is no alembic revision available, assume we are running the first revision which contains the latest state of the database prior to this. """ models.db.init_app(app) log = logging.getLogger('ara.webapp.configure_db') log.debug('Setting up database...') if app.config.get('ARA_AUTOCREATE_DATABASE'): with app.app_context(): migrations = app.config['DB_MIGRATIONS'] flask_migrate.Migrate(app, models.db, directory=migrations) config = app.extensions['migrate'].migrate.get_config(migrations) # Verify if the database tables have been created at all inspector = Inspector.from_engine(models.db.engine) if len(inspector.get_table_names()) == 0: log.info('Initializing new DB from scratch') flask_migrate.upgrade(directory=migrations) # Get current alembic head revision script = ScriptDirectory.from_config(config) head = script.get_current_head() # Get current revision, if available connection = models.db.engine.connect() context = MigrationContext.configure(connection) current = context.get_current_revision() if not current: log.info('Unstable DB schema, stamping original revision') flask_migrate.stamp(directory=migrations, revision='da9459a1f71c') if head != current: log.info('DB schema out of date, upgrading') flask_migrate.upgrade(directory=migrations)
python
def configure_db(app): models.db.init_app(app) log = logging.getLogger('ara.webapp.configure_db') log.debug('Setting up database...') if app.config.get('ARA_AUTOCREATE_DATABASE'): with app.app_context(): migrations = app.config['DB_MIGRATIONS'] flask_migrate.Migrate(app, models.db, directory=migrations) config = app.extensions['migrate'].migrate.get_config(migrations) # Verify if the database tables have been created at all inspector = Inspector.from_engine(models.db.engine) if len(inspector.get_table_names()) == 0: log.info('Initializing new DB from scratch') flask_migrate.upgrade(directory=migrations) # Get current alembic head revision script = ScriptDirectory.from_config(config) head = script.get_current_head() # Get current revision, if available connection = models.db.engine.connect() context = MigrationContext.configure(connection) current = context.get_current_revision() if not current: log.info('Unstable DB schema, stamping original revision') flask_migrate.stamp(directory=migrations, revision='da9459a1f71c') if head != current: log.info('DB schema out of date, upgrading') flask_migrate.upgrade(directory=migrations)
[ "def", "configure_db", "(", "app", ")", ":", "models", ".", "db", ".", "init_app", "(", "app", ")", "log", "=", "logging", ".", "getLogger", "(", "'ara.webapp.configure_db'", ")", "log", ".", "debug", "(", "'Setting up database...'", ")", "if", "app", ".",...
0.10 is the first version of ARA that ships with a stable database schema. We can identify a database that originates from before this by checking if there is an alembic revision available. If there is no alembic revision available, assume we are running the first revision which contains the latest state of the database prior to this.
[ "0", ".", "10", "is", "the", "first", "version", "of", "ARA", "that", "ships", "with", "a", "stable", "database", "schema", ".", "We", "can", "identify", "a", "database", "that", "originates", "from", "before", "this", "by", "checking", "if", "there", "i...
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/webapp.py#L248-L288
244,095
ansible-community/ara
ara/webapp.py
configure_cache
def configure_cache(app): """ Sets up an attribute to cache data in the app context """ log = logging.getLogger('ara.webapp.configure_cache') log.debug('Configuring cache') if not getattr(app, '_cache', None): app._cache = {}
python
def configure_cache(app): log = logging.getLogger('ara.webapp.configure_cache') log.debug('Configuring cache') if not getattr(app, '_cache', None): app._cache = {}
[ "def", "configure_cache", "(", "app", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'ara.webapp.configure_cache'", ")", "log", ".", "debug", "(", "'Configuring cache'", ")", "if", "not", "getattr", "(", "app", ",", "'_cache'", ",", "None", ")", ...
Sets up an attribute to cache data in the app context
[ "Sets", "up", "an", "attribute", "to", "cache", "data", "in", "the", "app", "context" ]
15e2d0133c23b6d07438a553bb8149fadff21547
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/webapp.py#L318-L324
244,096
orbingol/NURBS-Python
geomdl/convert.py
bspline_to_nurbs
def bspline_to_nurbs(obj): """ Converts non-rational parametric shapes to rational ones. :param obj: B-Spline shape :type obj: BSpline.Curve, BSpline.Surface or BSpline.Volume :return: NURBS shape :rtype: NURBS.Curve, NURBS.Surface or NURBS.Volume :raises: TypeError """ # B-Spline -> NURBS if isinstance(obj, BSpline.Curve): return _convert.convert_curve(obj, NURBS) elif isinstance(obj, BSpline.Surface): return _convert.convert_surface(obj, NURBS) elif isinstance(obj, BSpline.Volume): return _convert.convert_volume(obj, NURBS) else: raise TypeError("Input must be an instance of B-Spline curve, surface or volume")
python
def bspline_to_nurbs(obj): # B-Spline -> NURBS if isinstance(obj, BSpline.Curve): return _convert.convert_curve(obj, NURBS) elif isinstance(obj, BSpline.Surface): return _convert.convert_surface(obj, NURBS) elif isinstance(obj, BSpline.Volume): return _convert.convert_volume(obj, NURBS) else: raise TypeError("Input must be an instance of B-Spline curve, surface or volume")
[ "def", "bspline_to_nurbs", "(", "obj", ")", ":", "# B-Spline -> NURBS", "if", "isinstance", "(", "obj", ",", "BSpline", ".", "Curve", ")", ":", "return", "_convert", ".", "convert_curve", "(", "obj", ",", "NURBS", ")", "elif", "isinstance", "(", "obj", ","...
Converts non-rational parametric shapes to rational ones. :param obj: B-Spline shape :type obj: BSpline.Curve, BSpline.Surface or BSpline.Volume :return: NURBS shape :rtype: NURBS.Curve, NURBS.Surface or NURBS.Volume :raises: TypeError
[ "Converts", "non", "-", "rational", "parametric", "shapes", "to", "rational", "ones", "." ]
b1c6a8b51cf143ff58761438e93ba6baef470627
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/convert.py#L14-L31
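A hedged usage sketch for bspline_to_nurbs, assuming the usual geomdl curve setup (degree, ctrlpts, knotvector); after conversion the shape should report itself as rational with unit weights.

    from geomdl import BSpline, convert

    crv = BSpline.Curve()
    crv.degree = 2
    crv.ctrlpts = [[0, 0], [1, 1], [2, 0]]
    crv.knotvector = [0, 0, 0, 1, 1, 1]

    nurbs_crv = convert.bspline_to_nurbs(crv)
    print(nurbs_crv.rational)   # True
    print(nurbs_crv.weights)    # [1.0, 1.0, 1.0]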
244,097
orbingol/NURBS-Python
geomdl/convert.py
nurbs_to_bspline
def nurbs_to_bspline(obj, **kwargs): """ Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError """ if not obj.rational: raise TypeError("The input must be a rational shape") # Get keyword arguments tol = kwargs.get('tol', 10e-8) # Test for non-rational component extraction for w in obj.weights: if abs(w - 1.0) > tol: print("Cannot extract non-rational components") return obj # NURBS -> B-Spline if isinstance(obj, NURBS.Curve): return _convert.convert_curve(obj, BSpline) elif isinstance(obj, NURBS.Surface): return _convert.convert_surface(obj, BSpline) elif isinstance(obj, NURBS.Volume): return _convert.convert_volume(obj, BSpline) else: raise TypeError("Input must be an instance of NURBS curve, surface or volume")
python
def nurbs_to_bspline(obj, **kwargs): if not obj.rational: raise TypeError("The input must be a rational shape") # Get keyword arguments tol = kwargs.get('tol', 10e-8) # Test for non-rational component extraction for w in obj.weights: if abs(w - 1.0) > tol: print("Cannot extract non-rational components") return obj # NURBS -> B-Spline if isinstance(obj, NURBS.Curve): return _convert.convert_curve(obj, BSpline) elif isinstance(obj, NURBS.Surface): return _convert.convert_surface(obj, BSpline) elif isinstance(obj, NURBS.Volume): return _convert.convert_volume(obj, BSpline) else: raise TypeError("Input must be an instance of NURBS curve, surface or volume")
[ "def", "nurbs_to_bspline", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "not", "obj", ".", "rational", ":", "raise", "TypeError", "(", "\"The input must be a rational shape\"", ")", "# Get keyword arguments", "tol", "=", "kwargs", ".", "get", "(", "'to...
Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError
[ "Extracts", "the", "non", "-", "rational", "components", "from", "rational", "parametric", "shapes", "if", "possible", "." ]
b1c6a8b51cf143ff58761438e93ba6baef470627
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/convert.py#L34-L65
244,098
orbingol/NURBS-Python
geomdl/_linalg.py
doolittle
def doolittle(matrix_a): """ Doolittle's Method for LU-factorization. :param matrix_a: Input matrix (must be a square matrix) :type matrix_a: list, tuple :return: a tuple containing matrices (L,U) :rtype: tuple """ # Initialize L and U matrices matrix_u = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] matrix_l = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] # Doolittle Method for i in range(0, len(matrix_a)): for k in range(i, len(matrix_a)): # Upper triangular (U) matrix matrix_u[i][k] = float(matrix_a[i][k] - sum([matrix_l[i][j] * matrix_u[j][k] for j in range(0, i)])) # Lower triangular (L) matrix if i == k: matrix_l[i][i] = 1.0 else: matrix_l[k][i] = float(matrix_a[k][i] - sum([matrix_l[k][j] * matrix_u[j][i] for j in range(0, i)])) # Handle zero division error try: matrix_l[k][i] /= float(matrix_u[i][i]) except ZeroDivisionError: matrix_l[k][i] = 0.0 return matrix_l, matrix_u
python
def doolittle(matrix_a): # Initialize L and U matrices matrix_u = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] matrix_l = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] # Doolittle Method for i in range(0, len(matrix_a)): for k in range(i, len(matrix_a)): # Upper triangular (U) matrix matrix_u[i][k] = float(matrix_a[i][k] - sum([matrix_l[i][j] * matrix_u[j][k] for j in range(0, i)])) # Lower triangular (L) matrix if i == k: matrix_l[i][i] = 1.0 else: matrix_l[k][i] = float(matrix_a[k][i] - sum([matrix_l[k][j] * matrix_u[j][i] for j in range(0, i)])) # Handle zero division error try: matrix_l[k][i] /= float(matrix_u[i][i]) except ZeroDivisionError: matrix_l[k][i] = 0.0 return matrix_l, matrix_u
[ "def", "doolittle", "(", "matrix_a", ")", ":", "# Initialize L and U matrices", "matrix_u", "=", "[", "[", "0.0", "for", "_", "in", "range", "(", "len", "(", "matrix_a", ")", ")", "]", "for", "_", "in", "range", "(", "len", "(", "matrix_a", ")", ")", ...
Doolittle's Method for LU-factorization. :param matrix_a: Input matrix (must be a square matrix) :type matrix_a: list, tuple :return: a tuple containing matrices (L,U) :rtype: tuple
[ "Doolittle", "s", "Method", "for", "LU", "-", "factorization", "." ]
b1c6a8b51cf143ff58761438e93ba6baef470627
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_linalg.py#L14-L42
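A worked check of the doolittle factorization above on a 2x2 matrix, multiplying L and U back together; it assumes the function as defined in this row is in scope.

    A = [[4.0, 3.0],
         [6.0, 3.0]]
    L, U = doolittle(A)
    print(L)   # [[1.0, 0.0], [1.5, 1.0]]
    print(U)   # [[4.0, 3.0], [0.0, -1.5]]

    # verify the factorization: L * U should reproduce A
    LU = [[sum(L[i][k] * U[k][j] for k in range(2)) for j in range(2)]
          for i in range(2)]
    print(LU)  # [[4.0, 3.0], [6.0, 3.0]]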
244,099
orbingol/NURBS-Python
setup.py
read_files
def read_files(project, ext): """ Reads files inside the input project directory. """ project_path = os.path.join(os.path.dirname(__file__), project) file_list = os.listdir(project_path) flist = [] flist_path = [] for f in file_list: f_path = os.path.join(project_path, f) if os.path.isfile(f_path) and f.endswith(ext) and f != "__init__.py": flist.append(f.split('.')[0]) flist_path.append(f_path) return flist, flist_path
python
def read_files(project, ext): project_path = os.path.join(os.path.dirname(__file__), project) file_list = os.listdir(project_path) flist = [] flist_path = [] for f in file_list: f_path = os.path.join(project_path, f) if os.path.isfile(f_path) and f.endswith(ext) and f != "__init__.py": flist.append(f.split('.')[0]) flist_path.append(f_path) return flist, flist_path
[ "def", "read_files", "(", "project", ",", "ext", ")", ":", "project_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "project", ")", "file_list", "=", "os", ".", "listdir", "(", "project_p...
Reads files inside the input project directory.
[ "Reads", "files", "inside", "the", "input", "project", "directory", "." ]
b1c6a8b51cf143ff58761438e93ba6baef470627
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/setup.py#L141-L152