code
stringlengths
13
6.09M
order_type
stringclasses
2 values
original_example
dict
step_ids
listlengths
1
5
<|reserved_special_token_0|> def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None): """Add transactions to a collation (refer to ethereum.common.add_transactions) """ if not txqueue: return pre_txs = len(collation.transactions) log.info('Adding transactions, %d in txqueue, %d dunkles' % (len( txqueue.txs), pre_txs)) while 1: tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit - shard_state.gas_used, min_gasprice=min_gasprice) if tx is None: break try: apply_shard_transaction(mainchain_state, shard_state, shard_id, tx) collation.transactions.append(tx) except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e: log.info(str(e)) pass log.info('Added %d transactions' % (len(collation.transactions) - pre_txs)) def update_collation_env_variables(state, collation): """Update collation variables into the state (refer to ethereum.common.update_block_env_variables) """ state.block_coinbase = collation.header.coinbase def set_execution_results(state, collation): """Set state root, receipt root, etc (ethereum.pow.common.set_execution_results) """ collation.header.receipts_root = mk_receipt_sha(state.receipts) collation.header.tx_list_root = mk_transaction_sha(collation.transactions) state.commit() collation.header.post_state_root = state.trie.root_hash log.info('Collation pre-sealed, %d gas used' % state.gas_used) <|reserved_special_token_0|> def finalize(state, coinbase): """Apply rewards and commit (refer to ethereum.pow.consensus.finalize) """ delta = int(state.config['COLLATOR_REWARD']) state.delta_balance(coinbase, delta) <|reserved_special_token_1|> <|reserved_special_token_0|> def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None): """Add transactions to a collation (refer to ethereum.common.add_transactions) """ if not txqueue: return pre_txs = len(collation.transactions) log.info('Adding transactions, %d in 
txqueue, %d dunkles' % (len( txqueue.txs), pre_txs)) while 1: tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit - shard_state.gas_used, min_gasprice=min_gasprice) if tx is None: break try: apply_shard_transaction(mainchain_state, shard_state, shard_id, tx) collation.transactions.append(tx) except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e: log.info(str(e)) pass log.info('Added %d transactions' % (len(collation.transactions) - pre_txs)) def update_collation_env_variables(state, collation): """Update collation variables into the state (refer to ethereum.common.update_block_env_variables) """ state.block_coinbase = collation.header.coinbase def set_execution_results(state, collation): """Set state root, receipt root, etc (ethereum.pow.common.set_execution_results) """ collation.header.receipts_root = mk_receipt_sha(state.receipts) collation.header.tx_list_root = mk_transaction_sha(collation.transactions) state.commit() collation.header.post_state_root = state.trie.root_hash log.info('Collation pre-sealed, %d gas used' % state.gas_used) def validate_transaction_tree(collation): """Validate that the transaction list root is correct (refer to ethereum.common.validate_transaction_tree) """ if collation.header.tx_list_root != mk_transaction_sha(collation. transactions): raise ValueError( 'Transaction root mismatch: header %s computed %s, %d transactions' % (encode_hex(collation.header.tx_list_root), encode_hex( mk_transaction_sha(collation.transactions)), len(collation. 
transactions))) return True <|reserved_special_token_0|> def finalize(state, coinbase): """Apply rewards and commit (refer to ethereum.pow.consensus.finalize) """ delta = int(state.config['COLLATOR_REWARD']) state.delta_balance(coinbase, delta) <|reserved_special_token_1|> <|reserved_special_token_0|> def mk_collation_from_prevstate(shard_chain, state, coinbase): """Make collation from previous state (refer to ethereum.common.mk_block_from_prevstate) """ collation = Collation(CollationHeader()) collation.header.shard_id = shard_chain.shard_id collation.header.prev_state_root = state.trie.root_hash collation.header.coinbase = coinbase collation.transactions = [] return collation def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None): """Add transactions to a collation (refer to ethereum.common.add_transactions) """ if not txqueue: return pre_txs = len(collation.transactions) log.info('Adding transactions, %d in txqueue, %d dunkles' % (len( txqueue.txs), pre_txs)) while 1: tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit - shard_state.gas_used, min_gasprice=min_gasprice) if tx is None: break try: apply_shard_transaction(mainchain_state, shard_state, shard_id, tx) collation.transactions.append(tx) except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e: log.info(str(e)) pass log.info('Added %d transactions' % (len(collation.transactions) - pre_txs)) def update_collation_env_variables(state, collation): """Update collation variables into the state (refer to ethereum.common.update_block_env_variables) """ state.block_coinbase = collation.header.coinbase def set_execution_results(state, collation): """Set state root, receipt root, etc (ethereum.pow.common.set_execution_results) """ collation.header.receipts_root = mk_receipt_sha(state.receipts) collation.header.tx_list_root = mk_transaction_sha(collation.transactions) state.commit() 
collation.header.post_state_root = state.trie.root_hash log.info('Collation pre-sealed, %d gas used' % state.gas_used) def validate_transaction_tree(collation): """Validate that the transaction list root is correct (refer to ethereum.common.validate_transaction_tree) """ if collation.header.tx_list_root != mk_transaction_sha(collation. transactions): raise ValueError( 'Transaction root mismatch: header %s computed %s, %d transactions' % (encode_hex(collation.header.tx_list_root), encode_hex( mk_transaction_sha(collation.transactions)), len(collation. transactions))) return True <|reserved_special_token_0|> def finalize(state, coinbase): """Apply rewards and commit (refer to ethereum.pow.consensus.finalize) """ delta = int(state.config['COLLATOR_REWARD']) state.delta_balance(coinbase, delta) <|reserved_special_token_1|> <|reserved_special_token_0|> def mk_collation_from_prevstate(shard_chain, state, coinbase): """Make collation from previous state (refer to ethereum.common.mk_block_from_prevstate) """ collation = Collation(CollationHeader()) collation.header.shard_id = shard_chain.shard_id collation.header.prev_state_root = state.trie.root_hash collation.header.coinbase = coinbase collation.transactions = [] return collation def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None): """Add transactions to a collation (refer to ethereum.common.add_transactions) """ if not txqueue: return pre_txs = len(collation.transactions) log.info('Adding transactions, %d in txqueue, %d dunkles' % (len( txqueue.txs), pre_txs)) while 1: tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit - shard_state.gas_used, min_gasprice=min_gasprice) if tx is None: break try: apply_shard_transaction(mainchain_state, shard_state, shard_id, tx) collation.transactions.append(tx) except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e: log.info(str(e)) pass log.info('Added %d transactions' % 
(len(collation.transactions) - pre_txs)) def update_collation_env_variables(state, collation): """Update collation variables into the state (refer to ethereum.common.update_block_env_variables) """ state.block_coinbase = collation.header.coinbase def set_execution_results(state, collation): """Set state root, receipt root, etc (ethereum.pow.common.set_execution_results) """ collation.header.receipts_root = mk_receipt_sha(state.receipts) collation.header.tx_list_root = mk_transaction_sha(collation.transactions) state.commit() collation.header.post_state_root = state.trie.root_hash log.info('Collation pre-sealed, %d gas used' % state.gas_used) def validate_transaction_tree(collation): """Validate that the transaction list root is correct (refer to ethereum.common.validate_transaction_tree) """ if collation.header.tx_list_root != mk_transaction_sha(collation. transactions): raise ValueError( 'Transaction root mismatch: header %s computed %s, %d transactions' % (encode_hex(collation.header.tx_list_root), encode_hex( mk_transaction_sha(collation.transactions)), len(collation. transactions))) return True def verify_execution_results(state, collation): """Verify the results by Merkle Proof (refer to ethereum.common.verify_execution_results) """ state.commit() validate_transaction_tree(collation) if collation.header.post_state_root != state.trie.root_hash: raise ValueError('State root mismatch: header %s computed %s' % ( encode_hex(collation.header.post_state_root), encode_hex(state. trie.root_hash))) if collation.header.receipts_root != mk_receipt_sha(state.receipts): raise ValueError( 'Receipt root mismatch: header %s computed %s, computed %d, %d receipts' % (encode_hex(collation.header.receipts_root), encode_hex( mk_receipt_sha(state.receipts)), state.gas_used, len(state. 
receipts))) return True def finalize(state, coinbase): """Apply rewards and commit (refer to ethereum.pow.consensus.finalize) """ delta = int(state.config['COLLATOR_REWARD']) state.delta_balance(coinbase, delta) <|reserved_special_token_1|> from ethereum.common import mk_transaction_sha, mk_receipt_sha from ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \ InsufficientStartGas, InvalidNonce, UnsignedTransaction from ethereum.messages import apply_transaction from ethereum.slogging import get_logger from ethereum.utils import encode_hex from sharding.receipt_consuming_tx_utils import apply_shard_transaction from sharding.collation import Collation, CollationHeader log = get_logger('sharding.shard_state_transition') def mk_collation_from_prevstate(shard_chain, state, coinbase): """Make collation from previous state (refer to ethereum.common.mk_block_from_prevstate) """ # state = state or shard_chain.state collation = Collation(CollationHeader()) collation.header.shard_id = shard_chain.shard_id collation.header.prev_state_root = state.trie.root_hash collation.header.coinbase = coinbase collation.transactions = [] return collation def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None): """Add transactions to a collation (refer to ethereum.common.add_transactions) """ if not txqueue: return pre_txs = len(collation.transactions) log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs)) while 1: tx = txqueue.pop_transaction( max_gas=shard_state.gas_limit - shard_state.gas_used, min_gasprice=min_gasprice ) if tx is None: break try: apply_shard_transaction(mainchain_state, shard_state, shard_id, tx) collation.transactions.append(tx) except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e: log.info(str(e)) pass log.info('Added %d transactions' % (len(collation.transactions) - pre_txs)) def 
update_collation_env_variables(state, collation): """Update collation variables into the state (refer to ethereum.common.update_block_env_variables) """ state.block_coinbase = collation.header.coinbase def set_execution_results(state, collation): """Set state root, receipt root, etc (ethereum.pow.common.set_execution_results) """ collation.header.receipts_root = mk_receipt_sha(state.receipts) collation.header.tx_list_root = mk_transaction_sha(collation.transactions) # Notice: commit state before assigning state.commit() collation.header.post_state_root = state.trie.root_hash # TODO: Don't handle in basic sharding currently # block.header.gas_used = state.gas_used # block.header.bloom = state.bloom log.info('Collation pre-sealed, %d gas used' % state.gas_used) def validate_transaction_tree(collation): """Validate that the transaction list root is correct (refer to ethereum.common.validate_transaction_tree) """ if collation.header.tx_list_root != mk_transaction_sha(collation.transactions): raise ValueError("Transaction root mismatch: header %s computed %s, %d transactions" % (encode_hex(collation.header.tx_list_root), encode_hex(mk_transaction_sha(collation.transactions)), len(collation.transactions))) return True def verify_execution_results(state, collation): """Verify the results by Merkle Proof (refer to ethereum.common.verify_execution_results) """ state.commit() validate_transaction_tree(collation) if collation.header.post_state_root != state.trie.root_hash: raise ValueError('State root mismatch: header %s computed %s' % (encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash))) if collation.header.receipts_root != mk_receipt_sha(state.receipts): raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' % (encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)), state.gas_used, len(state.receipts))) return True def finalize(state, coinbase): """Apply rewards and commit (refer 
to ethereum.pow.consensus.finalize) """ delta = int(state.config['COLLATOR_REWARD']) state.delta_balance(coinbase, delta)
flexible
{ "blob_id": "e364ba45513167966fe50e31a01f552ccedec452", "index": 6552, "step-1": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n", "step-2": "<mask token>\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a 
collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n 
\"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n", "step-3": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation 
pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\n<mask token>\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n", "step-4": "<mask token>\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id,\n min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(\n txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(max_gas=shard_state.gas_limit -\n shard_state.gas_used, min_gasprice=min_gasprice)\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached,\n InsufficientStartGas, InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n 
log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.\n transactions):\n raise ValueError(\n 'Transaction root mismatch: header %s computed %s, %d transactions'\n % (encode_hex(collation.header.tx_list_root), encode_hex(\n mk_transaction_sha(collation.transactions)), len(collation.\n transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n validate_transaction_tree(collation)\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' % (\n encode_hex(collation.header.post_state_root), encode_hex(state.\n trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError(\n 'Receipt root mismatch: header %s computed %s, computed %d, %d receipts'\n % (encode_hex(collation.header.receipts_root), encode_hex(\n mk_receipt_sha(state.receipts)), state.gas_used, len(state.\n receipts)))\n return True\n\n\ndef 
finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n", "step-5": "from ethereum.common import mk_transaction_sha, mk_receipt_sha\nfrom ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \\\n InsufficientStartGas, InvalidNonce, UnsignedTransaction\nfrom ethereum.messages import apply_transaction\nfrom ethereum.slogging import get_logger\nfrom ethereum.utils import encode_hex\n\nfrom sharding.receipt_consuming_tx_utils import apply_shard_transaction\nfrom sharding.collation import Collation, CollationHeader\n\nlog = get_logger('sharding.shard_state_transition')\n\n\ndef mk_collation_from_prevstate(shard_chain, state, coinbase):\n \"\"\"Make collation from previous state\n (refer to ethereum.common.mk_block_from_prevstate)\n \"\"\"\n # state = state or shard_chain.state\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation\n\n\ndef add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):\n \"\"\"Add transactions to a collation\n (refer to ethereum.common.add_transactions)\n \"\"\"\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(\n max_gas=shard_state.gas_limit - shard_state.gas_used,\n min_gasprice=min_gasprice\n )\n if tx is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,\n InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % 
(len(collation.transactions) - pre_txs))\n\n\ndef update_collation_env_variables(state, collation):\n \"\"\"Update collation variables into the state\n (refer to ethereum.common.update_block_env_variables)\n \"\"\"\n state.block_coinbase = collation.header.coinbase\n\n\ndef set_execution_results(state, collation):\n \"\"\"Set state root, receipt root, etc\n (ethereum.pow.common.set_execution_results)\n \"\"\"\n collation.header.receipts_root = mk_receipt_sha(state.receipts)\n collation.header.tx_list_root = mk_transaction_sha(collation.transactions)\n\n # Notice: commit state before assigning\n state.commit()\n collation.header.post_state_root = state.trie.root_hash\n\n # TODO: Don't handle in basic sharding currently\n # block.header.gas_used = state.gas_used\n # block.header.bloom = state.bloom\n\n log.info('Collation pre-sealed, %d gas used' % state.gas_used)\n\n\ndef validate_transaction_tree(collation):\n \"\"\"Validate that the transaction list root is correct\n (refer to ethereum.common.validate_transaction_tree)\n \"\"\"\n if collation.header.tx_list_root != mk_transaction_sha(collation.transactions):\n raise ValueError(\"Transaction root mismatch: header %s computed %s, %d transactions\" %\n (encode_hex(collation.header.tx_list_root), encode_hex(mk_transaction_sha(collation.transactions)),\n len(collation.transactions)))\n return True\n\n\ndef verify_execution_results(state, collation):\n \"\"\"Verify the results by Merkle Proof\n (refer to ethereum.common.verify_execution_results)\n \"\"\"\n state.commit()\n\n validate_transaction_tree(collation)\n\n if collation.header.post_state_root != state.trie.root_hash:\n raise ValueError('State root mismatch: header %s computed %s' %\n (encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash)))\n if collation.header.receipts_root != mk_receipt_sha(state.receipts):\n raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %\n 
(encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)),\n state.gas_used, len(state.receipts)))\n\n return True\n\n\ndef finalize(state, coinbase):\n \"\"\"Apply rewards and commit\n (refer to ethereum.pow.consensus.finalize)\n \"\"\"\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)\n", "step-ids": [ 4, 5, 6, 7, 10 ] }
[ 4, 5, 6, 7, 10 ]
# from the top # clean up dependencies from flask import Flask app = Flask(__name__) @app.route("/") def index(): return "<h1>Congratulations, it's a web app!</h1>" if __name__ == "__main__": app.run(host="127.0.0.1", port=8080, debug=True)
normal
{ "blob_id": "612535d95e655f2e2d2c58f41b2aa99afa7fbcbc", "index": 874, "step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n", "step-4": "from flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n", "step-5": "# from the top\n# clean up dependencies\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return \"<h1>Congratulations, it's a web app!</h1>\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in range(10): line = sys.stdin.readline() height = int(line) heights.append(height) heights.sort() heights.reverse() for i in range(3): print(heights[i]) <|reserved_special_token_1|> <|reserved_special_token_0|> heights = [] for i in range(10): line = sys.stdin.readline() height = int(line) heights.append(height) heights.sort() heights.reverse() for i in range(3): print(heights[i]) <|reserved_special_token_1|> import sys heights = [] for i in range(10): line = sys.stdin.readline() height = int(line) heights.append(height) heights.sort() heights.reverse() for i in range(3): print(heights[i])
flexible
{ "blob_id": "3e48de2e3b12965de1b3b5cb6c3cf68c90ec6212", "index": 2274, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n", "step-3": "<mask token>\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n", "step-4": "import sys\nheights = []\nfor i in range(10):\n line = sys.stdin.readline()\n height = int(line)\n heights.append(height)\nheights.sort()\nheights.reverse()\nfor i in range(3):\n print(heights[i])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8 etc_dictionary = { '2 30대': '이삼십대', '20~30대': '이삼십대', '20, 30대': '이십대 삼십대', '1+1': '원플러스원', '3에서 6개월인': '3개월에서 육개월인', } english_dictionary = { 'Devsisters': '데브시스터즈', 'track': '트랙', # krbook 'LA': '엘에이', 'LG': '엘지', 'KOREA': '코리아', 'JSA': '제이에스에이', 'PGA': '피지에이', 'GA': '지에이', 'idol': '아이돌', 'KTX': '케이티엑스', 'AC': '에이씨', 'DVD': '디비디', 'US': '유에스', 'CNN': '씨엔엔', 'LPGA': '엘피지에이', 'P': '피', 'L': '엘', 'T': '티', 'B': '비', 'C': '씨', 'BIFF': '비아이에프에프', 'GV': '지비', # JTBC 'IT': '아이티', 'IQ': '아이큐', 'JTBC': '제이티비씨', 'trickle down effect': '트리클 다운 이펙트', 'trickle up effect': '트리클 업 이펙트', 'down': '다운', 'up': '업', 'FCK': '에프씨케이', 'AP': '에이피', 'WHERETHEWILDTHINGSARE': '', 'Rashomon Effect': '', 'O': '오', 'OO': '오오', 'B': '비', 'GDP': '지디피', 'CIPA': '씨아이피에이', 'YS': '와이에스', 'Y': '와이', 'S': '에스', 'JTBC': '제이티비씨', 'PC': '피씨', 'bill': '빌', 'Halmuny': '하모니', ##### 'X': '엑스', 'SNS': '에스엔에스', 'ability': '어빌리티', 'shy': '', 'CCTV': '씨씨티비', 'IT': '아이티', 'the tenth man': '더 텐쓰 맨', #### 'L': '엘', 'PC': '피씨', 'YSDJJPMB': '', ######## 'Content Attitude Timing': '컨텐트 애티튜드 타이밍', 'CAT': '캣', 'IS': '아이에스', 'SNS': '에스엔에스', 'K': '케이', 'Y': '와이', 'KDI': '케이디아이', 'DOC': '디오씨', 'CIA': '씨아이에이', 'PBS': '피비에스', 'D': '디', 'PPropertyPositionPowerPrisonP' 'S': '에스', 'francisco': '프란시스코', 'I': '아이', 'III': '아이아이', ###### 'No joke': '노 조크', 'BBK': '비비케이', 'LA': '엘에이', 'Don': '', 't worry be happy': ' 워리 비 해피', 'NO': '엔오', ##### 'it was our sky': '잇 워즈 아워 스카이', 'it is our sky': '잇 이즈 아워 스카이', #### 'NEIS': '엔이아이에스', ##### 'IMF': '아이엠에프', 'apology': '어폴로지', 'humble': '험블', 'M': '엠', 'Nowhere Man': '노웨어 맨', 'The Tenth Man': '더 텐쓰 맨', 'PBS': '피비에스', 'BBC': '비비씨', 'MRJ': '엠알제이', 'CCTV': '씨씨티비', 'Pick me up': '픽 미 업', 'DNA': '디엔에이', 'UN': '유엔', 'STOP': '스탑', ##### 'PRESS': '프레스', ##### 'not to be': '낫 투비', 'Denial': '디나이얼', 'G': '지', 'IMF': '아이엠에프', 'GDP': '지디피', 'JTBC': '제이티비씨', 'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우', 'DDT': '디디티', 'AI': '에이아이', 'Z': '제트', 'OECD': '오이씨디', 'N': '앤', 'A': '에이', 
'MB': '엠비', 'EH': '이에이치', 'IS': '아이에스', 'TV': '티비', 'MIT': '엠아이티', 'KBO': '케이비오', 'I love America': '아이 러브 아메리카', 'SF': '에스에프', 'Q': '큐', 'KFX': '케이에프엑스', 'PM': '피엠', 'Prime Minister': '프라임 미니스터', 'Swordline': '스워드라인', 'TBS': '티비에스', 'DDT': '디디티', 'CS': '씨에스', 'Reflecting Absence': '리플렉팅 앱센스', 'PBS': '피비에스', 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원', 'negative pressure': '네거티브 프레셔', 'F': '에프', 'KIA': '기아', 'FTA': '에프티에이', 'Que sais-je': '', 'UFC': '유에프씨', 'P': '피', 'DJ': '디제이', 'Chaebol': '채벌', 'BBC': '비비씨', 'OECD': '오이씨디', 'BC': '삐씨', 'C': '씨', 'B': '씨', 'KY': '케이와이', 'K': '케이', 'CEO': '씨이오', 'YH': '와이에치', 'IS': '아이에스', 'who are you': '후 얼 유', 'Y': '와이', 'The Devils Advocate': '더 데빌즈 어드보카트', 'YS': '와이에스', 'so sorry': '쏘 쏘리', 'Santa': '산타', 'Big Endian': '빅 엔디안', 'Small Endian': '스몰 엔디안', 'Oh Captain My Captain': '오 캡틴 마이 캡틴', 'AIB': '에이아이비', 'K': '케이', 'PBS': '피비에스', }
normal
{ "blob_id": "ccd1e57518065963158984dda52297db45ce204e", "index": 2471, "step-1": "<mask token>\n", "step-2": "etc_dictionary = {'2 30대': '이삼십대', '20~30대': '이삼십대', '20, 30대': '이십대 삼십대',\n '1+1': '원플러스원', '3에서 6개월인': '3개월에서 육개월인'}\nenglish_dictionary = {'Devsisters': '데브시스터즈', 'track': '트랙', 'LA': '엘에이',\n 'LG': '엘지', 'KOREA': '코리아', 'JSA': '제이에스에이', 'PGA': '피지에이', 'GA': '지에이',\n 'idol': '아이돌', 'KTX': '케이티엑스', 'AC': '에이씨', 'DVD': '디비디', 'US': '유에스',\n 'CNN': '씨엔엔', 'LPGA': '엘피지에이', 'P': '피', 'L': '엘', 'T': '티', 'B': '비',\n 'C': '씨', 'BIFF': '비아이에프에프', 'GV': '지비', 'IT': '아이티', 'IQ': '아이큐',\n 'JTBC': '제이티비씨', 'trickle down effect': '트리클 다운 이펙트',\n 'trickle up effect': '트리클 업 이펙트', 'down': '다운', 'up': '업', 'FCK':\n '에프씨케이', 'AP': '에이피', 'WHERETHEWILDTHINGSARE': '', 'Rashomon Effect':\n '', 'O': '오', 'OO': '오오', 'B': '비', 'GDP': '지디피', 'CIPA': '씨아이피에이',\n 'YS': '와이에스', 'Y': '와이', 'S': '에스', 'JTBC': '제이티비씨', 'PC': '피씨', 'bill':\n '빌', 'Halmuny': '하모니', 'X': '엑스', 'SNS': '에스엔에스', 'ability': '어빌리티',\n 'shy': '', 'CCTV': '씨씨티비', 'IT': '아이티', 'the tenth man': '더 텐쓰 맨', 'L':\n '엘', 'PC': '피씨', 'YSDJJPMB': '', 'Content Attitude Timing':\n '컨텐트 애티튜드 타이밍', 'CAT': '캣', 'IS': '아이에스', 'SNS': '에스엔에스', 'K': '케이',\n 'Y': '와이', 'KDI': '케이디아이', 'DOC': '디오씨', 'CIA': '씨아이에이', 'PBS': '피비에스',\n 'D': '디', 'PPropertyPositionPowerPrisonPS': '에스', 'francisco': '프란시스코',\n 'I': '아이', 'III': '아이아이', 'No joke': '노 조크', 'BBK': '비비케이', 'LA': '엘에이',\n 'Don': '', 't worry be happy': ' 워리 비 해피', 'NO': '엔오', 'it was our sky':\n '잇 워즈 아워 스카이', 'it is our sky': '잇 이즈 아워 스카이', 'NEIS': '엔이아이에스', 'IMF':\n '아이엠에프', 'apology': '어폴로지', 'humble': '험블', 'M': '엠', 'Nowhere Man':\n '노웨어 맨', 'The Tenth Man': '더 텐쓰 맨', 'PBS': '피비에스', 'BBC': '비비씨', 'MRJ':\n '엠알제이', 'CCTV': '씨씨티비', 'Pick me up': '픽 미 업', 'DNA': '디엔에이', 'UN':\n '유엔', 'STOP': '스탑', 'PRESS': '프레스', 'not to be': '낫 투비', 'Denial':\n '디나이얼', 'G': '지', 'IMF': '아이엠에프', 'GDP': '지디피', 'JTBC': '제이티비씨',\n 'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우', 'DDT': 
'디디티', 'AI':\n '에이아이', 'Z': '제트', 'OECD': '오이씨디', 'N': '앤', 'A': '에이', 'MB': '엠비',\n 'EH': '이에이치', 'IS': '아이에스', 'TV': '티비', 'MIT': '엠아이티', 'KBO': '케이비오',\n 'I love America': '아이 러브 아메리카', 'SF': '에스에프', 'Q': '큐', 'KFX': '케이에프엑스',\n 'PM': '피엠', 'Prime Minister': '프라임 미니스터', 'Swordline': '스워드라인', 'TBS':\n '티비에스', 'DDT': '디디티', 'CS': '씨에스', 'Reflecting Absence': '리플렉팅 앱센스',\n 'PBS': '피비에스', 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',\n 'negative pressure': '네거티브 프레셔', 'F': '에프', 'KIA': '기아', 'FTA': '에프티에이',\n 'Que sais-je': '', 'UFC': '유에프씨', 'P': '피', 'DJ': '디제이', 'Chaebol':\n '채벌', 'BBC': '비비씨', 'OECD': '오이씨디', 'BC': '삐씨', 'C': '씨', 'B': '씨',\n 'KY': '케이와이', 'K': '케이', 'CEO': '씨이오', 'YH': '와이에치', 'IS': '아이에스',\n 'who are you': '후 얼 유', 'Y': '와이', 'The Devils Advocate': '더 데빌즈 어드보카트',\n 'YS': '와이에스', 'so sorry': '쏘 쏘리', 'Santa': '산타', 'Big Endian': '빅 엔디안',\n 'Small Endian': '스몰 엔디안', 'Oh Captain My Captain': '오 캡틴 마이 캡틴', 'AIB':\n '에이아이비', 'K': '케이', 'PBS': '피비에스'}\n", "step-3": "# coding: utf-8\r\n\r\netc_dictionary = {\r\n '2 30대': '이삼십대',\r\n '20~30대': '이삼십대',\r\n '20, 30대': '이십대 삼십대',\r\n '1+1': '원플러스원',\r\n '3에서 6개월인': '3개월에서 육개월인',\r\n}\r\n\r\nenglish_dictionary = {\r\n 'Devsisters': '데브시스터즈',\r\n 'track': '트랙',\r\n\r\n # krbook\r\n 'LA': '엘에이',\r\n 'LG': '엘지',\r\n 'KOREA': '코리아',\r\n 'JSA': '제이에스에이',\r\n 'PGA': '피지에이',\r\n 'GA': '지에이',\r\n 'idol': '아이돌',\r\n 'KTX': '케이티엑스',\r\n 'AC': '에이씨',\r\n 'DVD': '디비디',\r\n 'US': '유에스',\r\n 'CNN': '씨엔엔',\r\n 'LPGA': '엘피지에이',\r\n 'P': '피',\r\n 'L': '엘',\r\n 'T': '티',\r\n 'B': '비',\r\n 'C': '씨',\r\n 'BIFF': '비아이에프에프',\r\n 'GV': '지비',\r\n\r\n # JTBC\r\n 'IT': '아이티',\r\n 'IQ': '아이큐',\r\n 'JTBC': '제이티비씨',\r\n 'trickle down effect': '트리클 다운 이펙트',\r\n 'trickle up effect': '트리클 업 이펙트',\r\n 'down': '다운',\r\n 'up': '업',\r\n 'FCK': '에프씨케이',\r\n 'AP': '에이피',\r\n 'WHERETHEWILDTHINGSARE': '',\r\n 'Rashomon Effect': '',\r\n 'O': '오',\r\n 'OO': '오오',\r\n 'B': '비',\r\n 'GDP': '지디피',\r\n 'CIPA': '씨아이피에이',\r\n 'YS': '와이에스',\r\n 
'Y': '와이',\r\n 'S': '에스',\r\n 'JTBC': '제이티비씨',\r\n 'PC': '피씨',\r\n 'bill': '빌',\r\n 'Halmuny': '하모니', #####\r\n 'X': '엑스',\r\n 'SNS': '에스엔에스',\r\n 'ability': '어빌리티',\r\n 'shy': '',\r\n 'CCTV': '씨씨티비',\r\n 'IT': '아이티',\r\n 'the tenth man': '더 텐쓰 맨', ####\r\n 'L': '엘',\r\n 'PC': '피씨',\r\n 'YSDJJPMB': '', ########\r\n 'Content Attitude Timing': '컨텐트 애티튜드 타이밍',\r\n 'CAT': '캣',\r\n 'IS': '아이에스',\r\n 'SNS': '에스엔에스',\r\n 'K': '케이',\r\n 'Y': '와이',\r\n 'KDI': '케이디아이',\r\n 'DOC': '디오씨',\r\n 'CIA': '씨아이에이',\r\n 'PBS': '피비에스',\r\n 'D': '디',\r\n 'PPropertyPositionPowerPrisonP'\r\n 'S': '에스',\r\n 'francisco': '프란시스코',\r\n 'I': '아이',\r\n 'III': '아이아이', ######\r\n 'No joke': '노 조크',\r\n 'BBK': '비비케이',\r\n 'LA': '엘에이',\r\n 'Don': '',\r\n 't worry be happy': ' 워리 비 해피',\r\n 'NO': '엔오', #####\r\n 'it was our sky': '잇 워즈 아워 스카이',\r\n 'it is our sky': '잇 이즈 아워 스카이', ####\r\n 'NEIS': '엔이아이에스', #####\r\n 'IMF': '아이엠에프',\r\n 'apology': '어폴로지',\r\n 'humble': '험블',\r\n 'M': '엠',\r\n 'Nowhere Man': '노웨어 맨',\r\n 'The Tenth Man': '더 텐쓰 맨',\r\n 'PBS': '피비에스',\r\n 'BBC': '비비씨',\r\n 'MRJ': '엠알제이',\r\n 'CCTV': '씨씨티비',\r\n 'Pick me up': '픽 미 업',\r\n 'DNA': '디엔에이',\r\n 'UN': '유엔',\r\n 'STOP': '스탑', #####\r\n 'PRESS': '프레스', #####\r\n 'not to be': '낫 투비',\r\n 'Denial': '디나이얼',\r\n 'G': '지',\r\n 'IMF': '아이엠에프',\r\n 'GDP': '지디피',\r\n 'JTBC': '제이티비씨',\r\n 'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우',\r\n 'DDT': '디디티',\r\n 'AI': '에이아이',\r\n 'Z': '제트',\r\n 'OECD': '오이씨디',\r\n 'N': '앤',\r\n 'A': '에이',\r\n 'MB': '엠비',\r\n 'EH': '이에이치',\r\n 'IS': '아이에스',\r\n 'TV': '티비',\r\n 'MIT': '엠아이티',\r\n 'KBO': '케이비오',\r\n 'I love America': '아이 러브 아메리카',\r\n 'SF': '에스에프',\r\n 'Q': '큐',\r\n 'KFX': '케이에프엑스',\r\n 'PM': '피엠',\r\n 'Prime Minister': '프라임 미니스터',\r\n 'Swordline': '스워드라인',\r\n 'TBS': '티비에스',\r\n 'DDT': '디디티',\r\n 'CS': '씨에스',\r\n 'Reflecting Absence': '리플렉팅 앱센스',\r\n 'PBS': '피비에스',\r\n 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',\r\n 'negative pressure': '네거티브 프레셔',\r\n 'F': '에프',\r\n 'KIA': '기아',\r\n 
'FTA': '에프티에이',\r\n 'Que sais-je': '',\r\n 'UFC': '유에프씨',\r\n 'P': '피',\r\n 'DJ': '디제이',\r\n 'Chaebol': '채벌',\r\n 'BBC': '비비씨',\r\n 'OECD': '오이씨디',\r\n 'BC': '삐씨',\r\n 'C': '씨',\r\n 'B': '씨',\r\n 'KY': '케이와이',\r\n 'K': '케이',\r\n 'CEO': '씨이오',\r\n 'YH': '와이에치',\r\n 'IS': '아이에스',\r\n 'who are you': '후 얼 유',\r\n 'Y': '와이',\r\n 'The Devils Advocate': '더 데빌즈 어드보카트',\r\n 'YS': '와이에스',\r\n 'so sorry': '쏘 쏘리',\r\n 'Santa': '산타',\r\n 'Big Endian': '빅 엔디안',\r\n 'Small Endian': '스몰 엔디안',\r\n 'Oh Captain My Captain': '오 캡틴 마이 캡틴',\r\n 'AIB': '에이아이비',\r\n 'K': '케이',\r\n 'PBS': '피비에스',\r\n}\r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> class Meters: <|reserved_special_token_0|> def indicate(self, kmh=None, amp=None, led=None): if self.pi: if kmh != None: kmh = SPEED_MAX if kmh > SPEED_MAX else kmh kmh = 0 if kmh < 0 else kmh self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh / SPEED_MAX * (SERVO_MAX - SERVO_MIN))) if amp != None: amp = IMM_MAX if amp > IMM_MAX else amp amp = -IMM_MAX if amp < -IMM_MAX else amp self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 * (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN))) if led != None: self.pi.write(PIN_LED, led) class SerialCom: def __init__(self, meterObj=None): self.ser = None self.rxdata = {} self.flagrx = True self.t1 = None self.METERS = meterObj print('[serialcom.__init__] open serial port') if isMCUConnected: try: if os.name == 'posix': portpath = SERIALPATH_RASPI elif os.name == 'nt': portpath = SERIALPATH_WIN self.ser = serial.Serial(portpath, 115200, timeout=None) except serial.serialutil.SerialException: print('[serialcom.__init__] failed to open port') self.rxdata = {'serialfailed': 1} else: print( "[serialcom.__init__] port wasn't opened because isMCUConnected==False." ) def recieve_loop(self): if self.ser: print('[serialcom.recieve_loop] start recieving') self.ser.readline() while self.flagrx: rxbuf = self.ser.readline().decode('ascii', 'ignore') print(rxbuf) try: self.rxdata = json.loads(rxbuf) self.rxdata['serialfailed'] = 0 if self.METERS: self.METERS.indicate(self.rxdata['speed'], self. rxdata['Imm'], self.rxdata['invstate']) except json.decoder.JSONDecodeError: print( '[serialcom.recieve_loop] when decoding, error has occured' ) self.rxdata['serialfailed'] = 1 self.ser.close() else: print( '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.' 
) self.rxdata = json.loads(RXBUF0) self.rxdata['serialfailed'] = 0 while self.flagrx: time.sleep(0.5) print('[serialcom.recieve_loop] end recieving') def recieve_start(self): if not self.t1: self.flagrx = True self.t1 = threading.Thread(target=self.recieve_loop, daemon=True) self.t1.start() def recieve_end(self): if self.t1: self.flagrx = False self.t1.join() del self.t1 def send(self, txbuf): if self.ser: print(bytes(txbuf, 'ascii')) return self.ser.write(bytes(txbuf, 'ascii')) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Meters: def __init__(self): self.pi = None if os.name == 'posix': import pigpio self.pi = pigpio.pi() self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT) self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT) self.pi.set_mode(PIN_LED, pigpio.OUTPUT) def indicate(self, kmh=None, amp=None, led=None): if self.pi: if kmh != None: kmh = SPEED_MAX if kmh > SPEED_MAX else kmh kmh = 0 if kmh < 0 else kmh self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh / SPEED_MAX * (SERVO_MAX - SERVO_MIN))) if amp != None: amp = IMM_MAX if amp > IMM_MAX else amp amp = -IMM_MAX if amp < -IMM_MAX else amp self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 * (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN))) if led != None: self.pi.write(PIN_LED, led) class SerialCom: def __init__(self, meterObj=None): self.ser = None self.rxdata = {} self.flagrx = True self.t1 = None self.METERS = meterObj print('[serialcom.__init__] open serial port') if isMCUConnected: try: if os.name == 'posix': portpath = SERIALPATH_RASPI elif os.name == 'nt': portpath = SERIALPATH_WIN self.ser = serial.Serial(portpath, 115200, timeout=None) except serial.serialutil.SerialException: print('[serialcom.__init__] failed to open port') self.rxdata = {'serialfailed': 1} else: print( "[serialcom.__init__] port wasn't opened because isMCUConnected==False." 
) def recieve_loop(self): if self.ser: print('[serialcom.recieve_loop] start recieving') self.ser.readline() while self.flagrx: rxbuf = self.ser.readline().decode('ascii', 'ignore') print(rxbuf) try: self.rxdata = json.loads(rxbuf) self.rxdata['serialfailed'] = 0 if self.METERS: self.METERS.indicate(self.rxdata['speed'], self. rxdata['Imm'], self.rxdata['invstate']) except json.decoder.JSONDecodeError: print( '[serialcom.recieve_loop] when decoding, error has occured' ) self.rxdata['serialfailed'] = 1 self.ser.close() else: print( '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.' ) self.rxdata = json.loads(RXBUF0) self.rxdata['serialfailed'] = 0 while self.flagrx: time.sleep(0.5) print('[serialcom.recieve_loop] end recieving') def recieve_start(self): if not self.t1: self.flagrx = True self.t1 = threading.Thread(target=self.recieve_loop, daemon=True) self.t1.start() def recieve_end(self): if self.t1: self.flagrx = False self.t1.join() del self.t1 def send(self, txbuf): if self.ser: print(bytes(txbuf, 'ascii')) return self.ser.write(bytes(txbuf, 'ascii')) def main(): class Mode(enum.IntEnum): DEMO = 0 EBIKE = 1 ASSIST = 2 mode = Mode.DEMO meters = Meters() meters.indicate(0, 0, 0) serialcom = SerialCom(meters) serialcom.recieve_start() api = responder.API() @api.route('/reset') def reset(req, resp): serialcom.send('invoff\n') @api.route('/info') def get_info(req, resp): resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.media = serialcom.rxdata @api.route('/cardata') def get_cardata(req, resp): text = open('static/cars/cardata.json', 'r', encoding='utf-8').read() resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.text = text @api.route('/command') async def post_command(req, resp): data = await req.media() print(data) if 'carno' in data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"carno={data['carno']}\n") if 'mode' in 
data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"mode={data['mode']}\n") if 'notch' in data: if data['notch'] == 'P': serialcom.send('P\n') elif data['notch'] == 'N': serialcom.send('N\n') elif data['notch'] == 'B': serialcom.send('B\n') else: serialcom.send(f"notch={data['notch']}\n") if 'invoff' in data: serialcom.send('invoff\n') @api.route('/') def hello_html(req, resp): resp.html = api.template('index.html') api.run(address='0.0.0.0', port=5042) if __name__ == '__main__': main() <|reserved_special_token_1|> <|reserved_special_token_0|> isMCUConnected = True SERIALPATH_RASPI = '/dev/ttyACM0' SERIALPATH_WIN = 'COM16' PIN_SERVO1 = 12 PIN_SERVO2 = 13 PIN_LED = 16 SERVO_MIN = 115000 SERVO_MAX = 26000 SPEED_MAX = 30 IMM_MAX = 7.5 RXBUF0 = open('rxdata.json', 'r').read().replace('\n', '') class Meters: def __init__(self): self.pi = None if os.name == 'posix': import pigpio self.pi = pigpio.pi() self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT) self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT) self.pi.set_mode(PIN_LED, pigpio.OUTPUT) def indicate(self, kmh=None, amp=None, led=None): if self.pi: if kmh != None: kmh = SPEED_MAX if kmh > SPEED_MAX else kmh kmh = 0 if kmh < 0 else kmh self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh / SPEED_MAX * (SERVO_MAX - SERVO_MIN))) if amp != None: amp = IMM_MAX if amp > IMM_MAX else amp amp = -IMM_MAX if amp < -IMM_MAX else amp self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 * (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN))) if led != None: self.pi.write(PIN_LED, led) class SerialCom: def __init__(self, meterObj=None): self.ser = None self.rxdata = {} self.flagrx = True self.t1 = None self.METERS = meterObj print('[serialcom.__init__] open serial port') if isMCUConnected: try: if os.name == 'posix': portpath = SERIALPATH_RASPI elif os.name == 'nt': portpath = SERIALPATH_WIN self.ser = serial.Serial(portpath, 115200, timeout=None) except 
serial.serialutil.SerialException: print('[serialcom.__init__] failed to open port') self.rxdata = {'serialfailed': 1} else: print( "[serialcom.__init__] port wasn't opened because isMCUConnected==False." ) def recieve_loop(self): if self.ser: print('[serialcom.recieve_loop] start recieving') self.ser.readline() while self.flagrx: rxbuf = self.ser.readline().decode('ascii', 'ignore') print(rxbuf) try: self.rxdata = json.loads(rxbuf) self.rxdata['serialfailed'] = 0 if self.METERS: self.METERS.indicate(self.rxdata['speed'], self. rxdata['Imm'], self.rxdata['invstate']) except json.decoder.JSONDecodeError: print( '[serialcom.recieve_loop] when decoding, error has occured' ) self.rxdata['serialfailed'] = 1 self.ser.close() else: print( '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.' ) self.rxdata = json.loads(RXBUF0) self.rxdata['serialfailed'] = 0 while self.flagrx: time.sleep(0.5) print('[serialcom.recieve_loop] end recieving') def recieve_start(self): if not self.t1: self.flagrx = True self.t1 = threading.Thread(target=self.recieve_loop, daemon=True) self.t1.start() def recieve_end(self): if self.t1: self.flagrx = False self.t1.join() del self.t1 def send(self, txbuf): if self.ser: print(bytes(txbuf, 'ascii')) return self.ser.write(bytes(txbuf, 'ascii')) def main(): class Mode(enum.IntEnum): DEMO = 0 EBIKE = 1 ASSIST = 2 mode = Mode.DEMO meters = Meters() meters.indicate(0, 0, 0) serialcom = SerialCom(meters) serialcom.recieve_start() api = responder.API() @api.route('/reset') def reset(req, resp): serialcom.send('invoff\n') @api.route('/info') def get_info(req, resp): resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.media = serialcom.rxdata @api.route('/cardata') def get_cardata(req, resp): text = open('static/cars/cardata.json', 'r', encoding='utf-8').read() resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.text = text @api.route('/command') async def post_command(req, resp): data = 
await req.media() print(data) if 'carno' in data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"carno={data['carno']}\n") if 'mode' in data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"mode={data['mode']}\n") if 'notch' in data: if data['notch'] == 'P': serialcom.send('P\n') elif data['notch'] == 'N': serialcom.send('N\n') elif data['notch'] == 'B': serialcom.send('B\n') else: serialcom.send(f"notch={data['notch']}\n") if 'invoff' in data: serialcom.send('invoff\n') @api.route('/') def hello_html(req, resp): resp.html = api.template('index.html') api.run(address='0.0.0.0', port=5042) if __name__ == '__main__': main() <|reserved_special_token_1|> import time, json, glob, os, enum import serial import threading import responder isMCUConnected = True SERIALPATH_RASPI = '/dev/ttyACM0' SERIALPATH_WIN = 'COM16' PIN_SERVO1 = 12 PIN_SERVO2 = 13 PIN_LED = 16 SERVO_MIN = 115000 SERVO_MAX = 26000 SPEED_MAX = 30 IMM_MAX = 7.5 RXBUF0 = open('rxdata.json', 'r').read().replace('\n', '') class Meters: def __init__(self): self.pi = None if os.name == 'posix': import pigpio self.pi = pigpio.pi() self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT) self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT) self.pi.set_mode(PIN_LED, pigpio.OUTPUT) def indicate(self, kmh=None, amp=None, led=None): if self.pi: if kmh != None: kmh = SPEED_MAX if kmh > SPEED_MAX else kmh kmh = 0 if kmh < 0 else kmh self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh / SPEED_MAX * (SERVO_MAX - SERVO_MIN))) if amp != None: amp = IMM_MAX if amp > IMM_MAX else amp amp = -IMM_MAX if amp < -IMM_MAX else amp self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 * (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN))) if led != None: self.pi.write(PIN_LED, led) class SerialCom: def __init__(self, meterObj=None): self.ser = None self.rxdata = {} self.flagrx = True self.t1 = None self.METERS = 
meterObj print('[serialcom.__init__] open serial port') if isMCUConnected: try: if os.name == 'posix': portpath = SERIALPATH_RASPI elif os.name == 'nt': portpath = SERIALPATH_WIN self.ser = serial.Serial(portpath, 115200, timeout=None) except serial.serialutil.SerialException: print('[serialcom.__init__] failed to open port') self.rxdata = {'serialfailed': 1} else: print( "[serialcom.__init__] port wasn't opened because isMCUConnected==False." ) def recieve_loop(self): if self.ser: print('[serialcom.recieve_loop] start recieving') self.ser.readline() while self.flagrx: rxbuf = self.ser.readline().decode('ascii', 'ignore') print(rxbuf) try: self.rxdata = json.loads(rxbuf) self.rxdata['serialfailed'] = 0 if self.METERS: self.METERS.indicate(self.rxdata['speed'], self. rxdata['Imm'], self.rxdata['invstate']) except json.decoder.JSONDecodeError: print( '[serialcom.recieve_loop] when decoding, error has occured' ) self.rxdata['serialfailed'] = 1 self.ser.close() else: print( '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.' 
) self.rxdata = json.loads(RXBUF0) self.rxdata['serialfailed'] = 0 while self.flagrx: time.sleep(0.5) print('[serialcom.recieve_loop] end recieving') def recieve_start(self): if not self.t1: self.flagrx = True self.t1 = threading.Thread(target=self.recieve_loop, daemon=True) self.t1.start() def recieve_end(self): if self.t1: self.flagrx = False self.t1.join() del self.t1 def send(self, txbuf): if self.ser: print(bytes(txbuf, 'ascii')) return self.ser.write(bytes(txbuf, 'ascii')) def main(): class Mode(enum.IntEnum): DEMO = 0 EBIKE = 1 ASSIST = 2 mode = Mode.DEMO meters = Meters() meters.indicate(0, 0, 0) serialcom = SerialCom(meters) serialcom.recieve_start() api = responder.API() @api.route('/reset') def reset(req, resp): serialcom.send('invoff\n') @api.route('/info') def get_info(req, resp): resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.media = serialcom.rxdata @api.route('/cardata') def get_cardata(req, resp): text = open('static/cars/cardata.json', 'r', encoding='utf-8').read() resp.headers = {'Content-Type': 'application/json; charset=utf-8'} resp.text = text @api.route('/command') async def post_command(req, resp): data = await req.media() print(data) if 'carno' in data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"carno={data['carno']}\n") if 'mode' in data: serialcom.send('invoff\n') time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"mode={data['mode']}\n") if 'notch' in data: if data['notch'] == 'P': serialcom.send('P\n') elif data['notch'] == 'N': serialcom.send('N\n') elif data['notch'] == 'B': serialcom.send('B\n') else: serialcom.send(f"notch={data['notch']}\n") if 'invoff' in data: serialcom.send('invoff\n') @api.route('/') def hello_html(req, resp): resp.html = api.template('index.html') api.run(address='0.0.0.0', port=5042) if __name__ == '__main__': main() <|reserved_special_token_1|> import time, json, glob, os, 
enum import serial import threading import responder # 環境によって書き換える変数 isMCUConnected = True # マイコンがUSBポートに接続されているか SERIALPATH_RASPI = '/dev/ttyACM0' # ラズパイのシリアルポート SERIALPATH_WIN = 'COM16' # Windowsのシリアルポート # 各種定数 PIN_SERVO1 = 12 # GPIO12 PWM0 Pin PIN_SERVO2 = 13 # GPIO13 PWM1 Pin PIN_LED = 16 # GPIO25 LED Pin SERVO_MIN = 115000 # サーボの最小duty SERVO_MAX = 26000 # サーボの最大duty SPEED_MAX = 30 # 速度の最大値 [km/h] IMM_MAX = 7.5 # 電流の最大値(プラスとマイナス両方に振れる) [A] RXBUF0 = open("rxdata.json", "r").read().replace("\n","") # シリアル通信しないときにダミーで読み込む受信結果 class Meters(): def __init__(self): self.pi = None # pigpioオブジェクト # pigpioのセットアップ if os.name == 'posix': # ラズパイで動かした場合にはpigpioをインポート import pigpio self.pi = pigpio.pi() self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT) self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT) self.pi.set_mode(PIN_LED, pigpio.OUTPUT) def indicate(self, kmh=None, amp=None, led=None): if self.pi: if kmh != None: kmh = SPEED_MAX if (kmh > SPEED_MAX) else kmh # constrain upbound and lowbound kmh = 0 if (kmh < 0) else kmh self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh/SPEED_MAX * (SERVO_MAX - SERVO_MIN))) # 速度計 if amp != None: amp = IMM_MAX if (amp > IMM_MAX) else amp amp = -IMM_MAX if (amp < -IMM_MAX) else amp self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5*(1 + amp/IMM_MAX) * (SERVO_MAX - SERVO_MIN))) # 電流計 if led != None: self.pi.write(PIN_LED, led) class SerialCom(): def __init__(self, meterObj=None): self.ser = None # シリアル通信オブジェクト self.rxdata = {} # 受信したデータを入れておく辞書型変数。外部からこれにアクセスすることでデータを取り出す self.flagrx = True # Trueの間シリアル通信を実行 self.t1 = None # シリアルの受信を行うThreadingオブジェクト self.METERS = meterObj # 速度を表示するMetersオブジェクトへの参照をセット # Metersオブジェクトへの参照 # MCUが接続されていればシリアルポートをオープン print("[serialcom.__init__] open serial port") if isMCUConnected: try: # OSによってポートを切り替え if os.name == 'posix': portpath = SERIALPATH_RASPI elif os.name == 'nt': portpath = SERIALPATH_WIN # ポートを開く self.ser = serial.Serial(portpath, 115200, timeout=None) # ポートオープン失敗時 except 
serial.serialutil.SerialException: print("[serialcom.__init__] failed to open port") self.rxdata = {"serialfailed":1} else: print("[serialcom.__init__] port wasn't opened because isMCUConnected==False.") def recieve_loop(self): # シリアルポートから受信を行う無限ループ if self.ser: print("[serialcom.recieve_loop] start recieving") self.ser.readline() # 1回目は不完全なデータなので空読み while self.flagrx: rxbuf = self.ser.readline().decode('ascii','ignore') print(rxbuf) try: self.rxdata = json.loads(rxbuf) # JSON形式へデコード self.rxdata['serialfailed'] = 0 if self.METERS: # メーターに表示 self.METERS.indicate(self.rxdata['speed'], self.rxdata['Imm'], self.rxdata['invstate']) except json.decoder.JSONDecodeError: print("[serialcom.recieve_loop] when decoding, error has occured") self.rxdata['serialfailed'] = 1 self.ser.close() # シリアルポートが開いていないときは、 rxdataとしてRXBUF0を代入する else: print("[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.") self.rxdata = json.loads(RXBUF0) self.rxdata['serialfailed'] = 0 while self.flagrx: time.sleep(0.5) print("[serialcom.recieve_loop] end recieving") def recieve_start(self): if not(self.t1): self.flagrx = True self.t1 = threading.Thread(target=self.recieve_loop, daemon=True) self.t1.start() def recieve_end(self): if self.t1: self.flagrx = False self.t1.join() del self.t1 def send(self, txbuf): if self.ser: print(bytes(txbuf,"ascii")) return self.ser.write(bytes(txbuf,"ascii")) def main(): class Mode(enum.IntEnum): DEMO = 0 EBIKE = 1 ASSIST = 2 mode = Mode.DEMO # 動作モード # メーターとシリアル通信のインスタンスを生成、初期化 meters = Meters() meters.indicate(0, 0, 0) serialcom = SerialCom(meters) serialcom.recieve_start() # サーバを立てる api = responder.API() @api.route("/reset") def reset(req,resp): serialcom.send("invoff\n") @api.route("/info") def get_info(req,resp): resp.headers = {"Content-Type": "application/json; charset=utf-8"} resp.media = serialcom.rxdata @api.route("/cardata") def get_cardata(req,resp): text = open("static/cars/cardata.json", "r", encoding='utf-8').read() 
resp.headers = {"Content-Type": "application/json; charset=utf-8"} resp.text = text @api.route("/command") async def post_command(req,resp): data = await req.media() print(data) if 'carno' in data: serialcom.send("invoff\n") time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"carno={data['carno']}\n") if 'mode' in data: serialcom.send("invoff\n") time.sleep(0.5) while serialcom.rxdata['invstate'] == 1: time.sleep(0.1) serialcom.send(f"mode={data['mode']}\n") if 'notch' in data: if data['notch'] == 'P': serialcom.send("P\n") elif data['notch'] == 'N': serialcom.send("N\n") elif data['notch'] == 'B': serialcom.send("B\n") else: serialcom.send(f"notch={data['notch']}\n") if 'invoff' in data: serialcom.send("invoff\n") @api.route("/") def hello_html(req,resp): resp.html = api.template('index.html') # web server start api.run(address='0.0.0.0', port=5042) # 0.0.0.0にすると外部からアクセスできる if __name__ == '__main__': main()
flexible
{ "blob_id": "25532102cc36da139a22a61d226dff613f06ab31", "index": 4714, "step-1": "<mask token>\n\n\nclass Meters:\n <mask token>\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n 
self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n 
print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n @api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n 
time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nisMCUConnected = True\nSERIALPATH_RASPI = '/dev/ttyACM0'\nSERIALPATH_WIN = 'COM16'\nPIN_SERVO1 = 12\nPIN_SERVO2 = 13\nPIN_LED = 16\nSERVO_MIN = 115000\nSERVO_MAX = 26000\nSPEED_MAX = 30\nIMM_MAX = 7.5\nRXBUF0 = open('rxdata.json', 'r').read().replace('\\n', '')\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = 
meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n 
@api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import time, json, glob, os, enum\nimport serial\nimport threading\nimport responder\nisMCUConnected = True\nSERIALPATH_RASPI = '/dev/ttyACM0'\nSERIALPATH_WIN = 'COM16'\nPIN_SERVO1 = 12\nPIN_SERVO2 = 13\nPIN_LED = 16\nSERVO_MIN = 115000\nSERVO_MAX = 26000\nSPEED_MAX = 30\nIMM_MAX = 7.5\nRXBUF0 = open('rxdata.json', 'r').read().replace('\\n', '')\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n 
kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = 
threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n @api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import time, json, glob, os, enum\nimport serial\nimport threading\nimport responder\n\n# 
環境によって書き換える変数\nisMCUConnected = True # マイコンがUSBポートに接続されているか\nSERIALPATH_RASPI = '/dev/ttyACM0' # ラズパイのシリアルポート\nSERIALPATH_WIN = 'COM16' # Windowsのシリアルポート\n\n# 各種定数\nPIN_SERVO1 = 12 # GPIO12 PWM0 Pin\nPIN_SERVO2 = 13 # GPIO13 PWM1 Pin\nPIN_LED = 16 # GPIO25 LED Pin\nSERVO_MIN = 115000 # サーボの最小duty\nSERVO_MAX = 26000 # サーボの最大duty\nSPEED_MAX = 30 # 速度の最大値 [km/h]\nIMM_MAX = 7.5 # 電流の最大値(プラスとマイナス両方に振れる) [A]\nRXBUF0 = open(\"rxdata.json\", \"r\").read().replace(\"\\n\",\"\") # シリアル通信しないときにダミーで読み込む受信結果\n\nclass Meters():\n def __init__(self):\n self.pi = None # pigpioオブジェクト\n # pigpioのセットアップ\n if os.name == 'posix': # ラズパイで動かした場合にはpigpioをインポート\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if (kmh > SPEED_MAX) else kmh # constrain upbound and lowbound\n kmh = 0 if (kmh < 0) else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh/SPEED_MAX * (SERVO_MAX - SERVO_MIN))) # 速度計\n if amp != None:\n amp = IMM_MAX if (amp > IMM_MAX) else amp\n amp = -IMM_MAX if (amp < -IMM_MAX) else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5*(1 + amp/IMM_MAX) * (SERVO_MAX - SERVO_MIN))) # 電流計\n if led != None:\n self.pi.write(PIN_LED, led)\n\nclass SerialCom():\n def __init__(self, meterObj=None):\n self.ser = None # シリアル通信オブジェクト\n self.rxdata = {} # 受信したデータを入れておく辞書型変数。外部からこれにアクセスすることでデータを取り出す\n self.flagrx = True # Trueの間シリアル通信を実行\n self.t1 = None # シリアルの受信を行うThreadingオブジェクト\n self.METERS = meterObj # 速度を表示するMetersオブジェクトへの参照をセット # Metersオブジェクトへの参照\n\n # MCUが接続されていればシリアルポートをオープン\n print(\"[serialcom.__init__] open serial port\")\n if isMCUConnected:\n try:\n # OSによってポートを切り替え\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n\n # ポートを開く\n self.ser = serial.Serial(portpath, 115200, 
timeout=None)\n\n # ポートオープン失敗時\n except serial.serialutil.SerialException:\n print(\"[serialcom.__init__] failed to open port\")\n self.rxdata = {\"serialfailed\":1}\n \n else:\n print(\"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\")\n \n def recieve_loop(self):\n # シリアルポートから受信を行う無限ループ\n if self.ser:\n print(\"[serialcom.recieve_loop] start recieving\")\n self.ser.readline() # 1回目は不完全なデータなので空読み\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii','ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf) # JSON形式へデコード\n self.rxdata['serialfailed'] = 0\n if self.METERS: # メーターに表示\n self.METERS.indicate(self.rxdata['speed'], self.rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\"[serialcom.recieve_loop] when decoding, error has occured\")\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n\n # シリアルポートが開いていないときは、 rxdataとしてRXBUF0を代入する\n else:\n print(\"[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.\")\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n \n print(\"[serialcom.recieve_loop] end recieving\")\n\n def recieve_start(self):\n if not(self.t1):\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n \n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf,\"ascii\"))\n return self.ser.write(bytes(txbuf,\"ascii\"))\n \ndef main():\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n \n mode = Mode.DEMO # 動作モード\n \n # メーターとシリアル通信のインスタンスを生成、初期化\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n\n # サーバを立てる\n api = responder.API()\n\n @api.route(\"/reset\")\n def reset(req,resp):\n serialcom.send(\"invoff\\n\")\n\n @api.route(\"/info\")\n def get_info(req,resp):\n 
resp.headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n resp.media = serialcom.rxdata\n\n @api.route(\"/cardata\")\n def get_cardata(req,resp):\n text = open(\"static/cars/cardata.json\", \"r\", encoding='utf-8').read()\n resp.headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n resp.text = text\n\n @api.route(\"/command\")\n async def post_command(req,resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send(\"invoff\\n\")\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send(\"invoff\\n\")\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send(\"P\\n\")\n elif data['notch'] == 'N':\n serialcom.send(\"N\\n\")\n elif data['notch'] == 'B':\n serialcom.send(\"B\\n\")\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send(\"invoff\\n\")\n\n @api.route(\"/\")\n def hello_html(req,resp):\n resp.html = api.template('index.html')\n\n # web server start\n api.run(address='0.0.0.0', port=5042) # 0.0.0.0にすると外部からアクセスできる\n \n \nif __name__ == '__main__':\n main()\n", "step-ids": [ 8, 11, 12, 13, 14 ] }
[ 8, 11, 12, 13, 14 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> app = web.application(urls, globals()) <|reserved_special_token_1|> <|reserved_special_token_0|> import web from myapp.urls import urls app = web.application(urls, globals()) <|reserved_special_token_1|> #coding=utf-8 '初始化Package,加载url,生成app对象' import web from myapp.urls import urls app = web.application(urls, globals())
flexible
{ "blob_id": "4480b305a6f71ff64022f2b890998326bf402bf0", "index": 1669, "step-1": "<mask token>\n", "step-2": "<mask token>\napp = web.application(urls, globals())\n", "step-3": "<mask token>\nimport web\nfrom myapp.urls import urls\napp = web.application(urls, globals())\n", "step-4": "#coding=utf-8\r\n'初始化Package,加载url,生成app对象'\r\nimport web\r\nfrom myapp.urls import urls\r\n\r\napp = web.application(urls, globals())\r\n\r\n\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class ZydisDisassembler(disassemblerAbstract): def diassemble(self, filename, bits='32bit'): """ Disassembly executable file return iterable instruction set. :param filename : Executable file path :type filename: str :param bits : File platform 16, 32 or 64. :type bits : str [16bit, 32bit, 64bit] (default:32bit) :return: assembly code iterator: :rtype: str """ mode = bits.replace('bit', '') diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode, filename]) return diasm.decode('utf-8') <|reserved_special_token_0|> <|reserved_special_token_0|> def getAssemblyCodeList(self, filename, bits='32bit'): return self.diassemble(filename, bits).split('\n') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class ZydisDisassembler(disassemblerAbstract): def diassemble(self, filename, bits='32bit'): """ Disassembly executable file return iterable instruction set. :param filename : Executable file path :type filename: str :param bits : File platform 16, 32 or 64. :type bits : str [16bit, 32bit, 64bit] (default:32bit) :return: assembly code iterator: :rtype: str """ mode = bits.replace('bit', '') diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode, filename]) return diasm.decode('utf-8') def getDisassembledCode(self, filename, delimeter='\n', bits='32bit'): """ Disassemble file and concatenete offset, size, hexcode and instruction into string result. :param filename: Binary file name :type filename: str :param delimeter: Line delimeter for instruction set :type delimeter: str :param bits: File platform 16, 32 or 64. 
:type bits: str [16bit, 32bit, 64bit] (default:32bit) :return assembly instruction list :rtype : str """ return self.diassemble(filename, bits).replace('\n', delimeter) <|reserved_special_token_0|> def getAssemblyCodeList(self, filename, bits='32bit'): return self.diassemble(filename, bits).split('\n') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class ZydisDisassembler(disassemblerAbstract): def diassemble(self, filename, bits='32bit'): """ Disassembly executable file return iterable instruction set. :param filename : Executable file path :type filename: str :param bits : File platform 16, 32 or 64. :type bits : str [16bit, 32bit, 64bit] (default:32bit) :return: assembly code iterator: :rtype: str """ mode = bits.replace('bit', '') diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode, filename]) return diasm.decode('utf-8') def getDisassembledCode(self, filename, delimeter='\n', bits='32bit'): """ Disassemble file and concatenete offset, size, hexcode and instruction into string result. :param filename: Binary file name :type filename: str :param delimeter: Line delimeter for instruction set :type delimeter: str :param bits: File platform 16, 32 or 64. :type bits: str [16bit, 32bit, 64bit] (default:32bit) :return assembly instruction list :rtype : str """ return self.diassemble(filename, bits).replace('\n', delimeter) def getAssemblyCode(self, filename, delimeter='\n', bits='32bit'): return self.diassemble(filename, bits).replace('\n', delimeter) def getAssemblyCodeList(self, filename, bits='32bit'): return self.diassemble(filename, bits).split('\n') <|reserved_special_token_0|> <|reserved_special_token_1|> import subprocess from dissamblerAbstract import disassemblerAbstract class ZydisDisassembler(disassemblerAbstract): def diassemble(self, filename, bits='32bit'): """ Disassembly executable file return iterable instruction set. 
:param filename : Executable file path :type filename: str :param bits : File platform 16, 32 or 64. :type bits : str [16bit, 32bit, 64bit] (default:32bit) :return: assembly code iterator: :rtype: str """ mode = bits.replace('bit', '') diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode, filename]) return diasm.decode('utf-8') def getDisassembledCode(self, filename, delimeter='\n', bits='32bit'): """ Disassemble file and concatenete offset, size, hexcode and instruction into string result. :param filename: Binary file name :type filename: str :param delimeter: Line delimeter for instruction set :type delimeter: str :param bits: File platform 16, 32 or 64. :type bits: str [16bit, 32bit, 64bit] (default:32bit) :return assembly instruction list :rtype : str """ return self.diassemble(filename, bits).replace('\n', delimeter) def getAssemblyCode(self, filename, delimeter='\n', bits='32bit'): return self.diassemble(filename, bits).replace('\n', delimeter) def getAssemblyCodeList(self, filename, bits='32bit'): return self.diassemble(filename, bits).split('\n') <|reserved_special_token_0|> <|reserved_special_token_1|> import subprocess from dissamblerAbstract import disassemblerAbstract #lib/ZydisDisasm -64 /home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe class ZydisDisassembler(disassemblerAbstract): def diassemble(self,filename, bits='32bit'): """ Disassembly executable file return iterable instruction set. :param filename : Executable file path :type filename: str :param bits : File platform 16, 32 or 64. :type bits : str [16bit, 32bit, 64bit] (default:32bit) :return: assembly code iterator: :rtype: str """ mode = bits.replace("bit","") diasm = subprocess.check_output(['lib/ZydisDisasm',"-"+mode, filename]) return diasm.decode("utf-8") def getDisassembledCode(self,filename, delimeter='\n', bits='32bit'): """ Disassemble file and concatenete offset, size, hexcode and instruction into string result. 
:param filename: Binary file name :type filename: str :param delimeter: Line delimeter for instruction set :type delimeter: str :param bits: File platform 16, 32 or 64. :type bits: str [16bit, 32bit, 64bit] (default:32bit) :return assembly instruction list :rtype : str """ return self.diassemble(filename,bits).replace("\n",delimeter) def getAssemblyCode(self,filename, delimeter='\n', bits='32bit'): return self.diassemble(filename,bits).replace("\n",delimeter) def getAssemblyCodeList(self,filename, bits='32bit'): return self.diassemble(filename,bits).split("\n") ''' zydisDissambler = ZydisDisasembler() x = zydisDissambler.getDisassembledCode("/home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe") print(x) '''
flexible
{ "blob_id": "fedec397ac0346bad1790315b4f85fbb1a662a4e", "index": 9466, "step-1": "<mask token>\n\n\nclass ZydisDisassembler(disassemblerAbstract):\n\n def diassemble(self, filename, bits='32bit'):\n \"\"\"\n Disassembly executable file return iterable instruction set.\n\n :param filename : Executable file path\n :type filename: str\n :param bits : File platform 16, 32 or 64.\n :type bits : str [16bit, 32bit, 64bit] (default:32bit)\n :return: assembly code iterator:\n :rtype: str\n \"\"\"\n mode = bits.replace('bit', '')\n diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode,\n filename])\n return diasm.decode('utf-8')\n <mask token>\n <mask token>\n\n def getAssemblyCodeList(self, filename, bits='32bit'):\n return self.diassemble(filename, bits).split('\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ZydisDisassembler(disassemblerAbstract):\n\n def diassemble(self, filename, bits='32bit'):\n \"\"\"\n Disassembly executable file return iterable instruction set.\n\n :param filename : Executable file path\n :type filename: str\n :param bits : File platform 16, 32 or 64.\n :type bits : str [16bit, 32bit, 64bit] (default:32bit)\n :return: assembly code iterator:\n :rtype: str\n \"\"\"\n mode = bits.replace('bit', '')\n diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode,\n filename])\n return diasm.decode('utf-8')\n\n def getDisassembledCode(self, filename, delimeter='\\n', bits='32bit'):\n \"\"\"\n Disassemble file and concatenete offset, size, hexcode and instruction into string result.\n\n :param filename: Binary file name\n :type filename: str\n :param delimeter: Line delimeter for instruction set\n :type delimeter: str\n :param bits: File platform 16, 32 or 64.\n :type bits: str [16bit, 32bit, 64bit] (default:32bit)\n :return assembly instruction list\n :rtype : str\n \"\"\"\n return self.diassemble(filename, bits).replace('\\n', delimeter)\n <mask token>\n\n def getAssemblyCodeList(self, filename, bits='32bit'):\n return 
self.diassemble(filename, bits).split('\\n')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ZydisDisassembler(disassemblerAbstract):\n\n def diassemble(self, filename, bits='32bit'):\n \"\"\"\n Disassembly executable file return iterable instruction set.\n\n :param filename : Executable file path\n :type filename: str\n :param bits : File platform 16, 32 or 64.\n :type bits : str [16bit, 32bit, 64bit] (default:32bit)\n :return: assembly code iterator:\n :rtype: str\n \"\"\"\n mode = bits.replace('bit', '')\n diasm = subprocess.check_output(['lib/ZydisDisasm', '-' + mode,\n filename])\n return diasm.decode('utf-8')\n\n def getDisassembledCode(self, filename, delimeter='\\n', bits='32bit'):\n \"\"\"\n Disassemble file and concatenete offset, size, hexcode and instruction into string result.\n\n :param filename: Binary file name\n :type filename: str\n :param delimeter: Line delimeter for instruction set\n :type delimeter: str\n :param bits: File platform 16, 32 or 64.\n :type bits: str [16bit, 32bit, 64bit] (default:32bit)\n :return assembly instruction list\n :rtype : str\n \"\"\"\n return self.diassemble(filename, bits).replace('\\n', delimeter)\n\n def getAssemblyCode(self, filename, delimeter='\\n', bits='32bit'):\n return self.diassemble(filename, bits).replace('\\n', delimeter)\n\n def getAssemblyCodeList(self, filename, bits='32bit'):\n return self.diassemble(filename, bits).split('\\n')\n\n\n<mask token>\n", "step-4": "import subprocess\nfrom dissamblerAbstract import disassemblerAbstract\n\n\nclass ZydisDisassembler(disassemblerAbstract):\n\n def diassemble(self, filename, bits='32bit'):\n \"\"\"\n Disassembly executable file return iterable instruction set.\n\n :param filename : Executable file path\n :type filename: str\n :param bits : File platform 16, 32 or 64.\n :type bits : str [16bit, 32bit, 64bit] (default:32bit)\n :return: assembly code iterator:\n :rtype: str\n \"\"\"\n mode = bits.replace('bit', '')\n diasm = 
subprocess.check_output(['lib/ZydisDisasm', '-' + mode,\n filename])\n return diasm.decode('utf-8')\n\n def getDisassembledCode(self, filename, delimeter='\\n', bits='32bit'):\n \"\"\"\n Disassemble file and concatenete offset, size, hexcode and instruction into string result.\n\n :param filename: Binary file name\n :type filename: str\n :param delimeter: Line delimeter for instruction set\n :type delimeter: str\n :param bits: File platform 16, 32 or 64.\n :type bits: str [16bit, 32bit, 64bit] (default:32bit)\n :return assembly instruction list\n :rtype : str\n \"\"\"\n return self.diassemble(filename, bits).replace('\\n', delimeter)\n\n def getAssemblyCode(self, filename, delimeter='\\n', bits='32bit'):\n return self.diassemble(filename, bits).replace('\\n', delimeter)\n\n def getAssemblyCodeList(self, filename, bits='32bit'):\n return self.diassemble(filename, bits).split('\\n')\n\n\n<mask token>\n", "step-5": "import subprocess\nfrom dissamblerAbstract import disassemblerAbstract\n\n#lib/ZydisDisasm -64 /home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe\nclass ZydisDisassembler(disassemblerAbstract):\n\n def diassemble(self,filename, bits='32bit'):\n \"\"\"\n Disassembly executable file return iterable instruction set.\n\n :param filename : Executable file path\n :type filename: str\n :param bits : File platform 16, 32 or 64.\n :type bits : str [16bit, 32bit, 64bit] (default:32bit)\n :return: assembly code iterator:\n :rtype: str\n \"\"\"\n mode = bits.replace(\"bit\",\"\")\n diasm = subprocess.check_output(['lib/ZydisDisasm',\"-\"+mode, filename])\n return diasm.decode(\"utf-8\")\n\n def getDisassembledCode(self,filename, delimeter='\\n', bits='32bit'):\n \"\"\"\n Disassemble file and concatenete offset, size, hexcode and instruction into string result.\n\n :param filename: Binary file name\n :type filename: str\n :param delimeter: Line delimeter for instruction set\n :type delimeter: str\n :param bits: File platform 16, 32 
or 64.\n :type bits: str [16bit, 32bit, 64bit] (default:32bit)\n :return assembly instruction list\n :rtype : str\n \"\"\"\n\n return self.diassemble(filename,bits).replace(\"\\n\",delimeter)\n\n def getAssemblyCode(self,filename, delimeter='\\n', bits='32bit'):\n\n return self.diassemble(filename,bits).replace(\"\\n\",delimeter)\n\n\n def getAssemblyCodeList(self,filename, bits='32bit'):\n\n return self.diassemble(filename,bits).split(\"\\n\")\n\n\n'''\nzydisDissambler = ZydisDisasembler()\nx = zydisDissambler.getDisassembledCode(\"/home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe\")\nprint(x)\n'''", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# This script compares different base2number (DNA-to-number) mapping methods:
# each sequence is encoded with every mapping function, texture descriptors
# (FOS, GLCM, LBP, MLBP) are extracted, and pairwise distance matrices are
# built per descriptor family and per mapping function.

from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
#from matplotlib import pyplot as plt
#from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib import pyplot
import math
import os
import sys
import cv2
import numpy as np
import math
from scipy.stats import kurtosis, skew
from Bio import SeqIO
import pandas as pd
import seaborn as sns

# Project-local feature extractors, one per texture-descriptor family.
from descriptor import get_features
from descriptor import get_features_glcm
from descriptor import get_features_lbp
from descriptor import get_features_mlbp

from ete3 import PhyloTree, TreeStyle
from ete3 import Tree

from skbio import DistanceMatrix
from skbio.tree import nj

current_dir = os.path.dirname(os.path.abspath(__file__))

###################################################################################################################################
# Dataset db1: bacterial sequences.
# NOTE(review): the three dataset blocks below each rebind sequences/names/
# csv_mega/seq_file_full/results_file, so only the LAST block (db3) actually
# takes effect — the earlier two are dead assignments kept for quick switching.
###################################################################################################################################
sequences = [
'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna',
'NR_134817.fna', 'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna',
'NR_152063.fna', 'KP317497.fna', 'NR_156072.fna' ]

names = [ 'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris', 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']

csv_mega = current_dir + "/sample_genomes/seqs_db1_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db1.fasta"
results_file = current_dir + "/results/compare_features/db1"
###################################################################################################################################
# Dataset db2: primate sequences.
###################################################################################################################################
sequences = [
'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna', 'M22655.fna',
'M22656.fna', 'M22657.fna', 'V00658.fna', 'V00659.fna', 'V00672.fna', 'V00675.fna']

names = [ 'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']

csv_mega = current_dir + "/sample_genomes/seqs_db2_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db2.fasta"
results_file = current_dir + "/results/compare_features/db2"
###################################################################################################################################
# Dataset db3: mammal sequences (the dataset actually used; see NOTE above).
###################################################################################################################################
sequences = [ 'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna', 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna', 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']
names = [ 'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros', 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow', 'Rat', 'Mouse', 'Platypus']
csv_mega = current_dir + "/sample_genomes/seqs_db3_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db3.fasta"
results_file = current_dir + "/results/compare_features/db3"
###################################################################################################################################
###################################################################################################################################
# Per-descriptor feature containers: one entry per sequence, each entry holding
# one feature vector per mapping function.
data_features_fos = []
data_features_glcm = []
data_features_lbp = []
data_features_mlbp = []
mapping_function_size = 6 # there are 6 types of mapping functions

# Concatenate every individual FASTA file into one multi-FASTA (seq_file_full)
# while extracting features from each sequence.
f_out = open(seq_file_full, "w")
for sequence_file in sequences:
    f_in = open(current_dir + "/sample_genomes/" + sequence_file, "r")
    f_out.write(f_in.read())
    f_in.close()

    data = []
    fa_file = current_dir + "/sample_genomes/" + sequence_file
    seqs = SeqIO.parse(fa_file, "fasta")
    for record in seqs:
        data.append(record.seq.upper())
    seq = data[0]  # only the first record of each FASTA file is analyzed

    temp_fos = []
    temp_glcm = []
    temp_lbp = []
    temp_mlbp = []
    # here we evaluate each mapping function
    for mapping_type in range(mapping_function_size):
        # FOS: first-order statistics of the encoded sequence.
        skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
        temp_fos.append( [skewness, my_kurtosis, energy, entropy] )
        #rint("fos mapping=",mapping_type, [skewness, my_kurtosis, energy, entropy])

        # GLCM: gray-level co-occurrence matrix statistics.
        entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
        temp_glcm.append( [entropy, contrast, energy, correlation, homogeneity] )
        #print("glcm mapping=",mapping_type, [entropy, contrast, energy, correlation, homogeneity])

        # LBP / MLBP: local binary pattern histograms.
        hist_lbp = get_features_lbp(seq, mapping_type)
        temp_lbp.append( hist_lbp )
        #print("lbp mapping=",mapping_type, hist_lbp)

        hist_mlbp = get_features_mlbp(seq, mapping_type)
        temp_mlbp.append( hist_mlbp )
        #print("mlbp mapping=",mapping_type, hist_mlbp)

    data_features_fos.append(temp_fos)
    data_features_glcm.append(temp_glcm)
    data_features_lbp.append(temp_lbp)
    data_features_mlbp.append(temp_mlbp)

f_out.close()

# Resulting shape: (num_sequences, mapping_function_size, feature_dim).
data_features_fos = np.array(data_features_fos)
data_features_glcm = np.array(data_features_glcm)
data_features_lbp = np.array(data_features_lbp)
data_features_mlbp = np.array(data_features_mlbp)

###################################################################################################################3
# compute the distances with FOS features
###################################################################################################################
full_distances_fos = []
for mapping_type in range(mapping_function_size):
    # Euclidean distance between every pair of sequences (upper triangle only).
    DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))
    for i in range(data_features_fos.shape[0]):
        row = np.zeros(data_features_fos.shape[0])
        for j in range(i, data_features_fos.shape[0]):
            dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))
            row[j] = dist
        DIST_fos[i] = row

    # Mirror the upper triangle into a symmetric matrix, then min-max
    # normalize the whole matrix to [0, 1].
    DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
    DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))

    # Keep only the distances from the first sequence to all the others.
    full_distances_fos.append( DIST_fos[0,1:DIST_fos.shape[0]] )

full_distances_fos = np.array(full_distances_fos)
print("full_distances_fos", full_distances_fos.shape)

###################################################################################################################3
# compute the distances with GLCM features
###################################################################################################################
full_distances_glcm = []
for mapping_type in range(mapping_function_size):
    # Same pairwise-Euclidean / symmetrize / min-max scheme as the FOS block.
    DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))
    for i in range(data_features_glcm.shape[0]):
        row = np.zeros(data_features_glcm.shape[0])
        for j in range(i, data_features_glcm.shape[0]):
            dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))
            row[j] = dist
        DIST_glcm[i] = row

    DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
    DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))

    full_distances_glcm.append( DIST_glcm[0,1:DIST_glcm.shape[0]] )

full_distances_glcm = np.array(full_distances_glcm)
print("full_distances_glcm", full_distances_glcm.shape)

###################################################################################################################3
# compute the distances with LBP features
###################################################################################################################
full_distances_lbp = []
for mapping_type in range(mapping_function_size):
    DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))
    for i in
range(data_features_lbp.shape[0]): row = np.zeros(data_features_lbp.shape[0]) for j in range(i, data_features_lbp.shape[0]): dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2)) row[j] = dist DIST_lbp[i] = row DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp)) DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp)) full_distances_lbp.append( DIST_lbp[0,1:DIST_lbp.shape[0]] ) full_distances_lbp = np.array(full_distances_lbp) print("full_distances_lbp", full_distances_lbp.shape) ###################################################################################################################3 # procesamos las distancias con MLBP ################################################################################################################### full_distances_mlbp = [] for mapping_type in range(mapping_function_size): DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0])) for i in range(data_features_mlbp.shape[0]): row = np.zeros(data_features_mlbp.shape[0]) for j in range(i, data_features_mlbp.shape[0]): dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type])**2)) row[j] = dist DIST_mlbp[i] = row DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp)) DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp)) full_distances_mlbp.append( DIST_mlbp[0,1:DIST_mlbp.shape[0]] ) full_distances_mlbp = np.array(full_distances_mlbp) print("full_distances_mlbp", full_distances_mlbp.shape) ################################################################################################################### ### distances from mega ########################################################### ################################################################################################################### mega_dist_csv = pd.read_csv(csv_mega) mega_dist_csv = 
mega_dist_csv.set_index(mega_dist_csv.columns[0]) DIST_mega = mega_dist_csv.values DIST_mega[np.isnan(DIST_mega)] = 0 # lllenamos con ceros los valores nan DIST_mega = DIST_mega + DIST_mega.T #copiamos el triangulo inferior al superir en la matriz distances_mega = DIST_mega[0,1:DIST_mega.shape[0]] distances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega)) ################################################################################################################### ################################################################################################################### names_temp = np.array(sequences) names_temp = names_temp[1:names_temp.shape[0]] # eliminamos el primer elemento ###################################################################################################################3 # procesamos las distancias con FOS ################################################################################################################### plt.clf() fig, axs = plt.subplots(3,2) axs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0') axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,0].legend(loc='upper right', fontsize=6) axs[0,1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1') axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,1].legend(loc='upper right', fontsize=6) axs[1,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2') axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,0].legend(loc='upper right', fontsize=6) axs[1,1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3') axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,1].legend(loc='upper right', fontsize=6) axs[2,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4') axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,0].legend(loc='upper right', 
fontsize=6) axs[2,1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5') axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,1].legend(loc='upper right', fontsize=6) for ax in axs.flat: ax.label_outer() ax.yaxis.set_tick_params(labelsize=6) plt.sca(ax) plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 ) plt.xlabel('Sequences', fontsize=6) fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 ) plt.savefig( results_file + "_fos.png", dpi = 200, bbox_inches='tight') ################################################################################################################### # procesamos las distancias con GLCM ################################################################################################################### plt.clf() fig, axs = plt.subplots(3,2) axs[0,0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0') axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,0].legend(loc='upper right', fontsize=6) axs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1') axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,1].legend(loc='upper right', fontsize=6) axs[1,0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2') axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,0].legend(loc='upper right', fontsize=6) axs[1,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3') axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,1].legend(loc='upper right', fontsize=6) axs[2,0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4') axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,0].legend(loc='upper right', fontsize=6) axs[2,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5') axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,1].legend(loc='upper right', 
fontsize=6) for ax in axs.flat: ax.label_outer() ax.yaxis.set_tick_params(labelsize=6) plt.sca(ax) plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 ) plt.xlabel('Sequences', fontsize=6) fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 ) plt.savefig( results_file + "_glcm.png", dpi = 200, bbox_inches='tight') ################################################################################################################### # procesamos las distancias con LBP ################################################################################################################### plt.clf() fig, axs = plt.subplots(3,2) axs[0,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0') axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,0].legend(loc='upper right', fontsize=6) axs[0,1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1') axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,1].legend(loc='upper right', fontsize=6) axs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2') axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,0].legend(loc='upper right', fontsize=6) axs[1,1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3') axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,1].legend(loc='upper right', fontsize=6) axs[2,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4') axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,0].legend(loc='upper right', fontsize=6) axs[2,1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5') axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,1].legend(loc='upper right', fontsize=6) for ax in axs.flat: ax.label_outer() ax.yaxis.set_tick_params(labelsize=6) plt.sca(ax) plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 ) 
plt.xlabel('Sequences', fontsize=6) fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 ) plt.savefig( results_file + "_lbp.png", dpi = 200, bbox_inches='tight') ################################################################################################################### # procesamos las distancias con MLBP ################################################################################################################### plt.clf() fig, axs = plt.subplots(3,2) axs[0,0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0') axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,0].legend(loc='upper right', fontsize=6) axs[0,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1') axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[0,1].legend(loc='upper right', fontsize=6) axs[1,0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2') axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,0].legend(loc='upper right', fontsize=6) axs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3') axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[1,1].legend(loc='upper right', fontsize=6) axs[2,0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4') axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,0].legend(loc='upper right', fontsize=6) axs[2,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5') axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW') axs[2,1].legend(loc='upper right', fontsize=6) for ax in axs.flat: ax.label_outer() ax.yaxis.set_tick_params(labelsize=6) plt.sca(ax) plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 ) plt.xlabel('Sequences', fontsize=6) fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 ) plt.savefig( results_file + "_mlbp.png", dpi = 200, bbox_inches='tight') 
###################################################################################################################
# Mean squared error of each descriptor/mapping profile w.r.t. CLUSTALW,
# exported as a 6x4 table (rows: mapping functions, columns: descriptors).
###################################################################################################################
def _mse_per_mapping(profiles):
    """MSE of each mapping-function profile against the CLUSTALW reference."""
    return [np.sum((profiles[m] - distances_mega) ** 2) / distances_mega.shape[0]
            for m in range(mapping_function_size)]


error_fos = _mse_per_mapping(full_distances_fos)    # error per mapping, FOS
error_glcm = _mse_per_mapping(full_distances_glcm)  # error per mapping, GLCM
error_lbp = _mse_per_mapping(full_distances_lbp)    # error per mapping, LBP
error_mlbp = _mse_per_mapping(full_distances_mlbp)  # error per mapping, MLBP

data_csv = np.array([error_fos, error_glcm, error_lbp, error_mlbp])
df = pd.DataFrame(data=data_csv.T,
                  index=["map0", "map1", "map2", "map3", "map4", "map5"],
                  columns=["FOS", "GLCM", "LBP", "MLBP"])
print(df)
df.to_csv(results_file + ".csv", index=True)


###################################################################################################################
# One comparison figure per mapping function: the four descriptor families
# against the CLUSTALW reference on a 2x2 grid.
###################################################################################################################
def _plot_mapping_comparison(mapping_type):
    """Plot FOS/GLCM/LBP/MLBP profiles of one mapping function vs CLUSTALW."""
    series = [
        ('FOS-MAP%d' % mapping_type, full_distances_fos[mapping_type]),
        ('GLCM-MAP%d' % mapping_type, full_distances_glcm[mapping_type]),
        ('LBP-MAP%d' % mapping_type, full_distances_lbp[mapping_type]),
        ('MLBP-MAP%d' % mapping_type, full_distances_mlbp[mapping_type]),
    ]
    plt.clf()
    fig, axs = plt.subplots(2, 2)
    for ax, (label, profile) in zip(axs.flat, series):
        ax.plot(names_temp, profile, 'b--', label=label)
        ax.plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
        ax.legend(loc='upper right', fontsize=6)
    for ax in axs.flat:
        ax.label_outer()
        ax.yaxis.set_tick_params(labelsize=6)
        plt.sca(ax)
        plt.xticks(rotation=45, horizontalalignment='right',
                   fontweight='light', fontsize=6)
        plt.xlabel('Sequences', fontsize=6)
    fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
    plt.savefig(results_file + "_%dmap.png" % mapping_type, dpi=200,
                bbox_inches='tight')


for _mapping_type in range(mapping_function_size):
    _plot_mapping_comparison(_mapping_type)
normal
{ "blob_id": "9696e5799d46adb5b92c0900e2064b927addfd93", "index": 2506, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\n<mask token>\nprint('full_distances_fos', full_distances_fos.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = 
np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\n<mask token>\nprint('full_distances_glcm', full_distances_glcm.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\n<mask token>\nprint('full_distances_lbp', full_distances_lbp.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n 
full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\n<mask token>\nprint('full_distances_mlbp', full_distances_mlbp.shape)\n<mask token>\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', 
label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 
1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 
'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\n<mask token>\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n<mask token>\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n 
ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', 
label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 
0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, 
horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-3": "<mask token>\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 
'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = 
np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 
1:DIST_lbp.shape[0]])\nfull_distances_lbp = np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', 
fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 
1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', 
fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = []\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n 
error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 
0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n 
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 
0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-4": "from sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nimport 
matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 
'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = 
np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n 
DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\nfull_distances_lbp = np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper 
right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', 
fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in 
axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = 
[]\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', 
fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 
1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 
1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', 
fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-5": "# este script comprar diferente metodos de base2number\n\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import train_test_split\n#from matplotlib import pyplot as plt\n#from matplotlib import cm\nimport matplotlib.pyplot as plt \nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\n\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\n\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\n\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\n\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna', \n 'NR_134817.fna', 'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', \n 'NR_152063.fna', 'KP317497.fna', 'NR_156072.fna' ]\n\nnames = [ 'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis', \n 'S.Cameli', 'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris', \n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db1_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db1.fasta\"\nresults_file = current_dir + 
\"/results/compare_features/db1\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna', \n 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna', 'V00659.fna', \n 'V00672.fna', 'V00675.fna']\n\nnames = [ 'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus', \n 'Saimiri sciureus', 'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates', \n 'Chimpanzee', 'Sumatran Orangutan']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db2_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db2.fasta\"\nresults_file = current_dir + \"/results/compare_features/db2\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna', \n 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna', 'X63726.fna', \n 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna', 'V00654.fna', \n 'X14848.fna', 'V00711.fna', 'X83427.fna']\n\nnames = [ 'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan', \n 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros', 'Harbor seal', \n 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow', \n 'Rat', 'Mouse', 'Platypus']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db3_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db3.fasta\"\nresults_file = current_dir + 
\"/results/compare_features/db3\"\n\n###################################################################################################################################\n###################################################################################################################################\n\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\n\nmapping_function_size = 6 # trere is 6 types of mapping functions\n\nf_out = open(seq_file_full, \"w\")\n\nfor sequence_file in sequences:\n\n f_in = open(current_dir + \"/sample_genomes/\" + sequence_file, \"r\")\n f_out.write(f_in.read())\n f_in.close()\n\n data = [] \n fa_file = current_dir + \"/sample_genomes/\" + sequence_file\n seqs = SeqIO.parse(fa_file, \"fasta\")\n for record in seqs:\n data.append(record.seq.upper()) \n\n seq = data[0] \n\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n # here we evaluate each mapping funciton\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)\n temp_fos.append( [skewness, my_kurtosis, energy, entropy] )\n #rint(\"fos mapping=\",mapping_type, [skewness, my_kurtosis, energy, entropy])\n\n entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)\n temp_glcm.append( [entropy, contrast, energy, correlation, homogeneity] )\n #print(\"glcm mapping=\",mapping_type, [entropy, contrast, energy, correlation, homogeneity])\n\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append( hist_lbp )\n #print(\"lbp mapping=\",mapping_type, hist_lbp)\n\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append( hist_mlbp )\n #print(\"mlbp mapping=\",mapping_type, hist_mlbp)\n\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\n\nf_out.close()\n\ndata_features_fos = 
np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\n\n###################################################################################################################3\n# procesamos las distancias con FOS\n###################################################################################################################\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))\n row[j] = dist \n DIST_fos[i] = row\n\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))\n full_distances_fos.append( DIST_fos[0,1:DIST_fos.shape[0]] )\n\nfull_distances_fos = np.array(full_distances_fos)\nprint(\"full_distances_fos\", full_distances_fos.shape)\n\n###################################################################################################################3\n# procesamos las distancias con GLCM\n###################################################################################################################\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))\n row[j] = dist \n DIST_glcm[i] = row\n\n DIST_glcm = DIST_glcm + DIST_glcm.T - 
np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))\n full_distances_glcm.append( DIST_glcm[0,1:DIST_glcm.shape[0]] )\n\nfull_distances_glcm = np.array(full_distances_glcm)\nprint(\"full_distances_glcm\", full_distances_glcm.shape)\n\n###################################################################################################################3\n# procesamos las distancias con LBP\n###################################################################################################################\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2))\n row[j] = dist \n DIST_lbp[i] = row\n\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))\n full_distances_lbp.append( DIST_lbp[0,1:DIST_lbp.shape[0]] )\n\nfull_distances_lbp = np.array(full_distances_lbp)\nprint(\"full_distances_lbp\", full_distances_lbp.shape)\n\n###################################################################################################################3\n# procesamos las distancias con MLBP\n###################################################################################################################\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - 
data_features_mlbp[j][mapping_type])**2))\n row[j] = dist \n DIST_mlbp[i] = row\n\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))\n full_distances_mlbp.append( DIST_mlbp[0,1:DIST_mlbp.shape[0]] )\n\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint(\"full_distances_mlbp\", full_distances_mlbp.shape)\n\n###################################################################################################################\n### distances from mega ###########################################################\n###################################################################################################################\nmega_dist_csv = pd.read_csv(csv_mega) \nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0 # lllenamos con ceros los valores nan\nDIST_mega = DIST_mega + DIST_mega.T #copiamos el triangulo inferior al superir en la matriz\ndistances_mega = DIST_mega[0,1:DIST_mega.shape[0]]\n\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega))\n###################################################################################################################\n###################################################################################################################\n\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]] # eliminamos el primer elemento\n\n###################################################################################################################3\n# procesamos las distancias con FOS\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 
'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_fos.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con GLCM\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', 
label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_glcm.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con LBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', 
fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_lbp.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con MLBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', 
label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_mlbp.png\", dpi = 200, bbox_inches='tight')\n\n\ndata_csv = []\nerror_fos = [] # save the error for each mappoing function with FOS\nerror_glcm = [] # save the error for each mappoing function with GLCM\nerror_lbp = [] # save the error for each mappoing function with LBP\nerror_mlbp = [] # save the error for each mappoing function with MLBP\nfor mapping_type in range(mapping_function_size):\n error_fos.append((np.sum((full_distances_fos[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_glcm.append((np.sum((full_distances_glcm[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_lbp.append((np.sum((full_distances_lbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_mlbp.append((np.sum((full_distances_mlbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n\ndata_csv = np.array(data_csv)\ndf = 
pd.DataFrame(data=data_csv.T, index=[\"map0\", \"map1\", \"map2\", \"map3\", \"map4\", \"map5\"], columns=[\"FOS\", \"GLCM\", \"LBP\", \"MLBP\"])\nprint(df)\ndf.to_csv(results_file + \".csv\", index=True)\n#print(error_fos)\n#print(error_glcm)\n#print(error_lbp)\n#print(error_mlbp)\n\n\n\n###################################################################################################################\n# proccesing a MAPPING 0 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_0map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 1 funciton with the all 
algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_1map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 2 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', 
label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_2map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 3 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n 
ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_3map.png\", dpi = 200, bbox_inches='tight')\n\n\n###################################################################################################################\n# proccesing a MAPPING 4 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_4map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 5 funciton with the all 
algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_5map.png\", dpi = 200, bbox_inches='tight')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def list_types(): return ['application/tar'] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def list_types(): return ['application/tar'] def handle_part(data, ctype, filename, payload): if ctype == 'application/tar': dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0]) if not os.path.exists(dir): os.makedirs(dir) buf = StringIO.StringIO(payload) tar = tarfile.open(name=filename, fileobj=buf) tar.extractall(dir) tar.close() buf.close() <|reserved_special_token_1|> <|reserved_special_token_0|> CACHE_DIR = '/var/cache/cloud' def list_types(): return ['application/tar'] def handle_part(data, ctype, filename, payload): if ctype == 'application/tar': dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0]) if not os.path.exists(dir): os.makedirs(dir) buf = StringIO.StringIO(payload) tar = tarfile.open(name=filename, fileobj=buf) tar.extractall(dir) tar.close() buf.close() <|reserved_special_token_1|> import os import StringIO import tarfile CACHE_DIR = '/var/cache/cloud' def list_types(): return ['application/tar'] def handle_part(data, ctype, filename, payload): if ctype == 'application/tar': dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0]) if not os.path.exists(dir): os.makedirs(dir) buf = StringIO.StringIO(payload) tar = tarfile.open(name=filename, fileobj=buf) tar.extractall(dir) tar.close() buf.close() <|reserved_special_token_1|> #part-handler # vi: syntax=python ts=4 # # Copyright (C) 2012 Silpion IT-Solutions GmbH # # Author: Malte Stretz <stretz@silpion.de> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import StringIO import tarfile CACHE_DIR = '/var/cache/cloud' def list_types(): return(['application/tar']) def handle_part(data, ctype, filename, payload): if ctype == ('application/tar'): dir = "%s/%s" % (CACHE_DIR, os.path.splitext(filename)[0]) if not os.path.exists(dir): os.makedirs(dir) buf = StringIO.StringIO(payload) tar = tarfile.open(name=filename, fileobj=buf) tar.extractall(dir) tar.close() buf.close()
flexible
{ "blob_id": "98b27c268fe1f47a899269e988ddf798faf827df", "index": 8401, "step-1": "<mask token>\n\n\ndef list_types():\n return ['application/tar']\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef list_types():\n return ['application/tar']\n\n\ndef handle_part(data, ctype, filename, payload):\n if ctype == 'application/tar':\n dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0])\n if not os.path.exists(dir):\n os.makedirs(dir)\n buf = StringIO.StringIO(payload)\n tar = tarfile.open(name=filename, fileobj=buf)\n tar.extractall(dir)\n tar.close()\n buf.close()\n", "step-3": "<mask token>\nCACHE_DIR = '/var/cache/cloud'\n\n\ndef list_types():\n return ['application/tar']\n\n\ndef handle_part(data, ctype, filename, payload):\n if ctype == 'application/tar':\n dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0])\n if not os.path.exists(dir):\n os.makedirs(dir)\n buf = StringIO.StringIO(payload)\n tar = tarfile.open(name=filename, fileobj=buf)\n tar.extractall(dir)\n tar.close()\n buf.close()\n", "step-4": "import os\nimport StringIO\nimport tarfile\nCACHE_DIR = '/var/cache/cloud'\n\n\ndef list_types():\n return ['application/tar']\n\n\ndef handle_part(data, ctype, filename, payload):\n if ctype == 'application/tar':\n dir = '%s/%s' % (CACHE_DIR, os.path.splitext(filename)[0])\n if not os.path.exists(dir):\n os.makedirs(dir)\n buf = StringIO.StringIO(payload)\n tar = tarfile.open(name=filename, fileobj=buf)\n tar.extractall(dir)\n tar.close()\n buf.close()\n", "step-5": "#part-handler\n# vi: syntax=python ts=4\n#\n# Copyright (C) 2012 Silpion IT-Solutions GmbH\n#\n# Author: Malte Stretz <stretz@silpion.de>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3, as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or 
FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nimport StringIO\nimport tarfile\n\nCACHE_DIR = '/var/cache/cloud'\n\ndef list_types():\n return(['application/tar'])\n\ndef handle_part(data, ctype, filename, payload):\n if ctype == ('application/tar'):\n dir = \"%s/%s\" % (CACHE_DIR, os.path.splitext(filename)[0])\n if not os.path.exists(dir):\n os.makedirs(dir)\n buf = StringIO.StringIO(payload)\n tar = tarfile.open(name=filename, fileobj=buf)\n tar.extractall(dir)\n tar.close()\n buf.close()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> print(10 - 10) print(1000 - 80) print(10 / 5) print(10 / 6) print(10 // 6) print(10 * 800) print(55 * 5) <|reserved_special_token_1|> print(10-10) print(1000-80) print(10/5) print(10/6) print(10//6) # remoção das casas decimais print(10*800) print(55*5)
flexible
{ "blob_id": "e488761c15ee8cddbb7577d5340ee9001193c1a4", "index": 4767, "step-1": "<mask token>\n", "step-2": "print(10 - 10)\nprint(1000 - 80)\nprint(10 / 5)\nprint(10 / 6)\nprint(10 // 6)\nprint(10 * 800)\nprint(55 * 5)\n", "step-3": "print(10-10)\r\nprint(1000-80)\r\nprint(10/5)\r\nprint(10/6)\r\nprint(10//6) # remoção das casas decimais\r\n\r\nprint(10*800)\r\nprint(55*5)\r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
def countdown(n): def next(): nonlocal n r = n n -= 1 return r return next a = countdown(12) while True: v = a() if not v: break
normal
{ "blob_id": "01eef391f6d37d1e74cb032c5b27e1d8fc4395da", "index": 6122, "step-1": "<mask token>\n", "step-2": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\n<mask token>\n", "step-3": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\n<mask token>\nwhile True:\n v = a()\n if not v:\n break\n", "step-4": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\na = countdown(12)\nwhile True:\n v = a()\n if not v:\n break\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open(file_one) as fo: reader = csv.reader(fo) header = next(reader) album = {} dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [ ], [] for row in reader: if row[2].strip() == "Harm's Way": dates.append(float(row[0].strip())) cd_income.append(int(float(row[4].strip()))) dd_income.append(int(float(row[5].strip()))) total_profit.append(int(float(row[7].strip()))) artist_payout.append(int(float(row[8].strip()))) else: pass <|reserved_special_token_0|> for key, value in album.items(): print(f'{key}: {value}') plt.style.use('seaborn') <|reserved_special_token_0|> ax.plot(album['period of sales'], album['dd_income_data'], c='red') ax.plot(album['period of sales'], album['cd_income_data'], c='blue') plt.title('{} Sales - All Time'.format(album['title'])) plt.xlabel('', fontsize=16) fig.autofmt_xdate() plt.ylabel('CD (blue) and DD (red)', fontsize=16) plt.tick_params(axis='both', which='major', labelsize=16) <|reserved_special_token_1|> <|reserved_special_token_0|> file_one = 'data/dwifh_all_sales.csv' file_two = 'data/dwifh_bc_sales.csv' with open(file_one) as fo: reader = csv.reader(fo) header = next(reader) album = {} dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [ ], [] for row in reader: if row[2].strip() == "Harm's Way": dates.append(float(row[0].strip())) cd_income.append(int(float(row[4].strip()))) dd_income.append(int(float(row[5].strip()))) total_profit.append(int(float(row[7].strip()))) artist_payout.append(int(float(row[8].strip()))) else: pass album_alltime_profit = sum(total_profit) artist_alltime_payout = sum(artist_payout) album['title'] = "Harm's Way" album['period of sales'] = dates album['cd_income_data'] = cd_income album['dd_income_data'] = dd_income album['all_time_profit'] = album_alltime_profit album['all_time_payout'] = artist_alltime_payout for key, value in album.items(): print(f'{key}: {value}') 
plt.style.use('seaborn') fig, ax = plt.subplots() ax.plot(album['period of sales'], album['dd_income_data'], c='red') ax.plot(album['period of sales'], album['cd_income_data'], c='blue') plt.title('{} Sales - All Time'.format(album['title'])) plt.xlabel('', fontsize=16) fig.autofmt_xdate() plt.ylabel('CD (blue) and DD (red)', fontsize=16) plt.tick_params(axis='both', which='major', labelsize=16) <|reserved_special_token_1|> import csv from matplotlib import pyplot as plt from datetime import datetime file_one = 'data/dwifh_all_sales.csv' file_two = 'data/dwifh_bc_sales.csv' with open(file_one) as fo: reader = csv.reader(fo) header = next(reader) album = {} dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [ ], [] for row in reader: if row[2].strip() == "Harm's Way": dates.append(float(row[0].strip())) cd_income.append(int(float(row[4].strip()))) dd_income.append(int(float(row[5].strip()))) total_profit.append(int(float(row[7].strip()))) artist_payout.append(int(float(row[8].strip()))) else: pass album_alltime_profit = sum(total_profit) artist_alltime_payout = sum(artist_payout) album['title'] = "Harm's Way" album['period of sales'] = dates album['cd_income_data'] = cd_income album['dd_income_data'] = dd_income album['all_time_profit'] = album_alltime_profit album['all_time_payout'] = artist_alltime_payout for key, value in album.items(): print(f'{key}: {value}') plt.style.use('seaborn') fig, ax = plt.subplots() ax.plot(album['period of sales'], album['dd_income_data'], c='red') ax.plot(album['period of sales'], album['cd_income_data'], c='blue') plt.title('{} Sales - All Time'.format(album['title'])) plt.xlabel('', fontsize=16) fig.autofmt_xdate() plt.ylabel('CD (blue) and DD (red)', fontsize=16) plt.tick_params(axis='both', which='major', labelsize=16) <|reserved_special_token_1|> import csv from matplotlib import pyplot as plt from datetime import datetime file_one = 'data/dwifh_all_sales.csv' file_two = 'data/dwifh_bc_sales.csv' # create 
code to automatically build a dictionary for each album? with open(file_one) as fo: reader = csv.reader(fo) header = next(reader) album = {} dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], [] for row in reader: if row[2].strip() == 'Harm\'s Way': dates.append(float(row[0].strip())) cd_income.append(int(float(row[4].strip()))) dd_income.append(int(float(row[5].strip()))) total_profit.append(int(float(row[7].strip()))) artist_payout.append(int(float(row[8].strip()))) else: pass album_alltime_profit = sum(total_profit) artist_alltime_payout = sum(artist_payout) # complete the dictionary for this album album['title'] = 'Harm\'s Way' album['period of sales'] = dates album['cd_income_data'] = cd_income album['dd_income_data'] = dd_income album['all_time_profit'] = album_alltime_profit album['all_time_payout'] = artist_alltime_payout for key, value in album.items(): print(f'{key}: {value}') plt.style.use('seaborn') fig, ax = plt.subplots() ax.plot(album['period of sales'], album['dd_income_data'], c='red') ax.plot(album['period of sales'], album['cd_income_data'], c = 'blue') plt.title('{} Sales - All Time'.format(album['title'])) plt.xlabel('', fontsize=16) fig.autofmt_xdate() plt.ylabel('CD (blue) and DD (red)', fontsize=16) plt.tick_params(axis='both', which='major', labelsize=16) #plt.show() # TASK: # 1. get the names of the albums from the .csv file and store # them in a list. make sure there are no duplicates. # parse the csv file and create a dictionary for each album, # assigning it the name taken from the name list. # use: for album in album_list: so the process is done once # for each album name. # the dict created for each album contains all the data pulled # from the csv file. create the dict, then append it to # a list of dicts. this list will, when done, contain four # dictionaries, one for each album. 
# but since it's done in a loop, all four dicts get created # automatically, but they contain different data, respective to # each album.
flexible
{ "blob_id": "53380810a3d9787fe7c373cf1829f2d849a91c3c", "index": 8456, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n<mask token>\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\n<mask token>\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-3": "<mask token>\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, 
value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-4": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-5": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import 
datetime\n\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\n\n# create code to automatically build a dictionary for each album?\n\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], []\n\n for row in reader:\n if row[2].strip() == 'Harm\\'s Way':\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\n\n# complete the dictionary for this album\nalbum['title'] = 'Harm\\'s Way'\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\n\nfor key, value in album.items():\n print(f'{key}: {value}')\n\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c = 'blue')\n\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n\n#plt.show()\n\n# TASK:\n# 1. get the names of the albums from the .csv file and store\n# them in a list. make sure there are no duplicates.\n\n# parse the csv file and create a dictionary for each album,\n# assigning it the name taken from the name list.\n# use: for album in album_list: so the process is done once\n# for each album name.\n# the dict created for each album contains all the data pulled\n# from the csv file. create the dict, then append it to\n# a list of dicts. 
this list will, when done, contain four\n# dictionaries, one for each album.\n# but since it's done in a loop, all four dicts get created\n# automatically, but they contain different data, respective to\n# each album.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @utils.part def part_1(): commands = ['south', 'take food ration', 'west', 'north', 'north', 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north', 'east', 'south', 'take weather machine', 'west', 'take ornament', 'east', 'north', 'east', 'east', 'east', 'south'] run(commands=commands) <|reserved_special_token_1|> <|reserved_special_token_0|> def run(commands=None): memory = utils.get_input()[0] initial_inputs = intcode.commands_to_input(commands or []) program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER) while True: _, return_signal = program.run() for output in program.yield_outputs(): try: print(chr(output), end='') except ValueError: print(output) if return_signal == intcode.ReturnSignal.AWAITING_INPUT: program.add_inputs(*intcode.commands_to_input([input()])) elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT: return else: raise Exception(f'Unexpected return signal {return_signal}') @utils.part def part_1(): commands = ['south', 'take food ration', 'west', 'north', 'north', 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north', 'east', 'south', 'take weather machine', 'west', 'take ornament', 'east', 'north', 'east', 'east', 'east', 'south'] run(commands=commands) <|reserved_special_token_1|> import utils from problems_2019 import intcode def run(commands=None): memory = utils.get_input()[0] initial_inputs = intcode.commands_to_input(commands or []) program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER) while True: _, return_signal = program.run() for output in program.yield_outputs(): try: print(chr(output), end='') except ValueError: print(output) if return_signal == intcode.ReturnSignal.AWAITING_INPUT: program.add_inputs(*intcode.commands_to_input([input()])) elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT: return else: raise 
Exception(f'Unexpected return signal {return_signal}') @utils.part def part_1(): commands = ['south', 'take food ration', 'west', 'north', 'north', 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north', 'east', 'south', 'take weather machine', 'west', 'take ornament', 'east', 'north', 'east', 'east', 'east', 'south'] run(commands=commands) <|reserved_special_token_1|> import utils from problems_2019 import intcode def run(commands=None): memory = utils.get_input()[0] initial_inputs = intcode.commands_to_input(commands or []) program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER) while True: _, return_signal = program.run() for output in program.yield_outputs(): try: print(chr(output), end='') except ValueError: print(output) if return_signal == intcode.ReturnSignal.AWAITING_INPUT: # Run in interactive mode if more commands needed program.add_inputs(*intcode.commands_to_input([input()])) elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT: return else: raise Exception(f'Unexpected return signal {return_signal}') @utils.part def part_1(): commands = [ 'south', 'take food ration', 'west', 'north', 'north', 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north', 'east', 'south', 'take weather machine', 'west', 'take ornament', 'east', 'north', 'east', 'east', 'east', 'south', ] run(commands=commands)
flexible
{ "blob_id": "e3aa38b5d01823ed27bca65331e9c7315238750a", "index": 8974, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@utils.part\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n", "step-3": "<mask token>\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs,\n output_mode=intcode.OutputMode.BUFFER)\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\n@utils.part\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n", "step-4": "import utils\nfrom problems_2019 import intcode\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs,\n output_mode=intcode.OutputMode.BUFFER)\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n 
program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\n@utils.part\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n", "step-5": "import utils\n\nfrom problems_2019 import intcode\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER)\n\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n # Run in interactive mode if more commands needed\n program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\n@utils.part\ndef part_1():\n commands = [\n 'south',\n 'take food ration',\n 'west',\n 'north',\n 'north',\n 'east',\n 'take astrolabe',\n 'west',\n 'south',\n 'south',\n 'east',\n 'north',\n 'east',\n 'south',\n 'take weather machine',\n 'west',\n 'take ornament',\n 'east',\n 'north',\n 'east',\n 'east',\n 'east',\n 'south',\n ]\n run(commands=commands)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def parse_and_print_args(): fields = None in_args = None if request.args is not None: in_args = dict(copy.copy(request.args)) fields = copy.copy(in_args.get('fields', None)) if fields: del in_args['fields'] offset = copy.copy(in_args.get('offset', None)) if offset: del in_args['offset'] limit = copy.copy(in_args.get('limit', None)) if limit: del in_args['limit'] try: if request.data: body = json.loads(request.data) else: body = None except Exception as e: print('exception here is: ', e) body = None print('Request.args : ', json.dumps(in_args)) return in_args, fields, body, limit, offset @app.route('/api/<resource>', methods=['GET', 'POST']) def Basic_resource(resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_template(resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(resource, body) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE']) def Specific_resource(resource, primary_key): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_primary_key(resource, primary_key, fields) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'PUT': result = SimpleBO.Update(resource, body, primary_key) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'DELETE': result = SimpleBO.Delete(resource, primary_key) 
return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[ 'GET', 'POST']) def related_resource(resource, primary_key, related_resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_fk(resource, primary_key, related_resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root all_resource = resource + '/' + primary_key + '/' + related_resource links = SimpleBO.generate_links(url, url_root, all_resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(related_resource, body) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/teammates/<playerid>', methods=['GET']) def get_teammates(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_teammates(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'teammates/' + playerid links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} @app.route('/api/people/<playerid>/career_stats', methods=['GET']) def get_career_stats(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_career_stats(playerid, limit, offset) url = request.url url_root = request.url_root 
resource = 'people/' + playerid + '/career_stats' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/roster', methods=['GET']) def get_roster(): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_roster(in_args, limit, offset) url = request.url url_root = request.url_root resource = 'roster' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def parse_and_print_args(): fields = None in_args = None if request.args is not None: in_args = dict(copy.copy(request.args)) fields = copy.copy(in_args.get('fields', None)) if fields: del in_args['fields'] offset = copy.copy(in_args.get('offset', None)) if offset: del in_args['offset'] limit = copy.copy(in_args.get('limit', None)) if limit: del in_args['limit'] try: if request.data: body = json.loads(request.data) else: body = None except Exception as e: print('exception here is: ', e) body = None print('Request.args : ', json.dumps(in_args)) return in_args, fields, body, limit, offset @app.route('/api/<resource>', methods=['GET', 'POST']) def Basic_resource(resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_template(resource, in_args, fields, limit, offset) url 
= request.url url_root = request.url_root links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(resource, body) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE']) def Specific_resource(resource, primary_key): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_primary_key(resource, primary_key, fields) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'PUT': result = SimpleBO.Update(resource, body, primary_key) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'DELETE': result = SimpleBO.Delete(resource, primary_key) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[ 'GET', 'POST']) def related_resource(resource, primary_key, related_resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_fk(resource, primary_key, related_resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root all_resource = resource + '/' + primary_key + '/' + related_resource links = SimpleBO.generate_links(url, url_root, all_resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': 
result = SimpleBO.Insert(related_resource, body) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/teammates/<playerid>', methods=['GET']) def get_teammates(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_teammates(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'teammates/' + playerid links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} @app.route('/api/people/<playerid>/career_stats', methods=['GET']) def get_career_stats(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_career_stats(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'people/' + playerid + '/career_stats' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/roster', methods=['GET']) def get_roster(): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_roster(in_args, limit, offset) url = request.url url_root = request.url_root resource = 'roster' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 
'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) if __name__ == '__main__': app.run() <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) def parse_and_print_args(): fields = None in_args = None if request.args is not None: in_args = dict(copy.copy(request.args)) fields = copy.copy(in_args.get('fields', None)) if fields: del in_args['fields'] offset = copy.copy(in_args.get('offset', None)) if offset: del in_args['offset'] limit = copy.copy(in_args.get('limit', None)) if limit: del in_args['limit'] try: if request.data: body = json.loads(request.data) else: body = None except Exception as e: print('exception here is: ', e) body = None print('Request.args : ', json.dumps(in_args)) return in_args, fields, body, limit, offset @app.route('/api/<resource>', methods=['GET', 'POST']) def Basic_resource(resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_template(resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(resource, body) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE']) def Specific_resource(resource, primary_key): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_primary_key(resource, primary_key, fields) return json.dumps(result), 200, {'content-type': 'application/json; 
charset:utf-8'} elif request.method == 'PUT': result = SimpleBO.Update(resource, body, primary_key) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'DELETE': result = SimpleBO.Delete(resource, primary_key) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[ 'GET', 'POST']) def related_resource(resource, primary_key, related_resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_fk(resource, primary_key, related_resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root all_resource = resource + '/' + primary_key + '/' + related_resource links = SimpleBO.generate_links(url, url_root, all_resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(related_resource, body) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/teammates/<playerid>', methods=['GET']) def get_teammates(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_teammates(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'teammates/' + playerid links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} 
@app.route('/api/people/<playerid>/career_stats', methods=['GET']) def get_career_stats(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_career_stats(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'people/' + playerid + '/career_stats' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/roster', methods=['GET']) def get_roster(): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_roster(in_args, limit, offset) url = request.url url_root = request.url_root resource = 'roster' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) if __name__ == '__main__': app.run() <|reserved_special_token_1|> import json from flask import Flask from flask import request import copy import SimpleBO app = Flask(__name__) def parse_and_print_args(): fields = None in_args = None if request.args is not None: in_args = dict(copy.copy(request.args)) fields = copy.copy(in_args.get('fields', None)) if fields: del in_args['fields'] offset = copy.copy(in_args.get('offset', None)) if offset: del in_args['offset'] limit = copy.copy(in_args.get('limit', None)) if limit: del in_args['limit'] try: if request.data: body = json.loads(request.data) else: body = None except Exception as e: 
print('exception here is: ', e) body = None print('Request.args : ', json.dumps(in_args)) return in_args, fields, body, limit, offset @app.route('/api/<resource>', methods=['GET', 'POST']) def Basic_resource(resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_template(resource, in_args, fields, limit, offset) url = request.url url_root = request.url_root links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(resource, body) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE']) def Specific_resource(resource, primary_key): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_primary_key(resource, primary_key, fields) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'PUT': result = SimpleBO.Update(resource, body, primary_key) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'DELETE': result = SimpleBO.Delete(resource, primary_key) return result else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[ 'GET', 'POST']) def related_resource(resource, primary_key, related_resource): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_fk(resource, primary_key, related_resource, in_args, fields, 
limit, offset) url = request.url url_root = request.url_root all_resource = resource + '/' + primary_key + '/' + related_resource links = SimpleBO.generate_links(url, url_root, all_resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} elif request.method == 'POST': result = SimpleBO.Insert(related_resource, body) return json.dumps(result), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/teammates/<playerid>', methods=['GET']) def get_teammates(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_teammates(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'teammates/' + playerid links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} @app.route('/api/people/<playerid>/career_stats', methods=['GET']) def get_career_stats(playerid): in_args, fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_career_stats(playerid, limit, offset) url = request.url url_root = request.url_root resource = 'people/' + playerid + '/career_stats' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) @app.route('/api/roster', methods=['GET']) def get_roster(): in_args, 
fields, body, offset, limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_roster(in_args, limit, offset) url = request.url url_root = request.url_root resource = 'roster' links = SimpleBO.generate_links(url, url_root, resource, in_args, fields, offset, limit, result) output = [{'data': result, 'links': links}] return json.dumps(output), 200, {'content-type': 'application/json; charset:utf-8'} else: return ('Method ' + request.method + ' on resource ' + resource + ' not implemented!', 501, {'content-type': 'text/plain; charset: utf-8'}) if __name__ == '__main__': app.run() <|reserved_special_token_1|> # Lahman.py # Convert to/from web native JSON and Python/RDB types. import json # Include Flask packages from flask import Flask from flask import request import copy import SimpleBO # The main program that executes. This call creates an instance of a # class and the constructor starts the runtime. app = Flask(__name__) def parse_and_print_args(): fields = None in_args = None if request.args is not None: in_args = dict(copy.copy(request.args)) fields = copy.copy(in_args.get('fields',None)) if fields: del(in_args['fields']) offset = copy.copy(in_args.get('offset',None)) if offset: del(in_args['offset']) limit = copy.copy(in_args.get('limit',None)) if limit: del(in_args['limit']) try: if request.data: body = json.loads(request.data) else: body = None except Exception as e: print("exception here is: ", e) body = None print("Request.args : ", json.dumps(in_args)) return in_args,fields,body,limit,offset @app.route('/api/<resource>',methods = ['GET','POST']) def Basic_resource(resource): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_template(resource,in_args,fields,limit,offset) url = request.url url_root = request.url_root links = SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result) output=[{"data":result, "links":links}] return json.dumps(output), 
200, \ {"content-type": "application/json; charset:utf-8"} elif request.method == 'POST': result = SimpleBO.Insert(resource,body) return result else: return "Method " + request.method + " on resource " + resource + \ " not implemented!", 501, {"content-type": "text/plain; charset: utf-8"} @app.route('/api/<resource>/<primary_key>',methods = ['GET','PUT','DELETE']) def Specific_resource(resource,primary_key): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_primary_key(resource,primary_key,fields) return json.dumps(result), 200, \ {"content-type": "application/json; charset:utf-8"} elif request.method == 'PUT': result = SimpleBO.Update(resource,body,primary_key) return json.dumps(result), 200, \ {"content-type": "application/json; charset:utf-8"} elif request.method == 'DELETE': result = SimpleBO.Delete(resource,primary_key) return result else: return "Method " + request.method + " on resource " + resource + \ " not implemented!", 501, {"content-type": "text/plain; charset: utf-8"} @app.route('/api/<resource>/<primary_key>/<related_resource>',methods = ['GET','POST']) def related_resource(resource,primary_key,related_resource): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_by_fk(resource,primary_key,related_resource,in_args,fields,limit,offset) url = request.url url_root = request.url_root all_resource = resource+"/"+primary_key+"/"+related_resource links=SimpleBO.generate_links(url,url_root,all_resource,in_args,fields,offset,limit,result) output=[{"data":result, "links":links}] return json.dumps(output), 200, \ {"content-type": "application/json; charset:utf-8"} elif request.method == 'POST': result = SimpleBO.Insert(related_resource,body) return json.dumps(result), 200, \ {"content-type": "application/json; charset:utf-8"} else: return "Method " + request.method + " on resource " + resource + \ " not implemented!", 501, {"content-type": 
"text/plain; charset: utf-8"} @app.route('/api/teammates/<playerid>', methods=['GET']) def get_teammates(playerid): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_teammates(playerid,limit,offset) url = request.url url_root = request.url_root resource = 'teammates/'+playerid links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result) output=[{"data":result, "links":links}] return json.dumps(output), 200, \ {"content-type": "application/json; charset:utf-8"} @app.route('/api/people/<playerid>/career_stats', methods=['GET']) def get_career_stats(playerid): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_career_stats(playerid,limit,offset) url = request.url url_root = request.url_root resource = 'people/'+playerid+'/career_stats' links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result) output=[{"data":result, "links":links}] return json.dumps(output), 200, \ {"content-type": "application/json; charset:utf-8"} else: return "Method " + request.method + " on resource " + resource + \ " not implemented!", 501, {"content-type": "text/plain; charset: utf-8"} @app.route('/api/roster', methods=['GET']) def get_roster(): in_args,fields,body,offset,limit = parse_and_print_args() if request.method == 'GET': result = SimpleBO.find_roster(in_args,limit,offset) url = request.url url_root = request.url_root resource = 'roster' links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result) output=[{"data":result, "links":links}] return json.dumps(output), 200, \ {"content-type": "application/json; charset:utf-8"} else: return "Method " + request.method + " on resource " + resource + \ " not implemented!", 501, {"content-type": "text/plain; charset: utf-8"} if __name__ == '__main__': app.run()
flexible
{ "blob_id": "d03a8076b77851ae4df5cf657ff898eb132c49c3", "index": 5616, "step-1": "<mask token>\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\n@app.route('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, 
primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; 
charset:utf-8'}\n\n\n@app.route('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n 
print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\n@app.route('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 
'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\n@app.route('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + 
request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\n@app.route('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': 
links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, 
{'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\n@app.route('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, 
{'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n", "step-4": "import json\nfrom flask import Flask\nfrom flask import request\nimport copy\nimport SimpleBO\napp = Flask(__name__)\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\n@app.route('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = 
parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 
'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\n@app.route('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n@app.route('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n", "step-5": "# Lahman.py\n\n# Convert to/from web native JSON and Python/RDB types.\nimport json\n\n# Include Flask packages\nfrom flask import Flask\nfrom flask import request\nimport copy\n\nimport SimpleBO\n\n# The main program that executes. 
This call creates an instance of a\n# class and the constructor starts the runtime.\napp = Flask(__name__)\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields',None))\n if fields:\n del(in_args['fields'])\n offset = copy.copy(in_args.get('offset',None))\n if offset:\n del(in_args['offset'])\n limit = copy.copy(in_args.get('limit',None))\n if limit:\n del(in_args['limit'])\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print(\"exception here is: \", e)\n body = None\n\n\n\n print(\"Request.args : \", json.dumps(in_args))\n return in_args,fields,body,limit,offset\n\n\n\n\n\n@app.route('/api/<resource>',methods = ['GET','POST'])\ndef Basic_resource(resource):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource,in_args,fields,limit,offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource,body)\n return result\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\n@app.route('/api/<resource>/<primary_key>',methods = ['GET','PUT','DELETE'])\ndef Specific_resource(resource,primary_key):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource,primary_key,fields)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'PUT':\n result = 
SimpleBO.Update(resource,body,primary_key)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource,primary_key)\n return result\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\n@app.route('/api/<resource>/<primary_key>/<related_resource>',methods = ['GET','POST'])\ndef related_resource(resource,primary_key,related_resource):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource,primary_key,related_resource,in_args,fields,limit,offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource+\"/\"+primary_key+\"/\"+related_resource\n links=SimpleBO.generate_links(url,url_root,all_resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource,body)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\n@app.route('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/'+playerid\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; 
charset:utf-8\"}\n\n@app.route('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/'+playerid+'/career_stats'\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\n@app.route('/api/roster', methods=['GET'])\ndef get_roster():\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\nif __name__ == '__main__':\n app.run()\n", "step-ids": [ 7, 8, 9, 10, 11 ] }
[ 7, 8, 9, 10, 11 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST']) @expects_json(guest_calendar_schema) def booking(link_id): request_body = request.get_json() link = get_link(link_id) if link is None: return build_response('link id is invalid', 401) admin_id = link.admin_id try: uuid = add_booking_info_and_get_uuid(request_body['start'], request_body['end'], admin_id, request_body['guest_name'], request_body['guest_email'], request_body['topic'] if 'topic' in request_body else None) request_body['uuid'] = uuid except Exception: return build_response('already booked or deleted', 409) return make_response(request_body, 200) <|reserved_special_token_1|> <|reserved_special_token_0|> guest_calendar_post = Blueprint('guest_calendar_post', __name__) @guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST']) @expects_json(guest_calendar_schema) def booking(link_id): request_body = request.get_json() link = get_link(link_id) if link is None: return build_response('link id is invalid', 401) admin_id = link.admin_id try: uuid = add_booking_info_and_get_uuid(request_body['start'], request_body['end'], admin_id, request_body['guest_name'], request_body['guest_email'], request_body['topic'] if 'topic' in request_body else None) request_body['uuid'] = uuid except Exception: return build_response('already booked or deleted', 409) return make_response(request_body, 200) <|reserved_special_token_1|> from flask import Blueprint, request, make_response from flask_expects_json import expects_json from server.validation.schemas import guest_calendar_schema from tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid from tools.for_db.work_with_links import get_link from tools.build_response import build_response guest_calendar_post = Blueprint('guest_calendar_post', __name__) @guest_calendar_post.route('/calendars/<link_id>/bookings/', 
methods=['POST']) @expects_json(guest_calendar_schema) def booking(link_id): request_body = request.get_json() link = get_link(link_id) if link is None: return build_response('link id is invalid', 401) admin_id = link.admin_id try: uuid = add_booking_info_and_get_uuid(request_body['start'], request_body['end'], admin_id, request_body['guest_name'], request_body['guest_email'], request_body['topic'] if 'topic' in request_body else None) request_body['uuid'] = uuid except Exception: return build_response('already booked or deleted', 409) return make_response(request_body, 200)
flexible
{ "blob_id": "75ef5dd2b82cf79819f18045559f9850c74bb55a", "index": 5565, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n", "step-3": "<mask token>\nguest_calendar_post = Blueprint('guest_calendar_post', __name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n", "step-4": "from flask import Blueprint, request, make_response\nfrom flask_expects_json import expects_json\nfrom server.validation.schemas import guest_calendar_schema\nfrom tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid\nfrom tools.for_db.work_with_links import get_link\nfrom tools.build_response import build_response\nguest_calendar_post = Blueprint('guest_calendar_post', 
__name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if __name__ == '__main__': filename = 'example_payloads/payload1.json' data = open(filename, 'rb').read() headers = {'Accept': 'application/json', 'Content-Type': 'application/json' } response = requests.post(url, data=data, headers=headers) if response.ok: print(response.json()) else: print('error!') <|reserved_special_token_1|> <|reserved_special_token_0|> url = 'http://127.0.0.1:8888/productionplan' if __name__ == '__main__': filename = 'example_payloads/payload1.json' data = open(filename, 'rb').read() headers = {'Accept': 'application/json', 'Content-Type': 'application/json' } response = requests.post(url, data=data, headers=headers) if response.ok: print(response.json()) else: print('error!') <|reserved_special_token_1|> <|reserved_special_token_0|> import requests url = 'http://127.0.0.1:8888/productionplan' if __name__ == '__main__': filename = 'example_payloads/payload1.json' data = open(filename, 'rb').read() headers = {'Accept': 'application/json', 'Content-Type': 'application/json' } response = requests.post(url, data=data, headers=headers) if response.ok: print(response.json()) else: print('error!') <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Nov 18 18:21:37 2021 @author: benoitdeschrynmakers """ import requests url = 'http://127.0.0.1:8888/productionplan' if __name__ == "__main__": filename = "example_payloads/payload1.json" data = open(filename, 'rb').read() headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} response = requests.post(url, data=data, headers=headers) if response.ok: print(response.json()) else: print("error!")
flexible
{ "blob_id": "255130082ee5f8428f1700b47dee717465fed72f", "index": 4067, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n", "step-3": "<mask token>\nurl = 'http://127.0.0.1:8888/productionplan'\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n", "step-4": "<mask token>\nimport requests\nurl = 'http://127.0.0.1:8888/productionplan'\nif __name__ == '__main__':\n filename = 'example_payloads/payload1.json'\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n response = requests.post(url, data=data, headers=headers)\n if response.ok:\n print(response.json())\n else:\n print('error!')\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 18 18:21:37 2021\n\n@author: benoitdeschrynmakers\n\"\"\"\n\nimport requests\n\nurl = 'http://127.0.0.1:8888/productionplan'\n\nif __name__ == \"__main__\":\n filename = \"example_payloads/payload1.json\"\n\n data = open(filename, 'rb').read()\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}\n response = requests.post(url, data=data, headers=headers)\n\n if response.ok:\n print(response.json())\n else:\n print(\"error!\")\n \n \n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> disp.begin() disp.clear() disp.display() <|reserved_special_token_1|> <|reserved_special_token_0|> RST = 24 DC = 23 SPI_PORT = 0 SPI_DEVICE = 0 disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev( SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) disp.begin() disp.clear() disp.display() <|reserved_special_token_1|> import Adafruit_GPIO.SPI as SPI import Adafruit_SSD1306 RST = 24 DC = 23 SPI_PORT = 0 SPI_DEVICE = 0 disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev( SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) disp.begin() disp.clear() disp.display() <|reserved_special_token_1|> import Adafruit_GPIO.SPI as SPI import Adafruit_SSD1306 # Raspberry Pi pin configuration: RST = 24 # Note the following are only used with SPI: DC = 23 SPI_PORT = 0 SPI_DEVICE = 0 # Beaglebone Black pin configuration: # RST = 'P9_12' # Note the following are only used with SPI: # DC = 'P9_15' # SPI_PORT = 1 # SPI_DEVICE = 0 # 128x32 display with hardware I2C: #disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST) # 128x64 display with hardware I2C: #disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST) # Note you can change the I2C address by passing an i2c_address parameter like: # disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C) # Alternatively you can specify an explicit I2C bus number, for example # with the 128x32 display you would use: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2) # 128x32 display with hardware SPI: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) # 128x64 display with hardware SPI: disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) # Alternatively you can specify a software SPI implementation by providing # digital GPIO pin numbers for all the required display pins. 
For example # on a Raspberry Pi with the 128x32 display you might use: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22) # Initialize library. disp.begin() # Clear display. disp.clear() disp.display()
flexible
{ "blob_id": "d8cbed25f4c97be5a74a6e1f097fcb9fa9439a9a", "index": 8160, "step-1": "<mask token>\n", "step-2": "<mask token>\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-3": "<mask token>\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-4": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-5": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\n\n# Raspberry Pi pin configuration:\nRST = 24\n# Note the following are only used with SPI:\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\n\n# Beaglebone Black pin configuration:\n# RST = 'P9_12'\n# Note the following are only used with SPI:\n# DC = 'P9_15'\n# SPI_PORT = 1\n# SPI_DEVICE = 0\n\n# 128x32 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)\n\n# 128x64 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)\n\n# Note you can change the I2C address by passing an i2c_address parameter like:\n# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)\n\n# Alternatively you can specify an explicit I2C bus number, for example\n# with the 128x32 display you would use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)\n\n# 128x32 display with hardware SPI:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# 128x64 display with hardware SPI:\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# Alternatively you can specify a software SPI implementation by providing\n# digital GPIO pin numbers for all the required 
display pins. For example\n# on a Raspberry Pi with the 128x32 display you might use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)\n\n# Initialize library.\ndisp.begin()\n\n# Clear display.\ndisp.clear()\ndisp.display()\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.db import models from django.contrib.auth.models import AbstractUser from django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator from pizzaclub.settings import MAX_DNI_LENGTH, MAX_CUIL_LENGTH, PASSWORD_RESET_TIMEOUT from pizzaclub.settings import MIN_DNI_LENGTH, MIN_CUIL_LENGTH from pizzaclub.settings import MAX_PHONE_LENGTH, MIN_PHONE_LENGTH import secrets import datetime # Create your models here. class Address(models.Model): address = models.CharField(max_length=100, unique=True) lat = models.DecimalField(max_digits=9, decimal_places=7, default=0) lon= models.DecimalField(max_digits=9, decimal_places=7, default=0) elev = models.DecimalField(max_digits=9, decimal_places=2, default=0) class Meta: verbose_name_plural = "Address" def __str__(self): return self.address class User(AbstractUser): ''' Extend the User Django built in model. Add token data for password reset, ans is_employee flag for Employee Profile. ''' is_employee = models.BooleanField(default=False) token = models.CharField(max_length=50) token_date = models.DateTimeField(auto_now=True) token_valid = models.BooleanField(default=True) def is_order_manager(self): return (self.is_employee and self.is_active) or self.is_superuser def generate_token(self): return secrets.token_urlsafe() def check_token(self, token): ''' Check token validity for an hour since was generated. ''' tz = self.token_date.tzinfo t_now = datetime.datetime.now(tz=tz) # Check the token time less than hour dt = t_now - self.token_date if dt.total_seconds() > PASSWORD_RESET_TIMEOUT: self.token_valid = False # Return True if the token is correct and is_valid res = (token == self.token) and self.token_valid # Set the token invalid self.token_valid = False return res def save(self, *args, **kwargs): ''' Until save generate a new token and set valid. 
''' # Generate a token and set valid self.token = self.generate_token() self.token_valid = True super(User, self).save(*args, **kwargs) class Employee(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True) dni = models.CharField( max_length=MAX_DNI_LENGTH, unique=True, validators=[ MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex=r'^\d+$') ]) cuil = models.CharField( max_length=MAX_CUIL_LENGTH, unique=True, validators=[ MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator(MAX_CUIL_LENGTH), RegexValidator(regex=r'^\d+$') ]) phone = models.CharField( max_length=MAX_PHONE_LENGTH, null=True, blank=True, validators=[ MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex=r'^\d+$') ]) address = models.ManyToManyField(Address) def __str__(self): return self.user.get_full_name() def save(self, *args, **kwargs): # Check user is employee if not self.user.is_employee: raise TypeError('The User must be an Employee') # Check validation fields self.full_clean() # Save instance super(Employee, self).save(*args, **kwargs) class Client(models.Model): name = models.CharField(max_length=30) email = models.EmailField() phone = models.CharField( max_length=MAX_PHONE_LENGTH, validators=[ MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex=r'^\d+$') ]) address = models.ManyToManyField(Address)
normal
{ "blob_id": "b7511c156c241accaf1668d83ee0a5263b41af0d", "index": 3465, "step-1": "<mask token>\n\n\nclass User(AbstractUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n <mask token>\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n", "step-2": "<mask token>\n\n\nclass 
User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User 
must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n", "step-3": "<mask token>\n\n\nclass Address(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Address'\n\n def __str__(self):\n return self.address\n\n\nclass User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n 
MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n", "step-4": "<mask token>\n\n\nclass Address(models.Model):\n address = models.CharField(max_length=100, unique=True)\n lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n lon = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)\n\n\n class Meta:\n verbose_name_plural = 'Address'\n\n def __str__(self):\n return self.address\n\n\nclass User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return 
secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n", "step-5": "from django.db import models\nfrom 
django.contrib.auth.models import AbstractUser\nfrom django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator\n\nfrom pizzaclub.settings import MAX_DNI_LENGTH, MAX_CUIL_LENGTH, PASSWORD_RESET_TIMEOUT\nfrom pizzaclub.settings import MIN_DNI_LENGTH, MIN_CUIL_LENGTH\nfrom pizzaclub.settings import MAX_PHONE_LENGTH, MIN_PHONE_LENGTH\n\nimport secrets\nimport datetime\n# Create your models here.\nclass Address(models.Model):\n address = models.CharField(max_length=100, unique=True)\n lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n lon= models.DecimalField(max_digits=9, decimal_places=7, default=0)\n elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)\n\n class Meta:\n verbose_name_plural = \"Address\"\n\n def __str__(self):\n return self.address\n\nclass User(AbstractUser):\n '''\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n '''\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return (self.is_employee and self.is_active) or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n '''\n Check token validity for an hour since was generated.\n '''\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n\n # Check the token time less than hour\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n\n # Return True if the token is correct and is_valid\n res = (token == self.token) and self.token_valid\n \n # Set the token invalid\n self.token_valid = False\n\n return res\n\n def save(self, *args, **kwargs):\n '''\n Until save generate a new token and set valid.\n '''\n # Generate a token and set valid\n self.token = 
self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n dni = models.CharField(\n max_length=MAX_DNI_LENGTH,\n unique=True,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n cuil = models.CharField(\n max_length=MAX_CUIL_LENGTH,\n unique=True,\n validators=[\n MinLengthValidator(MIN_CUIL_LENGTH),\n MaxLengthValidator(MAX_CUIL_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n phone = models.CharField(\n max_length=MAX_PHONE_LENGTH,\n null=True,\n blank=True,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n \n def save(self, *args, **kwargs):\n # Check user is employee\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n # Check validation fields\n self.full_clean()\n # Save instance\n super(Employee, self).save(*args, **kwargs)\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(\n max_length=MAX_PHONE_LENGTH,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n address = models.ManyToManyField(Address)\n\n", "step-ids": [ 8, 13, 15, 16, 18 ] }
[ 8, 13, 15, 16, 18 ]
from flask.ext.wtf import Form from wtforms import TextField from wtforms.validators import Required class VerifyHandphoneForm(Form): handphone_hash = TextField('Enter verification code here', validators=[ Required()])
normal
{ "blob_id": "cb0df06ee474576b3024678fa0f63ce400d773ea", "index": 4096, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass VerifyHandphoneForm(Form):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass VerifyHandphoneForm(Form):\n handphone_hash = TextField('Enter verification code here', validators=[\n Required()])\n", "step-4": "from flask.ext.wtf import Form\nfrom wtforms import TextField\nfrom wtforms.validators import Required\n\n\nclass VerifyHandphoneForm(Form):\n handphone_hash = TextField('Enter verification code here', validators=[\n Required()])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
class State: def __init__(self, id): self.id = id def NotinClosed(problem, node): #restituisce 1 se lo stato non è stato già visitato (al netto di controlli sulla depth) è quindi bisogna aggiungerlo NotVisited = 1 for tuple in problem.closed: if node.state.id == tuple[0].id and node.depth >= tuple[1]: NotVisited = 0 #presente nei visited ma selected_node ha maggiore/uguale depth return NotVisited
normal
{ "blob_id": "200deda300e39b07e0e558277a340b7ad01c7dee", "index": 2216, "step-1": "<mask token>\n", "step-2": "class State:\n <mask token>\n\n\n<mask token>\n", "step-3": "class State:\n\n def __init__(self, id):\n self.id = id\n\n\n<mask token>\n", "step-4": "class State:\n\n def __init__(self, id):\n self.id = id\n\n\ndef NotinClosed(problem, node):\n NotVisited = 1\n for tuple in problem.closed:\n if node.state.id == tuple[0].id and node.depth >= tuple[1]:\n NotVisited = 0\n return NotVisited\n", "step-5": "\nclass State:\n def __init__(self, id):\n self.id = id\n\n\ndef NotinClosed(problem, node): #restituisce 1 se lo stato non è stato già visitato (al netto di controlli sulla depth) è quindi bisogna aggiungerlo\n NotVisited = 1\n for tuple in problem.closed:\n if node.state.id == tuple[0].id and node.depth >= tuple[1]:\n NotVisited = 0 #presente nei visited ma selected_node ha maggiore/uguale depth\n return NotVisited", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if u_avg < u_bat_min: print('proper shut down of the machine due to low battery') else: print('tout va bien dormez braves gens') <|reserved_special_token_1|> <|reserved_special_token_0|> pidcmes = Pidcmes() u_bat_min = 3.7 n_moy = 20 stop_run = False u_avg = pidcmes.get_tension(n_moy) if u_avg < u_bat_min: print('proper shut down of the machine due to low battery') else: print('tout va bien dormez braves gens') <|reserved_special_token_1|> <|reserved_special_token_0|> import time import datetime as dt from subprocess import call from pidcmes_lib import Pidcmes pidcmes = Pidcmes() u_bat_min = 3.7 n_moy = 20 stop_run = False u_avg = pidcmes.get_tension(n_moy) if u_avg < u_bat_min: print('proper shut down of the machine due to low battery') else: print('tout va bien dormez braves gens') <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This program is run at regular intervals to check the battery charge status of the uninterruptible power supply. In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the Raspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown. This program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler. 
The crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes: 5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py """ import time import datetime as dt from subprocess import call from pidcmes_lib import Pidcmes # class for 'pidcmes' procedures pidcmes = Pidcmes() # initialize pidcmese class u_bat_min = 3.7 # minumum battery voltage n_moy = 20 # averaging to reduce glitches stop_run = False # to control the execution (run/stop) u_avg = pidcmes.get_tension(n_moy) # read the value in volts if u_avg < u_bat_min:# or i > 10: print("proper shut down of the machine due to low battery") # time.sleep(5) # call("sudo shutdown -h now", shell=True) # shutdown the RASPI else: print("tout va bien dormez braves gens")
flexible
{ "blob_id": "67b967b688aeac1270eee836e0f6e6b3555b933e", "index": 5, "step-1": "<mask token>\n", "step-2": "<mask token>\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-3": "<mask token>\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-4": "<mask token>\nimport time\nimport datetime as dt\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program is run at regular intervals to check the battery charge status of the uninterruptible power supply.\nIn our case, it is a LiPo battery with a nominal voltage of 3.7 volts. 
By setting the voltage for the\nRaspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown.\n\nThis program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler.\nThe crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes:\n5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py\n\"\"\"\n\nimport time\nimport datetime as dt\n\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes # class for 'pidcmes' procedures\n \npidcmes = Pidcmes() # initialize pidcmese class\n\nu_bat_min = 3.7 # minumum battery voltage \nn_moy = 20 # averaging to reduce glitches\nstop_run = False # to control the execution (run/stop)\n\nu_avg = pidcmes.get_tension(n_moy) # read the value in volts\n\n \nif u_avg < u_bat_min:# or i > 10: \n print(\"proper shut down of the machine due to low battery\")\n# time.sleep(5)\n# call(\"sudo shutdown -h now\", shell=True) # shutdown the RASPI\nelse:\n print(\"tout va bien dormez braves gens\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class InsertWord(Operation): @classmethod def getValue(cls, t, h, w, b, arg=None): return math.log(arg['prob']) * w[cls] <|reserved_special_token_0|> <|reserved_special_token_0|> @classmethod def transFormT_H(cls, t_h, arg=None): parentNodeIndex = arg['parentNodeInex'] word = arg['word'] t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word, parentNodeIndex)]) <|reserved_special_token_1|> <|reserved_special_token_0|> class InsertWord(Operation): @classmethod def getValue(cls, t, h, w, b, arg=None): return math.log(arg['prob']) * w[cls] <|reserved_special_token_0|> @classmethod def transFormTree(cls, h, arg=None): parentNodeIndex = arg['parentNodeIndex'] word = arg['word'] h[0].insert_node(word, parentNodeIndex) @classmethod def transFormT_H(cls, t_h, arg=None): parentNodeIndex = arg['parentNodeInex'] word = arg['word'] t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word, parentNodeIndex)]) <|reserved_special_token_1|> <|reserved_special_token_0|> class InsertWord(Operation): @classmethod def getValue(cls, t, h, w, b, arg=None): return math.log(arg['prob']) * w[cls] @classmethod def getKBest(cls, t, h, args, w, b, k): valueList = [] for index in args['parenNodeIndexList']: for word, prob in args['vocabDic'].items(): arg['parentNodeIndex'] = index arg['word'] = word arg['prob'] = prob valueList.append([self.getValue(t, h, w, b, arg), arg]) valueList = sorted(ValueList, reverse=True)[:k - 1] return valueList @classmethod def transFormTree(cls, h, arg=None): parentNodeIndex = arg['parentNodeIndex'] word = arg['word'] h[0].insert_node(word, parentNodeIndex) @classmethod def transFormT_H(cls, t_h, arg=None): parentNodeIndex = arg['parentNodeInex'] word = arg['word'] t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word, parentNodeIndex)]) <|reserved_special_token_1|> from operation import * import math class InsertWord(Operation): @classmethod def getValue(cls, t, h, w, b, arg=None): return math.log(arg['prob']) * w[cls] 
@classmethod def getKBest(cls, t, h, args, w, b, k): valueList = [] for index in args['parenNodeIndexList']: for word, prob in args['vocabDic'].items(): arg['parentNodeIndex'] = index arg['word'] = word arg['prob'] = prob valueList.append([self.getValue(t, h, w, b, arg), arg]) valueList = sorted(ValueList, reverse=True)[:k - 1] return valueList @classmethod def transFormTree(cls, h, arg=None): parentNodeIndex = arg['parentNodeIndex'] word = arg['word'] h[0].insert_node(word, parentNodeIndex) @classmethod def transFormT_H(cls, t_h, arg=None): parentNodeIndex = arg['parentNodeInex'] word = arg['word'] t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word, parentNodeIndex)]) <|reserved_special_token_1|> #!/usr/bin/env python # -*- coding: utf-8 -*- from operation import * import math class InsertWord(Operation): @classmethod def getValue(cls, t, h, w, b, arg=None): return math.log(arg["prob"]) * w[cls] @classmethod def getKBest(cls, t, h , args, w, b, k): valueList = [] for index in args["parenNodeIndexList"]: for word, prob in args["vocabDic"].items(): arg["parentNodeIndex"] = index arg["word"] = word arg["prob"] = prob valueList.append([self.getValue(t, h, w, b, arg), arg]) valueList = sorted(ValueList, reverse=True)[:k-1] return valueList @classmethod def transFormTree(cls, h, arg=None): parentNodeIndex = arg["parentNodeIndex"] word = arg["word"] h[0].insert_node(word, parentNodeIndex) @classmethod def transFormT_H(cls, t_h, arg=None): parentNodeIndex = arg["parentNodeInex"] word = arg["word"] t_h.hs.appens( [t_h.hs[len(t_h.hs) -1][0].insert_node(word, parentNodeIndex), ] )
flexible
{ "blob_id": "355e3932c8bd9105e0c1ce9259e3b7416997523c", "index": 3668, "step-1": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n <mask token>\n <mask token>\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n", "step-2": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n <mask token>\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n", "step-3": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h, args, w, b, k):\n valueList = []\n for index in args['parenNodeIndexList']:\n for word, prob in args['vocabDic'].items():\n arg['parentNodeIndex'] = index\n arg['word'] = word\n arg['prob'] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k - 1]\n return valueList\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n", "step-4": "from operation import *\nimport math\n\n\nclass InsertWord(Operation):\n\n 
@classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h, args, w, b, k):\n valueList = []\n for index in args['parenNodeIndexList']:\n for word, prob in args['vocabDic'].items():\n arg['parentNodeIndex'] = index\n arg['word'] = word\n arg['prob'] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k - 1]\n return valueList\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom operation import *\nimport math\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg[\"prob\"]) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h , args, w, b, k):\n valueList = []\n for index in args[\"parenNodeIndexList\"]:\n for word, prob in args[\"vocabDic\"].items():\n arg[\"parentNodeIndex\"] = index\n arg[\"word\"] = word\n arg[\"prob\"] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k-1]\n return valueList\n \n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg[\"parentNodeIndex\"]\n word = arg[\"word\"]\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg[\"parentNodeInex\"]\n word = arg[\"word\"]\n t_h.hs.appens( [t_h.hs[len(t_h.hs) -1][0].insert_node(word, parentNodeIndex), ] )\n\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from Models.AdminPageModel import AdminPageModel class StudentDebtsController: def __init__(self, master, model, view): self._master = master self._model = model self._view = view def BackToAdminPage(self): from Views.AdminPage import AdminPage self._master.switch_frame(AdminPage, AdminPageModel) def GetStudentsInfo(self, text): studentsList = self._model.GetStudentsList(text) return studentsList def GetStudentDebtsAndShowTable(self, text): self._view.HideUserInfo() if (not text): self._view.ClearTable() self._view.ShowNoDataLabelWithText("No issues found. Select user first.") else: self._view.ClearTable() info = self._model.GetStudentInfo(text) self._view.ShowUserInfo(info[0], info[1], info[2], info[3]) studentDebts = self._model.GetStudentDebts(text) if (len(studentDebts) > 0): self._view.HideNoDataLabel() else: self._view.ShowNoDataLabelWithText("Student don't have issues yet.") self._view.FillTable(studentDebts) def ReturnBooks(self, idCopies): if (len(idCopies) > 0): try: id = self._model.GetStudentId(idCopies[0]) self._model.ReturnBooks(idCopies) self._view.ClearTable() studentDebts = self._model.GetStudentDebts(id) self._view.FillTable(studentDebts) singOrPlural = 'Book' if (len(idCopies) > 1): singOrPlural = 'Books' self._view.SetMessageLabel(singOrPlural + " successfully returned" , "green") except Exception as e: print(e) self._view.SetMessageLabel("Something went wrong", "red") else: self._view.SetMessageLabel("0 books have been selected. Nothing to return", "red")
normal
{ "blob_id": "8aacc8dbfdd70d24689ae17b9c29b1ffc80fb231", "index": 9013, "step-1": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n <mask token>\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if not text:\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\n 'No issues found. 
Select user first.')\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if len(studentDebts) > 0:\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\n \"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n def ReturnBooks(self, idCopies):\n if len(idCopies) > 0:\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if len(idCopies) > 1:\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural +\n ' successfully returned', 'green')\n except Exception as e:\n print(e)\n self._view.SetMessageLabel('Something went wrong', 'red')\n else:\n self._view.SetMessageLabel(\n '0 books have been selected. Nothing to return', 'red')\n", "step-4": "from Models.AdminPageModel import AdminPageModel\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if not text:\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\n 'No issues found. 
Select user first.')\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if len(studentDebts) > 0:\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\n \"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n def ReturnBooks(self, idCopies):\n if len(idCopies) > 0:\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if len(idCopies) > 1:\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural +\n ' successfully returned', 'green')\n except Exception as e:\n print(e)\n self._view.SetMessageLabel('Something went wrong', 'red')\n else:\n self._view.SetMessageLabel(\n '0 books have been selected. Nothing to return', 'red')\n", "step-5": "\nfrom Models.AdminPageModel import AdminPageModel\n\nclass StudentDebtsController:\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if (not text):\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\"No issues found. 
Select user first.\")\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if (len(studentDebts) > 0):\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n\n def ReturnBooks(self, idCopies):\n if (len(idCopies) > 0):\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if (len(idCopies) > 1):\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural + \" successfully returned\" , \"green\")\n except Exception as e:\n print(e)\n self._view.SetMessageLabel(\"Something went wrong\", \"red\")\n else:\n self._view.SetMessageLabel(\"0 books have been selected. Nothing to return\", \"red\")\n\n ", "step-ids": [ 2, 3, 6, 7, 8 ] }
[ 2, 3, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> try: myclient = pymongo.MongoClient('mongodb://localhost:27017/') myclient.server_info() print('Database Connected') except: print('Database Error') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> myclient = {} try: myclient = pymongo.MongoClient('mongodb://localhost:27017/') myclient.server_info() print('Database Connected') except: print('Database Error') mydb = myclient['jmitproject'] user = user(mydb) blog = blog(mydb) <|reserved_special_token_1|> import pymongo from FlaskScripts.database.user_database import user from FlaskScripts.database.blog_database import blog myclient = {} try: myclient = pymongo.MongoClient('mongodb://localhost:27017/') myclient.server_info() print('Database Connected') except: print('Database Error') mydb = myclient['jmitproject'] user = user(mydb) blog = blog(mydb) <|reserved_special_token_1|> import pymongo from FlaskScripts.database.user_database import user from FlaskScripts.database.blog_database import blog myclient = {} try: myclient = pymongo.MongoClient("mongodb://localhost:27017/") myclient.server_info() print('Database Connected') except: print('Database Error') mydb = myclient["jmitproject"] user = user(mydb) # use user for users interaction blog = blog(mydb) # use blog for blogs interaction
flexible
{ "blob_id": "aafdd228cf2859d7f013b088263eab544e19c481", "index": 9995, "step-1": "<mask token>\n", "step-2": "<mask token>\ntry:\n myclient = pymongo.MongoClient('mongodb://localhost:27017/')\n myclient.server_info()\n print('Database Connected')\nexcept:\n print('Database Error')\n<mask token>\n", "step-3": "<mask token>\nmyclient = {}\ntry:\n myclient = pymongo.MongoClient('mongodb://localhost:27017/')\n myclient.server_info()\n print('Database Connected')\nexcept:\n print('Database Error')\nmydb = myclient['jmitproject']\nuser = user(mydb)\nblog = blog(mydb)\n", "step-4": "import pymongo\nfrom FlaskScripts.database.user_database import user\nfrom FlaskScripts.database.blog_database import blog\nmyclient = {}\ntry:\n myclient = pymongo.MongoClient('mongodb://localhost:27017/')\n myclient.server_info()\n print('Database Connected')\nexcept:\n print('Database Error')\nmydb = myclient['jmitproject']\nuser = user(mydb)\nblog = blog(mydb)\n", "step-5": "import pymongo\nfrom FlaskScripts.database.user_database import user\nfrom FlaskScripts.database.blog_database import blog\nmyclient = {}\ntry:\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n myclient.server_info()\n print('Database Connected')\nexcept:\n print('Database Error')\n\nmydb = myclient[\"jmitproject\"]\n\nuser = user(mydb) # use user for users interaction\nblog = blog(mydb) # use blog for blogs interaction\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# coding: utf-8 """ Upbit Open API ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [open-api@upbit.com] # noqa: E501 OpenAPI spec version: 1.0.0 Contact: ujhin942@gmail.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class DepositCompleteResponse(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'currency': 'str', 'deposit_address': 'str', 'secondary_address': 'str' } attribute_map = { 'currency': 'currency', 'deposit_address': 'deposit_address', 'secondary_address': 'secondary_address' } def __init__(self, currency=None, deposit_address=None, secondary_address=None): # noqa: E501 """DepositCompleteResponse - a model defined in Swagger""" # noqa: E501 self._currency = None self._deposit_address = None self._secondary_address = None self.discriminator = None if currency is not None: self.currency = currency if deposit_address is not None: self.deposit_address = deposit_address if secondary_address is not None: self.secondary_address = secondary_address @property def currency(self): """Gets the currency of this DepositCompleteResponse. # noqa: E501 화폐를 의미하는 영문 대문자 코드 # noqa: E501 :return: The currency of this DepositCompleteResponse. # noqa: E501 :rtype: str """ return self._currency @currency.setter def currency(self, currency): """Sets the currency of this DepositCompleteResponse. 화폐를 의미하는 영문 대문자 코드 # noqa: E501 :param currency: The currency of this DepositCompleteResponse. 
# noqa: E501 :type: str """ self._currency = currency @property def deposit_address(self): """Gets the deposit_address of this DepositCompleteResponse. # noqa: E501 입금 주소 # noqa: E501 :return: The deposit_address of this DepositCompleteResponse. # noqa: E501 :rtype: str """ return self._deposit_address @deposit_address.setter def deposit_address(self, deposit_address): """Sets the deposit_address of this DepositCompleteResponse. 입금 주소 # noqa: E501 :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501 :type: str """ self._deposit_address = deposit_address @property def secondary_address(self): """Gets the secondary_address of this DepositCompleteResponse. # noqa: E501 2차 입금 주소 # noqa: E501 :return: The secondary_address of this DepositCompleteResponse. # noqa: E501 :rtype: str """ return self._secondary_address @secondary_address.setter def secondary_address(self, secondary_address): """Sets the secondary_address of this DepositCompleteResponse. 2차 입금 주소 # noqa: E501 :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501 :type: str """ self._secondary_address = secondary_address def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DepositCompleteResponse, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DepositCompleteResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
normal
{ "blob_id": "b9bd1c0f4a5d2e6eeb75ba4f27d33ad5fb22530e", "index": 557, "step-1": "<mask token>\n\n\nclass DepositCompleteResponse(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n <mask token>\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <mask token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-2": "<mask token>\n\n\nclass DepositCompleteResponse(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n <mask token>\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <mask token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-3": "<mask token>\n\n\nclass DepositCompleteResponse(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def currency(self):\n \"\"\"Gets the currency 
of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-4": "<mask token>\nimport pprint\nimport re\nimport six\n\n\nclass DepositCompleteResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'currency': 'str', 'deposit_address': 'str',\n 'secondary_address': 'str'}\n attribute_map = {'currency': 'currency', 'deposit_address':\n 'deposit_address', 'secondary_address': 'secondary_address'}\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = 
None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-5": "# coding: utf-8\n\n\"\"\"\n Upbit Open API\n\n ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [open-api@upbit.com] # noqa: E501\n\n OpenAPI spec version: 1.0.0\n Contact: ujhin942@gmail.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass DepositCompleteResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n 
attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'currency': 'str',\n 'deposit_address': 'str',\n 'secondary_address': 'str'\n }\n\n attribute_map = {\n 'currency': 'currency',\n 'deposit_address': 'deposit_address',\n 'secondary_address': 'secondary_address'\n }\n\n def __init__(self, currency=None, deposit_address=None, secondary_address=None): # noqa: E501\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\" # noqa: E501\n\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-ids": [ 6, 9, 11, 16, 17 ] }
[ 6, 9, 11, 16, 17 ]
######################################## __author__ = "Abdelrahman Eldesokey" __license__ = "GNU GPLv3" __version__ = "0.1" __maintainer__ = "Abdelrahman Eldesokey" __email__ = "abdo.eldesokey@gmail.com" ######################################## import torch import torch.nn.functional as F import torch.nn as nn from torch.nn.modules.conv import _ConvNd from torch.nn.modules.utils import _pair from torch.nn.parameter import Parameter import numpy as np from scipy.stats import poisson from scipy import signal from utils.util import retrieve_elements_from_indices # The proposed Normalized Convolution Layer class NConv2d(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus', init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) # Call _ConvNd constructor super(NConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode) self.eps = 1e-20 self.pos_fn = pos_fn self.init_method = init_method # Initialize weights and bias self.init_parameters() if self.pos_fn is not None: EnforcePos.apply(self, 'weight', pos_fn) def forward(self, data, conf): # Normalized Convolution denom = F.conv2d(conf, self.weight, None, self.stride, self.padding, self.dilation, self.groups) nomin = F.conv2d(data*conf, self.weight, None, self.stride, self.padding, self.dilation, self.groups) nconv = nomin / (denom+self.eps) # Add bias b = self.bias sz = b.size(0) b = b.view(1,sz,1,1) b = b.expand_as(nconv) nconv += b # Propagate confidence cout = denom sz = cout.size() cout = cout.view(sz[0], sz[1], -1) k = self.weight k_sz = k.size() k = k.view(k_sz[0], -1) s = torch.sum(k, dim=-1, keepdim=True) cout = cout / s cout = cout.view(sz) return nconv, cout def init_parameters(self): # Init weights if self.init_method == 'x': # Xavier 
torch.nn.init.xavier_uniform_(self.weight) elif self.init_method == 'k': # Kaiming torch.nn.init.kaiming_uniform_(self.weight) elif self.init_method == 'p': # Poisson mu=self.kernel_size[0]/2 dist = poisson(mu) x = np.arange(0, self.kernel_size[0]) y = np.expand_dims(dist.pmf(x),1) w = signal.convolve2d(y, y.transpose(), 'full') w = torch.tensor(w).type_as(self.weight) w = torch.unsqueeze(w,0) w = torch.unsqueeze(w,1) w = w.repeat(self.out_channels, 1, 1, 1) w = w.repeat(1, self.in_channels, 1, 1) self.weight.data = w + torch.rand(w.shape) # Init bias self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01) # My modification is in this class # Non-negativity enforcement class class EnforcePos(object): def __init__(self, pos_fn, name): self.name = name self.pos_fn = pos_fn @staticmethod def apply(module, name, pos_fn): fn = EnforcePos(pos_fn, name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name+'_pre', Parameter(weight.data)) setattr(module, name, fn._pos(getattr(module, name+'_pre'))) module.register_forward_pre_hook(fn) return fn def __call__(self, module, inputs): #if module.training: # weight = getattr(module, self.name) #del module._parameters[self.name] pos_weight = self._pos(getattr(module, self.name+'_pre')) setattr(module, self.name, pos_weight) #else: # pass def _pos(self, p): pos_fn = self.pos_fn.lower() if pos_fn == 'softmax': p_sz = p.size() p = p.view(p_sz[0],p_sz[1], -1) p = F.softmax(p, -1) return p.view(p_sz) elif pos_fn == 'exp': return torch.exp(p) elif pos_fn == 'softplus': return F.softplus(p, beta=10) elif pos_fn == 'sigmoid': return F.sigmoid(p) else: print('Undefined positive function!') return class NormCNN(nn.Module): def __init__(self, pos_fn=None, num_channels=2): super().__init__() self.pos_fn = pos_fn self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2) self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2) self.nconv3 = 
NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2) self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1) self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1) self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1) self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k') def forward(self, x0, c0): x1, c1 = self.nconv1(x0, c0) x1, c1 = self.nconv2(x1, c1) x1, c1 = self.nconv3(x1, c1) # Downsample 1 ds = 2 c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True) x1_ds = retrieve_elements_from_indices(x1, idx) c1_ds /= 4 x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds) x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds) # Downsample 2 ds = 2 c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True) x2_dss = retrieve_elements_from_indices(x2_ds, idx) c2_dss /= 4 x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss) # Downsample 3 ds = 2 c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True) x3_dss = retrieve_elements_from_indices(x3_ds, idx) c3_dss /= 4 x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss) # Upsample 1 x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest') c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest') x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1)) # Upsample 2 x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest') c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest') x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1)) # Upsample 3 x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest') c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest') xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1)) xout, cout = self.nconv7(xout, cout) return xout, cout class PretrainedCNN(nn.Module): def __init__(self, pos_fn=None, num_channels=2): super().__init__() self.pos_fn = pos_fn self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True) 
self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True) self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True) self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True) self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True) self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True) self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True) self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01) self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01) self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01) self.bias4 = nn.Parameter(torch.zeros(1) + 0.01) self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01) self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01) self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01) def forward(self, x0, c0): x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1) x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2) x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3) ds = 2 c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True) x1_ds = torch.zeros(c1_ds.size()).cuda() for i in range(x1_ds.size(0)): for j in range(x1_ds.size(1)): x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:]) c1_ds /= 4 x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2) x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3) ds = 2 c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True) x2_dss = torch.zeros(c2_dss.size()).cuda() for i in range(x2_dss.size(0)): for j in range(x2_dss.size(1)): x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:]) c2_dss /= 4 x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2) # x3_ds, c3_ds = self.navg_forward(self.navg3, c3_ds, x3_ds, self.bias3) ds = 2 c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, 
return_indices=True) x3_dss = torch.zeros(c3_dss.size()).cuda() for i in range(x3_dss.size(0)): for j in range(x3_dss.size(1)): x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:]) c3_dss /= 4 x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2) x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest') c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest') x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds, c4), 1), torch.cat((x3_ds, x4), 1), self.bias34) x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest') c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest') x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds, c34), 1), torch.cat((x2_ds, x34), 1), self.bias23) x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest') c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest') xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1), torch.cat((x23, x1), 1), self.bias12) xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4) return xout, cout def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False): # Normalized Averaging ca = navg(c) xout = torch.div(navg(x * c), ca + eps) # Add bias sz = b.size(0) b = b.view(1, sz, 1, 1) b = b.expand_as(xout) xout = xout + b if restore: cm = (c == 0).float() xout = torch.mul(xout, cm) + torch.mul(1 - cm, x) # Propagate confidence # cout = torch.ne(ca, 0).float() cout = ca sz = cout.size() cout = cout.view(sz[0], sz[1], -1) k = navg.weight k_sz = k.size() k = k.view(k_sz[0], -1) s = torch.sum(k, dim=-1, keepdim=True) cout = cout / s cout = cout.view(sz) k = k.view(k_sz) return xout, cout def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initalizer='x', pos=False, groups=1): navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2, kernel_size[1] // 2), bias=False, groups=groups) 
weights = navg.weight if initalizer == 'x': # Xavier torch.nn.init.xavier_uniform(weights) elif initalizer == 'k': torch.nn.init.kaiming_uniform(weights) elif initalizer == 'p': mu = kernel_size[0] / 2 dist = poisson(mu) x = np.arange(0, kernel_size[0]) y = np.expand_dims(dist.pmf(x), 1) w = signal.convolve2d(y, y.transpose(), 'full') w = torch.from_numpy(w).float().cuda() w = torch.unsqueeze(w, 0) w = torch.unsqueeze(w, 1) w = w.repeat(out_channels, 1, 1, 1) w = w.repeat(1, in_channels, 1, 1) weights.data = w + torch.rand(w.shape).cuda() return navg if __name__ == '__main__': ncnn = NormCNN(pos_fn='softplus') print(ncnn.__str__())
normal
{ "blob_id": "64b4deaad548a38ba646423d33fc6a985483a042", "index": 3592, "step-1": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n <mask token>\n <mask token>\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, 
(5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def 
__init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n 
:].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n 
navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, 
dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, 
num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = 
pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, 
c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, 
stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def 
init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, 
num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n 
self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, 
ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, 
groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n", "step-4": "__author__ = 'Abdelrahman Eldesokey'\n__license__ = 'GNU GPLv3'\n__version__ = '0.1'\n__maintainer__ = 'Abdelrahman Eldesokey'\n__email__ = 'abdo.eldesokey@gmail.com'\n<mask token>\n\n\nclass NConv2d(_ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn=\n 'softplus', init_method='k', stride=1, padding=0, dilation=1,\n groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(NConv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride, padding, dilation, False, _pair(0), groups,\n bias, padding_mode)\n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n\n def forward(self, data, conf):\n denom = F.conv2d(conf, self.weight, None, self.stride, self.padding,\n self.dilation, self.groups)\n nomin = F.conv2d(data * conf, self.weight, None, self.stride, self.\n padding, self.dilation, self.groups)\n nconv = nomin / (denom + self.eps)\n b = self.bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(nconv)\n nconv += b\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = 
self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n return nconv, cout\n\n def init_parameters(self):\n if self.init_method == 'x':\n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k':\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p':\n mu = self.kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)\n\n\nclass EnforcePos(object):\n\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n module.register_parameter(name + '_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name + '_pre')))\n module.register_forward_pre_hook(fn)\n return fn\n\n def __call__(self, module, inputs):\n pos_weight = self._pos(getattr(module, self.name + '_pre'))\n setattr(module, self.name, pos_weight)\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0], p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.nconv1 = NConv2d(1, 
num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn,\n 'p', padding=2)\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3),\n pos_fn, 'p', padding=1)\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((\n c3_ds, c4), 1))\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat(\n (c2_ds, c34), 1))\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23,\n c1), 1))\n xout, cout = self.nconv7(xout, cout)\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n 
def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n self.pos_fn = pos_fn\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels,\n 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels,\n num_channels, 'p', True)\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :]\n .view(-1)].view(idx.size()[2:])\n c1_ds /= 4\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :,\n 
:].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2\n )\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :,\n :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2\n )\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds,\n c4), 1), torch.cat((x3_ds, x4), 1), self.bias34)\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds,\n c34), 1), torch.cat((x2_ds, x34), 1), self.bias23)\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1),\n torch.cat((x23, x1), 1), self.bias12)\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n cout = cout / s\n cout = cout.view(sz)\n k = k.view(k_sz)\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1,\n out_channels=1, initalizer='x', pos=False, groups=1):\n 
navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=1, padding=(kernel_size[0] // 2,\n kernel_size[1] // 2), bias=False, groups=groups)\n weights = navg.weight\n if initalizer == 'x':\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n", "step-5": "########################################\n__author__ = \"Abdelrahman Eldesokey\"\n__license__ = \"GNU GPLv3\"\n__version__ = \"0.1\"\n__maintainer__ = \"Abdelrahman Eldesokey\"\n__email__ = \"abdo.eldesokey@gmail.com\"\n########################################\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn.modules.conv import _ConvNd\nfrom torch.nn.modules.utils import _pair\nfrom torch.nn.parameter import Parameter\nimport numpy as np\nfrom scipy.stats import poisson\nfrom scipy import signal\n\nfrom utils.util import retrieve_elements_from_indices\n\n# The proposed Normalized Convolution Layer\nclass NConv2d(_ConvNd):\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',\n init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n # Call _ConvNd constructor\n super(NConv2d, self).__init__(in_channels, out_channels, kernel_size,\n stride, padding, dilation, False, _pair(0), groups, bias, 
padding_mode)\n \n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n \n # Initialize weights and bias\n self.init_parameters()\n if self.pos_fn is not None:\n EnforcePos.apply(self, 'weight', pos_fn)\n \n def forward(self, data, conf):\n # Normalized Convolution\n denom = F.conv2d(conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nomin = F.conv2d(data*conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nconv = nomin / (denom+self.eps)\n\n # Add bias\n b = self.bias\n sz = b.size(0)\n b = b.view(1,sz,1,1)\n b = b.expand_as(nconv)\n nconv += b\n \n # Propagate confidence\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n \n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True) \n\n cout = cout / s\n cout = cout.view(sz)\n \n return nconv, cout\n\n def init_parameters(self):\n # Init weights\n if self.init_method == 'x': # Xavier \n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k': # Kaiming\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p': # Poisson\n mu=self.kernel_size[0]/2 \n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x),1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w,0)\n w = torch.unsqueeze(w,1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n \n # Init bias\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)\n \n# My modification is in this class\n# Non-negativity enforcement class \nclass EnforcePos(object):\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n weight = getattr(module, name)\n del module._parameters[name]\n 
module.register_parameter(name+'_pre', Parameter(weight.data))\n setattr(module, name, fn._pos(getattr(module, name+'_pre')))\n module.register_forward_pre_hook(fn) \n\n return fn\n\n def __call__(self, module, inputs):\n #if module.training:\n # weight = getattr(module, self.name)\n #del module._parameters[self.name]\n pos_weight = self._pos(getattr(module, self.name+'_pre'))\n setattr(module, self.name, pos_weight)\n #else:\n # pass\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0],p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return\n\n\nclass NormCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n\n self.pos_fn = pos_fn\n\n self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)\n self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)\n\n self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)\n\n self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')\n\n def forward(self, x0, c0):\n\n x1, c1 = self.nconv1(x0, c0)\n x1, c1 = self.nconv2(x1, c1)\n x1, c1 = self.nconv3(x1, c1)\n\n # Downsample 1\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = retrieve_elements_from_indices(x1, idx)\n c1_ds /= 4\n\n x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)\n x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)\n\n # Downsample 2\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n 
x2_dss = retrieve_elements_from_indices(x2_ds, idx)\n c2_dss /= 4\n\n x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)\n\n # Downsample 3\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n x3_dss = retrieve_elements_from_indices(x3_ds, idx)\n c3_dss /= 4\n x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)\n\n # Upsample 1\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1))\n\n # Upsample 2\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1))\n\n # Upsample 3\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1))\n\n xout, cout = self.nconv7(xout, cout)\n\n return xout, cout\n\n\nclass PretrainedCNN(nn.Module):\n\n def __init__(self, pos_fn=None, num_channels=2):\n super().__init__()\n\n self.pos_fn = pos_fn\n\n self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)\n self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)\n self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)\n self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)\n\n self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)\n\n self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)\n\n 
self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)\n\n def forward(self, x0, c0):\n\n x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)\n\n x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)\n\n x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)\n\n ds = 2\n c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)\n x1_ds = torch.zeros(c1_ds.size()).cuda()\n for i in range(x1_ds.size(0)):\n for j in range(x1_ds.size(1)):\n x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n\n c1_ds /= 4\n\n x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)\n\n x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)\n\n ds = 2\n c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)\n\n x2_dss = torch.zeros(c2_dss.size()).cuda()\n for i in range(x2_dss.size(0)):\n for j in range(x2_dss.size(1)):\n x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n c2_dss /= 4\n\n x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2)\n\n # x3_ds, c3_ds = self.navg_forward(self.navg3, c3_ds, x3_ds, self.bias3)\n\n ds = 2\n c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)\n\n x3_dss = torch.zeros(c3_dss.size()).cuda()\n for i in range(x3_dss.size(0)):\n for j in range(x3_dss.size(1)):\n x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])\n c3_dss /= 4\n\n x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2)\n\n x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')\n c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')\n\n x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds, c4), 1), torch.cat((x3_ds, x4), 1),\n self.bias34)\n\n x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')\n c34 
= F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')\n\n x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds, c34), 1), torch.cat((x2_ds, x34), 1),\n self.bias23)\n\n x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')\n c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')\n\n xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1), torch.cat((x23, x1), 1), self.bias12)\n\n xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)\n\n return xout, cout\n\n def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):\n\n # Normalized Averaging\n ca = navg(c)\n xout = torch.div(navg(x * c), ca + eps)\n\n # Add bias\n sz = b.size(0)\n b = b.view(1, sz, 1, 1)\n b = b.expand_as(xout)\n xout = xout + b\n\n if restore:\n cm = (c == 0).float()\n xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)\n\n # Propagate confidence\n # cout = torch.ne(ca, 0).float()\n cout = ca\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n\n k = navg.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True)\n\n cout = cout / s\n\n cout = cout.view(sz)\n k = k.view(k_sz)\n\n return xout, cout\n\n def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initalizer='x', pos=False,\n groups=1):\n\n navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,\n padding=(kernel_size[0] // 2, kernel_size[1] // 2), bias=False, groups=groups)\n\n weights = navg.weight\n\n if initalizer == 'x': # Xavier\n torch.nn.init.xavier_uniform(weights)\n elif initalizer == 'k':\n torch.nn.init.kaiming_uniform(weights)\n elif initalizer == 'p':\n mu = kernel_size[0] / 2\n dist = poisson(mu)\n x = np.arange(0, kernel_size[0])\n y = np.expand_dims(dist.pmf(x), 1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = torch.from_numpy(w).float().cuda()\n w = torch.unsqueeze(w, 0)\n w = torch.unsqueeze(w, 1)\n w = w.repeat(out_channels, 1, 1, 1)\n w = 
w.repeat(1, in_channels, 1, 1)\n weights.data = w + torch.rand(w.shape).cuda()\n\n return navg\n\n\nif __name__ == '__main__':\n ncnn = NormCNN(pos_fn='softplus')\n print(ncnn.__str__())\n", "step-ids": [ 15, 17, 18, 19, 21 ] }
[ 15, 17, 18, 19, 21 ]
# -*- coding: utf-8 -*- ############################################################################### # This file is part of metalibm (https://github.com/kalray/metalibm) ############################################################################### # MIT License # # Copyright (c) 2018 Kalray # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
###############################################################################
# Author(s): Nicolas Brunie (nbrunie@kalray.eu)
# Created:          Aug 8th, 2017
# last-modified:    Mar 7th, 2018
###############################################################################

from collections import deque
from functools import reduce

from metalibm_core.core.ml_formats import ML_Bool
from metalibm_core.core.ml_operations import (
    ML_LeafNode, Comparison, BooleanOperation,
    is_leaf_node,
    LogicalAnd, LogicalOr, Constant,
    BitLogicLeftShift, BitLogicRightShift,
    BitArithmeticRightShift,
)
from metalibm_core.core.advanced_operations import PlaceHolder
from metalibm_core.core.ml_table import ML_NewTable

from metalibm_core.utility.log_report import Log


def evaluate_comparison_range(node):
    """ evaluate the numerical range of a Comparison node, if any.

        No range is currently derived for comparisons, so this always
        returns None. """
    return None


def is_comparison(node):
    """ test whether node is a Comparison node or not """
    return isinstance(node, Comparison)


LOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel("EvaluateRangeVerbose")


## Assuming @p optree has no pre-defined range, recursively compute a range
#  from the node inputs
def evaluate_range(optree, update_interval=False, memoization_map=None):
    """ evaluate the range of an Operation node

    Args:
        optree (ML_Operation): input node
        update_interval (bool): if True, store the computed range on each
            visited node via set_interval
        memoization_map (dict): optional node -> interval cache shared
            across recursive calls

    Return:
        sollya Interval: evaluated range of optree or None if no range
        could be determined
    """
    if memoization_map is None:
        memoization_map = {}
    init_interval = optree.get_interval()
    if init_interval is not None:
        # a pre-defined range short-circuits recursive evaluation
        return init_interval
    if optree in memoization_map:
        return memoization_map[optree]
    if isinstance(optree, ML_LeafNode):
        op_range = optree.get_interval()
    elif is_comparison(optree):
        op_range = evaluate_comparison_range(optree)
        if update_interval:
            optree.set_interval(op_range)
    elif isinstance(optree, PlaceHolder):
        # a PlaceHolder forwards the range of its first input
        op_range = evaluate_range(optree.get_input(0),
                                  update_interval=update_interval,
                                  memoization_map=memoization_map)
        if update_interval:
            optree.set_interval(op_range)
    else:
        args_interval = tuple(
            evaluate_range(op, update_interval=update_interval,
                           memoization_map=memoization_map)
            for op in optree.get_inputs())
        args_interval_map = {
            op: op_interval
            for op, op_interval in zip(optree.inputs, args_interval)}
        # evaluate_range cannot rely on bare_range_function only as some
        # operations (e.g. CountLeadingZeros) do not base interval computation
        # on their inputs' intervals but on other parameters
        ops_interval_get = lambda op: args_interval_map[op]
        op_range = optree.range_function(optree.inputs,
                                         ops_interval_getter=ops_interval_get)
        if update_interval:
            optree.set_interval(op_range)
    Log.report(LOG_VERBOSE_EVALUATE_RANGE, "range of {} is {}", optree, op_range)
    memoization_map[optree] = op_range
    return op_range


def forward_attributes(src, dst):
    """ forward compatible attributes from src node to dst node

    :param src: source node for attributes values
    :type src: ML_Operation
    :param dst: destination node for attributes copies
    :type dst: ML_Operation
    """
    dst.set_tag(src.get_tag())
    dst.set_debug(src.get_debug())
    dst.set_handle(src.get_handle())
    if hasattr(src.attributes, "init_stage"):
        forward_stage_attributes(src, dst)
    if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
        dst.likely = src.likely


def forward_stage_attributes(src, dst):
    """ copy node's stage attributes from src node to dst node """
    dst.attributes.init_stage = src.attributes.init_stage


def depth_node_ordering(start_node, end_nodes):
    """ order the nodes between root start_node and end_nodes
        by depth (root first, starting with start_node)

    :param start_node: root of the sort (first node)
    :type start_node: ML_Operation
    :param end_nodes: nodes where the depth sort must end
    :type end_nodes: iterator over ML_Operation
    :return: depth ordered list of nodes
    :rtype: list(ML_Operation)
    """
    ordered_list = []
    ordered_set = set()
    # deque gives O(1) pops from the left (list.pop(0) is O(n))
    working_queue = deque([start_node])
    while working_queue:
        node = working_queue.popleft()
        if node not in ordered_set:
            ordered_set.add(node)
            ordered_list.append(node)
            if not is_leaf_node(node) and node not in end_nodes:
                working_queue.extend(node.get_inputs())
    return ordered_list


def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
    """ Logical/Boolean operand list reduction.

        Pairs of operands are repeatedly popped from the front and their
        combination appended at the back, producing a balanced reduction
        tree. op_list must be non-empty (IndexError otherwise). """
    pending = deque(op_list)
    while len(pending) > 1:
        lhs = pending.popleft()
        rhs = pending.popleft()
        pending.append(op_ctor(lhs, rhs, precision=precision))
    # assigning attributes to the resulting node
    result = pending[0]
    result.set_attributes(**kw)
    return result

## Specialization of logical reduce to OR operation
logical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr, ML_Bool, **kw)
## Specialization of logical reduce to AND operation
logical_and_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalAnd, ML_Bool, **kw)


def uniform_list_check(value_list):
    """ Check that value_list is made of only a single value replicated in
        each element (vacuously True for an empty list) """
    return all(value == value_list[0] for value in value_list)


def uniform_vector_constant_check(optree):
    """ check whether optree is a uniform vector constant """
    if isinstance(optree, Constant) and optree.get_precision() is not None \
            and optree.get_precision().is_vector_format():
        return uniform_list_check(optree.get_value())
    return False


def uniform_shift_check(optree):
    """ check whether optree is a bit shift by a uniform vector constant
        (or by a scalar shift amount) """
    if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,
                           BitArithmeticRightShift)):
        return uniform_vector_constant_check(optree.get_input(1)) \
            or not optree.get_input(1).get_precision().is_vector_format()
    return False


def is_false(node):
    """ check if node is a Constant node whose value is equal to boolean False """
    return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)


def is_true(node):
    """ check if node is a Constant node whose value is equal to boolean True """
    return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)


def is_scalar_cst(node, value):
    """ check if node is a scalar constant node with value equal to value """
    if not isinstance(node, Constant):
        return False
    precision = node.get_precision()
    # consistent with uniform_vector_constant_check: a Constant without a
    # precision cannot be a vector format, so treat it as scalar instead of
    # raising AttributeError on precision None
    if precision is not None and precision.is_vector_format():
        return False
    return node.get_value() == value


def is_vector_uniform_cst(node, scalar_value):
    """ check if node is a vector constant node with each value equal to
        scalar_value """
    if not isinstance(node, Constant):
        return False
    precision = node.get_precision()
    # a Constant without a precision cannot be a vector constant
    if precision is None or not precision.is_vector_format():
        return False
    return node.get_value() == [scalar_value] * precision.get_vector_size()


def extract_tables(node):
    """ extract the set of all ML_NewTable nodes in the graph rooted at node """
    processed_set = set([node])
    table_set = set()
    # breadth-first traversal; deque gives O(1) pops from the left
    working_queue = deque([node])
    while working_queue:
        elt = working_queue.popleft()
        if isinstance(elt, ML_NewTable):
            table_set.add(elt)
        elif not isinstance(elt, ML_LeafNode):
            for op_node in elt.inputs:
                if op_node not in processed_set:
                    processed_set.add(op_node)
                    working_queue.append(op_node)
    return table_set
normal
{ "blob_id": "3a05ebee8e70321fe53637b4792f5821ce7044be", "index": 4264, "step-1": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\n<mask token>\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst 
node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\n<mask token>\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and 
value == value_list[0],\n value_list, True)\n\n\n<mask token>\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n", "step-2": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef 
is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\n<mask token>\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n 
dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\n<mask token>\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and 
optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n", "step-3": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None 
\"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n 
dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,\n ML_Bool, **kw)\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,\n LogicalAnd, ML_Bool, **kw)\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc 
and value == value_list[0],\n value_list, True)\n\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in 
processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n", "step-4": "from functools import reduce\nfrom metalibm_core.core.ml_formats import ML_Bool\nfrom metalibm_core.core.ml_operations import ML_LeafNode, Comparison, BooleanOperation, is_leaf_node, LogicalAnd, LogicalOr, Constant, BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift\nfrom metalibm_core.core.advanced_operations import PlaceHolder\nfrom metalibm_core.core.ml_table import ML_NewTable\nfrom metalibm_core.utility.log_report import Log\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: 
op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n 
\"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,\n ML_Bool, **kw)\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,\n LogicalAnd, ML_Bool, **kw)\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef 
is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n", "step-5": "# -*- coding: utf-8 -*-\n\n###############################################################################\n# This file is part of metalibm (https://github.com/kalray/metalibm)\n###############################################################################\n# MIT License\n#\n# Copyright (c) 2018 Kalray\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n###############################################################################\n# Author(s): Nicolas Brunie (nbrunie@kalray.eu)\n# Created: Aug 8th, 2017\n# last-modified: Mar 7th, 2018\n###############################################################################\n\nfrom functools import reduce\n\nfrom metalibm_core.core.ml_formats import ML_Bool\nfrom metalibm_core.core.ml_operations import (\n ML_LeafNode, Comparison, BooleanOperation,\n is_leaf_node,\n LogicalAnd, LogicalOr, Constant,\n BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift,\n)\nfrom metalibm_core.core.advanced_operations import PlaceHolder\nfrom metalibm_core.core.ml_table import ML_NewTable\n\nfrom metalibm_core.utility.log_report import Log\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel(\"EvaluateRangeVerbose\")\n\n## Assuming @p optree has no pre-defined range, recursively compute a range\n# from the node inputs\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n 
op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0),\n update_interval=update_interval,\n memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(\n evaluate_range(op, update_interval=update_interval,\n memoization_map=memoization_map\n ) for op in optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip(optree.inputs, args_interval)}\n # evaluate_range cannot rely on bare_range_function only as some\n # operations (e.g. CountLeadingZeros) do not base interval computation\n # on their inputs' intervals but on other parameters\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, \"range of {} is {}\", optree, op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, \"init_stage\"):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end 
end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(\n op_ctor(op0, op1, precision=precision)\n )\n # assigning attributes to the resulting node\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n## Specialization of logical reduce to OR operation\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr, ML_Bool, **kw)\n## Specialization of logical reduce to AND operation\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalAnd, ML_Bool, **kw)\n\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision() is None \\\n and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\ndef uniform_shift_check(optree):\n \"\"\" check 
whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)) \\\n or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision().is_vector_format() and node.get_value() == value\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision().is_vector_format() and node.get_value() == [scalar_value] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n", "step-ids": [ 14, 15, 16, 17, 18 ] }
[ 14, 15, 16, 17, 18 ]
# -*- coding: utf-8 -*- from flask import Blueprint, render_template, flash, redirect, url_for from flask_login import login_required, current_user from ..extensions import db from .forms import MyTaskForm from .models import MyTaskModel tasks = Blueprint('tasks', __name__, url_prefix='/tasks') @tasks.route('/my_tasks', methods=['GET', 'POST']) @login_required def my_tasks(): _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all() return render_template('tasks/my_tasks.html', all_tasks=_all_tasks, _active_tasks=True) @tasks.route('/view_task/<id>', methods=['GET', 'POST']) @login_required def view_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/view_task.html', task=_task) @tasks.route('/add_task', methods=['GET', 'POST']) @login_required def add_task(): _task = MyTaskModel() _form = MyTaskForm() if _form.validate_on_submit(): _task.users_id = current_user.id _form.populate_obj(_task) db.session.add(_task) db.session.commit() db.session.refresh(_task) flash('Your task is added successfully!', 'success') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/add_task.html', form=_form, _active_tasks=True) @tasks.route('/delete_task/<id>', methods=['GET', 'POST']) @login_required def delete_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) db.session.delete(_task) db.session.commit() flash('Your task is deleted successfully!', 'success') return redirect(url_for('tasks.my_tasks')) @tasks.route('/edit_task/<id>', methods=['GET', 'POST']) @login_required def edit_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! 
Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) _form = MyTaskForm(obj=_task) if _form.validate_on_submit(): _task.users_id = current_user.id _form.populate_obj(_task) db.session.add(_task) db.session.commit() flash('Your task updated successfully!', 'success') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/edit_task.html', form=_form, task=_task, _active_tasks=True)
normal
{ "blob_id": "7882504f08e871f2610ff633608eb3d380179041", "index": 1735, "step-1": "<mask token>\n\n\n@tasks.route('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n<mask token>\n\n\n@tasks.route('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\n@tasks.route('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\n@tasks.route('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-2": "<mask token>\n\n\n@tasks.route('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n@tasks.route('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\n@tasks.route('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\n@tasks.route('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\n@tasks.route('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-3": "<mask token>\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\n@tasks.route('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n@tasks.route('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\n@tasks.route('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\n@tasks.route('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\n@tasks.route('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-4": "from flask import Blueprint, render_template, flash, redirect, url_for\nfrom flask_login import login_required, current_user\nfrom ..extensions import db\nfrom .forms import MyTaskForm\nfrom .models import MyTaskModel\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\n@tasks.route('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n@tasks.route('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\n@tasks.route('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\n@tasks.route('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\n@tasks.route('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-5": "# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, render_template, flash, redirect, url_for\nfrom flask_login import login_required, current_user\n\nfrom ..extensions import db\n\nfrom .forms import MyTaskForm\nfrom .models import MyTaskModel\n\n\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\n@tasks.route('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n\n return render_template('tasks/my_tasks.html',\n all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n@tasks.route('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/view_task.html',\n task=_task)\n\n\n@tasks.route('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n\n _task = MyTaskModel()\n\n _form = MyTaskForm()\n\n if _form.validate_on_submit():\n\n _task.users_id = current_user.id\n\n _form.populate_obj(_task)\n\n db.session.add(_task)\n db.session.commit()\n\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/add_task.html', form=_form, _active_tasks=True)\n\n\n@tasks.route('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n db.session.delete(_task)\n db.session.commit()\n\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\n@tasks.route('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n _form = MyTaskForm(obj=_task)\n\n if _form.validate_on_submit():\n\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n\n db.session.add(_task)\n db.session.commit()\n\n flash('Your task updated successfully!', 'success')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/edit_task.html', form=_form, task=_task, _active_tasks=True)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> omniORB.updateModule('managedElementManager') <|reserved_special_token_0|> <|reserved_special_token_1|> import omniORB omniORB.updateModule('managedElementManager') import managedElementManager_idl <|reserved_special_token_1|> # DO NOT EDIT THIS FILE! # # Python module managedElementManager generated by omniidl import omniORB omniORB.updateModule("managedElementManager") # ** 1. Stub files contributing to this module import managedElementManager_idl # ** 2. Sub-modules # ** 3. End
flexible
{ "blob_id": "7727896d4e1b2b415c398b206f9fb7e228e6f26d", "index": 8602, "step-1": "<mask token>\n", "step-2": "<mask token>\nomniORB.updateModule('managedElementManager')\n<mask token>\n", "step-3": "import omniORB\nomniORB.updateModule('managedElementManager')\nimport managedElementManager_idl\n", "step-4": "# DO NOT EDIT THIS FILE!\n#\n# Python module managedElementManager generated by omniidl\n\nimport omniORB\nomniORB.updateModule(\"managedElementManager\")\n\n# ** 1. Stub files contributing to this module\nimport managedElementManager_idl\n\n# ** 2. Sub-modules\n\n# ** 3. End\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pygame import os from network import Network from card import Card from game import Game, Player pygame.font.init() # Initializing window WIDTH, HEIGHT = 700, 800 WIN = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("Zole") CARD_WIDTH = 60 ############################## Uploading cards def get_card_size(card_width, image): card_height = image.get_height() / (image.get_width()/card_width) return round(card_height) CARD_IMAGE_BACK_GRAY = pygame.image.load( os.path.join("images", "gray_back.png")) CARD_HEIGHT = get_card_size(CARD_WIDTH, CARD_IMAGE_BACK_GRAY) # Uploading backside of cards CARD_IMAGE_BACK_GRAY = pygame.transform.scale( CARD_IMAGE_BACK_GRAY, (CARD_WIDTH, CARD_HEIGHT)) # Uploading all the cards def upload_card_images(card_name): card_n = pygame.image.load(os.path.join("images", card_name + ".png")) card_n = pygame.transform.scale( card_n, (CARD_WIDTH, CARD_HEIGHT)) return card_n CARD_NAMES = ["AC", "AH", "AS", "AD", "KS", "KH", "KD", "KC", "QS", "QH", "QD", "QC", "JS", "JH", "JD", "JC", "10S", "10H", "10D", "10C", "9S", "9H", "9D", "9C", "8D", "7D"] CARD_IMAGES = {} # Uploading all card images in dictionary for name in CARD_NAMES: CARD_IMAGES[name] = upload_card_images(name) ############################## Uploading cards End # Card strengths STRENGTH_SCALE_TRUMPS = ["QC", "QS", "QH", "QD", "JC", "JS", "JH", "JD", "AD", "10D", "KD", "9D", "8D", "7D", "AC", "10C", "KC", "9C", "AH", "10H", "KH", "9H", "AS", "10S", "KS", "9S", "None"] STRENGTH_SCALE_NON_TRUMPS = ["A", "10", "K", "9", "None"] def draw_player(win,x, y,width,height, cards, card_images): i = 0 for card in cards: win.blit(card_images[card.name], (x + i * width, y)) card.position = (x + i * width, y, x + i * width + width, y + height) i += 1 def draw_opponents(win,x, y,width,height,back_image,count, hor = True): if hor: for i in range(count): win.blit(back_image, (x + i * width, y)) else: for i in range(count): win.blit(pygame.transform.rotate(back_image, 90), (x , y + i 
* height)) def draw_played_cards(win, cards, card_images, turn_order): position = [(300,300),(315, 260),(330,300)] counter = turn_order for _ in range(len(cards)): win.blit(card_images[cards[0].name], (position[counter])) turn_order = (counter + 1) % 3 def main(): run = True clock = pygame.time.Clock() main_font = pygame.font.SysFont("comicsans", 30) n = Network() player = n.connect() def redraw_window(win): win.fill((53, 101, 77)) draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,CARD_IMAGES) draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8) draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8, hor = False) draw_played_cards(win,game.played_cards_round, CARD_IMAGES, game.turn_order) if player.turn == True: for card in player.Cards: if card.position[0] >= pos[0] and card.position[1] >= pos[1] and card.position[2] <= pos[0] and card.position[3] <= pos[1]: player.cards.remove(card) player.played_card = True player.last_played_card = card player.turn = False pygame.display.update() while run: pos = (-5, -5) clock.tick(60) game = n.send(player) for event in pygame.event.get(): if event.type == pygame.QUIT: quit() if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True: pos = pygame.mouse.get_pos() redraw_window(WIN) main()
normal
{ "blob_id": "9c478c59398618d0e447276f9ff6c1c143702f12", "index": 2360, "step-1": "<mask token>\n\n\ndef get_card_size(card_width, image):\n card_height = image.get_height() / (image.get_width() / card_width)\n return round(card_height)\n\n\n<mask token>\n\n\ndef upload_card_images(card_name):\n card_n = pygame.image.load(os.path.join('images', card_name + '.png'))\n card_n = pygame.transform.scale(card_n, (CARD_WIDTH, CARD_HEIGHT))\n return card_n\n\n\n<mask token>\n\n\ndef draw_player(win, x, y, width, height, cards, card_images):\n i = 0\n for card in cards:\n win.blit(card_images[card.name], (x + i * width, y))\n card.position = x + i * width, y, x + i * width + width, y + height\n i += 1\n\n\ndef draw_opponents(win, x, y, width, height, back_image, count, hor=True):\n if hor:\n for i in range(count):\n win.blit(back_image, (x + i * width, y))\n else:\n for i in range(count):\n win.blit(pygame.transform.rotate(back_image, 90), (x, y + i *\n height))\n\n\ndef draw_played_cards(win, cards, card_images, turn_order):\n position = [(300, 300), (315, 260), (330, 300)]\n counter = turn_order\n for _ in range(len(cards)):\n win.blit(card_images[cards[0].name], position[counter])\n turn_order = (counter + 1) % 3\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n main_font = pygame.font.SysFont('comicsans', 30)\n n = Network()\n player = n.connect()\n\n def redraw_window(win):\n win.fill((53, 101, 77))\n draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,\n CARD_IMAGES)\n draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8)\n draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8, hor=False)\n draw_played_cards(win, game.played_cards_round, CARD_IMAGES, game.\n turn_order)\n if player.turn == True:\n for card in player.Cards:\n if card.position[0] >= pos[0] and card.position[1] >= pos[1\n ] and card.position[2] <= pos[0] and card.position[3\n ] <= pos[1]:\n player.cards.remove(card)\n 
player.played_card = True\n player.last_played_card = card\n player.turn = False\n pygame.display.update()\n while run:\n pos = -5, -5\n clock.tick(60)\n game = n.send(player)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True:\n pos = pygame.mouse.get_pos()\n redraw_window(WIN)\n\n\n<mask token>\n", "step-2": "<mask token>\npygame.font.init()\n<mask token>\npygame.display.set_caption('Zole')\n<mask token>\n\n\ndef get_card_size(card_width, image):\n card_height = image.get_height() / (image.get_width() / card_width)\n return round(card_height)\n\n\n<mask token>\n\n\ndef upload_card_images(card_name):\n card_n = pygame.image.load(os.path.join('images', card_name + '.png'))\n card_n = pygame.transform.scale(card_n, (CARD_WIDTH, CARD_HEIGHT))\n return card_n\n\n\n<mask token>\nfor name in CARD_NAMES:\n CARD_IMAGES[name] = upload_card_images(name)\n<mask token>\n\n\ndef draw_player(win, x, y, width, height, cards, card_images):\n i = 0\n for card in cards:\n win.blit(card_images[card.name], (x + i * width, y))\n card.position = x + i * width, y, x + i * width + width, y + height\n i += 1\n\n\ndef draw_opponents(win, x, y, width, height, back_image, count, hor=True):\n if hor:\n for i in range(count):\n win.blit(back_image, (x + i * width, y))\n else:\n for i in range(count):\n win.blit(pygame.transform.rotate(back_image, 90), (x, y + i *\n height))\n\n\ndef draw_played_cards(win, cards, card_images, turn_order):\n position = [(300, 300), (315, 260), (330, 300)]\n counter = turn_order\n for _ in range(len(cards)):\n win.blit(card_images[cards[0].name], position[counter])\n turn_order = (counter + 1) % 3\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n main_font = pygame.font.SysFont('comicsans', 30)\n n = Network()\n player = n.connect()\n\n def redraw_window(win):\n win.fill((53, 101, 77))\n draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,\n 
CARD_IMAGES)\n draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8)\n draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8, hor=False)\n draw_played_cards(win, game.played_cards_round, CARD_IMAGES, game.\n turn_order)\n if player.turn == True:\n for card in player.Cards:\n if card.position[0] >= pos[0] and card.position[1] >= pos[1\n ] and card.position[2] <= pos[0] and card.position[3\n ] <= pos[1]:\n player.cards.remove(card)\n player.played_card = True\n player.last_played_card = card\n player.turn = False\n pygame.display.update()\n while run:\n pos = -5, -5\n clock.tick(60)\n game = n.send(player)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True:\n pos = pygame.mouse.get_pos()\n redraw_window(WIN)\n\n\nmain()\n", "step-3": "<mask token>\npygame.font.init()\nWIDTH, HEIGHT = 700, 800\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Zole')\nCARD_WIDTH = 60\n\n\ndef get_card_size(card_width, image):\n card_height = image.get_height() / (image.get_width() / card_width)\n return round(card_height)\n\n\nCARD_IMAGE_BACK_GRAY = pygame.image.load(os.path.join('images',\n 'gray_back.png'))\nCARD_HEIGHT = get_card_size(CARD_WIDTH, CARD_IMAGE_BACK_GRAY)\nCARD_IMAGE_BACK_GRAY = pygame.transform.scale(CARD_IMAGE_BACK_GRAY, (\n CARD_WIDTH, CARD_HEIGHT))\n\n\ndef upload_card_images(card_name):\n card_n = pygame.image.load(os.path.join('images', card_name + '.png'))\n card_n = pygame.transform.scale(card_n, (CARD_WIDTH, CARD_HEIGHT))\n return card_n\n\n\nCARD_NAMES = ['AC', 'AH', 'AS', 'AD', 'KS', 'KH', 'KD', 'KC', 'QS', 'QH',\n 'QD', 'QC', 'JS', 'JH', 'JD', 'JC', '10S', '10H', '10D', '10C', '9S',\n '9H', '9D', '9C', '8D', '7D']\nCARD_IMAGES = {}\nfor name in CARD_NAMES:\n CARD_IMAGES[name] = upload_card_images(name)\nSTRENGTH_SCALE_TRUMPS = ['QC', 'QS', 'QH', 'QD', 'JC', 'JS', 'JH', 'JD',\n 'AD', '10D', 
'KD', '9D', '8D', '7D', 'AC', '10C', 'KC', '9C', 'AH',\n '10H', 'KH', '9H', 'AS', '10S', 'KS', '9S', 'None']\nSTRENGTH_SCALE_NON_TRUMPS = ['A', '10', 'K', '9', 'None']\n\n\ndef draw_player(win, x, y, width, height, cards, card_images):\n i = 0\n for card in cards:\n win.blit(card_images[card.name], (x + i * width, y))\n card.position = x + i * width, y, x + i * width + width, y + height\n i += 1\n\n\ndef draw_opponents(win, x, y, width, height, back_image, count, hor=True):\n if hor:\n for i in range(count):\n win.blit(back_image, (x + i * width, y))\n else:\n for i in range(count):\n win.blit(pygame.transform.rotate(back_image, 90), (x, y + i *\n height))\n\n\ndef draw_played_cards(win, cards, card_images, turn_order):\n position = [(300, 300), (315, 260), (330, 300)]\n counter = turn_order\n for _ in range(len(cards)):\n win.blit(card_images[cards[0].name], position[counter])\n turn_order = (counter + 1) % 3\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n main_font = pygame.font.SysFont('comicsans', 30)\n n = Network()\n player = n.connect()\n\n def redraw_window(win):\n win.fill((53, 101, 77))\n draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,\n CARD_IMAGES)\n draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8)\n draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8, hor=False)\n draw_played_cards(win, game.played_cards_round, CARD_IMAGES, game.\n turn_order)\n if player.turn == True:\n for card in player.Cards:\n if card.position[0] >= pos[0] and card.position[1] >= pos[1\n ] and card.position[2] <= pos[0] and card.position[3\n ] <= pos[1]:\n player.cards.remove(card)\n player.played_card = True\n player.last_played_card = card\n player.turn = False\n pygame.display.update()\n while run:\n pos = -5, -5\n clock.tick(60)\n game = n.send(player)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and player.turn == 
True:\n pos = pygame.mouse.get_pos()\n redraw_window(WIN)\n\n\nmain()\n", "step-4": "import pygame\nimport os\nfrom network import Network\nfrom card import Card\nfrom game import Game, Player\npygame.font.init()\nWIDTH, HEIGHT = 700, 800\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Zole')\nCARD_WIDTH = 60\n\n\ndef get_card_size(card_width, image):\n card_height = image.get_height() / (image.get_width() / card_width)\n return round(card_height)\n\n\nCARD_IMAGE_BACK_GRAY = pygame.image.load(os.path.join('images',\n 'gray_back.png'))\nCARD_HEIGHT = get_card_size(CARD_WIDTH, CARD_IMAGE_BACK_GRAY)\nCARD_IMAGE_BACK_GRAY = pygame.transform.scale(CARD_IMAGE_BACK_GRAY, (\n CARD_WIDTH, CARD_HEIGHT))\n\n\ndef upload_card_images(card_name):\n card_n = pygame.image.load(os.path.join('images', card_name + '.png'))\n card_n = pygame.transform.scale(card_n, (CARD_WIDTH, CARD_HEIGHT))\n return card_n\n\n\nCARD_NAMES = ['AC', 'AH', 'AS', 'AD', 'KS', 'KH', 'KD', 'KC', 'QS', 'QH',\n 'QD', 'QC', 'JS', 'JH', 'JD', 'JC', '10S', '10H', '10D', '10C', '9S',\n '9H', '9D', '9C', '8D', '7D']\nCARD_IMAGES = {}\nfor name in CARD_NAMES:\n CARD_IMAGES[name] = upload_card_images(name)\nSTRENGTH_SCALE_TRUMPS = ['QC', 'QS', 'QH', 'QD', 'JC', 'JS', 'JH', 'JD',\n 'AD', '10D', 'KD', '9D', '8D', '7D', 'AC', '10C', 'KC', '9C', 'AH',\n '10H', 'KH', '9H', 'AS', '10S', 'KS', '9S', 'None']\nSTRENGTH_SCALE_NON_TRUMPS = ['A', '10', 'K', '9', 'None']\n\n\ndef draw_player(win, x, y, width, height, cards, card_images):\n i = 0\n for card in cards:\n win.blit(card_images[card.name], (x + i * width, y))\n card.position = x + i * width, y, x + i * width + width, y + height\n i += 1\n\n\ndef draw_opponents(win, x, y, width, height, back_image, count, hor=True):\n if hor:\n for i in range(count):\n win.blit(back_image, (x + i * width, y))\n else:\n for i in range(count):\n win.blit(pygame.transform.rotate(back_image, 90), (x, y + i *\n height))\n\n\ndef draw_played_cards(win, cards, 
card_images, turn_order):\n position = [(300, 300), (315, 260), (330, 300)]\n counter = turn_order\n for _ in range(len(cards)):\n win.blit(card_images[cards[0].name], position[counter])\n turn_order = (counter + 1) % 3\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n main_font = pygame.font.SysFont('comicsans', 30)\n n = Network()\n player = n.connect()\n\n def redraw_window(win):\n win.fill((53, 101, 77))\n draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,\n CARD_IMAGES)\n draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8)\n draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH,\n CARD_IMAGE_BACK_GRAY, 8, hor=False)\n draw_played_cards(win, game.played_cards_round, CARD_IMAGES, game.\n turn_order)\n if player.turn == True:\n for card in player.Cards:\n if card.position[0] >= pos[0] and card.position[1] >= pos[1\n ] and card.position[2] <= pos[0] and card.position[3\n ] <= pos[1]:\n player.cards.remove(card)\n player.played_card = True\n player.last_played_card = card\n player.turn = False\n pygame.display.update()\n while run:\n pos = -5, -5\n clock.tick(60)\n game = n.send(player)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True:\n pos = pygame.mouse.get_pos()\n redraw_window(WIN)\n\n\nmain()\n", "step-5": "import pygame\nimport os\nfrom network import Network\nfrom card import Card\nfrom game import Game, Player\npygame.font.init()\n\n# Initializing window\nWIDTH, HEIGHT = 700, 800\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Zole\")\n\nCARD_WIDTH = 60\n\n############################## Uploading cards\ndef get_card_size(card_width, image):\n card_height = image.get_height() / (image.get_width()/card_width)\n return round(card_height)\n\nCARD_IMAGE_BACK_GRAY = pygame.image.load(\n os.path.join(\"images\", \"gray_back.png\"))\n\nCARD_HEIGHT = get_card_size(CARD_WIDTH, 
CARD_IMAGE_BACK_GRAY)\n\n# Uploading backside of cards\nCARD_IMAGE_BACK_GRAY = pygame.transform.scale(\n CARD_IMAGE_BACK_GRAY, (CARD_WIDTH, CARD_HEIGHT))\n\n# Uploading all the cards\ndef upload_card_images(card_name):\n card_n = pygame.image.load(os.path.join(\"images\", card_name + \".png\"))\n card_n = pygame.transform.scale(\n card_n, (CARD_WIDTH, CARD_HEIGHT))\n return card_n\n\n\nCARD_NAMES = [\"AC\", \"AH\", \"AS\", \"AD\", \"KS\", \"KH\", \"KD\", \"KC\", \"QS\", \"QH\", \"QD\", \"QC\", \"JS\", \"JH\", \"JD\", \"JC\", \"10S\", \"10H\", \"10D\",\n \"10C\", \"9S\", \"9H\", \"9D\", \"9C\", \"8D\", \"7D\"]\n\nCARD_IMAGES = {}\n\n# Uploading all card images in dictionary\nfor name in CARD_NAMES:\n CARD_IMAGES[name] = upload_card_images(name)\n\n############################## Uploading cards End\n\n\n# Card strengths\n\nSTRENGTH_SCALE_TRUMPS = [\"QC\", \"QS\", \"QH\", \"QD\", \"JC\", \"JS\", \"JH\", \"JD\", \"AD\", \"10D\", \"KD\", \"9D\", \"8D\", \"7D\", \"AC\", \"10C\", \"KC\", \"9C\",\n \"AH\", \"10H\", \"KH\", \"9H\", \"AS\", \"10S\", \"KS\", \"9S\", \"None\"]\n\nSTRENGTH_SCALE_NON_TRUMPS = [\"A\", \"10\", \"K\", \"9\", \"None\"]\n\n\ndef draw_player(win,x, y,width,height, cards, card_images):\n i = 0\n for card in cards:\n win.blit(card_images[card.name], (x + i * width, y))\n card.position = (x + i * width, y, x +\n i * width + width, y + height)\n i += 1\n\ndef draw_opponents(win,x, y,width,height,back_image,count, hor = True):\n if hor:\n for i in range(count):\n win.blit(back_image, (x + i * width, y))\n\n else:\n for i in range(count):\n win.blit(pygame.transform.rotate(back_image, 90), (x , y + i * height))\n\ndef draw_played_cards(win, cards, card_images, turn_order):\n position = [(300,300),(315, 260),(330,300)]\n counter = turn_order\n for _ in range(len(cards)):\n win.blit(card_images[cards[0].name], (position[counter]))\n turn_order = (counter + 1) % 3\n\n\n\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n main_font = 
pygame.font.SysFont(\"comicsans\", 30)\n n = Network()\n\n player = n.connect()\n\n\n def redraw_window(win):\n win.fill((53, 101, 77))\n\n\n draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,CARD_IMAGES)\n draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8)\n draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8, hor = False)\n draw_played_cards(win,game.played_cards_round, CARD_IMAGES, game.turn_order)\n\n if player.turn == True:\n for card in player.Cards:\n if card.position[0] >= pos[0] and card.position[1] >= pos[1] and card.position[2] <= pos[0] and card.position[3] <= pos[1]:\n player.cards.remove(card)\n player.played_card = True\n player.last_played_card = card\n player.turn = False\n\n\n pygame.display.update()\n\n while run:\n pos = (-5, -5)\n clock.tick(60)\n game = n.send(player)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True:\n pos = pygame.mouse.get_pos()\n\n redraw_window(WIN)\n\n\nmain()\n\n\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
# Generated by Django 3.1.6 on 2021-04-22 07:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('projects', '0004_project_is_featured'), ] operations = [ migrations.AlterField( model_name='project', name='pin_id', field=models.CharField(max_length=20, null=True, unique=True), ), ]
normal
{ "blob_id": "24ed29dfaaf7ce508b2d80740bad1304b291c596", "index": 8466, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n", "step-5": "# Generated by Django 3.1.6 on 2021-04-22 07:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0004_project_is_featured'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf <|reserved_special_token_0|> def unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. """ if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != 'optimal': raise Exception('unique_equalityset: LP returned status ' + str (sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: E.append(i) if len(E) == 0: raise Exception('unique_equalityset: empty E') return np.array(E) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. 
P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf <|reserved_special_token_0|> def unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. """ if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != 'optimal': raise Exception('unique_equalityset: LP returned status ' + str (sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: E.append(i) if len(E) == 0: raise Exception('unique_equalityset: empty E') return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) m = G.shape[0] n = G.shape[1] e = 0.001 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == 'optimal': raise 
Exception('unique_equalityset: LP returned status ' + str(sol ['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception('unique_equalityset2: affine hulls not the same') return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = -np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == 'optimal': opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. 
Projects the polytope [C D] x <= b onto the coordinates that correspond to C. The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.' ) nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print('Projecting from dim ' + str(d + k) + ' to ' + str(d)) if k == 0: return C, bb, [] if d == 1: c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != 'optimal': raise Exception( 'esp: projection to 1D is not full-dimensional, LP returned status ' + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != 'optimal': raise Exception( 'esp: projection to 1D is not full-dimensional, ' + 'LP returned status ' + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, 
max_sol, max_dual_sol): E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.0], [-1.0]]) g = np.array([x_max, -x_min]) if trans: g = g + np.dot(G, xc0) E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print('Returning projection from dim ' + str(d + k) + ' to dim 1 \n') return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print('\nStarting eq set ' + str(E_0) + '\nStarting ridges ') for rr in L: print(str(rr.E_r)) E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print('\nLooking for neighbors to ' + str(rid_fac1.E_0) + ' and ' + str(rid_fac1.E_r) + ' ..') E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print('found neighbor ' + str(E_adj) + '. 
\n\nLooking for ridges of neighbor..') ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print('found ' + str(len(ridge_list)) + ' ridges\n') found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print('Ridge ' + str(E_r) + ' already visited, removing from L..') if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print('Adding ridge-facet ' + str(E_adj) + ' ' + str( E_r) + '') L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print('Expected ridge ' + str(rid_fac1.E_r)) print('but got ridges ') for rid in ridge_list: print(rid.E_r) raise Exception( 'esp: ridge did not return neighboring ridge as expected') G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) if trans: g = g + np.dot(G, xc0) for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-07): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception('shoot: could not find starting equality set') gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol= abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception('shoot: wrong dimension of affine hull') return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print('Doing recursive ESP call') u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if len(E_t[0]) == 0 or len(E_t[1]) == 0: raise Exception( 'ridge: recursive call did not return any equality sets') for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception('ridge: wrong length of new ridge!') Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print('Doing direct calculation of ridges') X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space(np.vstack([np.hstack([af, bf]), np. 
hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]]) ), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-07): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br d = C.shape[1] k = D.shape[1] C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != 'optimal': print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data['C'] = C data['D'] = D data['b'] = b sio.savemat('matlabdata', data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception('adjacent: Lp returned status ' + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[ E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data['C'] = C data['D'] = D data['b'] = b data['Er'] = E_r + 1 data['ar'] = ar data['br'] = br data['Ef'] = E + 1 data['af'] = af data['bf'] = bf sio.savemat('matlabdata', data) raise Exception( 'adjacent: equality set computation returned empty set') else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07): """Affine projection. 
Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception('proj_aff: wrong dimension calculated in 1') return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception('proj_aff: wrong dimension calculated in 2') return a_n, b_n <|reserved_special_token_0|> def unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != 'optimal': raise Exception('unique_equalityset: LP returned status ' + str (sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: E.append(i) if len(E) == 0: raise Exception('unique_equalityset: empty E') return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) m = G.shape[0] n = G.shape[1] e = 0.001 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == 'optimal': raise Exception('unique_equalityset: LP returned status ' + str(sol ['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception('unique_equalityset2: affine hulls not the same') return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = -np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == 'optimal': opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False <|reserved_special_token_0|> def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and len(N_space) == 0: N_space = v[range(np.amax(n - 1, 1), n), :] return N_space <|reserved_special_token_1|> <|reserved_special_token_0|> class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. 
The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.' ) nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print('Projecting from dim ' + str(d + k) + ' to ' + str(d)) if k == 0: return C, bb, [] if d == 1: c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != 'optimal': raise Exception( 'esp: projection to 1D is not full-dimensional, LP returned status ' + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != 'optimal': raise Exception( 'esp: projection to 1D is not full-dimensional, ' + 'LP returned status ' + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): E_max = unique_equalityset(C, D, b, np.array([1.0]), 
x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.0], [-1.0]]) g = np.array([x_max, -x_min]) if trans: g = g + np.dot(G, xc0) E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print('Returning projection from dim ' + str(d + k) + ' to dim 1 \n') return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print('\nStarting eq set ' + str(E_0) + '\nStarting ridges ') for rr in L: print(str(rr.E_r)) E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print('\nLooking for neighbors to ' + str(rid_fac1.E_0) + ' and ' + str(rid_fac1.E_r) + ' ..') E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print('found neighbor ' + str(E_adj) + '. 
\n\nLooking for ridges of neighbor..') ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print('found ' + str(len(ridge_list)) + ' ridges\n') found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print('Ridge ' + str(E_r) + ' already visited, removing from L..') if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print('Adding ridge-facet ' + str(E_adj) + ' ' + str( E_r) + '') L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print('Expected ridge ' + str(rid_fac1.E_r)) print('but got ridges ') for rid in ridge_list: print(rid.E_r) raise Exception( 'esp: ridge did not return neighboring ridge as expected') G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) if trans: g = g + np.dot(G, xc0) for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-07): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception('shoot: could not find starting equality set') gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol= abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception('shoot: wrong dimension of affine hull') return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print('Doing recursive ESP call') u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if len(E_t[0]) == 0 or len(E_t[1]) == 0: raise Exception( 'ridge: recursive call did not return any equality sets') for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception('ridge: wrong length of new ridge!') Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print('Doing direct calculation of ridges') X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space(np.vstack([np.hstack([af, bf]), np. 
hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]]) ), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-07): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br d = C.shape[1] k = D.shape[1] C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != 'optimal': print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data['C'] = C data['D'] = D data['b'] = b sio.savemat('matlabdata', data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception('adjacent: Lp returned status ' + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[ E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data['C'] = C data['D'] = D data['b'] = b data['Er'] = E_r + 1 data['ar'] = ar data['br'] = br data['Ef'] = E + 1 data['af'] = af data['bf'] = bf sio.savemat('matlabdata', data) raise Exception( 'adjacent: equality set computation returned empty set') else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07): """Affine projection. 
Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception('proj_aff: wrong dimension calculated in 1') return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception('proj_aff: wrong dimension calculated in 2') return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-07): """Return `True` if pair of dual problems is dual degenerate. Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. 
""" D = -G d = -h.flatten() mu = -z_opt.flatten() I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] J = np.nonzero(mu > abs_tol)[0] i = mu < abs_tol i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 L = np.nonzero(i + j == 2)[0] nI = len(I) nJ = len(J) nL = len(L) DI = D[I, :] DJ = D[J, :] DL = D[L, :] dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True elif len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = -DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt(c=-np.sum(DL, axis=0), G=Ai, h =bi, A=Ae, b=be) if sol['status'] == 'dual infeasible': return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != 'optimal': raise Exception('unique_equalityset: LP returned status ' + str (sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: E.append(i) if len(E) == 0: raise Exception('unique_equalityset: empty E') return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) m = G.shape[0] n = G.shape[1] e = 0.001 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == 'optimal': raise Exception('unique_equalityset: LP returned status ' + str(sol ['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception('unique_equalityset2: affine hulls not the same') return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = -np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == 'optimal': opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def normalize(AA, bb, abs_tol=1e-07): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) keepind = np.nonzero(np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0 ] A = A[keepind, :] b = b[keepind] anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0] ) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and len(N_space) == 0: N_space = v[range(np.amax(n - 1, 1), n), :] return N_space <|reserved_special_token_1|> # Copyright (c) 2011-2014 by California Institute of Technology # All 
rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. 
# Nilsson, 8/2/11
import pickle

import numpy as np
from scipy import io as sio
from scipy import linalg

from polytope import solvers


class Ridge(object):
    """A ridge.

    Attributes:

    - `E_r`: Equality set of a facet

    - `ar, br`: Affine hull of the facet
      s.t. P_{E_0} = P intersection {x | ar x = br}.
    """

    def __init__(self, E, a, b):
        self.E_r = E
        self.ar = a
        self.br = b


class Ridge_Facet(object):
    """A ridge facet.

    Attributes:

    - `E_r`: Equality set of a ridge

    - `ar,br`: Affine hull of the ridge s.t.
      P_{E_f} intersection {x | ar x = br}
      defines the ridge, where E_f is the
      equality set of the facet.

    - `E_0`: Equality set of a facet

    - `af,bf`: Affine hull of the facet.
    """

    def __init__(self, E_r, ar, br, E_0, af, bf):
        self.E_r = E_r
        self.ar = ar
        self.br = br
        self.E_0 = E_0
        self.af = af
        self.bf = bf


def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):
    """Project polytope [C D] x <= b onto C coordinates.

    Projects the polytope [C D] x <= b onto the
    coordinates that correspond to C. The projection of the polytope
    P = {[C D]x <= b} where C is M x D and D is M x K is
    defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}
    """
    if 'glpk' not in solvers.installed_solvers:
        raise Exception(
            "projection_esp error:"
            " Equality set projection requires `cvxopt.glpk` to run.")
    # Remove zero columns and rows
    nonzerorows = np.nonzero(
        np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]
    nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]
    nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]
    C = CC[nonzerorows, :].copy()
    D = DD[nonzerorows, :].copy()
    C = C[:, nonzeroxcols]
    D = D[:, nonzeroycols]
    b = bb[nonzerorows].copy()
    # Make sure origo is inside polytope
    if not centered:
        xc0, yc0, trans = cheby_center(C, D, b)
        if trans:
            b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()
        else:
            b = b
    else:
        trans = False
    d = C.shape[1]
    k = D.shape[1]
    if verbose > 0:
        print("Projecting from dim " + str(d + k) + " to " + str(d))
    if k == 0:
        # Not projecting
        return C, bb, []
    if d == 1:
        # Projection to 1D
        c = np.zeros(d + k)
        c[0] = 1
        G = np.hstack([C, D])
        sol = solvers.lpsolve(c, G, b, solver='glpk')
        if sol['status'] != "optimal":
            raise Exception(
                "esp: projection to 1D is not full-dimensional, "
                "LP returned status " + str(sol['status']))
        min_sol = np.array(sol['x']).flatten()
        min_dual_sol = np.array(sol['z']).flatten()
        sol = solvers.lpsolve(-c, G, b, solver='glpk')
        if sol['status'] != "optimal":
            raise Exception(
                "esp: projection to 1D is not full-dimensional, " +
                "LP returned status " + str(sol['status']))
        max_sol = np.array(sol['x']).flatten()
        max_dual_sol = np.array(sol['z']).flatten()
        # min, max
        x_min = min_sol[0]
        x_max = max_sol[0]
        y_min = min_sol[range(1, k + 1)]
        y_max = max_sol[range(1, k + 1)]
        if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):
            # Min case, relax constraint a little to avoid infeasibility
            E_min = unique_equalityset(
                C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)
        else:
            E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]
        if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):
            # Max case, relax constraint a little to avoid infeasibility
            E_max = unique_equalityset(
                C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)
        else:
            E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]
        G = np.array([[1.], [-1.]])
        g = np.array([x_max, -x_min])
        # Relocate
        if trans:
            g = g + np.dot(G, xc0)
        # Return zero cols/rows
        E_max = nonzerorows[E_max]
        E_min = nonzerorows[E_min]
        if verbose > 0:
            print(
                "Returning projection from dim " + str(d + k) +
                " to dim 1 \n")
        return G, g, [E_max, E_min]
    E = []
    L = []
    E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)
    ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)
    for i in range(len(ridge_list)):
        r = ridge_list[i]
        L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))
    G = af.T
    g = bf
    if verbose > 0:
        print("\nStarting eq set " + str(E_0) + "\nStarting ridges ")
        for rr in L:
            print(str(rr.E_r))
    E.append(E_0)
    while len(L) > 0:
        rid_fac1 = L[0]
        if verbose > 0:
            print("\nLooking for neighbors to " + str(rid_fac1.E_0) +
                  " and " + str(rid_fac1.E_r) + " ..")
        E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)
        if verbose > 0:
            print("found neighbor " + str(E_adj) +
                  ". \n\nLooking for ridges of neighbor..")
        ridge_list = ridge(
            C, D, b, E_adj, a_adj, b_adj,
            abs_tol=abs_tol, verbose=verbose)
        if verbose > 0:
            print("found " + str(len(ridge_list)) + " ridges\n")
        found_org = False
        for i in range(len(ridge_list)):
            r = ridge_list[i]
            E_r = r.E_r
            ar = r.ar
            br = r.br
            found = False
            for j in range(len(L)):
                rid_fac2 = L[j]
                A_r = rid_fac2.E_r
                if len(A_r) != len(E_r):
                    continue
                t1 = np.sort(np.array(A_r))
                t2 = np.sort(np.array(E_r))
                if np.sum(np.abs(t1 - t2)) < abs_tol:
                    found = True
                    break
            if found:
                if verbose > 0:
                    print("Ridge " + str(E_r) +
                          " already visited, removing from L..")
                if rid_fac2 == rid_fac1:
                    found_org = True
                L.remove(rid_fac2)
            else:
                if verbose > 0:
                    print("Adding ridge-facet " + str(E_adj) +
                          " " + str(E_r) + "")
                L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
        if not found_org:
            print("Expected ridge " + str(rid_fac1.E_r))
            print("but got ridges ")
            for rid in ridge_list:
                print(rid.E_r)
            raise Exception(
                "esp: ridge did not return neighboring ridge as expected")
        G = np.vstack([G, a_adj])
        g = np.hstack([g, b_adj])
        E.append(E_adj)
    # Restore center
    if trans:
        g = g + np.dot(G, xc0)
    # Return zero rows
    for Ef in E:
        Ef = nonzerorows[Ef]
    return G, g, E


def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
    """Return random equality set of P that projects on a projection facet.

    Returns randomly selected equality set E_0 of P such
    that the projection of the equality set is a facet of the projection.

    @param C: Matrix defining the polytope Cx+Dy <= b
    @param D: Matrix defining the polytope Cx+Dy <= b
    @param b: Vector defining the polytope Cx+Dy <= b

    @return: `E_0,af,bf`: Equality set and affine hull
    """
    d = C.shape[1]
    k = D.shape[1]
    iter = 0
    while True:
        if iter > maxiter:
            raise Exception(
                "shoot: could not find starting equality set")
        gamma = np.random.rand(d) - 0.5
        c = np.zeros(k + 1)
        c[0] = -1
        G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
        sol = solvers.lpsolve(c, G, b, solver='glpk')
        opt_sol = np.array(sol['x']).flatten()
        opt_dual = np.array(sol['z']).flatten()
        r_opt = opt_sol[0]
        y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
        x_opt = r_opt * gamma
        E_0 = np.nonzero(
            np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
        DE0 = D[E_0, :]
        CE0 = C[E_0, :]
        b0 = b[E_0]
        if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
            break
        iter += 1
    af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
    if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual,
                          abs_tol=abs_tol):
        E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
    af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
    if len(bf) > 1:
        raise Exception("shoot: wrong dimension of affine hull")
    return E_0, af.flatten(), bf


def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):
    """Compute all ridges of a facet in the projection.

    Input:
    `C,D,b`: Original polytope data
    `E,af,bf`: Equality set and affine hull of a facet in the projection

    Output:
    `ridge_list`: A list containing all the ridges of
        the facet as Ridge objects
    """
    d = C.shape[1]
    k = D.shape[1]
    Er_list = []
    q = C.shape[0]
    E_c = np.setdiff1d(range(q), E)
    # E slices
    C_E = C[E, :]
    D_E = D[E, :]
    b_E = b[E, :]
    # E_c slices
    C_Ec = C[E_c, :]
    D_Ec = D[E_c, :]
    b_Ec = b[E_c]
    # dots
    S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)
    L = np.dot(D_Ec, null_space(D_E))
    t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))
    if rank(np.hstack([C_E, D_E])) < k + 1:
        if verbose > 1:
            print("Doing recursive ESP call")
        u, s, v = linalg.svd(np.array([af]), full_matrices=1)
        sigma = s[0]
        v = v.T * u[0, 0]  # Correct sign
        V_hat = v[:, [0]]
        V_tilde = v[:, range(1, v.shape[1])]
        Cnew = np.dot(S, V_tilde)
        Dnew = L
        bnew = t - np.dot(S, V_hat).flatten() * bf / sigma
        Anew = np.hstack([Cnew, Dnew])
        xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)
        bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()
        Gt, gt, E_t = esp(
            Cnew, Dnew, bnew,
            centered=True, abs_tol=abs_tol, verbose=0)
        if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
            raise Exception(
                "ridge: recursive call did not return any equality sets")
        for i in range(len(E_t)):
            E_f = E_t[i]
            er = np.sort(np.hstack([E, E_c[E_f]]))
            ar = np.dot(Gt[i, :], V_tilde.T).flatten()
            br0 = gt[i].flatten()
            # Make orthogonal to facet
            ar = ar - af * np.dot(af.flatten(), ar.flatten())
            br = br0 - bf * np.dot(af.flatten(), ar.flatten())
            # Normalize and make ridge equation point outwards
            norm = np.sqrt(np.sum(ar * ar))
            ar = ar * np.sign(br) / norm
            br = br * np.sign(br) / norm
            # Restore center
            br = br + np.dot(Gt[i, :], xc2) / norm
            if len(ar) > d:
                raise Exception("ridge: wrong length of new ridge!")
            Er_list.append(Ridge(er, ar, br))
    else:
        if verbose > 0:
            print("Doing direct calculation of ridges")
        X = np.arange(S.shape[0])
        while len(X) > 0:
            i = X[0]
            X = np.setdiff1d(X, i)
            if np.linalg.norm(S[i, :]) < abs_tol:
                continue
            Si = S[i, :]
            Si = Si / np.linalg.norm(Si)
            if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:
                test1 = null_space(
                    np.vstack([
                        np.hstack([af, bf]),
                        np.hstack([S[i, :], t[i]])]),
                    nonempty=True)
                test2 = np.hstack([S, np.array([t]).T])
                test = np.dot(test1.T, test2.T)
                test = np.sum(np.abs(test), 0)
                Q_i = np.nonzero(test > abs_tol)[0]
                Q = np.nonzero(test < abs_tol)[0]
                X = np.setdiff1d(X, Q)
                # Have Q_i
                Sq = S[Q_i, :]
                tq = t[Q_i]
                c = np.zeros(d + 1)
                c[0] = 1
                Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])
                Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])
                G = np.vstack([Gup, Gdo])
                h = np.hstack([tq, 1])
                Al = np.zeros([2, 1])
                Ar = np.vstack([af, S[i, :]])
                A = np.hstack([Al, Ar])
                bb = np.hstack([bf, t[i]])
                sol = solvers._solve_lp_using_cvxopt(
                    c, G, h, A=A, b=bb)
                if sol['status'] == 'optimal':
                    tau = sol['x'][0]
                    if tau < -abs_tol:
                        ar = np.array([S[i, :]]).flatten()
                        br = t[i].flatten()
                        # Make orthogonal to facet
                        ar = ar - af * np.dot(af.flatten(), ar.flatten())
                        br = br - bf * np.dot(af.flatten(), ar.flatten())
                        # Normalize and make ridge equation point outwards
                        norm = np.sqrt(np.sum(ar * ar))
                        ar = ar / norm
                        br = br / norm
                        # accumulate
                        Er_list.append(
                            Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))
    return Er_list


def adjacent(C, D, b, rid_fac, abs_tol=1e-7):
    """Compute the (unique) adjacent facet.

    @param rid_fac: A Ridge_Facet object containing the parameters for
        a facet and one of its ridges.

    @return: (E_adj,a_adj,b_adj): The equality set and parameters for
        the adjacent facet such that::

            P_{E_adj} = P intersection {x | a_adj x = b_adj}
    """
    E = rid_fac.E_0
    af = rid_fac.af
    bf = rid_fac.bf
    #
    E_r = rid_fac.E_r
    ar = rid_fac.ar
    br = rid_fac.br
    # shape
    d = C.shape[1]
    k = D.shape[1]
    # E_r slices
    C_er = C[E_r, :]
    D_er = D[E_r, :]
    b_er = b[E_r]
    # stack
    c = -np.hstack([ar, np.zeros(k)])
    G = np.hstack([C_er, D_er])
    h = b_er
    A = np.hstack([af, np.zeros(k)])
    sol = solvers._solve_lp_using_cvxopt(
        c, G, h,
        A=A.T, b=bf * (1 - 0.01))
    if sol['status'] != "optimal":
        print(G)
        print(h)
        print(af)
        print(bf)
        print(ar)
        print(br)
        print(np.dot(af, ar))
        data = {}
        data["C"] = C
        data["D"] = D
        data["b"] = b
        sio.savemat("matlabdata", data)
        with open('polytope.p', 'wb') as f:
            pickle.dump(data, f)
        raise Exception(
            "adjacent: Lp returned status " + str(sol['status']))
    opt_sol = np.array(sol['x']).flatten()
    dual_opt_sol = np.array(sol['z']).flatten()
    x_opt = opt_sol[range(d)]
    y_opt = opt_sol[range(d, d + k)]
    if is_dual_degenerate(
            c.flatten(), G, h, A, bf * (1 - 0.01),
            opt_sol, dual_opt_sol, abs_tol=abs_tol):
        # If degenerate, compute affine hull and take preimage
        E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]
        a_temp, b_temp = proj_aff(
            C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],
            expected_dim=1, abs_tol=abs_tol)
        E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)
        if len(E_adj) == 0:
            data = {}
            data["C"] = C
            data["D"] = D
            data["b"] = b
            data["Er"] = E_r + 1
            data["ar"] = ar
            data["br"] = br
            data["Ef"] = E + 1
            data["af"] = af
            data["bf"] = bf
            sio.savemat("matlabdata", data)
            raise Exception(
                "adjacent: equality set computation returned empty set")
    else:
        r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol
        E_adj = np.nonzero(r)[0]
    C_eadj = C[E_adj, :]
    D_eadj = D[E_adj, :]
    b_eadj = b[E_adj]
    af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)
    return E_adj, af_adj, bf_adj


def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):
    """Affine projection.

    Compute the set aff = {x | Ce x + De y = be}
    on the form aff = ({x | a x = b} intersection {Ce x + De y < be}).

    Input: Polytope parameters Ce, De and be

    Output: Constants a and b
    """
    # Remove zero columns
    ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]
    D = De[:, ind]
    if D.shape[1] == 0:
        a = Ce
        b = be
        a_n, b_n = normalize(a, b)
        if expected_dim is not None:
            if expected_dim != b_n.size:
                raise Exception(
                    "proj_aff: wrong dimension calculated in 1")
        return a_n.flatten(), b_n
    sh = np.shape(D.T)
    m = sh[0]
    n = sh[1]
    nDe = null_space(D.T)
    a = np.dot(nDe.T, Ce)
    b = np.dot(nDe.T, be)
    a_n, b_n = normalize(a, b)
    if expected_dim is not None:
        if expected_dim != b_n.size:
            raise Exception("proj_aff: wrong dimension calculated in 2")
    return a_n, b_n


def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):
    """Return `True` if pair of dual problems is dual degenerate.

    Checks if the pair of dual problems::

      (P): min c'x        (D): max h'z + b'y
           s.t Gx <= h         s.t G'z + A'y = c
               Ax = b                  z <= 0

    is dual degenerate, i.e. if (P) has several optimal solutions.

    Optimal solutions x* and z* are required.

    Input:

    `G,h,A,b`: Parameters of (P)

    `x_opt`: One optimal solution to (P)

    `z_opt`: The optimal solution to (D)
        corresponding to _inequality constraints_ in (P)

    Output:
    `dual`: Boolean indicating whether (P) has many optimal solutions.
    """
    D = - G
    d = - h.flatten()
    mu = - z_opt.flatten()  # mu >= 0
    # Active constraints
    I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]
    # Positive elements in dual opt
    J = np.nonzero(mu > abs_tol)[0]
    # i, j
    i = mu < abs_tol  # Zero elements in dual opt
    i = i.astype(int)
    j = np.zeros(len(mu), dtype=int)
    j[I] = 1  # 1 if active
    # Indices where active constraints have 0 dual opt
    L = np.nonzero(i + j == 2)[0]
    # sizes
    nI = len(I)
    nJ = len(J)
    nL = len(L)
    # constraints
    DI = D[I, :]  # Active constraints
    DJ = D[J, :]  # Constraints with positive lagrange mult
    DL = D[L, :]  # Active constraints with zero dual opt
    dual = 0
    if A is None:
        test = DI
    else:
        test = np.vstack([DI, A])
    if rank(test) < np.amin(DI.shape):
        return True
    else:
        if len(L) > 0:
            if A is None:
                Ae = DJ
            else:
                Ae = np.vstack([DJ, A])
            be = np.zeros(Ae.shape[0])
            Ai = - DL
            bi = np.zeros(nL)
            sol = solvers._solve_lp_using_cvxopt(
                c= - np.sum(DL, axis=0),
                G=Ai, h=bi, A=Ae, b=be)
            if sol['status'] == "dual infeasible":
                # Dual infeasible -> primal unbounded -> value>epsilon
                return True
            if sol['primal objective'] > abs_tol:
                return True
    return False


def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):
    """Return equality set E with the following property:

    P_E = {x | af x = bf} intersection P

    where P is the polytope C x + D y < b

    The inequalities have to be satisfied with equality everywhere on
    the face defined by af and bf.
    """
    if D is not None:
        A = np.hstack([C, D])
        a = np.hstack([af, np.zeros(D.shape[1])])
    else:
        A = C
        a = af
    E = []
    for i in range(A.shape[0]):
        A_i = np.array(A[i, :])
        b_i = b[i]
        sol = solvers._solve_lp_using_cvxopt(
            c=A_i, G=A, h=b,
            A=a.T, b=bf)
        if sol['status'] != "optimal":
            raise Exception(
                "unique_equalityset: LP returned status " +
                str(sol['status']))
        if np.abs(sol['primal objective'] - b_i) < abs_tol:
            # Constraint is active everywhere
            E.append(i)
    if len(E) == 0:
        raise Exception("unique_equalityset: empty E")
    return np.array(E)


def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):
    """Alternative equality-set computation from an optimal point.

    Finds the indices of constraints of `C x + D y <= b` active on the
    face through `opt_sol`, via an auxiliary slack-minimizing LP, and
    verifies they define the same affine hull as the active set at
    `opt_sol`.
    """
    A = np.hstack([C, D])
    E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]
    af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)
    # stack
    ineq = np.hstack([af, np.zeros(D.shape[1])])
    G = np.vstack([A, np.vstack([ineq, -ineq])])
    h = np.hstack([b, np.hstack([bf, -bf])])
    # shape
    m = G.shape[0]
    n = G.shape[1]
    # ht
    e = 1e-3
    v = np.vstack([np.zeros([1, n]), np.eye(n)]).T
    v = v - np.array([np.mean(v, axis=1)]).T
    v = v * e
    ht = h + np.amin(-np.dot(G, v), axis=1)
    # stack
    H1 = np.hstack([G, -np.eye(m)])
    H2 = np.hstack([G, np.zeros([m, m])])
    H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])
    H = np.vstack([H1, np.vstack([H2, H3])])
    h = np.hstack([ht, np.hstack([h, np.zeros(m)])])
    c = np.hstack([np.zeros(n), np.ones(m)])
    sol = solvers.lpsolve(c, H, h, solver='glpk')
    if not sol['status'] == "optimal":
        raise Exception(
            "unique_equalityset: LP returned status " +
            str(sol['status']))
    opt_sol2 = np.array(sol['x']).flatten()
    x = opt_sol2[range(n)]
    s = opt_sol2[range(n, len(opt_sol2))]
    E = np.nonzero(s > abs_tol)[0]
    print(E)  # debug output retained from original
    E = np.sort(E[np.nonzero(E < C.shape[0])])
    # Check that they define the same projection
    at, bt = proj_aff(C[E, :], D[E, :], b[E])
    if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:
        raise Exception("unique_equalityset2: affine hulls not the same")
    return E


def cheby_center(C, D, b):
    """Calculate Chebyshev center for the polytope `C x + D y <= b`.

    Input:
    `C, D, b`: Polytope parameters

    Output:
    `x_0, y_0`: The chebyshev centra
    `boolean`: True if a point could be found, False otherwise.
    """
    d = C.shape[1]
    k = D.shape[1]
    A = np.hstack([C, D])
    dim = np.shape(A)[1]
    c = - np.r_[np.zeros(dim), 1]
    norm2 = np.sqrt(np.sum(A * A, axis=1))
    G = np.c_[A, norm2]
    sol = solvers.lpsolve(c, G, h=b, solver='glpk')
    if sol['status'] == "optimal":
        opt = np.array(sol['x'][0:-1]).flatten()
        return opt[range(d)], opt[range(d, d + k)], True
    else:
        return np.zeros(d), np.zeros(k), False


def normalize(AA, bb, abs_tol=1e-7):
    """Normalize `A x = b` such that `A'A = 1` and `b > 0`.

    Also, remove duplicate lines.
    """
    if AA.size == 0:
        return AA, bb
    # Floor division: `reshape` requires an integer dimension; the
    # previous `/` (true division under Python 3) produced a float
    # and made `reshape` raise TypeError.
    dim = AA.size // bb.size
    A = AA.copy().reshape(bb.size, dim)
    b = bb.copy().reshape(bb.size, 1)
    # Remove zero lines
    keepind = np.nonzero(
        np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]
    A = A[keepind, :]
    b = b[keepind]
    # Normalize
    anorm = np.sqrt(np.sum(A * A, axis=1))
    for i in range(len(anorm)):
        A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]
        b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]
    # Remove duplicate rows
    keep_row = []
    for i in range(len(anorm)):
        unique = True
        for j in range(i + 1, len(anorm)):
            test = (np.sum(np.abs(A[i, :] - A[j, :])) +
                    np.abs(b[i, 0] - b[j, 0]))
            if test < abs_tol:
                unique = False
                break
        if unique:
            keep_row.append(i)
    A_n = A[keep_row, :]
    b_n = b[keep_row, 0]
    # Return flat A if only one row
    if A_n.size == dim:
        A_n = A_n.flatten()
    return A_n, b_n.flatten()


def rank(A, eps=1e-15):
    """Return numerical rank of `A` using an SVD with tolerance `eps`."""
    u, s, vh = linalg.svd(A)
    m = A.shape[0]
    n = A.shape[1]
    tol = np.amax([m, n]) * np.amax(s) * eps
    return np.sum(s > tol)


def null_space(A, eps=1e-15, nonempty=False):
    """Returns the null space N_A to matrix A such that A N_A = 0."""
    u, s, v = linalg.svd(A, full_matrices=1)
    m = A.shape[0]
    n = A.shape[1]
    tol = np.amax([m, n]) * np.amax(s) * eps
    rank = np.sum(s > tol)
    N_space = v[range(rank, n), :].T
    if nonempty and (len(N_space) == 0):
        # Scalar max(n - 1, 1): the previous `np.amax(n - 1, 1)` passed
        # `1` as an *axis* argument to a 0-d reduction, which raises.
        N_space = v[range(max(n - 1, 1), n), :]
    return N_space
flexible
{ "blob_id": "707c83bc83f606b570af973094574e6675cfc83f", "index": 8793, "step-1": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. 
P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht 
= h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. 
P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise 
Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if 
verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. \\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 
1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - 
np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), 
ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n 
sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if 
np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, 
solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-4": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. 
The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if 
is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. 
\\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, 
gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n 
Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / 
norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set 
computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-07):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = -G\n d = -h.flatten()\n mu = -z_opt.flatten()\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n J = np.nonzero(mu > abs_tol)[0]\n i = mu < abs_tol\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1\n L = np.nonzero(i + j == 2)[0]\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n DI = D[I, :]\n DJ = D[J, :]\n DL = D[L, :]\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n elif len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = -DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(c=-np.sum(DL, axis=0), G=Ai, h\n =bi, A=Ae, b=be)\n if sol['status'] == 'dual infeasible':\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise 
Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n 
return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-07):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n keepind = np.nonzero(np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0\n ]\n A = A[keepind, :]\n b = b[keepind]\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0]\n )\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-5": "# Copyright (c) 2011-2014 by California Institute of Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. 
Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the California Institute of Technology nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH\n# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\nr\"\"\"Equality Set Projection (ESP).\n\nNon-vertex polytope projection method from\n- https://web.archive.org/web/20150103142532/\n https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html\n- https://infoscience.epfl.ch/record/169768\n\nVery unstable, can not handle complex polytopes.\n\n\nReference\n=========\n\n\\cite{Jones04}\n\"\"\"\n# Created by P. 
Nilsson, 8/2/11\nimport pickle\n\nimport numpy as np\nfrom scipy import io as sio\nfrom scipy import linalg\n\nfrom polytope import solvers\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. 
The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n \"projection_esp error:\"\n \" Equality set projection requires `cvxopt.glpk` to run.\")\n # Remove zero columns and rows\n nonzerorows = np.nonzero(\n np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n # Make sure origo is inside polytope\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print(\"Projecting from dim \" + str(d + k) + \" to \" + str(d))\n if k == 0:\n # Not projecting\n return C, bb, []\n if d == 1:\n # Projection to 1D\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \"\n \"LP returned status \" + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \" +\n \"LP returned status \" + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n # min, max\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n # Min case, relax 
constraint a little to avoid infeasibility\n E_min = unique_equalityset(\n C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n # Max case, relax constraint a little to avoid infeasibility\n E_max = unique_equalityset(\n C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.], [-1.]])\n g = np.array([x_max, -x_min])\n # Relocate\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero cols/rows\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print(\n \"Returning projection from dim \" +\n str(d + k) + \" to dim 1 \\n\")\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print(\"\\nStarting eq set \" + str(E_0) + \"\\nStarting ridges \")\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print(\"\\nLooking for neighbors to \" + str(rid_fac1.E_0) +\n \" and \" + str(rid_fac1.E_r) + \" ..\")\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print(\"found neighbor \" + str(E_adj) +\n \". 
\\n\\nLooking for ridges of neighbor..\")\n ridge_list = ridge(\n C, D, b, E_adj, a_adj, b_adj,\n abs_tol=abs_tol, verbose=verbose)\n if verbose > 0:\n print(\"found \" + str(len(ridge_list)) + \" ridges\\n\")\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print(\"Ridge \" + str(E_r) +\n \" already visited, removing from L..\")\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print(\"Adding ridge-facet \" + str(E_adj) +\n \" \" + str(E_r) + \"\")\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print(\"Expected ridge \" + str(rid_fac1.E_r))\n print(\"but got ridges \")\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n \"esp: ridge did not return neighboring ridge as expected\")\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n # Restore center\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero rows\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-7):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception(\n \"shoot: could not find starting equality set\")\n gamma = np.random.rand(d) - 0.5\n c = 
np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(\n np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol,\n opt_dual, abs_tol=abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception(\"shoot: wrong dimension of affine hull\")\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n # E slices\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n # E_c slices\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n # dots\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print(\"Doing recursive ESP call\")\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0] # Correct sign\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, 
yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(\n Cnew, Dnew, bnew,\n centered=True, abs_tol=abs_tol, verbose=0)\n if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):\n raise Exception(\n \"ridge: recursive call did not return any equality sets\")\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n # Restore center\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception(\"ridge: wrong length of new ridge!\")\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print(\"Doing direct calculation of ridges\")\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(\n np.vstack([\n np.hstack([af, bf]),\n np.hstack([S[i, :], t[i]])]),\n nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n # Have Q_i\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A, b=bb)\n if sol['status'] == 
'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n # accumulate\n Er_list.append(\n Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-7):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n #\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n # shape\n d = C.shape[1]\n k = D.shape[1]\n # E_r slices\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n # stack\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != \"optimal\":\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n sio.savemat(\"matlabdata\", data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception(\n \"adjacent: Lp returned status \" + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(\n c.flatten(), G, h, A, bf * (1 - 0.01),\n opt_sol, dual_opt_sol, abs_tol=abs_tol):\n # If degenerate, compute affine hull and take preimage\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < 
abs_tol)[0]\n a_temp, b_temp = proj_aff(\n C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],\n expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n data[\"Er\"] = E_r + 1\n data[\"ar\"] = ar\n data[\"br\"] = br\n data[\"Ef\"] = E + 1\n data[\"af\"] = af\n data[\"bf\"] = bf\n sio.savemat(\"matlabdata\", data)\n raise Exception(\n \"adjacent: equality set computation returned empty set\")\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n # Remove zero columns\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\n \"proj_aff: wrong dimension calculated in 1\")\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\"proj_aff: wrong dimension calculated in 2\")\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = - G\n d = - h.flatten()\n mu = - z_opt.flatten() # mu >= 0\n # Active constraints\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n # Positive elements in dual opt\n J = np.nonzero(mu > abs_tol)[0]\n # i, j\n i = mu < abs_tol # Zero elements in dual opt\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1 # 1 if active\n # Indices where active constraints have 0 dual opt\n L = np.nonzero(i + j == 2)[0]\n # sizes\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n # constraints\n DI = D[I, :] # Active constraints\n DJ = D[J, :] # Constraints with positive lagrange mult\n DL = D[L, :] # Active constraints with zero dual opt\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n else:\n if len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = - DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(\n c= - np.sum(DL, axis=0), G=Ai,\n h=bi, A=Ae, b=be)\n if sol['status'] == \"dual infeasible\":\n # Dual infeasible -> primal unbounded -> value>epsilon\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = 
[]\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(\n c=A_i, G=A, h=b,\n A=a.T, b=bf)\n if sol['status'] != \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n # Constraint is active everywhere\n E.append(i)\n if len(E) == 0:\n raise Exception(\"unique_equalityset: empty E\")\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n # stack\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n # shape\n m = G.shape[0]\n n = G.shape[1]\n # ht\n e = 1e-3\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n # stack\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n # Check that they define the same projection\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception(\"unique_equalityset2: affine hulls not the same\")\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the 
polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = - np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == \"optimal\":\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-7):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n # Remove zero lines\n keepind = np.nonzero(\n np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]\n A = A[keepind, :]\n b = b[keepind]\n # Normalize\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n # Remove duplicate rows\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = (np.sum(np.abs(A[i, :] - A[j, :])) +\n np.abs(b[i, 0] - b[j, 0]))\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n # Return flat A if only one row\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = 
np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and (len(N_space) == 0):\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-ids": [ 7, 9, 16, 18, 20 ] }
[ 7, 9, 16, 18, 20 ]
import cv2 import numpy as np img1 = cv2.imread('img0008.jpg') img2 = cv2.imread('img0009.jpg') #img3 = cv2.imread('img0009.jpg') img3 = np.zeros(img1.shape) iter = 51 def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j): suma = 0 x = ux - wx y = uy - wy while x < ux + wx: while y < uy + wy: xdx = x + dx if x + dx < img1.shape[0] else x ydy = y + dy if y + dy < img1.shape[1] else y suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2) y += 1 x += 1 return suma def hazFuncion(iteracion): for x in range(img1.shape[0]-1): for y in range(img1.shape[1]-1): img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2) for x in range(iter): img3 = np.zeros(img1.shape) hazFuncion(x) if x % 10 == 0: #cv2.imwrite("s"+str(x)+"xy.jpg", img3) cv2.namedWindow(str(x) + "dd.jpg", cv2.WINDOW_NORMAL) cv2.imshow(str(x) + "dd.jpg", img3) cv2.waitKey(0) cv2.destroyAllWindows()
normal
{ "blob_id": "749e6a1f807843c9e2591f51561174cc51668b11", "index": 1588, "step-1": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n", "step-3": "<mask token>\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in 
range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n", "step-4": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n", "step-5": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\n#img3 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0]-1):\n for y in range(img1.shape[1]-1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n #cv2.imwrite(\"s\"+str(x)+\"xy.jpg\", img3)\n cv2.namedWindow(str(x) + \"dd.jpg\", cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + \"dd.jpg\", img3)\n 
cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(parsed_json2) <|reserved_special_token_1|> <|reserved_special_token_0|> fake_header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36' , 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'} s = requests.Session() r = s.get('https://xueqiu.com', headers=fake_header) r = s.get( 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea' , headers=fake_header) parsed_json1 = r.json parsed_json2 = json.loads(r.text) print(parsed_json2) <|reserved_special_token_1|> <|reserved_special_token_0|> import json import requests fake_header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36' , 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'} s = requests.Session() r = s.get('https://xueqiu.com', headers=fake_header) r = s.get( 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea' , headers=fake_header) parsed_json1 = r.json parsed_json2 = json.loads(r.text) print(parsed_json2) <|reserved_special_token_1|> #coding=utf8 """ Created on Thu Feb 20 00:53:28 2020 @author: Neal LONG """ import json import requests fake_header = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Encoding":"gzip, deflate, sdch", "Accept-Language":"zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2" } s = requests.Session() r=s.get('https://xueqiu.com',headers = fake_header) r = s.get('https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea',headers = fake_header) #print(r.text) parsed_json1 = r.json parsed_json2 = json.loads(r.text) print(parsed_json2)
flexible
{ "blob_id": "166a1dfbd3baf766230080361d98648ec0a64455", "index": 1038, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(parsed_json2)\n", "step-3": "<mask token>\nfake_header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':\n 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}\ns = requests.Session()\nr = s.get('https://xueqiu.com', headers=fake_header)\nr = s.get(\n 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'\n , headers=fake_header)\nparsed_json1 = r.json\nparsed_json2 = json.loads(r.text)\nprint(parsed_json2)\n", "step-4": "<mask token>\nimport json\nimport requests\nfake_header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':\n 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}\ns = requests.Session()\nr = s.get('https://xueqiu.com', headers=fake_header)\nr = s.get(\n 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'\n , headers=fake_header)\nparsed_json1 = r.json\nparsed_json2 = json.loads(r.text)\nprint(parsed_json2)\n", "step-5": "#coding=utf8\r\n\"\"\"\r\nCreated on Thu Feb 20 00:53:28 2020\r\n\r\n@author: Neal LONG\r\n\"\"\"\r\n\r\nimport json\r\nimport requests\r\nfake_header = { \"user-agent\": 
\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36\",\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Accept-Encoding\":\"gzip, deflate, sdch\",\r\n \"Accept-Language\":\"zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2\"\r\n }\r\n\r\ns = requests.Session()\r\n\r\nr=s.get('https://xueqiu.com',headers = fake_header)\r\nr = s.get('https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea',headers = fake_header)\r\n\r\n#print(r.text)\r\nparsed_json1 = r.json\r\nparsed_json2 = json.loads(r.text)\r\n\r\nprint(parsed_json2)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3 import argparse from glob import glob import sys import numpy as np import matplotlib.pyplot as plt import pysam import math import pandas as pd import haplotagging_stats import os import collections import seaborn as sns NUM_CONTIGS="num_contigs" TOTAL_LEN="total_len" HAPLOTYPE="haplotype" HAPLO_SEX="haplotype-sex" SEX="sex" # cat Y1_assemblies_v2_genbank_QC.csv | sed 's/,/\t/g' | awk '{print $1,$2,$3,"Maternal","\n",$1,$6,$7,"Paternal"}' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv # cat Y1_assemblies_v2_genbank_QC.full.csv | sed 's/,/\t/g' | awk '{print $1,$2,$3,"Maternal","Maternal-",$23,$23,"\n",$1,$6,$7,"Paternal","Paternal-",$23,$23}' | sed 's/- /-/g' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv def parse_args(args = None): parser = argparse.ArgumentParser("Plots information from haplotagging_stats tsv") parser.add_argument('--input_csv', '-i', dest='input_csv', default=None, required=True, type=str, help='CSV file holding data') parser.add_argument('--figure_name', '-f', dest='figure_name', default="HPRC_contig_stats", required=False, type=str, help='Figure name') return parser.parse_args() if args is None else parser.parse_args(args) def log(msg): print(msg, file=sys.stderr) def get_color(filename): if "maternal" in filename.lower(): return "darkred" if "paternal" in filename.lower(): return "darkblue" return "black" def main(): args = parse_args() df = pd.read_csv(args.input_csv) print(df.head()) # sns.set_palette(sns.color_palette(["darkred", "darkblue"])) # sns.boxplot(x=HAPLOTYPE, y=NUM_CONTIGS, data=df)#, palette={"Maternal":"darkred","Paternal":"darkblue"}) # spax = sns.swarmplot(x=HAPLOTYPE, y=NUM_CONTIGS, hue=SEX, data=df, palette={"Female":"fuchsia","Male":"cyan"}) #color="fuchsia") sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=["Maternal-Female", 
"Maternal-Male", "Paternal-Female", "Paternal-Male"], palette={"Maternal-Male":"darkred","Maternal-Female":"darkred","Paternal-Male":"darkblue","Paternal-Female":"darkblue"}) spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"], palette={"Maternal-Male":"royalblue","Maternal-Female":"crimson","Paternal-Male":"royalblue","Paternal-Female":"crimson"}) plt.title("") plt.ylabel("Contig Count") plt.xlabel("Haplotype") plt.tight_layout() # plt.set_size_inches(12, 12) # if args.figure_name is not None: plt.savefig(args.figure_name+".contig_count.png", format='png', dpi=200) plt.savefig(args.figure_name+".contig_count.pdf", format='pdf', dpi=300) plt.show() plt.close() # sns.boxplot(x=HAPLOTYPE, y=TOTAL_LEN, data=df)#, palette={"Maternal":"darkred","Paternal":"darkblue"}) # spax = sns.swarmplot(x=HAPLOTYPE, y=TOTAL_LEN, hue=SEX, data=df, palette={"Female":"fuchsia","Male":"cyan"}) #color="fuchsia") sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"], palette={"Maternal-Male":"darkred","Maternal-Female":"darkred","Paternal-Male":"darkblue","Paternal-Female":"darkblue"}) spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"], palette={"Maternal-Male":"royalblue","Maternal-Female":"crimson","Paternal-Male":"royalblue","Paternal-Female":"crimson"}) plt.title("") plt.ylabel("Total Length") plt.xlabel("Haplotype") plt.tight_layout() # plt.set_size_inches(12, 12) # if args.figure_name is not None: plt.savefig(args.figure_name+".total_len.png", format='png', dpi=200) plt.savefig(args.figure_name+".total_len.pdf", format='pdf', dpi=300) plt.show() if __name__ == "__main__": main()
normal
{ "blob_id": "0c7816028e6cbd12684b0c7484835e735f1d2838", "index": 4327, "step-1": "<mask token>\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\n<mask token>\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, 
y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if 
args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nNUM_CONTIGS = 'num_contigs'\nTOTAL_LEN = 'total_len'\nHAPLOTYPE = 'haplotype'\nHAPLO_SEX = 'haplotype-sex'\nSEX = 'sex'\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n 
print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import argparse\nfrom glob import glob\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pysam\nimport math\nimport pandas as pd\nimport haplotagging_stats\nimport os\nimport collections\nimport seaborn as 
sns\nNUM_CONTIGS = 'num_contigs'\nTOTAL_LEN = 'total_len'\nHAPLOTYPE = 'haplotype'\nHAPLO_SEX = 'haplotype-sex'\nSEX = 'sex'\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 
'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python3\nimport argparse\nfrom glob import glob\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pysam\nimport math\nimport pandas as pd\nimport haplotagging_stats\nimport os\nimport collections\nimport seaborn as sns\n\nNUM_CONTIGS=\"num_contigs\"\nTOTAL_LEN=\"total_len\"\nHAPLOTYPE=\"haplotype\"\nHAPLO_SEX=\"haplotype-sex\"\nSEX=\"sex\"\n\n# cat Y1_assemblies_v2_genbank_QC.csv | sed 's/,/\\t/g' | awk '{print $1,$2,$3,\"Maternal\",\"\\n\",$1,$6,$7,\"Paternal\"}' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv\n# cat Y1_assemblies_v2_genbank_QC.full.csv | sed 's/,/\\t/g' | awk '{print $1,$2,$3,\"Maternal\",\"Maternal-\",$23,$23,\"\\n\",$1,$6,$7,\"Paternal\",\"Paternal-\",$23,$23}' | sed 's/- /-/g' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv\ndef parse_args(args = None):\n parser = argparse.ArgumentParser(\"Plots information from haplotagging_stats tsv\")\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None, required=True, type=str,\n help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\"HPRC_contig_stats\", required=False, type=str,\n help='Figure 
name')\n\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if \"maternal\" in filename.lower():\n return \"darkred\"\n if \"paternal\" in filename.lower():\n return \"darkblue\"\n return \"black\"\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n # sns.set_palette(sns.color_palette([\"darkred\", \"darkblue\"]))\n\n # sns.boxplot(x=HAPLOTYPE, y=NUM_CONTIGS, data=df)#, palette={\"Maternal\":\"darkred\",\"Paternal\":\"darkblue\"})\n # spax = sns.swarmplot(x=HAPLOTYPE, y=NUM_CONTIGS, hue=SEX, data=df, palette={\"Female\":\"fuchsia\",\"Male\":\"cyan\"}) #color=\"fuchsia\")\n\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"darkred\",\"Maternal-Female\":\"darkred\",\"Paternal-Male\":\"darkblue\",\"Paternal-Female\":\"darkblue\"})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"royalblue\",\"Maternal-Female\":\"crimson\",\"Paternal-Male\":\"royalblue\",\"Paternal-Female\":\"crimson\"})\n\n\n plt.title(\"\")\n plt.ylabel(\"Contig Count\")\n plt.xlabel(\"Haplotype\")\n plt.tight_layout()\n # plt.set_size_inches(12, 12)\n #\n if args.figure_name is not None:\n plt.savefig(args.figure_name+\".contig_count.png\", format='png', dpi=200)\n plt.savefig(args.figure_name+\".contig_count.pdf\", format='pdf', dpi=300)\n plt.show()\n plt.close()\n\n # sns.boxplot(x=HAPLOTYPE, y=TOTAL_LEN, data=df)#, palette={\"Maternal\":\"darkred\",\"Paternal\":\"darkblue\"})\n # spax = sns.swarmplot(x=HAPLOTYPE, y=TOTAL_LEN, hue=SEX, data=df, palette={\"Female\":\"fuchsia\",\"Male\":\"cyan\"}) #color=\"fuchsia\")\n\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\"Maternal-Female\", 
\"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"darkred\",\"Maternal-Female\":\"darkred\",\"Paternal-Male\":\"darkblue\",\"Paternal-Female\":\"darkblue\"})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"royalblue\",\"Maternal-Female\":\"crimson\",\"Paternal-Male\":\"royalblue\",\"Paternal-Female\":\"crimson\"})\n\n\n\n plt.title(\"\")\n plt.ylabel(\"Total Length\")\n plt.xlabel(\"Haplotype\")\n plt.tight_layout()\n # plt.set_size_inches(12, 12)\n #\n if args.figure_name is not None:\n plt.savefig(args.figure_name+\".total_len.png\", format='png', dpi=200)\n plt.savefig(args.figure_name+\".total_len.pdf\", format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
# uncompyle6 version 3.2.4 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] # Embedded file name: filecmp import os, stat from itertools import ifilter, ifilterfalse, imap, izip __all__ = [ 'cmp', 'dircmp', 'cmpfiles'] _cache = {} BUFSIZE = 8192 def cmp(f1, f2, shallow=1): s1 = _sig(os.stat(f1)) s2 = _sig(os.stat(f2)) if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG: return False if shallow and s1 == s2: return True if s1[1] != s2[1]: return False outcome = _cache.get((f1, f2, s1, s2)) if outcome is None: outcome = _do_cmp(f1, f2) if len(_cache) > 100: _cache.clear() _cache[(f1, f2, s1, s2)] = outcome return outcome def _sig(st): return ( stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime) def _do_cmp(f1, f2): bufsize = BUFSIZE with open(f1, 'rb') as (fp1): with open(f2, 'rb') as (fp2): while True: b1 = fp1.read(bufsize) b2 = fp2.read(bufsize) if b1 != b2: return False if not b1: return True class dircmp: def __init__(self, a, b, ignore=None, hide=None): self.left = a self.right = b if hide is None: self.hide = [ os.curdir, os.pardir] else: self.hide = hide if ignore is None: self.ignore = [ 'RCS', 'CVS', 'tags'] else: self.ignore = ignore return def phase0(self): self.left_list = _filter(os.listdir(self.left), self.hide + self.ignore) self.right_list = _filter(os.listdir(self.right), self.hide + self.ignore) self.left_list.sort() self.right_list.sort() def phase1(self): a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list)) b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list)) self.common = map(a.__getitem__, ifilter(b.__contains__, a)) self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a)) self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b)) def phase2(self): self.common_dirs = [] self.common_files = [] self.common_funny = [] for x in self.common: a_path = os.path.join(self.left, x) b_path = 
os.path.join(self.right, x) ok = 1 try: a_stat = os.stat(a_path) except os.error as why: ok = 0 try: b_stat = os.stat(b_path) except os.error as why: ok = 0 if ok: a_type = stat.S_IFMT(a_stat.st_mode) b_type = stat.S_IFMT(b_stat.st_mode) if a_type != b_type: self.common_funny.append(x) elif stat.S_ISDIR(a_type): self.common_dirs.append(x) elif stat.S_ISREG(a_type): self.common_files.append(x) else: self.common_funny.append(x) else: self.common_funny.append(x) def phase3(self): xx = cmpfiles(self.left, self.right, self.common_files) self.same_files, self.diff_files, self.funny_files = xx def phase4(self): self.subdirs = {} for x in self.common_dirs: a_x = os.path.join(self.left, x) b_x = os.path.join(self.right, x) self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide) def phase4_closure(self): self.phase4() for sd in self.subdirs.itervalues(): sd.phase4_closure() def report(self): print 'diff', self.left, self.right if self.left_only: self.left_only.sort() print 'Only in', self.left, ':', self.left_only if self.right_only: self.right_only.sort() print 'Only in', self.right, ':', self.right_only if self.same_files: self.same_files.sort() print 'Identical files :', self.same_files if self.diff_files: self.diff_files.sort() print 'Differing files :', self.diff_files if self.funny_files: self.funny_files.sort() print 'Trouble with common files :', self.funny_files if self.common_dirs: self.common_dirs.sort() print 'Common subdirectories :', self.common_dirs if self.common_funny: self.common_funny.sort() print 'Common funny cases :', self.common_funny def report_partial_closure(self): self.report() for sd in self.subdirs.itervalues(): print sd.report() def report_full_closure(self): self.report() for sd in self.subdirs.itervalues(): print sd.report_full_closure() methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs=phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, 
left_list=phase0, right_list=phase0) def __getattr__(self, attr): if attr not in self.methodmap: raise AttributeError, attr self.methodmap[attr](self) return getattr(self, attr) def cmpfiles(a, b, common, shallow=1): res = ([], [], []) for x in common: ax = os.path.join(a, x) bx = os.path.join(b, x) res[_cmp(ax, bx, shallow)].append(x) return res def _cmp(a, b, sh, abs=abs, cmp=cmp): try: return not abs(cmp(a, b, sh)) except (os.error, IOError): return 2 def _filter(flist, skip): return list(ifilterfalse(skip.__contains__, flist)) def demo(): import sys, getopt options, args = getopt.getopt(sys.argv[1:], 'r') if len(args) != 2: raise getopt.GetoptError('need exactly two args', None) dd = dircmp(args[0], args[1]) if ('-r', '') in options: dd.report_full_closure() else: dd.report() return if __name__ == '__main__': demo()
normal
{ "blob_id": "38f6700b283bdc68a0271cb3ec397ce72aa2de3c", "index": 6589, "step-1": "# uncompyle6 version 3.2.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]\n# Embedded file name: filecmp\nimport os, stat\nfrom itertools import ifilter, ifilterfalse, imap, izip\n__all__ = [\n 'cmp', 'dircmp', 'cmpfiles']\n_cache = {}\nBUFSIZE = 8192\n\ndef cmp(f1, f2, shallow=1):\n s1 = _sig(os.stat(f1))\n s2 = _sig(os.stat(f2))\n if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:\n return False\n if shallow and s1 == s2:\n return True\n if s1[1] != s2[1]:\n return False\n outcome = _cache.get((f1, f2, s1, s2))\n if outcome is None:\n outcome = _do_cmp(f1, f2)\n if len(_cache) > 100:\n _cache.clear()\n _cache[(f1, f2, s1, s2)] = outcome\n return outcome\n\n\ndef _sig(st):\n return (\n stat.S_IFMT(st.st_mode),\n st.st_size,\n st.st_mtime)\n\n\ndef _do_cmp(f1, f2):\n bufsize = BUFSIZE\n with open(f1, 'rb') as (fp1):\n with open(f2, 'rb') as (fp2):\n while True:\n b1 = fp1.read(bufsize)\n b2 = fp2.read(bufsize)\n if b1 != b2:\n return False\n if not b1:\n return True\n\n\nclass dircmp:\n\n def __init__(self, a, b, ignore=None, hide=None):\n self.left = a\n self.right = b\n if hide is None:\n self.hide = [\n os.curdir, os.pardir]\n else:\n self.hide = hide\n if ignore is None:\n self.ignore = [\n 'RCS', 'CVS', 'tags']\n else:\n self.ignore = ignore\n return\n\n def phase0(self):\n self.left_list = _filter(os.listdir(self.left), self.hide + self.ignore)\n self.right_list = _filter(os.listdir(self.right), self.hide + self.ignore)\n self.left_list.sort()\n self.right_list.sort()\n\n def phase1(self):\n a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))\n b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))\n self.common = map(a.__getitem__, ifilter(b.__contains__, a))\n self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))\n self.right_only = 
map(b.__getitem__, ifilterfalse(a.__contains__, b))\n\n def phase2(self):\n self.common_dirs = []\n self.common_files = []\n self.common_funny = []\n for x in self.common:\n a_path = os.path.join(self.left, x)\n b_path = os.path.join(self.right, x)\n ok = 1\n try:\n a_stat = os.stat(a_path)\n except os.error as why:\n ok = 0\n\n try:\n b_stat = os.stat(b_path)\n except os.error as why:\n ok = 0\n\n if ok:\n a_type = stat.S_IFMT(a_stat.st_mode)\n b_type = stat.S_IFMT(b_stat.st_mode)\n if a_type != b_type:\n self.common_funny.append(x)\n elif stat.S_ISDIR(a_type):\n self.common_dirs.append(x)\n elif stat.S_ISREG(a_type):\n self.common_files.append(x)\n else:\n self.common_funny.append(x)\n else:\n self.common_funny.append(x)\n\n def phase3(self):\n xx = cmpfiles(self.left, self.right, self.common_files)\n self.same_files, self.diff_files, self.funny_files = xx\n\n def phase4(self):\n self.subdirs = {}\n for x in self.common_dirs:\n a_x = os.path.join(self.left, x)\n b_x = os.path.join(self.right, x)\n self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)\n\n def phase4_closure(self):\n self.phase4()\n for sd in self.subdirs.itervalues():\n sd.phase4_closure()\n\n def report(self):\n print 'diff', self.left, self.right\n if self.left_only:\n self.left_only.sort()\n print 'Only in', self.left, ':', self.left_only\n if self.right_only:\n self.right_only.sort()\n print 'Only in', self.right, ':', self.right_only\n if self.same_files:\n self.same_files.sort()\n print 'Identical files :', self.same_files\n if self.diff_files:\n self.diff_files.sort()\n print 'Differing files :', self.diff_files\n if self.funny_files:\n self.funny_files.sort()\n print 'Trouble with common files :', self.funny_files\n if self.common_dirs:\n self.common_dirs.sort()\n print 'Common subdirectories :', self.common_dirs\n if self.common_funny:\n self.common_funny.sort()\n print 'Common funny cases :', self.common_funny\n\n def report_partial_closure(self):\n self.report()\n for sd in 
self.subdirs.itervalues():\n print\n sd.report()\n\n def report_full_closure(self):\n self.report()\n for sd in self.subdirs.itervalues():\n print\n sd.report_full_closure()\n\n methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs=phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, left_list=phase0, right_list=phase0)\n\n def __getattr__(self, attr):\n if attr not in self.methodmap:\n raise AttributeError, attr\n self.methodmap[attr](self)\n return getattr(self, attr)\n\n\ndef cmpfiles(a, b, common, shallow=1):\n res = ([], [], [])\n for x in common:\n ax = os.path.join(a, x)\n bx = os.path.join(b, x)\n res[_cmp(ax, bx, shallow)].append(x)\n\n return res\n\n\ndef _cmp(a, b, sh, abs=abs, cmp=cmp):\n try:\n return not abs(cmp(a, b, sh))\n except (os.error, IOError):\n return 2\n\n\ndef _filter(flist, skip):\n return list(ifilterfalse(skip.__contains__, flist))\n\n\ndef demo():\n import sys, getopt\n options, args = getopt.getopt(sys.argv[1:], 'r')\n if len(args) != 2:\n raise getopt.GetoptError('need exactly two args', None)\n dd = dircmp(args[0], args[1])\n if ('-r', '') in options:\n dd.report_full_closure()\n else:\n dd.report()\n return\n\n\nif __name__ == '__main__':\n demo()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if not os.path.exists(directory): os.makedirs(directory) <|reserved_special_token_0|> if not file_exists: x = open(op, 'w') x.write(rand_facts) <|reserved_special_token_1|> <|reserved_special_token_0|> directory = 'C:\\PyHelp' if not os.path.exists(directory): os.makedirs(directory) rand_facts = ( '• Exception is used as a base class for all exceptions. It\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use "catch all" try-except statements.\n• ImportError(StandardError) is raised when Python fails to import a module.\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\'s environment (that is, they\'re usually not caused by bugs in the program).\n• IOError(EnvironmentError) is used to flag I/O-related errors.\n• OSError(EnvironmentError) is used to flag errors by the os module.\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\n• NameError(StandardError) is raised when Python fails to find a global or local name.\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\'t contain a given index or key.\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\'t exist.\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\'t exist.\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\'t been implemented yet, or methods that should be overridden.\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\n"eval_code2: NULL globals" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\'s just me.\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\n• NoneType The type of None.\n• TypeType The type of type objects (such as returned by type()). \n• IntType The type of integers (e.g. 1).\n• LongType The type of long integers (e.g. 1L).\n• FloatType The type of floating point numbers (e.g. 1.0).\n• ComplexType The type of complex numbers (e.g. 1.0j).\n• StringType The type of character strings (e.g. ’Spam’). \n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \n• ListType The type of lists (e.g. [0, 1, 2, 3]). \n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \n• DictionaryType An alternate name for DictType. \n• FunctionType The type of user-defined functions and lambdas. \n• LambdaType An alternate name for FunctionType. \n• CodeType The type for code objects such as returned by compile(). \n• ClassType type of user-defined classes. \n• InstanceType The type of instances of user-defined classes. \n• MethodType The type of methods of user-defined class instances. 
\n• UnboundMethod Type An alternate name for MethodType. \n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \n• ModuleType The type of modules. \n• FileType The type of open file objects such as sys.stdout. \n• XRangeType The type of range objects returned by xrange(). \n• SliceType The type of objects returned by slice().\n• EllipsisType The type of Ellipsis. \n• TracebackType The type of traceback objects such as found in sys.exc traceback. \n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \n• BufferType The type of buffer objects created by the buffer() function.\n• string.capitalize()Return a copy of the string with only its first character capitalized. \n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. \n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). 
Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \n• string.isdigit()Return true if there are only digit characters, false otherwise.\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \n• string.lower() Return a copy of the string converted to lowercase. \n• string.lstrip() Return a copy of the string with leading whitespace removed.\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. 
\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. \n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. 
\n• string.upper() Return a copy of the string converted to uppercase.\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\x00’) if they occurred in the input. 
\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \n• file.tell() Return the file’s current position, like stdio’s ftell(). \n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \n• file.write(str) Write a string to the file. There is no return value. Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. 
These are not required for file-like objects, but should be implemented if they make sense for the particular object. \n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. \n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). 
The slice will extend to the end of object (or will have a length given by the size argument). \n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). \n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). 
If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \n• dir([object]) Without arguments, return the list of names in the current local symbol table. \n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. Example: \n>>> x = 1 \n>>> print eval(’x+1’) \n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. 
Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. 
If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! 
It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). 
\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \n• map(function, list, ...) Apply function to every item of list and return a list of the results. 
If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. 
The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). 
\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). \n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. 
For instance: \n>>> import types \n>>> if type(x) == types.StringType: print "It’s a string" unichr(i) \nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. 
When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \n\n' ) op = 'C:\\PyHelp\\randinfo.txt' file_exists = os.path.isfile(op) if not file_exists: x = open(op, 'w') x.write(rand_facts) <|reserved_special_token_1|> import os import sqlite3 import datetime directory = 'C:\\PyHelp' if not os.path.exists(directory): os.makedirs(directory) rand_facts = ( '• Exception is used as a base class for all exceptions. It\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use "catch all" try-except statements.\n• ImportError(StandardError) is raised when Python fails to import a module.\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\'s environment (that is, they\'re usually not caused by bugs in the program).\n• IOError(EnvironmentError) is used to flag I/O-related errors.\n• OSError(EnvironmentError) is used to flag errors by the os module.\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\n• NameError(StandardError) is raised when Python fails to find a global or local name.\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\'t contain a given index or key.\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\'t exist.\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\'t exist.\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\'t been implemented yet, or methods that should be overridden.\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\n"eval_code2: NULL globals" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\'s just me.\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\n• NoneType The type of None.\n• TypeType The type of type objects (such as returned by type()). \n• IntType The type of integers (e.g. 1).\n• LongType The type of long integers (e.g. 1L).\n• FloatType The type of floating point numbers (e.g. 1.0).\n• ComplexType The type of complex numbers (e.g. 1.0j).\n• StringType The type of character strings (e.g. ’Spam’). \n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \n• ListType The type of lists (e.g. [0, 1, 2, 3]). \n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \n• DictionaryType An alternate name for DictType. \n• FunctionType The type of user-defined functions and lambdas. \n• LambdaType An alternate name for FunctionType. \n• CodeType The type for code objects such as returned by compile(). \n• ClassType type of user-defined classes. \n• InstanceType The type of instances of user-defined classes. \n• MethodType The type of methods of user-defined class instances. 
\n• UnboundMethod Type An alternate name for MethodType. \n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \n• ModuleType The type of modules. \n• FileType The type of open file objects such as sys.stdout. \n• XRangeType The type of range objects returned by xrange(). \n• SliceType The type of objects returned by slice().\n• EllipsisType The type of Ellipsis. \n• TracebackType The type of traceback objects such as found in sys.exc traceback. \n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \n• BufferType The type of buffer objects created by the buffer() function.\n• string.capitalize()Return a copy of the string with only its first character capitalized. \n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. \n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). 
Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \n• string.isdigit()Return true if there are only digit characters, false otherwise.\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \n• string.lower() Return a copy of the string converted to lowercase. \n• string.lstrip() Return a copy of the string with leading whitespace removed.\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. 
\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. \n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. 
\n• string.upper() Return a copy of the string converted to uppercase.\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\x00’) if they occurred in the input. 
\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \n• file.tell() Return the file’s current position, like stdio’s ftell(). \n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \n• file.write(str) Write a string to the file. There is no return value. Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. 
These are not required for file-like objects, but should be implemented if they make sense for the particular object. \n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. \n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). 
The slice will extend to the end of object (or will have a length given by the size argument). \n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). \n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). 
If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \n• dir([object]) Without arguments, return the list of names in the current local symbol table. \n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. Example: \n>>> x = 1 \n>>> print eval(’x+1’) \n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. 
Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. 
If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! 
It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). 
\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or an object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \n• len(s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \n• map(function, list, ...) Apply function to every item of list and return a list of the results. 
If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. 
The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). 
\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(-0.5) is -1.0).\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). \n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. 
For instance: \n>>> import types \n>>> if type(x) == types.StringType: print "It’s a string" unichr(i) \nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. 
When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \n\n' ) op = 'C:\\PyHelp\\randinfo.txt' file_exists = os.path.isfile(op) if not file_exists: x = open(op, 'w') x.write(rand_facts) <|reserved_special_token_1|> import os import sqlite3 import datetime directory = 'C:\PyHelp' if not os.path.exists(directory): os.makedirs(directory) rand_facts = '''• Exception is used as a base class for all exceptions. It's strongly recommended (but not yet required) that user exceptions are derived from this class too. • SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message. • StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is). • KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use "catch all" try-except statements. • ImportError(StandardError) is raised when Python fails to import a module. • EnvironmentError is used as a base class for exceptions that can be caused by the interpreter's environment (that is, they're usually not caused by bugs in the program). • IOError(EnvironmentError) is used to flag I/O-related errors. • OSError(EnvironmentError) is used to flag errors by the os module. • WindowsError(OSError) is used to flag Windows-specific errors from the os module. • NameError(StandardError) is raised when Python fails to find a global or local name. • UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead. 
• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name. • SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error. • IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead. • TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead. • TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type. • AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is). • LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn't contain a given index or key. • IndexError(LookupError) is raised by sequence objects when the given index doesn't exist. • KeyError(LookupError) is raised by dictionary objects when the given key doesn't exist. • ArithmeticError(StandardError) is used as a base class for math-related exceptions. • OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type). • ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero. • FloatingPointError(ArithmeticError) is raised when a floating point operation fails. • ValueError(StandardError) is raised if an argument has the right type, but an invalid value. • UnicodeError(ValueError) is raised for type problems related to the Unicode string type. This is only used in 2.0 and later. 
• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc. • NotImplementedError(RuntimeError) can be used to flag functions that hasn't been implemented yet, or methods that should be overridden. • SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like "eval_code2: NULL globals" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that's just me. • MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens. • NoneType The type of None. • TypeType The type of type objects (such as returned by type()). • IntType The type of integers (e.g. 1). • LongType The type of long integers (e.g. 1L). • FloatType The type of floating point numbers (e.g. 1.0). • ComplexType The type of complex numbers (e.g. 1.0j). • StringType The type of character strings (e.g. ’Spam’). • UnicodeType The type of Unicode character strings (e.g. u’Spam’). • TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). • ListType The type of lists (e.g. [0, 1, 2, 3]). • DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). • DictionaryType An alternate name for DictType. • FunctionType The type of user-defined functions and lambdas. • LambdaType An alternate name for FunctionType. • CodeType The type for code objects such as returned by compile(). • ClassType type of user-defined classes. • InstanceType The type of instances of user-defined classes. • MethodType The type of methods of user-defined class instances. • UnboundMethod Type An alternate name for MethodType. 
• BuiltinFunctionType The type of built-in functions like len() or sys.exit(). • BuiltinMethodType An alternate name for BuiltinFunctionType. • ModuleType The type of modules. • FileType The type of open file objects such as sys.stdout. • XRangeType The type of range objects returned by xrange(). • SliceType The type of objects returned by slice(). • EllipsisType The type of Ellipsis. • TracebackType The type of traceback objects such as found in sys.exc_traceback. • FrameType The type of frame objects such as found in tb.tb_frame if tb is a traceback object. • BufferType The type of buffer objects created by the buffer() function. • string.capitalize() Return a copy of the string with only its first character capitalized. • string.center(width) Return the string centered in a string of length width. Padding is done using spaces. • string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. • string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. • string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. • string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. • string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. 
Return -1 if sub is not found. • string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. • string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. • string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. • string.isdigit()Return true if there are only digit characters, false otherwise. • string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. • string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise. • string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. • string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. • string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. • string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.lower() Return a copy of the string converted to lowercase. • string.lstrip() Return a copy of the string with leading whitespace removed. • string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. • string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. 
Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. • string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. • string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. • string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. • string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. • string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. • string.strip() Return a copy of the string with leading and trailing whitespace removed. • string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. • string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. • string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. • string.upper() Return a copy of the string converted to uppercase. • file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. 
Calling close() more than once is allowed. • file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. • file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. • file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! • file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. • file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\0’) if they occurred in the input. • file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. 
If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. • file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . • file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). • file.tell() Return the file’s current position, like stdio’s ftell(). • file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). • file.write(str) Write a string to the file. There is no return value. Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. • file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. 
• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. • file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. • file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects. • abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. • apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the argument list. • buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). 
• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method. • chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. • cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. • coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). • complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. • delattr(object, name) This is a relative of setattr(). 
The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. • dir([object]) Without arguments, return the list of names in the current local symbol table. • divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). • eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. Example: >>> x = 1 >>> print eval(’x+1’) 2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. 
The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). • execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. • filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. • float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned. • getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. 
If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. • globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). • hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) • hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). • hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. • id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) • input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. 
(On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users. • int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to • string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 • intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). • isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. 
If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. • issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. • len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). • list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3]. • locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. • long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). • map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. 
If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. • max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. • min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments. • oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. • ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. • pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed. • range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. 
If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). • reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. • reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument). • repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). • round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0). 
• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123. • slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. • str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. • tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). • type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: >>> import types >>> if type(x) == types.StringType: print "It’s a string" unichr(i) Return the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. . 
• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . • vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 • xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). • zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. ''' op='C:\PyHelp\\randinfo.txt' file_exists = os.path.isfile(op) if not file_exists: x = open(op,"w") x.write(rand_facts)
flexible
{ "blob_id": "a2c93fd632a637d47f05e0a4fda851b465d03a31", "index": 4674, "step-1": "<mask token>\n", "step-2": "<mask token>\nif not os.path.exists(directory):\n os.makedirs(directory)\n<mask token>\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-3": "<mask token>\ndirectory = 'C:\\\\PyHelp'\nif not os.path.exists(directory):\n os.makedirs(directory)\nrand_facts = (\n '• Exception is used as a base class for all exceptions. It\\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\\n• ImportError(StandardError) is raised when Python fails to import a module.\\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\\'s environment (that is, they\\'re usually not caused by bugs in the program).\\n• IOError(EnvironmentError) is used to flag I/O-related errors.\\n• OSError(EnvironmentError) is used to flag errors by the os module.\\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\\n• NameError(StandardError) is raised when Python fails to find a global or local name.\\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\\'t contain a given index or key.\\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\\'t exist.\\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\\'t exist.\\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\\'t been implemented yet, or methods that should be overridden.\\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\\'s just me.\\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\\n• NoneType The type of None.\\n• TypeType The type of type objects (such as returned by type()). \\n• IntType The type of integers (e.g. 1).\\n• LongType The type of long integers (e.g. 1L).\\n• FloatType The type of floating point numbers (e.g. 1.0).\\n• ComplexType The type of complex numbers (e.g. 1.0j).\\n• StringType The type of character strings (e.g. ’Spam’). \\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \\n• DictionaryType An alternate name for DictType. \\n• FunctionType The type of user-defined functions and lambdas. \\n• LambdaType An alternate name for FunctionType. \\n• CodeType The type for code objects such as returned by compile(). \\n• ClassType type of user-defined classes. \\n• InstanceType The type of instances of user-defined classes. 
\\n• MethodType The type of methods of user-defined class instances. \\n• UnboundMethod Type An alternate name for MethodType. \\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \\n• ModuleType The type of modules. \\n• FileType The type of open file objects such as sys.stdout. \\n• XRangeType The type of range objects returned by xrange(). \\n• SliceType The type of objects returned by slice().\\n• EllipsisType The type of Ellipsis. \\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \\n• BufferType The type of buffer objects created by the buffer() function.\\n• string.capitalize()Return a copy of the string with only its first character capitalized. \\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \\n• string.isdigit()Return true if there are only digit characters, false otherwise.\\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \\n• string.lower() Return a copy of the string converted to lowercase. \\n• string.lstrip() Return a copy of the string with leading whitespace removed.\\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \\n• string.upper() Return a copy of the string converted to uppercase.\\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\x00’) if they occurred in the input. \\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \\n• file.tell() Return the file’s current position, like stdio’s ftell(). \\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \\n>>> x = 1 \\n>>> print eval(’x+1’) \\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. 
Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. 
With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. 
For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). 
\\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \\n>>> import types \\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \\n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. 
The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \\n\\n'\n )\nop = 'C:\\\\PyHelp\\\\randinfo.txt'\nfile_exists = os.path.isfile(op)\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-4": "import os\nimport sqlite3\nimport datetime\ndirectory = 'C:\\\\PyHelp'\nif not os.path.exists(directory):\n os.makedirs(directory)\nrand_facts = (\n '• Exception is used as a base class for all exceptions. It\\'s strongly recommended (but not yet required) that user exceptions are derived from this class too.\\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\\n• ImportError(StandardError) is raised when Python fails to import a module.\\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter\\'s environment (that is, they\\'re usually not caused by bugs in the program).\\n• IOError(EnvironmentError) is used to flag I/O-related errors.\\n• OSError(EnvironmentError) is used to flag errors by the os module.\\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\\n• NameError(StandardError) is raised when Python fails to find a global or local name.\\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn\\'t contain a given index or key.\\n• IndexError(LookupError) is raised by sequence objects when the given index doesn\\'t exist.\\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn\\'t exist.\\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn\\'t been implemented yet, or methods that should be overridden.\\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that\\'s just me.\\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\\n• NoneType The type of None.\\n• TypeType The type of type objects (such as returned by type()). \\n• IntType The type of integers (e.g. 1).\\n• LongType The type of long integers (e.g. 1L).\\n• FloatType The type of floating point numbers (e.g. 1.0).\\n• ComplexType The type of complex numbers (e.g. 1.0j).\\n• StringType The type of character strings (e.g. ’Spam’). \\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \\n• DictionaryType An alternate name for DictType. \\n• FunctionType The type of user-defined functions and lambdas. \\n• LambdaType An alternate name for FunctionType. \\n• CodeType The type for code objects such as returned by compile(). \\n• ClassType type of user-defined classes. \\n• InstanceType The type of instances of user-defined classes. 
\\n• MethodType The type of methods of user-defined class instances. \\n• UnboundMethod Type An alternate name for MethodType. \\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \\n• ModuleType The type of modules. \\n• FileType The type of open file objects such as sys.stdout. \\n• XRangeType The type of range objects returned by xrange(). \\n• SliceType The type of objects returned by slice().\\n• EllipsisType The type of Ellipsis. \\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \\n• BufferType The type of buffer objects created by the buffer() function.\\n• string.capitalize()Return a copy of the string with only its first character capitalized. \\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \\n• string.isdigit()Return true if there are only digit characters, false otherwise.\\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \\n• string.lower() Return a copy of the string converted to lowercase. \\n• string.lstrip() Return a copy of the string with leading whitespace removed.\\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \\n• string.upper() Return a copy of the string converted to uppercase.\\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\x00’) if they occurred in the input. \\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \\n• file.tell() Return the file’s current position, like stdio’s ftell(). \\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \\n>>> x = 1 \\n>>> print eval(’x+1’) \\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. 
Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. \\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. 
With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\\n• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. 
For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). 
\\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \\n>>> import types \\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \\n• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. 
The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \\n\\n'\n )\nop = 'C:\\\\PyHelp\\\\randinfo.txt'\nfile_exists = os.path.isfile(op)\nif not file_exists:\n x = open(op, 'w')\n x.write(rand_facts)\n", "step-5": "import os\r\nimport sqlite3\r\nimport datetime\r\ndirectory = 'C:\\PyHelp'\r\n\r\nif not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\nrand_facts = '''• Exception is used as a base class for all exceptions. It's strongly recommended (but not yet required) that user exceptions are derived from this class too.\r\n• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.\r\n• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).\r\n• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use \"catch all\" try-except statements.\r\n• ImportError(StandardError) is raised when Python fails to import a module.\r\n• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter's environment (that is, they're usually not caused by bugs in the program).\r\n• IOError(EnvironmentError) is used to flag I/O-related errors.\r\n• OSError(EnvironmentError) is used to flag errors by the os module.\r\n• WindowsError(OSError) is used to flag Windows-specific errors from the os module.\r\n• NameError(StandardError) is raised when Python fails to find a global or local name.\r\n• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. 
This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.\r\n• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.\r\n• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.\r\n• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\r\n• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.\r\n• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.\r\n• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).\r\n• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn't contain a given index or key.\r\n• IndexError(LookupError) is raised by sequence objects when the given index doesn't exist.\r\n• KeyError(LookupError) is raised by dictionary objects when the given key doesn't exist.\r\n• ArithmeticError(StandardError) is used as a base class for math-related exceptions.\r\n• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).\r\n• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.\r\n• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.\r\n• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.\r\n• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. 
This is only used in 2.0 and later.\r\n• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.\r\n• NotImplementedError(RuntimeError) can be used to flag functions that hasn't been implemented yet, or methods that should be overridden.\r\n• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like\r\n\"eval_code2: NULL globals\" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that's just me.\r\n• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.\r\n• NoneType The type of None.\r\n• TypeType The type of type objects (such as returned by type()). \r\n• IntType The type of integers (e.g. 1).\r\n• LongType The type of long integers (e.g. 1L).\r\n• FloatType The type of floating point numbers (e.g. 1.0).\r\n• ComplexType The type of complex numbers (e.g. 1.0j).\r\n• StringType The type of character strings (e.g. ’Spam’). \r\n• UnicodeType The type of Unicode character strings (e.g. u’Spam’). \r\n• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)). \r\n• ListType The type of lists (e.g. [0, 1, 2, 3]). \r\n• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}). \r\n• DictionaryType An alternate name for DictType. \r\n• FunctionType The type of user-defined functions and lambdas. \r\n• LambdaType An alternate name for FunctionType. \r\n• CodeType The type for code objects such as returned by compile(). \r\n• ClassType type of user-defined classes. \r\n• InstanceType The type of instances of user-defined classes. 
\r\n• MethodType The type of methods of user-defined class instances. \r\n• UnboundMethod Type An alternate name for MethodType. \r\n• BuiltinFunction Type The type of built-in functions like len() or sys.exit(). \r\n• BuiltinMethod TypeAn alternate name for BuiltinFunction. \r\n• ModuleType The type of modules. \r\n• FileType The type of open file objects such as sys.stdout. \r\n• XRangeType The type of range objects returned by xrange(). \r\n• SliceType The type of objects returned by slice().\r\n• EllipsisType The type of Ellipsis. \r\n• TracebackType The type of traceback objects such as found in sys.exc traceback. \r\n• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object. \r\n• BufferType The type of buffer objects created by the buffer() function.\r\n• string.capitalize()Return a copy of the string with only its first character capitalized. \r\n• string.center(width) Return centered in a string of length width. Padding is done using spaces. \r\n• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. \r\n• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. . \r\n• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position. \r\n• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. 
\r\n• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found. \r\n• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found. \r\n• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise. \r\n• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise. \r\n• string.isdigit()Return true if there are only digit characters, false otherwise.\r\n• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. \r\n• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.\r\n• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise. \r\n• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise. \r\n• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method. \r\n• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). \r\n• string.lower() Return a copy of the string converted to lowercase. \r\n• string.lstrip() Return a copy of the string with leading whitespace removed.\r\n• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. 
If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. \r\n• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.\r\n• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found. \r\n• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed. \r\n• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator. \r\n• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.\r\n• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. \r\n• string.strip() Return a copy of the string with leading and trailing whitespace removed.\r\n• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa. \r\n• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase. 
\r\n• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. \r\n• string.upper() Return a copy of the string converted to uppercase.\r\n• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed. \r\n• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects. \r\n• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented. \r\n• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method! \r\n• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible. \r\n• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). 
If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\\0’) if they occurred in the input. \r\n• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently. \r\n• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) . \r\n• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’). \r\n• file.tell() Return the file’s current position, like stdio’s ftell(). \r\n• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation). \r\n• file.write(str) Write a string to the file. There is no return value. 
Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called. \r\n• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object. \r\n• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects. \r\n• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects. \r\n• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.\r\n• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned. \r\n• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list. 
\r\n• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument). \r\n• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.\r\n• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range. \r\n• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y. \r\n• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed). 
\r\n• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number. \r\n• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar. \r\n• dir([object]) Without arguments, return the list of names in the current local symbol table. \r\n• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a %bb), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b). \r\n• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. 
Example: \r\n>>> x = 1 \r\n>>> print eval(’x+1’) \r\n2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile(). \r\n• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None. \r\n• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed. \r\n• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). 
Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.\r\n• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised. \r\n• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called). \r\n• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.) \r\n• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0). \r\n• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception. \r\n• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. 
Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.) \r\n• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.\r\n• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to \r\n• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9 \r\n• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. 
Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected). \r\n• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised. \r\n• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised. \r\n• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary). \r\n• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].\r\n• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter. \r\n• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. 
Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int(). \r\n• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list. \r\n• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments. \r\n• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.\r\n• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.\r\n• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters. 
\r\n• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.\r\n• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised). \r\n• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. \r\n• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).\r\n• repr(object) Return a string containing a printable representation of an object. 
This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval(). \r\n• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).\r\n• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.\r\n• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’. \r\n• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string. \r\n• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. 
If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3). \r\n• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance: \r\n>>> import types \r\n>>> if type(x) == types.StringType: print \"It’s a string\" unichr(i) \r\nReturn the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .\r\n• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. . \r\n• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11 \r\n• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break). \r\n• zip(seq1, ...) 
This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples. \r\n\r\n'''\r\n\r\nop='C:\\PyHelp\\\\randinfo.txt'\r\nfile_exists = os.path.isfile(op) \r\n \r\nif not file_exists:\r\n \r\n x = open(op,\"w\")\r\n x.write(rand_facts)\r\n\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from src.config import Config mock = { "entities": { "foo": [ "bar", "foobar" ] }, "synonimous": { "fizz": [ "fizzfuzz", "fuzz"] }, "templates": [ { "text": "{synonimous.fizz} and {entities.foo}", "intention": "fizzfoo" } ] } def test_should_config_start_correctly(): c = Config(mock) assert c._entities == mock['entities'] assert c._synonimous == mock['synonimous'] assert c.templates == mock['templates'] assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
normal
{ "blob_id": "987f8ce668f2002b731822fa5f3de143a80aaafe", "index": 9807, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n", "step-3": "<mask token>\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n", "step-4": "from src.config import Config\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n", "step-5": "from src.config import Config\n\nmock = {\n \"entities\": {\n \"foo\": [ \"bar\", \"foobar\" ]\n },\n \"synonimous\": {\n \"fizz\": [ \"fizzfuzz\", \"fuzz\"]\n },\n \"templates\": [\n {\n \"text\": \"{synonimous.fizz} and {entities.foo}\",\n \"intention\": \"fizzfoo\"\n }\n ]\n}\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# day one question 1 solution # find product of two numbers in input.txt list that sum to 2020 # pull everything out of input file nums = [] with open('input.txt', 'r') as file: for line in file: nums.append(int(line)) target = 0 product = 0 # for each number in the input, figure out what it's complement to 2020 would be for ini in nums: target = 2020 - ini # then iterate through and check if its complement exists for chk in nums: # if it does, compute the product # semi-hacky since it assumes there'll only be one pair if chk == target: product = ini * chk print(product)
normal
{ "blob_id": "38504dae7b010c2df8c16b752c2179b6b3561c0e", "index": 7770, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('input.txt', 'r') as file:\n for line in file:\n nums.append(int(line))\n<mask token>\nfor ini in nums:\n target = 2020 - ini\n for chk in nums:\n if chk == target:\n product = ini * chk\nprint(product)\n", "step-3": "nums = []\nwith open('input.txt', 'r') as file:\n for line in file:\n nums.append(int(line))\ntarget = 0\nproduct = 0\nfor ini in nums:\n target = 2020 - ini\n for chk in nums:\n if chk == target:\n product = ini * chk\nprint(product)\n", "step-4": "# day one question 1 solution\n# find product of two numbers in input.txt list that sum to 2020\n\n# pull everything out of input file\nnums = []\nwith open('input.txt', 'r') as file:\n for line in file:\n nums.append(int(line))\n\ntarget = 0\nproduct = 0\n\n# for each number in the input, figure out what it's complement to 2020 would be\nfor ini in nums:\n\n target = 2020 - ini\n\n # then iterate through and check if its complement exists\n for chk in nums:\n\n # if it does, compute the product\n # semi-hacky since it assumes there'll only be one pair\n if chk == target:\n product = ini * chk\n\nprint(product)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import tensorflow as tf import keras import numpy as np def house_model(y_new): xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float) # Your Code Here# ys = np.array([0.50, 0.100, 1.50, 2.50, 3.50, 4.50, 5.50], dtype=float) # Your Code Here# model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) # Your Code Here# model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(xs,ys, epochs=100) return model.predict(y_new)[0] prediction = house_model([7.0]) print(prediction)
normal
{ "blob_id": "0b3f16ee9b287c6c77acde674abec9deb4053c83", "index": 946, "step-1": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<mask token>\nprint(prediction)\n", "step-3": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n", "step-4": "import tensorflow as tf\nimport keras\nimport numpy as np\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n", "step-5": "import tensorflow as tf\nimport keras\nimport numpy as np\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float) # Your Code Here#\n ys = np.array([0.50, 0.100, 
1.50, 2.50, 3.50, 4.50, 5.50], dtype=float) # Your Code Here#\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) # Your Code Here#\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs,ys, epochs=100)\n return model.predict(y_new)[0]\n\nprediction = house_model([7.0])\nprint(prediction)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> api.add_resource(Store, '/store/<string:name>') api.add_resource(Item, '/item/<string:name>') api.add_resource(ItemList, '/items') api.add_resource(StoreList, '/stores') api.add_resource(UserRegister, '/register') if __name__ == '__main__': from db import db db.init_app(app) app.run(debug=True) <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.secret_key = 'key123' api = Api(app) jwt = JWT(app, authenticate, identity) api.add_resource(Store, '/store/<string:name>') api.add_resource(Item, '/item/<string:name>') api.add_resource(ItemList, '/items') api.add_resource(StoreList, '/stores') api.add_resource(UserRegister, '/register') if __name__ == '__main__': from db import db db.init_app(app) app.run(debug=True) <|reserved_special_token_1|> import os from flask import Flask from flask_restful import Api from flask_jwt import JWT, timedelta from security import authenticate, identity from resources.user import UserRegister from resources.item import Item, ItemList from resources.store import Store, StoreList app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.secret_key = 'key123' api = Api(app) jwt = JWT(app, authenticate, identity) api.add_resource(Store, '/store/<string:name>') api.add_resource(Item, '/item/<string:name>') api.add_resource(ItemList, '/items') api.add_resource(StoreList, '/stores') api.add_resource(UserRegister, '/register') if __name__ == '__main__': from db import db db.init_app(app) app.run(debug=True) <|reserved_special_token_1|> # create item based on name using post method, get specific item or list of items using get method, update item using put and delete item 
using del method. import os from flask import Flask from flask_restful import Api from flask_jwt import JWT, timedelta from security import authenticate, identity from resources.user import UserRegister from resources.item import Item,ItemList from resources.store import Store, StoreList app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification. app.secret_key = 'key123' api = Api(app) jwt = JWT(app, authenticate, identity) api.add_resource(Store,'/store/<string:name>') api.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf api.add_resource(ItemList,'/items') api.add_resource(StoreList,'/stores') api.add_resource(UserRegister, '/register') if __name__ == '__main__': from db import db db.init_app(app) app.run(debug=True)
flexible
{ "blob_id": "7525691ece4fe66bb175e470db3ac78f701e3730", "index": 199, "step-1": "<mask token>\n", "step-2": "<mask token>\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-4": "import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item, ItemList\nfrom resources.store import Store, StoreList\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-5": "# create item based on name using post method, get specific item or list of items using get method, 
update item using put and delete item using del method.\nimport os\n\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\n\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item,ItemList\nfrom resources.store import Store, StoreList\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification.\napp.secret_key = 'key123'\napi = Api(app)\n\njwt = JWT(app, authenticate, identity)\n\napi.add_resource(Store,'/store/<string:name>')\napi.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf\napi.add_resource(ItemList,'/items')\napi.add_resource(StoreList,'/stores')\napi.add_resource(UserRegister, '/register')\n\n\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class MendeleyViewsTestCase(OsfTestCase): def setUp(self): super(MendeleyViewsTestCase, self).setUp() self.account = MendeleyAccountFactory() self.user = AuthUserFactory(external_accounts=[self.account]) self.account.display_name = self.user.fullname self.account.save() self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account) self.project = ProjectFactory(creator=self.user) self.node_addon = MendeleyNodeSettingsFactory(owner=self.project) self.node_addon.set_auth(external_account=self.account, user=self.user) self.provider = MendeleyCitationsProvider() self.node = MockNode() self.node.addon = self.node_addon self.id_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_id') self.secret_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_secret') self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.id_patcher.start() self.secret_patcher.start() def tearDown(self): self.id_patcher.stop() self.secret_patcher.stop() @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock) def test_check_mendeley_credentials(self, mock_client): mock_client.side_effect = HTTPError(403) assert_false(self.provider.check_credentials(self.node_addon)) mock_client.side_effect = HTTPError(402) with assert_raises(HTTPError): self.provider.check_credentials(self.node_addon) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_authorizer(self, mock_credentials): mock_credentials.return_value = True res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=self.user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_true(result['userHasAuth']) assert_true(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) 
assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_non_authorizer(self, mock_credentials): mock_credentials.return_value = True non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=non_authorizing_user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_false(result['userHasAuth']) assert_false(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials' ) def test_set_auth(self, mock_credentials): mock_credentials.return_value = True res = self.app.put_json(self.project.api_url_for( 'mendeley_add_user_auth'), {'external_account_id': self.account ._id}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_true(res.json['result']['userHasAuth']) assert_equal(self.node_addon.user_settings, self.user_addon) assert_equal(self.node_addon.external_account, self.account) def test_remove_user_auth(self): self.node_addon.set_auth(self.account, self.user) res = self.app.delete_json(self.project.api_url_for( 'mendeley_remove_user_auth'), {'external_account_id': self. 
account._id}, auth=self.user.auth) assert_equal(res.status_code, 200) self.node_addon.reload() assert_is_none(self.node_addon.user_settings) assert_is_none(self.node_addon.external_account) <|reserved_special_token_0|> @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_not_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') user = AuthUserFactory() user.add_addon('mendeley') self.project.add_contributor(user) self.project.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. _id, 'external_list_id': 'list'}, auth=user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) <|reserved_special_token_0|> <|reserved_special_token_0|> @httpretty.activate def test_mendeley_citation_list_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list'), auth=self.user.auth) root = res.json['contents'][0] assert_equal(root['kind'], 'folder') assert_equal(root['id'], 'ROOT') assert_equal(root['parent_list_id'], '__') <|reserved_special_token_0|> @httpretty.activate def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self): non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) self.node_addon.mendeley_list_id = ( 'e843da05-8818-47c2-8c37-41eebfc4fe3f') self.node_addon.save() httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), 
body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth= non_authorizing_user.auth, expect_errors=True) assert_equal(res.status_code, 403) <|reserved_special_token_1|> <|reserved_special_token_0|> class MendeleyViewsTestCase(OsfTestCase): def setUp(self): super(MendeleyViewsTestCase, self).setUp() self.account = MendeleyAccountFactory() self.user = AuthUserFactory(external_accounts=[self.account]) self.account.display_name = self.user.fullname self.account.save() self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account) self.project = ProjectFactory(creator=self.user) self.node_addon = MendeleyNodeSettingsFactory(owner=self.project) self.node_addon.set_auth(external_account=self.account, user=self.user) self.provider = MendeleyCitationsProvider() self.node = MockNode() self.node.addon = self.node_addon self.id_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_id') self.secret_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_secret') self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.id_patcher.start() self.secret_patcher.start() def tearDown(self): self.id_patcher.stop() self.secret_patcher.stop() @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock) def test_check_mendeley_credentials(self, mock_client): mock_client.side_effect = HTTPError(403) assert_false(self.provider.check_credentials(self.node_addon)) mock_client.side_effect = HTTPError(402) with assert_raises(HTTPError): self.provider.check_credentials(self.node_addon) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_authorizer(self, mock_credentials): mock_credentials.return_value = True res = 
self.app.get(self.project.api_url_for('mendeley_get_config'), auth=self.user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_true(result['userHasAuth']) assert_true(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_non_authorizer(self, mock_credentials): mock_credentials.return_value = True non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=non_authorizing_user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_false(result['userHasAuth']) assert_false(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials' ) def test_set_auth(self, mock_credentials): mock_credentials.return_value = True res = self.app.put_json(self.project.api_url_for( 'mendeley_add_user_auth'), {'external_account_id': self.account ._id}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_true(res.json['result']['userHasAuth']) assert_equal(self.node_addon.user_settings, self.user_addon) 
assert_equal(self.node_addon.external_account, self.account) def test_remove_user_auth(self): self.node_addon.set_auth(self.account, self.user) res = self.app.delete_json(self.project.api_url_for( 'mendeley_remove_user_auth'), {'external_account_id': self. account._id}, auth=self.user.auth) assert_equal(res.status_code, 200) self.node_addon.reload() assert_is_none(self.node_addon.user_settings) assert_is_none(self.node_addon.external_account) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') self.node_addon.associated_user_settings = [] self.node_addon.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. _id, 'external_list_id': 'list'}, auth=self.user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_not_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') user = AuthUserFactory() user.add_addon('mendeley') self.project.add_contributor(user) self.project.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. 
_id, 'external_list_id': 'list'}, auth=user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) <|reserved_special_token_0|> def test_widget_view_incomplete(self): assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_false(res['complete']) assert_is_none(res['list_id']) @httpretty.activate def test_mendeley_citation_list_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list'), auth=self.user.auth) root = res.json['contents'][0] assert_equal(root['kind'], 'folder') assert_equal(root['id'], 'ROOT') assert_equal(root['parent_list_id'], '__') @httpretty.activate def test_mendeley_citation_list_non_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self. 
user.auth) children = res.json['contents'] assert_equal(len(children), 7) assert_equal(children[0]['kind'], 'folder') assert_equal(children[1]['kind'], 'file') assert_true(children[1].get('csl') is not None) @httpretty.activate def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self): non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) self.node_addon.mendeley_list_id = ( 'e843da05-8818-47c2-8c37-41eebfc4fe3f') self.node_addon.save() httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth= non_authorizing_user.auth, expect_errors=True) assert_equal(res.status_code, 403) <|reserved_special_token_1|> <|reserved_special_token_0|> class MockFolder(object): <|reserved_special_token_0|> class MendeleyViewsTestCase(OsfTestCase): def setUp(self): super(MendeleyViewsTestCase, self).setUp() self.account = MendeleyAccountFactory() self.user = AuthUserFactory(external_accounts=[self.account]) self.account.display_name = self.user.fullname self.account.save() self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account) self.project = ProjectFactory(creator=self.user) self.node_addon = MendeleyNodeSettingsFactory(owner=self.project) self.node_addon.set_auth(external_account=self.account, user=self.user) self.provider = MendeleyCitationsProvider() self.node = MockNode() self.node.addon = self.node_addon self.id_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_id') self.secret_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_secret') self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf') 
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.id_patcher.start() self.secret_patcher.start() def tearDown(self): self.id_patcher.stop() self.secret_patcher.stop() @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock) def test_check_mendeley_credentials(self, mock_client): mock_client.side_effect = HTTPError(403) assert_false(self.provider.check_credentials(self.node_addon)) mock_client.side_effect = HTTPError(402) with assert_raises(HTTPError): self.provider.check_credentials(self.node_addon) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_authorizer(self, mock_credentials): mock_credentials.return_value = True res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=self.user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_true(result['userHasAuth']) assert_true(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_non_authorizer(self, mock_credentials): mock_credentials.return_value = True non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=non_authorizing_user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_false(result['userHasAuth']) assert_false(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) 
assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials' ) def test_set_auth(self, mock_credentials): mock_credentials.return_value = True res = self.app.put_json(self.project.api_url_for( 'mendeley_add_user_auth'), {'external_account_id': self.account ._id}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_true(res.json['result']['userHasAuth']) assert_equal(self.node_addon.user_settings, self.user_addon) assert_equal(self.node_addon.external_account, self.account) def test_remove_user_auth(self): self.node_addon.set_auth(self.account, self.user) res = self.app.delete_json(self.project.api_url_for( 'mendeley_remove_user_auth'), {'external_account_id': self. account._id}, auth=self.user.auth) assert_equal(res.status_code, 200) self.node_addon.reload() assert_is_none(self.node_addon.user_settings) assert_is_none(self.node_addon.external_account) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') self.node_addon.associated_user_settings = [] self.node_addon.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. 
_id, 'external_list_id': 'list'}, auth=self.user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_not_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') user = AuthUserFactory() user.add_addon('mendeley') self.project.add_contributor(user) self.project.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. _id, 'external_list_id': 'list'}, auth=user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) def test_mendeley_widget_view_complete(self): assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user =self.user)) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_true(res['complete']) assert_equal(res['list_id'], 'ROOT-ID') def test_widget_view_incomplete(self): assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_false(res['complete']) assert_is_none(res['list_id']) @httpretty.activate def test_mendeley_citation_list_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list'), 
auth=self.user.auth) root = res.json['contents'][0] assert_equal(root['kind'], 'folder') assert_equal(root['id'], 'ROOT') assert_equal(root['parent_list_id'], '__') @httpretty.activate def test_mendeley_citation_list_non_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self. user.auth) children = res.json['contents'] assert_equal(len(children), 7) assert_equal(children[0]['kind'], 'folder') assert_equal(children[1]['kind'], 'file') assert_true(children[1].get('csl') is not None) @httpretty.activate def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self): non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) self.node_addon.mendeley_list_id = ( 'e843da05-8818-47c2-8c37-41eebfc4fe3f') self.node_addon.save() httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth= non_authorizing_user.auth, expect_errors=True) assert_equal(res.status_code, 403) <|reserved_special_token_1|> <|reserved_special_token_0|> class MockNode(object): <|reserved_special_token_0|> @property def is_deleted(self): return False <|reserved_special_token_0|> def get_addon(self, name): if name == 'mendeley': return self.addon return None class MockFolder(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) class 
MendeleyViewsTestCase(OsfTestCase): def setUp(self): super(MendeleyViewsTestCase, self).setUp() self.account = MendeleyAccountFactory() self.user = AuthUserFactory(external_accounts=[self.account]) self.account.display_name = self.user.fullname self.account.save() self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account) self.project = ProjectFactory(creator=self.user) self.node_addon = MendeleyNodeSettingsFactory(owner=self.project) self.node_addon.set_auth(external_account=self.account, user=self.user) self.provider = MendeleyCitationsProvider() self.node = MockNode() self.node.addon = self.node_addon self.id_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_id') self.secret_patcher = mock.patch( 'website.addons.mendeley.model.Mendeley.client_secret') self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.id_patcher.start() self.secret_patcher.start() def tearDown(self): self.id_patcher.stop() self.secret_patcher.stop() @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock) def test_check_mendeley_credentials(self, mock_client): mock_client.side_effect = HTTPError(403) assert_false(self.provider.check_credentials(self.node_addon)) mock_client.side_effect = HTTPError(402) with assert_raises(HTTPError): self.provider.check_credentials(self.node_addon) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_authorizer(self, mock_credentials): mock_credentials.return_value = True res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=self.user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_true(result['userHasAuth']) assert_true(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], 
self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials' ) def test_serialize_settings_non_authorizer(self, mock_credentials): mock_credentials.return_value = True non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) res = self.app.get(self.project.api_url_for('mendeley_get_config'), auth=non_authorizing_user.auth) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_false(result['userHasAuth']) assert_false(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch( 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials' ) def test_set_auth(self, mock_credentials): mock_credentials.return_value = True res = self.app.put_json(self.project.api_url_for( 'mendeley_add_user_auth'), {'external_account_id': self.account ._id}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_true(res.json['result']['userHasAuth']) assert_equal(self.node_addon.user_settings, self.user_addon) assert_equal(self.node_addon.external_account, self.account) def test_remove_user_auth(self): self.node_addon.set_auth(self.account, self.user) res = self.app.delete_json(self.project.api_url_for( 'mendeley_remove_user_auth'), {'external_account_id': self. 
account._id}, auth=self.user.auth) assert_equal(res.status_code, 200) self.node_addon.reload() assert_is_none(self.node_addon.user_settings) assert_is_none(self.node_addon.external_account) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') self.node_addon.associated_user_settings = [] self.node_addon.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. _id, 'external_list_id': 'list'}, auth=self.user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_not_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') user = AuthUserFactory() user.add_addon('mendeley') self.project.add_contributor(user) self.project.save() res = self.app.put_json(self.project.api_url_for( 'mendeley_set_config'), {'external_account_id': self.account. 
_id, 'external_list_id': 'list'}, auth=user.auth) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None) expected = {'result': serializer.serialized_node_settings} assert_equal(res.json, expected) def test_mendeley_widget_view_complete(self): assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user =self.user)) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_true(res['complete']) assert_equal(res['list_id'], 'ROOT-ID') def test_widget_view_incomplete(self): assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_false(res['complete']) assert_is_none(res['list_id']) @httpretty.activate def test_mendeley_citation_list_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list'), auth=self.user.auth) root = res.json['contents'][0] assert_equal(root['kind'], 'folder') assert_equal(root['id'], 'ROOT') assert_equal(root['parent_list_id'], '__') @httpretty.activate def test_mendeley_citation_list_non_root(self): httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self. 
user.auth) children = res.json['contents'] assert_equal(len(children), 7) assert_equal(children[0]['kind'], 'folder') assert_equal(children[1]['kind'], 'file') assert_true(children[1].get('csl') is not None) @httpretty.activate def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self): non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) self.node_addon.mendeley_list_id = ( 'e843da05-8818-47c2-8c37-41eebfc4fe3f') self.node_addon.save() httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type= 'application/json') httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type= 'application/json') res = self.app.get(self.project.api_url_for( 'mendeley_citation_list', mendeley_list_id='ROOT'), auth= non_authorizing_user.auth, expect_errors=True) assert_equal(res.status_code, 403) <|reserved_special_token_1|> # -*- coding: utf-8 -*- from nose.tools import * # noqa import mock import httpretty from tests.base import OsfTestCase from tests.factories import AuthUserFactory, ProjectFactory import urlparse from framework.auth import Auth from website.addons.mendeley.tests.factories import ( MendeleyAccountFactory, MendeleyUserSettingsFactory, MendeleyNodeSettingsFactory ) from framework.exceptions import HTTPError from website.addons.mendeley.provider import MendeleyCitationsProvider from website.addons.mendeley.serializer import MendeleySerializer from utils import mock_responses API_URL = 'https://api.mendeley.com' class MockNode(object): addon = None @property def is_deleted(self): return False @property def is_public(self): return True def get_addon(self, name): if name == 'mendeley': return self.addon return None class MockFolder(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) class MendeleyViewsTestCase(OsfTestCase): def setUp(self): 
super(MendeleyViewsTestCase, self).setUp() self.account = MendeleyAccountFactory() self.user = AuthUserFactory(external_accounts=[self.account]) self.account.display_name = self.user.fullname self.account.save() self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account) self.project = ProjectFactory(creator=self.user) self.node_addon = MendeleyNodeSettingsFactory(owner=self.project) self.node_addon.set_auth(external_account=self.account, user=self.user) self.provider = MendeleyCitationsProvider() #self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'}) self.node = MockNode() self.node.addon = self.node_addon self.id_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_id') self.secret_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_secret') self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf') self.id_patcher.start() self.secret_patcher.start() def tearDown(self): self.id_patcher.stop() self.secret_patcher.stop() @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock) def test_check_mendeley_credentials(self, mock_client): mock_client.side_effect = HTTPError(403) assert_false(self.provider.check_credentials(self.node_addon)) mock_client.side_effect = HTTPError(402) with assert_raises(HTTPError): self.provider.check_credentials(self.node_addon) @mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials') def test_serialize_settings_authorizer(self, mock_credentials): #"""dict: a serialized version of user-specific addon settings""" mock_credentials.return_value = True res = self.app.get( self.project.api_url_for('mendeley_get_config'), auth=self.user.auth, ) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_true(result['userHasAuth']) assert_true(result['userIsOwner']) 
assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials') def test_serialize_settings_non_authorizer(self, mock_credentials): #"""dict: a serialized version of user-specific addon settings""" mock_credentials.return_value = True non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) res = self.app.get( self.project.api_url_for('mendeley_get_config'), auth=non_authorizing_user.auth, ) result = res.json['result'] assert_true(result['nodeHasAuth']) assert_false(result['userHasAuth']) assert_false(result['userIsOwner']) assert_true(result['validCredentials']) assert_equal(result['folder'], {'name': ''}) assert_equal(result['ownerName'], self.user.fullname) assert_true(result['urls']['auth']) assert_true(result['urls']['config']) assert_true(result['urls']['deauthorize']) assert_true(result['urls']['folders']) assert_true(result['urls']['importAuth']) assert_true(result['urls']['settings']) @mock.patch('website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials') def test_set_auth(self, mock_credentials): mock_credentials.return_value = True res = self.app.put_json( self.project.api_url_for('mendeley_add_user_auth'), { 'external_account_id': self.account._id, }, auth=self.user.auth, ) assert_equal( res.status_code, 200 ) assert_true(res.json['result']['userHasAuth']) assert_equal( self.node_addon.user_settings, self.user_addon ) assert_equal( self.node_addon.external_account, self.account ) def test_remove_user_auth(self): self.node_addon.set_auth(self.account, self.user) res = self.app.delete_json( 
self.project.api_url_for('mendeley_remove_user_auth'), { 'external_account_id': self.account._id, }, auth=self.user.auth, ) assert_equal( res.status_code, 200 ) self.node_addon.reload() assert_is_none(self.node_addon.user_settings) assert_is_none(self.node_addon.external_account) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') # Settings config updates node settings self.node_addon.associated_user_settings = [] self.node_addon.save() res = self.app.put_json( self.project.api_url_for('mendeley_set_config'), { 'external_account_id': self.account._id, 'external_list_id': 'list', }, auth=self.user.auth, ) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon) expected = { 'result': serializer.serialized_node_settings } assert_equal(res.json, expected) @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata') def test_set_config_not_owner(self, mock_metadata): mock_metadata.return_value = MockFolder(name='Fake Folder') user = AuthUserFactory() user.add_addon('mendeley') self.project.add_contributor(user) self.project.save() res = self.app.put_json( self.project.api_url_for('mendeley_set_config'), { 'external_account_id': self.account._id, 'external_list_id': 'list', }, auth=user.auth, ) self.node_addon.reload() assert_equal(self.user_addon, self.node_addon.user_settings) serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None) expected = { 'result': serializer.serialized_node_settings } assert_equal(res.json, expected) def test_mendeley_widget_view_complete(self): # JSON: everything a widget needs assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user)) url = 
self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_true(res['complete']) assert_equal(res['list_id'], 'ROOT-ID') def test_widget_view_incomplete(self): # JSON: tell the widget when it hasn't been configured assert_false(self.node_addon.complete) assert_equal(self.node_addon.mendeley_list_id, None) url = self.project.api_url_for('mendeley_widget') res = self.app.get(url, auth=self.user.auth).json assert_false(res['complete']) assert_is_none(res['list_id']) @httpretty.activate def test_mendeley_citation_list_root(self): httpretty.register_uri( httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type='application/json' ) res = self.app.get( self.project.api_url_for('mendeley_citation_list'), auth=self.user.auth ) root = res.json['contents'][0] assert_equal(root['kind'], 'folder') assert_equal(root['id'], 'ROOT') assert_equal(root['parent_list_id'], '__') @httpretty.activate def test_mendeley_citation_list_non_root(self): httpretty.register_uri( httpretty.GET, urlparse.urljoin(API_URL, 'folders'), body=mock_responses['folders'], content_type='application/json' ) httpretty.register_uri( httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type='application/json' ) res = self.app.get( self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.user.auth ) children = res.json['contents'] assert_equal(len(children), 7) assert_equal(children[0]['kind'], 'folder') assert_equal(children[1]['kind'], 'file') assert_true(children[1].get('csl') is not None) @httpretty.activate def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self): non_authorizing_user = AuthUserFactory() self.project.add_contributor(non_authorizing_user, save=True) self.node_addon.mendeley_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f' self.node_addon.save() httpretty.register_uri( httpretty.GET, urlparse.urljoin(API_URL, 'folders'), 
body=mock_responses['folders'], content_type='application/json' ) httpretty.register_uri( httpretty.GET, urlparse.urljoin(API_URL, 'documents'), body=mock_responses['documents'], content_type='application/json' ) res = self.app.get( self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'), auth=non_authorizing_user.auth, expect_errors=True ) assert_equal(res.status_code, 403)
flexible
{ "blob_id": "f69351474fb3eb48eeb65eaf1aa46d2f4a390471", "index": 1887, "step-1": "<mask token>\n\n\nclass MendeleyViewsTestCase(OsfTestCase):\n\n def setUp(self):\n super(MendeleyViewsTestCase, self).setUp()\n self.account = MendeleyAccountFactory()\n self.user = AuthUserFactory(external_accounts=[self.account])\n self.account.display_name = self.user.fullname\n self.account.save()\n self.user_addon = MendeleyUserSettingsFactory(owner=self.user,\n external_account=self.account)\n self.project = ProjectFactory(creator=self.user)\n self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)\n self.node_addon.set_auth(external_account=self.account, user=self.user)\n self.provider = MendeleyCitationsProvider()\n self.node = MockNode()\n self.node.addon = self.node_addon\n self.id_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_id')\n self.secret_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_secret')\n self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.id_patcher.start()\n self.secret_patcher.start()\n\n def tearDown(self):\n self.id_patcher.stop()\n self.secret_patcher.stop()\n\n @mock.patch('website.addons.mendeley.model.Mendeley.client',\n new_callable=mock.PropertyMock)\n def test_check_mendeley_credentials(self, mock_client):\n mock_client.side_effect = HTTPError(403)\n assert_false(self.provider.check_credentials(self.node_addon))\n mock_client.side_effect = HTTPError(402)\n with assert_raises(HTTPError):\n self.provider.check_credentials(self.node_addon)\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=self.user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n 
assert_true(result['userHasAuth'])\n assert_true(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_non_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=non_authorizing_user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_false(result['userHasAuth'])\n assert_false(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'\n )\n def test_set_auth(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_add_user_auth'), {'external_account_id': self.account\n ._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n assert_true(res.json['result']['userHasAuth'])\n assert_equal(self.node_addon.user_settings, self.user_addon)\n assert_equal(self.node_addon.external_account, self.account)\n\n def 
test_remove_user_auth(self):\n self.node_addon.set_auth(self.account, self.user)\n res = self.app.delete_json(self.project.api_url_for(\n 'mendeley_remove_user_auth'), {'external_account_id': self.\n account._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n self.node_addon.reload()\n assert_is_none(self.node_addon.user_settings)\n assert_is_none(self.node_addon.external_account)\n <mask token>\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_not_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n user = AuthUserFactory()\n user.add_addon('mendeley')\n self.project.add_contributor(user)\n self.project.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=None)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n <mask token>\n <mask token>\n\n @httpretty.activate\n def test_mendeley_citation_list_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list'), auth=self.user.auth)\n root = res.json['contents'][0]\n assert_equal(root['kind'], 'folder')\n assert_equal(root['id'], 'ROOT')\n assert_equal(root['parent_list_id'], '__')\n <mask token>\n\n @httpretty.activate\n def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n self.node_addon.mendeley_list_id = (\n 'e843da05-8818-47c2-8c37-41eebfc4fe3f')\n self.node_addon.save()\n 
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=\n non_authorizing_user.auth, expect_errors=True)\n assert_equal(res.status_code, 403)\n", "step-2": "<mask token>\n\n\nclass MendeleyViewsTestCase(OsfTestCase):\n\n def setUp(self):\n super(MendeleyViewsTestCase, self).setUp()\n self.account = MendeleyAccountFactory()\n self.user = AuthUserFactory(external_accounts=[self.account])\n self.account.display_name = self.user.fullname\n self.account.save()\n self.user_addon = MendeleyUserSettingsFactory(owner=self.user,\n external_account=self.account)\n self.project = ProjectFactory(creator=self.user)\n self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)\n self.node_addon.set_auth(external_account=self.account, user=self.user)\n self.provider = MendeleyCitationsProvider()\n self.node = MockNode()\n self.node.addon = self.node_addon\n self.id_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_id')\n self.secret_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_secret')\n self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.id_patcher.start()\n self.secret_patcher.start()\n\n def tearDown(self):\n self.id_patcher.stop()\n self.secret_patcher.stop()\n\n @mock.patch('website.addons.mendeley.model.Mendeley.client',\n new_callable=mock.PropertyMock)\n def test_check_mendeley_credentials(self, mock_client):\n mock_client.side_effect = HTTPError(403)\n assert_false(self.provider.check_credentials(self.node_addon))\n mock_client.side_effect = HTTPError(402)\n with 
assert_raises(HTTPError):\n self.provider.check_credentials(self.node_addon)\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=self.user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_true(result['userHasAuth'])\n assert_true(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_non_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=non_authorizing_user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_false(result['userHasAuth'])\n assert_false(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'\n )\n def test_set_auth(self, 
mock_credentials):\n mock_credentials.return_value = True\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_add_user_auth'), {'external_account_id': self.account\n ._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n assert_true(res.json['result']['userHasAuth'])\n assert_equal(self.node_addon.user_settings, self.user_addon)\n assert_equal(self.node_addon.external_account, self.account)\n\n def test_remove_user_auth(self):\n self.node_addon.set_auth(self.account, self.user)\n res = self.app.delete_json(self.project.api_url_for(\n 'mendeley_remove_user_auth'), {'external_account_id': self.\n account._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n self.node_addon.reload()\n assert_is_none(self.node_addon.user_settings)\n assert_is_none(self.node_addon.external_account)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n self.node_addon.associated_user_settings = []\n self.node_addon.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=self.user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=self.user_addon)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_not_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n user = AuthUserFactory()\n user.add_addon('mendeley')\n self.project.add_contributor(user)\n self.project.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, 
auth=user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=None)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n <mask token>\n\n def test_widget_view_incomplete(self):\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n assert_false(res['complete'])\n assert_is_none(res['list_id'])\n\n @httpretty.activate\n def test_mendeley_citation_list_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list'), auth=self.user.auth)\n root = res.json['contents'][0]\n assert_equal(root['kind'], 'folder')\n assert_equal(root['id'], 'ROOT')\n assert_equal(root['parent_list_id'], '__')\n\n @httpretty.activate\n def test_mendeley_citation_list_non_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.\n user.auth)\n children = res.json['contents']\n assert_equal(len(children), 7)\n assert_equal(children[0]['kind'], 'folder')\n assert_equal(children[1]['kind'], 'file')\n assert_true(children[1].get('csl') is not None)\n\n @httpretty.activate\n def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n 
self.node_addon.mendeley_list_id = (\n 'e843da05-8818-47c2-8c37-41eebfc4fe3f')\n self.node_addon.save()\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=\n non_authorizing_user.auth, expect_errors=True)\n assert_equal(res.status_code, 403)\n", "step-3": "<mask token>\n\n\nclass MockFolder(object):\n <mask token>\n\n\nclass MendeleyViewsTestCase(OsfTestCase):\n\n def setUp(self):\n super(MendeleyViewsTestCase, self).setUp()\n self.account = MendeleyAccountFactory()\n self.user = AuthUserFactory(external_accounts=[self.account])\n self.account.display_name = self.user.fullname\n self.account.save()\n self.user_addon = MendeleyUserSettingsFactory(owner=self.user,\n external_account=self.account)\n self.project = ProjectFactory(creator=self.user)\n self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)\n self.node_addon.set_auth(external_account=self.account, user=self.user)\n self.provider = MendeleyCitationsProvider()\n self.node = MockNode()\n self.node.addon = self.node_addon\n self.id_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_id')\n self.secret_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_secret')\n self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.id_patcher.start()\n self.secret_patcher.start()\n\n def tearDown(self):\n self.id_patcher.stop()\n self.secret_patcher.stop()\n\n @mock.patch('website.addons.mendeley.model.Mendeley.client',\n new_callable=mock.PropertyMock)\n def test_check_mendeley_credentials(self, mock_client):\n mock_client.side_effect = 
HTTPError(403)\n assert_false(self.provider.check_credentials(self.node_addon))\n mock_client.side_effect = HTTPError(402)\n with assert_raises(HTTPError):\n self.provider.check_credentials(self.node_addon)\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=self.user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_true(result['userHasAuth'])\n assert_true(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_non_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=non_authorizing_user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_false(result['userHasAuth'])\n assert_false(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n 
assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'\n )\n def test_set_auth(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_add_user_auth'), {'external_account_id': self.account\n ._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n assert_true(res.json['result']['userHasAuth'])\n assert_equal(self.node_addon.user_settings, self.user_addon)\n assert_equal(self.node_addon.external_account, self.account)\n\n def test_remove_user_auth(self):\n self.node_addon.set_auth(self.account, self.user)\n res = self.app.delete_json(self.project.api_url_for(\n 'mendeley_remove_user_auth'), {'external_account_id': self.\n account._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n self.node_addon.reload()\n assert_is_none(self.node_addon.user_settings)\n assert_is_none(self.node_addon.external_account)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n self.node_addon.associated_user_settings = []\n self.node_addon.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=self.user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=self.user_addon)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_not_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n user = AuthUserFactory()\n user.add_addon('mendeley')\n self.project.add_contributor(user)\n 
self.project.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=None)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n\n def test_mendeley_widget_view_complete(self):\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user\n =self.user))\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n assert_true(res['complete'])\n assert_equal(res['list_id'], 'ROOT-ID')\n\n def test_widget_view_incomplete(self):\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n assert_false(res['complete'])\n assert_is_none(res['list_id'])\n\n @httpretty.activate\n def test_mendeley_citation_list_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list'), auth=self.user.auth)\n root = res.json['contents'][0]\n assert_equal(root['kind'], 'folder')\n assert_equal(root['id'], 'ROOT')\n assert_equal(root['parent_list_id'], '__')\n\n @httpretty.activate\n def test_mendeley_citation_list_non_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 
'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.\n user.auth)\n children = res.json['contents']\n assert_equal(len(children), 7)\n assert_equal(children[0]['kind'], 'folder')\n assert_equal(children[1]['kind'], 'file')\n assert_true(children[1].get('csl') is not None)\n\n @httpretty.activate\n def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n self.node_addon.mendeley_list_id = (\n 'e843da05-8818-47c2-8c37-41eebfc4fe3f')\n self.node_addon.save()\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=\n non_authorizing_user.auth, expect_errors=True)\n assert_equal(res.status_code, 403)\n", "step-4": "<mask token>\n\n\nclass MockNode(object):\n <mask token>\n\n @property\n def is_deleted(self):\n return False\n <mask token>\n\n def get_addon(self, name):\n if name == 'mendeley':\n return self.addon\n return None\n\n\nclass MockFolder(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n\nclass MendeleyViewsTestCase(OsfTestCase):\n\n def setUp(self):\n super(MendeleyViewsTestCase, self).setUp()\n self.account = MendeleyAccountFactory()\n self.user = AuthUserFactory(external_accounts=[self.account])\n self.account.display_name = self.user.fullname\n self.account.save()\n self.user_addon = MendeleyUserSettingsFactory(owner=self.user,\n external_account=self.account)\n self.project = ProjectFactory(creator=self.user)\n self.node_addon = 
MendeleyNodeSettingsFactory(owner=self.project)\n self.node_addon.set_auth(external_account=self.account, user=self.user)\n self.provider = MendeleyCitationsProvider()\n self.node = MockNode()\n self.node.addon = self.node_addon\n self.id_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_id')\n self.secret_patcher = mock.patch(\n 'website.addons.mendeley.model.Mendeley.client_secret')\n self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.id_patcher.start()\n self.secret_patcher.start()\n\n def tearDown(self):\n self.id_patcher.stop()\n self.secret_patcher.stop()\n\n @mock.patch('website.addons.mendeley.model.Mendeley.client',\n new_callable=mock.PropertyMock)\n def test_check_mendeley_credentials(self, mock_client):\n mock_client.side_effect = HTTPError(403)\n assert_false(self.provider.check_credentials(self.node_addon))\n mock_client.side_effect = HTTPError(402)\n with assert_raises(HTTPError):\n self.provider.check_credentials(self.node_addon)\n\n @mock.patch(\n 'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=self.user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_true(result['userHasAuth'])\n assert_true(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'\n )\n def test_serialize_settings_non_authorizer(self, mock_credentials):\n mock_credentials.return_value = True\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n res = self.app.get(self.project.api_url_for('mendeley_get_config'),\n auth=non_authorizing_user.auth)\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_false(result['userHasAuth'])\n assert_false(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch(\n 'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'\n )\n def test_set_auth(self, mock_credentials):\n mock_credentials.return_value = True\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_add_user_auth'), {'external_account_id': self.account\n ._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n assert_true(res.json['result']['userHasAuth'])\n assert_equal(self.node_addon.user_settings, self.user_addon)\n assert_equal(self.node_addon.external_account, self.account)\n\n def test_remove_user_auth(self):\n self.node_addon.set_auth(self.account, self.user)\n res = self.app.delete_json(self.project.api_url_for(\n 'mendeley_remove_user_auth'), {'external_account_id': self.\n account._id}, auth=self.user.auth)\n assert_equal(res.status_code, 200)\n self.node_addon.reload()\n assert_is_none(self.node_addon.user_settings)\n assert_is_none(self.node_addon.external_account)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_owner(self, 
mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n self.node_addon.associated_user_settings = []\n self.node_addon.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=self.user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=self.user_addon)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_not_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n user = AuthUserFactory()\n user.add_addon('mendeley')\n self.project.add_contributor(user)\n self.project.save()\n res = self.app.put_json(self.project.api_url_for(\n 'mendeley_set_config'), {'external_account_id': self.account.\n _id, 'external_list_id': 'list'}, auth=user.auth)\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon,\n user_settings=None)\n expected = {'result': serializer.serialized_node_settings}\n assert_equal(res.json, expected)\n\n def test_mendeley_widget_view_complete(self):\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user\n =self.user))\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n assert_true(res['complete'])\n assert_equal(res['list_id'], 'ROOT-ID')\n\n def test_widget_view_incomplete(self):\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, 
auth=self.user.auth).json\n assert_false(res['complete'])\n assert_is_none(res['list_id'])\n\n @httpretty.activate\n def test_mendeley_citation_list_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list'), auth=self.user.auth)\n root = res.json['contents'][0]\n assert_equal(root['kind'], 'folder')\n assert_equal(root['id'], 'ROOT')\n assert_equal(root['parent_list_id'], '__')\n\n @httpretty.activate\n def test_mendeley_citation_list_non_root(self):\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.\n user.auth)\n children = res.json['contents']\n assert_equal(len(children), 7)\n assert_equal(children[0]['kind'], 'folder')\n assert_equal(children[1]['kind'], 'file')\n assert_true(children[1].get('csl') is not None)\n\n @httpretty.activate\n def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n self.node_addon.mendeley_list_id = (\n 'e843da05-8818-47c2-8c37-41eebfc4fe3f')\n self.node_addon.save()\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'folders'), body=mock_responses['folders'], content_type=\n 'application/json')\n httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,\n 'documents'), body=mock_responses['documents'], content_type=\n 'application/json')\n res = self.app.get(self.project.api_url_for(\n 'mendeley_citation_list', mendeley_list_id='ROOT'), auth=\n 
non_authorizing_user.auth, expect_errors=True)\n assert_equal(res.status_code, 403)\n", "step-5": "# -*- coding: utf-8 -*-\nfrom nose.tools import * # noqa\n\nimport mock\nimport httpretty\n\nfrom tests.base import OsfTestCase\nfrom tests.factories import AuthUserFactory, ProjectFactory\n\nimport urlparse\n\nfrom framework.auth import Auth\n\nfrom website.addons.mendeley.tests.factories import (\n MendeleyAccountFactory,\n MendeleyUserSettingsFactory,\n MendeleyNodeSettingsFactory\n)\n\nfrom framework.exceptions import HTTPError\nfrom website.addons.mendeley.provider import MendeleyCitationsProvider\nfrom website.addons.mendeley.serializer import MendeleySerializer\n\nfrom utils import mock_responses\n\nAPI_URL = 'https://api.mendeley.com'\n\nclass MockNode(object):\n\n addon = None\n\n @property\n def is_deleted(self):\n return False\n\n @property\n def is_public(self):\n return True\n\n def get_addon(self, name):\n if name == 'mendeley':\n return self.addon\n return None\n\nclass MockFolder(object):\n def __init__(self, **kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\nclass MendeleyViewsTestCase(OsfTestCase):\n\n def setUp(self):\n super(MendeleyViewsTestCase, self).setUp()\n self.account = MendeleyAccountFactory()\n self.user = AuthUserFactory(external_accounts=[self.account])\n self.account.display_name = self.user.fullname\n self.account.save()\n self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account)\n self.project = ProjectFactory(creator=self.user)\n self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)\n self.node_addon.set_auth(external_account=self.account, user=self.user)\n self.provider = MendeleyCitationsProvider()\n #self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'})\n self.node = MockNode()\n self.node.addon = self.node_addon\n self.id_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_id')\n self.secret_patcher = 
mock.patch('website.addons.mendeley.model.Mendeley.client_secret')\n self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')\n self.id_patcher.start()\n self.secret_patcher.start()\n\n def tearDown(self):\n self.id_patcher.stop()\n self.secret_patcher.stop()\n\n @mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock)\n def test_check_mendeley_credentials(self, mock_client):\n mock_client.side_effect = HTTPError(403)\n assert_false(self.provider.check_credentials(self.node_addon))\n\n mock_client.side_effect = HTTPError(402)\n with assert_raises(HTTPError):\n self.provider.check_credentials(self.node_addon)\n\n @mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials')\n def test_serialize_settings_authorizer(self, mock_credentials):\n #\"\"\"dict: a serialized version of user-specific addon settings\"\"\"\n mock_credentials.return_value = True\n res = self.app.get(\n self.project.api_url_for('mendeley_get_config'),\n auth=self.user.auth,\n )\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_true(result['userHasAuth'])\n assert_true(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials')\n def test_serialize_settings_non_authorizer(self, mock_credentials):\n #\"\"\"dict: a serialized version of user-specific addon settings\"\"\"\n mock_credentials.return_value = True\n non_authorizing_user = AuthUserFactory()\n 
self.project.add_contributor(non_authorizing_user, save=True)\n res = self.app.get(\n self.project.api_url_for('mendeley_get_config'),\n auth=non_authorizing_user.auth,\n )\n result = res.json['result']\n assert_true(result['nodeHasAuth'])\n assert_false(result['userHasAuth'])\n assert_false(result['userIsOwner'])\n assert_true(result['validCredentials'])\n assert_equal(result['folder'], {'name': ''})\n assert_equal(result['ownerName'], self.user.fullname)\n assert_true(result['urls']['auth'])\n assert_true(result['urls']['config'])\n assert_true(result['urls']['deauthorize'])\n assert_true(result['urls']['folders'])\n assert_true(result['urls']['importAuth'])\n assert_true(result['urls']['settings'])\n\n @mock.patch('website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials')\n def test_set_auth(self, mock_credentials):\n\n mock_credentials.return_value = True\n res = self.app.put_json(\n self.project.api_url_for('mendeley_add_user_auth'),\n {\n 'external_account_id': self.account._id,\n },\n auth=self.user.auth,\n )\n\n assert_equal(\n res.status_code,\n 200\n )\n\n assert_true(res.json['result']['userHasAuth'])\n\n assert_equal(\n self.node_addon.user_settings,\n self.user_addon\n )\n assert_equal(\n self.node_addon.external_account,\n self.account\n )\n\n def test_remove_user_auth(self):\n self.node_addon.set_auth(self.account, self.user)\n\n res = self.app.delete_json(\n self.project.api_url_for('mendeley_remove_user_auth'),\n {\n 'external_account_id': self.account._id,\n },\n auth=self.user.auth,\n )\n\n assert_equal(\n res.status_code,\n 200\n )\n\n self.node_addon.reload()\n\n assert_is_none(self.node_addon.user_settings)\n assert_is_none(self.node_addon.external_account)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n # Settings config updates node settings\n 
self.node_addon.associated_user_settings = []\n self.node_addon.save()\n res = self.app.put_json(\n self.project.api_url_for('mendeley_set_config'),\n {\n 'external_account_id': self.account._id,\n 'external_list_id': 'list',\n },\n auth=self.user.auth,\n )\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon)\n expected = {\n 'result': serializer.serialized_node_settings\n }\n assert_equal(res.json, expected)\n\n @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')\n def test_set_config_not_owner(self, mock_metadata):\n mock_metadata.return_value = MockFolder(name='Fake Folder')\n user = AuthUserFactory()\n user.add_addon('mendeley')\n self.project.add_contributor(user)\n self.project.save()\n res = self.app.put_json(\n self.project.api_url_for('mendeley_set_config'),\n {\n 'external_account_id': self.account._id,\n 'external_list_id': 'list',\n },\n auth=user.auth,\n )\n self.node_addon.reload()\n assert_equal(self.user_addon, self.node_addon.user_settings)\n serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None)\n expected = {\n 'result': serializer.serialized_node_settings\n }\n assert_equal(res.json, expected)\n\n def test_mendeley_widget_view_complete(self):\n # JSON: everything a widget needs\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user))\n url = self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n\n assert_true(res['complete'])\n assert_equal(res['list_id'], 'ROOT-ID')\n\n def test_widget_view_incomplete(self):\n # JSON: tell the widget when it hasn't been configured\n assert_false(self.node_addon.complete)\n assert_equal(self.node_addon.mendeley_list_id, None)\n url = 
self.project.api_url_for('mendeley_widget')\n res = self.app.get(url, auth=self.user.auth).json\n\n assert_false(res['complete'])\n assert_is_none(res['list_id'])\n\n @httpretty.activate\n def test_mendeley_citation_list_root(self):\n\n httpretty.register_uri(\n httpretty.GET,\n urlparse.urljoin(API_URL, 'folders'),\n body=mock_responses['folders'],\n content_type='application/json'\n )\n\n res = self.app.get(\n self.project.api_url_for('mendeley_citation_list'),\n auth=self.user.auth\n )\n root = res.json['contents'][0]\n assert_equal(root['kind'], 'folder')\n assert_equal(root['id'], 'ROOT')\n assert_equal(root['parent_list_id'], '__')\n\n @httpretty.activate\n def test_mendeley_citation_list_non_root(self):\n\n httpretty.register_uri(\n httpretty.GET,\n urlparse.urljoin(API_URL, 'folders'),\n body=mock_responses['folders'],\n content_type='application/json'\n )\n\n httpretty.register_uri(\n httpretty.GET,\n urlparse.urljoin(API_URL, 'documents'),\n body=mock_responses['documents'],\n content_type='application/json'\n )\n\n res = self.app.get(\n self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),\n auth=self.user.auth\n )\n\n children = res.json['contents']\n assert_equal(len(children), 7)\n assert_equal(children[0]['kind'], 'folder')\n assert_equal(children[1]['kind'], 'file')\n assert_true(children[1].get('csl') is not None)\n\n @httpretty.activate\n def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):\n\n non_authorizing_user = AuthUserFactory()\n self.project.add_contributor(non_authorizing_user, save=True)\n\n self.node_addon.mendeley_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f'\n self.node_addon.save()\n\n httpretty.register_uri(\n httpretty.GET,\n urlparse.urljoin(API_URL, 'folders'),\n body=mock_responses['folders'],\n content_type='application/json'\n )\n\n httpretty.register_uri(\n httpretty.GET,\n urlparse.urljoin(API_URL, 'documents'),\n body=mock_responses['documents'],\n 
content_type='application/json'\n )\n\n res = self.app.get(\n self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),\n auth=non_authorizing_user.auth,\n expect_errors=True\n )\n assert_equal(res.status_code, 403)\n", "step-ids": [ 11, 14, 16, 20, 25 ] }
[ 11, 14, 16, 20, 25 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @itchat.msg_register(itchat.content.TEXT) def text_reply(msg): return msg.text <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @itchat.msg_register(itchat.content.TEXT) def text_reply(msg): return msg.text itchat.auto_login() itchat.run() <|reserved_special_token_1|> import itchat @itchat.msg_register(itchat.content.TEXT) def text_reply(msg): return msg.text itchat.auto_login() itchat.run() <|reserved_special_token_1|> # @Time : 2019/12/12 15:54 # @Author : Libuda # @FileName: 远程服务器文件监控.py # @Software: PyCharm import itchat @itchat.msg_register(itchat.content.TEXT) def text_reply(msg): return msg.text itchat.auto_login() itchat.run()
flexible
{ "blob_id": "2b87b8571664989e78790bd9df23eee9cbd44035", "index": 1363, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n", "step-4": "import itchat\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n", "step-5": "# @Time : 2019/12/12 15:54\n# @Author : Libuda\n# @FileName: 远程服务器文件监控.py\n# @Software: PyCharm\nimport itchat\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class TestShellBootstrap(object): <|reserved_special_token_0|> def tearDown(self): self.shell.restore() <|reserved_special_token_0|> def _test_bootstrap_stream_type(self, attr): assert_is_instance(getattr(sys, attr), ThreadLocalStream) <|reserved_special_token_0|> def test_bootstrap_print(self): assert_equal(print, pypsi_print) def test_restore_print(self): self.shell.restore() assert_equal(print, self.real_print) def test_restore_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_restore_stream_type, attr yield self._test_restore_stream_instance, attr <|reserved_special_token_0|> def _test_restore_stream_instance(self, attr): self.shell.restore() assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr)) <|reserved_special_token_1|> <|reserved_special_token_0|> class TestShellBootstrap(object): def setUp(self): self.real_stdout = sys.stdout self.real_stderr = sys.stderr self.real_stdin = sys.stdin self.real_print = print self.shell = PypsiTestShell() def tearDown(self): self.shell.restore() def test_bootstrap_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_bootstrap_stream_type, attr yield self._test_bootstrap_stream_instance, attr def _test_bootstrap_stream_type(self, attr): assert_is_instance(getattr(sys, attr), ThreadLocalStream) <|reserved_special_token_0|> def test_bootstrap_print(self): assert_equal(print, pypsi_print) def test_restore_print(self): self.shell.restore() assert_equal(print, self.real_print) def test_restore_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_restore_stream_type, attr yield self._test_restore_stream_instance, attr def _test_restore_stream_type(self, attr): self.shell.restore() assert_not_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_restore_stream_instance(self, attr): self.shell.restore() assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr)) <|reserved_special_token_1|> 
<|reserved_special_token_0|> class PypsiTestShell(Shell): pass class TestShellBootstrap(object): def setUp(self): self.real_stdout = sys.stdout self.real_stderr = sys.stderr self.real_stdin = sys.stdin self.real_print = print self.shell = PypsiTestShell() def tearDown(self): self.shell.restore() def test_bootstrap_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_bootstrap_stream_type, attr yield self._test_bootstrap_stream_instance, attr def _test_bootstrap_stream_type(self, attr): assert_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_bootstrap_stream_instance(self, attr): assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self, 'real_' + attr)) def test_bootstrap_print(self): assert_equal(print, pypsi_print) def test_restore_print(self): self.shell.restore() assert_equal(print, self.real_print) def test_restore_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_restore_stream_type, attr yield self._test_restore_stream_instance, attr def _test_restore_stream_type(self, attr): self.shell.restore() assert_not_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_restore_stream_instance(self, attr): self.shell.restore() assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr)) <|reserved_special_token_1|> import sys from pypsi.pipes import ThreadLocalStream from pypsi.shell import Shell from pypsi.core import pypsi_print from nose.tools import * class PypsiTestShell(Shell): pass class TestShellBootstrap(object): def setUp(self): self.real_stdout = sys.stdout self.real_stderr = sys.stderr self.real_stdin = sys.stdin self.real_print = print self.shell = PypsiTestShell() def tearDown(self): self.shell.restore() def test_bootstrap_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_bootstrap_stream_type, attr yield self._test_bootstrap_stream_instance, attr def _test_bootstrap_stream_type(self, attr): assert_is_instance(getattr(sys, attr), ThreadLocalStream) def 
_test_bootstrap_stream_instance(self, attr): assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self, 'real_' + attr)) def test_bootstrap_print(self): assert_equal(print, pypsi_print) def test_restore_print(self): self.shell.restore() assert_equal(print, self.real_print) def test_restore_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_restore_stream_type, attr yield self._test_restore_stream_instance, attr def _test_restore_stream_type(self, attr): self.shell.restore() assert_not_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_restore_stream_instance(self, attr): self.shell.restore() assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))
flexible
{ "blob_id": "1983340b3ce7ba8b631ba090871bea1ef7044943", "index": 9333, "step-1": "<mask token>\n\n\nclass TestShellBootstrap(object):\n <mask token>\n\n def tearDown(self):\n self.shell.restore()\n <mask token>\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n <mask token>\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-2": "<mask token>\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n 
assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-3": "<mask token>\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-4": "import sys\nfrom pypsi.pipes import ThreadLocalStream\nfrom pypsi.shell import Shell\nfrom pypsi.core import pypsi_print\nfrom nose.tools import *\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield 
self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-5": null, "step-ids": [ 7, 10, 12, 13 ] }
[ 7, 10, 12, 13 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = [path('google/login', views.google_login), path( 'google/callback/', views.google_callback), path( 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name= 'google_login_todjango')] <|reserved_special_token_1|> from django.contrib import admin from django.urls import path, include from accounts import views urlpatterns = [path('google/login', views.google_login), path( 'google/callback/', views.google_callback), path( 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name= 'google_login_todjango')] <|reserved_special_token_1|> from django.contrib import admin from django.urls import path, include from accounts import views urlpatterns = [ path('google/login', views.google_login), path('google/callback/', views.google_callback), path('accounts/google/login/finish/', views.GoogleLogin.as_view(), name = 'google_login_todjango'), ]
flexible
{ "blob_id": "68319663aad13b562e56b8ee25f25c7b548417df", "index": 4739, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('google/login', views.google_login), path(\n 'google/callback/', views.google_callback), path(\n 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name=\n 'google_login_todjango')]\n", "step-3": "from django.contrib import admin\nfrom django.urls import path, include\nfrom accounts import views\nurlpatterns = [path('google/login', views.google_login), path(\n 'google/callback/', views.google_callback), path(\n 'accounts/google/login/finish/', views.GoogleLogin.as_view(), name=\n 'google_login_todjango')]\n", "step-4": "from django.contrib import admin\nfrom django.urls import path, include\n\nfrom accounts import views\n\nurlpatterns = [\n path('google/login', views.google_login),\n path('google/callback/', views.google_callback),\n path('accounts/google/login/finish/', views.GoogleLogin.as_view(), name = 'google_login_todjango'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python3 from flask import Flask, request from flask_restplus import Resource, Api, fields from pymongo import MongoClient from bson.objectid import ObjectId import requests, datetime, re #------------- CONFIG CONSTANTS -------------# DEBUG = True MAX_PAGE_LIMIT = 2 COLLECTION = 'indicators' DB_CONFIG = { 'dbuser': 'z5113243', 'dbpassword': 'badpassword01', 'mlab_inst': 'ds239071', 'dbname': 'cs9321_ass2' } #------------- API INITIALISATION -------------# db = None # initialised in main app = Flask(__name__) app.config.SWAGGER_UI_DOC_EXPANSION = 'list' api = Api( app, title='Assignment 2 - COMP9321 - Chris Joy (z5113243)', description='In this assignment, we\'re asked to develop ' \ 'a Flask-Restplus data service that allows a client to ' \ 'read and store some publicly available economic indicator ' \ 'data for countries around the world, and allow the consumers ' \ 'to access the data through a REST API.' ) indicator_model = api.model(COLLECTION, { 'indicator_id': fields.String(required=True, title='An Indicator ', description='http://api.worldbank.org/v2/indicators', example='NY.GDP.MKTP.CD'), }) parser = api.parser() parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \ 'where k is between 1 and 100. Eg. 
top10, bottom40') #------------- HELPER FUNCTIONS -------------# def mlab_client(dbuser, dbpassword, mlab_inst, dbname): return MongoClient( f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}' )[dbname] def api_url(indicator, date='2012:2017', fmt='json', page=1): return 'http://api.worldbank.org/v2/countries/all/indicators/' \ f'{indicator}?date={date}&format={fmt}&page={page}' # Recursively build an array containing indicator data def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT): response = requests.get(api_url(indicator=indicator, page=page)).json() if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'): return 'Invalid indicator' if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']: return prevRes+response[1] return get_indicator_data( indicator=indicator, page=response[0]['page']+1, prevRes=prevRes+response[1], max_pages=max_pages, ) # Restructure indicator entry according to spec def format_collection_entry(indicator_data): return { 'country': indicator_data['country']['value'], 'date': indicator_data['date'], 'value': indicator_data['value'], } # Transform to top<k>/bottom<k> queries to array indexes def query_to_index(query, arr_size): try: match = re.search(r'^(bottom|top)\d+$', query).group() order = re.search(r'^(bottom|top)', match).group() length = int(re.search(r'\d+$', match).group()) if order == 'top': return slice(length) elif order == 'bottom': return slice(arr_size-length, arr_size) else: return slice(arr_size) except: return slice(arr_size) #------------- QUESTION ROUTES -------------# @api.route(f'/{COLLECTION}', endpoint=COLLECTION) class CollectionIndex(Resource): @api.doc(description='[Q1] Import a collection from the data service.') @api.response(200, 'Successfully retrieved collection.') @api.response(201, 'Successfully created collection.') @api.response(400, 'Unable to create / retrieve collection.') 
@api.expect(indicator_model) def post(self): body = request.json # Indicator hasn't been specified in body (400) if not body['indicator_id']: return { 'message': 'Please specify an indicator.' }, 400 # Retrieve indicator from database (200) existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']}) if existing_collection: return { 'location': f'/{COLLECTION}/{str(existing_collection["_id"])}', 'collection_id': str(existing_collection['_id']), 'creation_time': str(existing_collection['creation_time']), 'indicator': existing_collection['indicator'], }, 200 # From now onwards we need to obtain data from the Worldbank API indicator_data = get_indicator_data(body['indicator_id']) # Valid indicator hasn't been specified (400) if indicator_data == 'Invalid indicator': return { 'message': 'Please specify a valid indicator.' }, 400 # Create and retrieve indicator from Worldbank API (201) collection = { 'indicator': indicator_data[0]['indicator']['id'], 'indicator_value': indicator_data[0]['indicator']['value'], 'creation_time': datetime.datetime.utcnow(), 'entries': [format_collection_entry(entry) for entry in indicator_data], } created_collection = db[COLLECTION].insert_one(collection) return { 'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}', 'collection_id': str(created_collection.inserted_id), 'creation_time': str(collection['creation_time']), 'indicator': collection['indicator'], }, 201 @api.doc(description='[Q3] Retrieve the list of available collections.') @api.response(200, 'Successfully retreieved collections.') @api.response(400, 'Unable to retreive collections.') def get(self): try: collections = db[COLLECTION].find() except: return { 'message': 'Unable to retrieve collections.' 
}, 400 return [{ 'location': f'/{COLLECTION}/{str(doc["_id"])}', 'collection_id': str(doc['_id']), 'creation_time': str(doc['creation_time']), 'indicator': doc['indicator'], } for doc in collections], 200 @api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id') @api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.') class CollectionsById(Resource): @api.doc(description='[Q2] Deleting a collection with the data service.') @api.response(200, 'Successfully removed collection.') @api.response(404, 'Unable to find collection.') @api.response(400, 'Unable to remove collection.') def delete(self, collection_id): # Check if collection exists if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}): return { 'message': 'Unable to find collection.' }, 404 # Remove collection from db try: db[COLLECTION].delete_one({'_id': ObjectId(collection_id)}) except: return { 'message': 'Unable to remove collection.' }, 400 return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200 @api.doc(description='[Q4] Retrieve a collection.') @api.response(200, 'Successfully retreived collection.') @api.response(404, 'Unable to retreive collection.') def get(self, collection_id): try: collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)}) except: return { 'message': 'Unable to find collection' }, 404 return { 'collection_id': str(collection['_id']), 'indicator': collection['indicator'], 'indicator_value': collection['indicator_value'], 'creation_time': str(collection['creation_time']), 'entries': collection['entries'], }, 200 @api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate') @api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.') @api.param('year', 'Year ranging from 2012 to 2017.') @api.param('country', 'Country identifier (eg. 
Arab World)') class CollectionByCountryYear(Resource): @api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.') @api.response(200, 'Successfully retrieved economic indicator for given a country and year.') @api.response(400, 'Unable to retrieve indicator entry.') @api.response(404, 'Unable to find collection.') def get(self, collection_id, year, country): try: collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)}) except: return { 'message': 'Unable to find collection' }, 404 # Create a filtered list containing entries that match params filtered_entries = [ entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year ] if len(filtered_entries) == 0: return {'message': 'Unable to find specific indicator entry ' \ f'for country=\'{country}\' and year=\'{year}\'.'}, 400 return { 'collection_id': str(collection['_id']), 'indicator': collection['indicator'], **filtered_entries[0], }, 200 @api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom') @api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.') @api.param('year', 'Year ranging from 2012 to 2017.') class CollectionByTopBottom(Resource): @api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.') @api.response(200, 'Successfully retreived economic indicator values.') @api.response(404, 'Unable to find collection.') @api.expect(parser) def get(self, collection_id, year): query = request.args.get('q') try: collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)}) except: return { 'message': 'Unable to find collection' }, 404 filtered_entries = [ entry for entry in collection['entries'] if entry['date'] == year ] if not query: return { 'indicator': collection['indicator'], 'indicator_value': collection['indicator_value'], 'entries': filtered_entries, }, 200 return { 'indicator': collection['indicator'], 'indicator_value': 
collection['indicator_value'], 'entries': sorted( filtered_entries, key=lambda k: k['value'], reverse=True )[query_to_index(query, len(filtered_entries))], }, 200 if __name__ == '__main__': db = mlab_client( dbuser=DB_CONFIG['dbuser'], dbpassword=DB_CONFIG['dbpassword'], mlab_inst=DB_CONFIG['mlab_inst'], dbname=DB_CONFIG['dbname'] ) app.run(debug=DEBUG)
normal
{ "blob_id": "75958b48a3372b56e072a0caa468171ab6b99eb6", "index": 8917, "step-1": "<mask token>\n\n\n@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\n@api.param('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': collection['indicator'], 
'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\n<mask token>\n\n\n@api.route(f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': 
datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = 
db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\n@api.param('country', 'Country identifier (eg. Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n 
@api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return (\n f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'\n )\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\ndef format_collection_entry(indicator_data):\n return {'country': indicator_data['country']['value'], 'date':\n indicator_data['date'], 'value': indicator_data['value']}\n\n\ndef query_to_index(query, 
arr_size):\n try:\n match = re.search('^(bottom|top)\\\\d+$', query).group()\n order = re.search('^(bottom|top)', match).group()\n length = int(re.search('\\\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size - length, arr_size)\n else:\n return slice(arr_size)\n except:\n return slice(arr_size)\n\n\n@api.route(f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of 
available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n 
f'{COLLECTION}_countrydate')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\n@api.param('country', 'Country identifier (eg. Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == 
year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n", "step-4": "<mask token>\nparser.add_argument('q', help=\n 'Query param. Expected format: top<k> / bottom<k>, where k is between 1 and 100. Eg. top10, bottom40'\n )\n\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return (\n f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'\n )\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\ndef format_collection_entry(indicator_data):\n return {'country': indicator_data['country']['value'], 'date':\n indicator_data['date'], 'value': indicator_data['value']}\n\n\ndef query_to_index(query, arr_size):\n try:\n match = re.search('^(bottom|top)\\\\d+$', query).group()\n order = re.search('^(bottom|top)', match).group()\n length = int(re.search('\\\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size - length, arr_size)\n else:\n return 
slice(arr_size)\n except:\n return slice(arr_size)\n\n\n@api.route(f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': 
f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\n@api.param('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': collection['indicator'], 
'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\nif __name__ == '__main__':\n db = mlab_client(dbuser=DB_CONFIG['dbuser'], dbpassword=DB_CONFIG[\n 'dbpassword'], mlab_inst=DB_CONFIG['mlab_inst'], dbname=DB_CONFIG[\n 'dbname'])\n app.run(debug=DEBUG)\n", "step-5": "#!/usr/bin/env python3\nfrom flask import Flask, request\nfrom flask_restplus import Resource, Api, fields\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport requests, datetime, re\n\n#------------- CONFIG CONSTANTS -------------#\n\nDEBUG = True\nMAX_PAGE_LIMIT = 2\nCOLLECTION = 'indicators'\nDB_CONFIG = {\n 'dbuser': 'z5113243',\n 'dbpassword': 'badpassword01',\n 'mlab_inst': 'ds239071',\n 'dbname': 'cs9321_ass2' \n}\n\n#------------- API INITIALISATION -------------#\n\ndb = None # initialised in main\napp = Flask(__name__)\napp.config.SWAGGER_UI_DOC_EXPANSION = 'list'\napi = Api(\n app,\n title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',\n description='In this assignment, we\\'re asked to develop ' \\\n 'a Flask-Restplus data service that allows a client to ' \\\n 'read and store some publicly available economic indicator ' \\\n 'data for countries around the world, and allow the consumers ' \\\n 'to access the data through a REST API.'\n)\nindicator_model = api.model(COLLECTION, {\n 'indicator_id': fields.String(required=True,\n title='An Indicator ',\n description='http://api.worldbank.org/v2/indicators',\n example='NY.GDP.MKTP.CD'),\n})\nparser = api.parser()\nparser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \\\n 'where k is between 1 and 100. Eg. 
top10, bottom40')\n\n#------------- HELPER FUNCTIONS -------------#\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return 'http://api.worldbank.org/v2/countries/all/indicators/' \\\n f'{indicator}?date={date}&format={fmt}&page={page}'\n\n# Recursively build an array containing indicator data\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:\n return prevRes+response[1]\n return get_indicator_data(\n indicator=indicator,\n page=response[0]['page']+1,\n prevRes=prevRes+response[1],\n max_pages=max_pages,\n )\n\n# Restructure indicator entry according to spec\ndef format_collection_entry(indicator_data):\n return {\n 'country': indicator_data['country']['value'],\n 'date': indicator_data['date'],\n 'value': indicator_data['value'],\n }\n\n# Transform to top<k>/bottom<k> queries to array indexes\ndef query_to_index(query, arr_size):\n try:\n match = re.search(r'^(bottom|top)\\d+$', query).group()\n order = re.search(r'^(bottom|top)', match).group()\n length = int(re.search(r'\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size-length, arr_size)\n else:\n return slice(arr_size)\n except:\n return slice(arr_size)\n\n#------------- QUESTION ROUTES -------------#\n\n@api.route(f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 
'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n # Indicator hasn't been specified in body (400)\n if not body['indicator_id']:\n return { 'message': 'Please specify an indicator.' }, 400\n # Retrieve indicator from database (200)\n existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})\n if existing_collection:\n return {\n 'location': f'/{COLLECTION}/{str(existing_collection[\"_id\"])}',\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator'],\n }, 200\n # From now onwards we need to obtain data from the Worldbank API\n indicator_data = get_indicator_data(body['indicator_id'])\n # Valid indicator hasn't been specified (400)\n if indicator_data == 'Invalid indicator':\n return { 'message': 'Please specify a valid indicator.' }, 400\n # Create and retrieve indicator from Worldbank API (201)\n collection = {\n 'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(),\n 'entries': [format_collection_entry(entry) for entry in indicator_data],\n }\n created_collection = db[COLLECTION].insert_one(collection)\n return {\n 'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']),\n 'indicator': collection['indicator'],\n }, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return { 'message': 'Unable to retrieve collections.' 
}, 400\n return [{\n 'location': f'/{COLLECTION}/{str(doc[\"_id\"])}',\n 'collection_id': str(doc['_id']),\n 'creation_time': str(doc['creation_time']),\n 'indicator': doc['indicator'],\n } for doc in collections], 200\n\n@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n # Check if collection exists\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return { 'message': 'Unable to find collection.' }, 404\n # Remove collection from db\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to remove collection.' }, 400\n return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n return {\n 'collection_id': str(collection['_id']),\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'creation_time': str(collection['creation_time']),\n 'entries': collection['entries'],\n }, 200\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\n@api.param('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n @api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200, 'Successfully retrieved economic indicator for given a country and year.')\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n # Create a filtered list containing entries that match params\n filtered_entries = [\n entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year\n ]\n if len(filtered_entries) == 0:\n return {'message': 'Unable to find specific indicator entry ' \\\n f'for country=\\'{country}\\' and year=\\'{year}\\'.'}, 400\n return {\n 'collection_id': str(collection['_id']),\n 'indicator': collection['indicator'],\n **filtered_entries[0],\n }, 200\n\n@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')\n@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\n@api.param('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n @api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n filtered_entries = [\n entry for entry in collection['entries'] if entry['date'] == year\n ]\n if not query:\n return {\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'entries': 
filtered_entries,\n }, 200\n return {\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'entries': sorted(\n filtered_entries,\n key=lambda k: k['value'],\n reverse=True\n )[query_to_index(query, len(filtered_entries))],\n }, 200\n\nif __name__ == '__main__':\n db = mlab_client(\n dbuser=DB_CONFIG['dbuser'],\n dbpassword=DB_CONFIG['dbpassword'],\n mlab_inst=DB_CONFIG['mlab_inst'],\n dbname=DB_CONFIG['dbname']\n )\n app.run(debug=DEBUG)", "step-ids": [ 7, 11, 15, 16, 19 ] }
[ 7, 11, 15, 16, 19 ]
<|reserved_special_token_0|> class MaoyanSpider(object): def __init__(self): self.url = 'https://maoyan.com/board/4?offset={}' def get_html(self, url): headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=url, headers=headers) res = request.urlopen(req) html = res.read().decode() return html <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_comment(self, two_link): two_html = self.get_html(two_link) with open('test.html', 'w') as f: f.write(two_html) re_bds = '<div class="comment-content">(.*?)</div>' comment_list = self.re_func(re_bds, two_html) return comment_list def save_image(self, two_link, name): two_html = self.get_html(two_link) re_bds = ( '<div class="img.*?"><img class="default-img" data-src="(.*?)" alt=""></div>' ) link_list = self.re_func(re_bds, two_html) print(link_list) directory = '/home/tarena/images/' + name + '/' if not os.path.exists(directory): os.makedirs(directory) for link in link_list: headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=link, headers=headers) res = request.urlopen(req) html = res.read() filename = directory + link.split('@')[0][-10:] with open(filename, 'wb') as f: f.write(html) time.sleep(random.randint(1, 3)) def run(self): for offset in range(0, 21, 10): url = self.url.format(offset) self.parse_html(url) time.sleep(random.randint(1, 2)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MaoyanSpider(object): def __init__(self): self.url = 'https://maoyan.com/board/4?offset={}' def get_html(self, url): headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=url, headers=headers) res = request.urlopen(req) html = res.read().decode() return html def re_func(self, re_bds, html): pattern = re.compile(re_bds, re.S) r_list = pattern.findall(html) return r_list def parse_html(self, one_url): one_html = self.get_html(one_url) re_bds = ( '<div 
class="movie-item-info">.*?href="(.*?)".*?title="(.*?)".*?<p class="star">(.*?)</p>.*?class="releasetime">(.*?)</p>' ) r_list = self.re_func(re_bds, one_html) self.save_html(r_list) def save_html(self, r_list): item = {} for r in r_list: item['name'] = r[1].strip() item['star'] = r[2].strip()[3:] item['time'] = r[3].strip()[5:15] two_link = 'https://maoyan.com' + r[0] item['comment'] = self.get_comment(two_link) print(item) self.save_image(two_link, item['name']) def get_comment(self, two_link): two_html = self.get_html(two_link) with open('test.html', 'w') as f: f.write(two_html) re_bds = '<div class="comment-content">(.*?)</div>' comment_list = self.re_func(re_bds, two_html) return comment_list def save_image(self, two_link, name): two_html = self.get_html(two_link) re_bds = ( '<div class="img.*?"><img class="default-img" data-src="(.*?)" alt=""></div>' ) link_list = self.re_func(re_bds, two_html) print(link_list) directory = '/home/tarena/images/' + name + '/' if not os.path.exists(directory): os.makedirs(directory) for link in link_list: headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=link, headers=headers) res = request.urlopen(req) html = res.read() filename = directory + link.split('@')[0][-10:] with open(filename, 'wb') as f: f.write(html) time.sleep(random.randint(1, 3)) def run(self): for offset in range(0, 21, 10): url = self.url.format(offset) self.parse_html(url) time.sleep(random.randint(1, 2)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MaoyanSpider(object): def __init__(self): self.url = 'https://maoyan.com/board/4?offset={}' def get_html(self, url): headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=url, headers=headers) res = request.urlopen(req) html = res.read().decode() return html def re_func(self, re_bds, html): pattern = re.compile(re_bds, re.S) r_list = pattern.findall(html) return r_list def parse_html(self, one_url): one_html = 
self.get_html(one_url) re_bds = ( '<div class="movie-item-info">.*?href="(.*?)".*?title="(.*?)".*?<p class="star">(.*?)</p>.*?class="releasetime">(.*?)</p>' ) r_list = self.re_func(re_bds, one_html) self.save_html(r_list) def save_html(self, r_list): item = {} for r in r_list: item['name'] = r[1].strip() item['star'] = r[2].strip()[3:] item['time'] = r[3].strip()[5:15] two_link = 'https://maoyan.com' + r[0] item['comment'] = self.get_comment(two_link) print(item) self.save_image(two_link, item['name']) def get_comment(self, two_link): two_html = self.get_html(two_link) with open('test.html', 'w') as f: f.write(two_html) re_bds = '<div class="comment-content">(.*?)</div>' comment_list = self.re_func(re_bds, two_html) return comment_list def save_image(self, two_link, name): two_html = self.get_html(two_link) re_bds = ( '<div class="img.*?"><img class="default-img" data-src="(.*?)" alt=""></div>' ) link_list = self.re_func(re_bds, two_html) print(link_list) directory = '/home/tarena/images/' + name + '/' if not os.path.exists(directory): os.makedirs(directory) for link in link_list: headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=link, headers=headers) res = request.urlopen(req) html = res.read() filename = directory + link.split('@')[0][-10:] with open(filename, 'wb') as f: f.write(html) time.sleep(random.randint(1, 3)) def run(self): for offset in range(0, 21, 10): url = self.url.format(offset) self.parse_html(url) time.sleep(random.randint(1, 2)) if __name__ == '__main__': spider = MaoyanSpider() spider.run() <|reserved_special_token_1|> from urllib import request import time import random from useragents import ua_list import re import os class MaoyanSpider(object): def __init__(self): self.url = 'https://maoyan.com/board/4?offset={}' def get_html(self, url): headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=url, headers=headers) res = request.urlopen(req) html = res.read().decode() return html def re_func(self, 
re_bds, html): pattern = re.compile(re_bds, re.S) r_list = pattern.findall(html) return r_list def parse_html(self, one_url): one_html = self.get_html(one_url) re_bds = ( '<div class="movie-item-info">.*?href="(.*?)".*?title="(.*?)".*?<p class="star">(.*?)</p>.*?class="releasetime">(.*?)</p>' ) r_list = self.re_func(re_bds, one_html) self.save_html(r_list) def save_html(self, r_list): item = {} for r in r_list: item['name'] = r[1].strip() item['star'] = r[2].strip()[3:] item['time'] = r[3].strip()[5:15] two_link = 'https://maoyan.com' + r[0] item['comment'] = self.get_comment(two_link) print(item) self.save_image(two_link, item['name']) def get_comment(self, two_link): two_html = self.get_html(two_link) with open('test.html', 'w') as f: f.write(two_html) re_bds = '<div class="comment-content">(.*?)</div>' comment_list = self.re_func(re_bds, two_html) return comment_list def save_image(self, two_link, name): two_html = self.get_html(two_link) re_bds = ( '<div class="img.*?"><img class="default-img" data-src="(.*?)" alt=""></div>' ) link_list = self.re_func(re_bds, two_html) print(link_list) directory = '/home/tarena/images/' + name + '/' if not os.path.exists(directory): os.makedirs(directory) for link in link_list: headers = {'User-Agent': random.choice(ua_list)} req = request.Request(url=link, headers=headers) res = request.urlopen(req) html = res.read() filename = directory + link.split('@')[0][-10:] with open(filename, 'wb') as f: f.write(html) time.sleep(random.randint(1, 3)) def run(self): for offset in range(0, 21, 10): url = self.url.format(offset) self.parse_html(url) time.sleep(random.randint(1, 2)) if __name__ == '__main__': spider = MaoyanSpider() spider.run() <|reserved_special_token_1|> from urllib import request import time import random from useragents import ua_list import re import os class MaoyanSpider(object): def __init__(self): self.url = 'https://maoyan.com/board/4?offset={}' # 请求功能函数 - html def get_html(self,url): headers = { 
'User-Agent':random.choice(ua_list) } req = request.Request(url=url,headers=headers) res = request.urlopen(req) html = res.read().decode() return html # 解析功能函数 def re_func(self,re_bds,html): pattern = re.compile(re_bds,re.S) r_list = pattern.findall(html) return r_list # 解析一级页面 def parse_html(self,one_url): one_html = self.get_html(one_url) re_bds = '<div class="movie-item-info">.*?href="(.*?)".*?title="(.*?)".*?<p class="star">(.*?)</p>.*?class="releasetime">(.*?)</p>' # r_list: [('/films/1203','name','star','time'),()] r_list = self.re_func(re_bds,one_html) self.save_html(r_list) def save_html(self,r_list): item = {} # r: ('/films/1203','name','star','time') for r in r_list: item['name'] = r[1].strip() item['star'] = r[2].strip()[3:] item['time'] = r[3].strip()[5:15] two_link = 'https://maoyan.com' + r[0] item['comment'] = self.get_comment(two_link) print(item) self.save_image(two_link,item['name']) # 获取评论的函数 def get_comment(self,two_link): two_html = self.get_html(two_link) with open('test.html','w') as f: f.write(two_html) re_bds = '<div class="comment-content">(.*?)</div>' comment_list = self.re_func(re_bds,two_html) return comment_list # 保存图片函数 def save_image(self,two_link,name): two_html = self.get_html(two_link) re_bds = '<div class="img.*?"><img class="default-img" data-src="(.*?)" alt=""></div>' # link_list: ['src1','src2','src3'] link_list = self.re_func(re_bds,two_html) print(link_list) # 创建对应文件夹 directory = '/home/tarena/images/' + name + '/' if not os.path.exists(directory): os.makedirs(directory) for link in link_list: headers = {'User-Agent':random.choice(ua_list)} req = request.Request(url=link,headers=headers) res = request.urlopen(req) html = res.read() filename = directory + \ link.split('@')[0][-10:] with open(filename,'wb') as f: f.write(html) time.sleep(random.randint(1,3)) def run(self): for offset in range(0,21,10): url = self.url.format(offset) self.parse_html(url) time.sleep(random.randint(1,2)) if __name__ == '__main__': spider = 
MaoyanSpider() spider.run()
flexible
{ "blob_id": "7ef0bb3e8cbba4a29249a09cf7bc91e053411361", "index": 2225, "step-1": "<mask token>\n\n\nclass MaoyanSpider(object):\n\n def __init__(self):\n self.url = 'https://maoyan.com/board/4?offset={}'\n\n def get_html(self, url):\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = res.read().decode()\n return html\n <mask token>\n <mask token>\n <mask token>\n\n def get_comment(self, two_link):\n two_html = self.get_html(two_link)\n with open('test.html', 'w') as f:\n f.write(two_html)\n re_bds = '<div class=\"comment-content\">(.*?)</div>'\n comment_list = self.re_func(re_bds, two_html)\n return comment_list\n\n def save_image(self, two_link, name):\n two_html = self.get_html(two_link)\n re_bds = (\n '<div class=\"img.*?\"><img class=\"default-img\" data-src=\"(.*?)\" alt=\"\"></div>'\n )\n link_list = self.re_func(re_bds, two_html)\n print(link_list)\n directory = '/home/tarena/images/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n for link in link_list:\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=link, headers=headers)\n res = request.urlopen(req)\n html = res.read()\n filename = directory + link.split('@')[0][-10:]\n with open(filename, 'wb') as f:\n f.write(html)\n time.sleep(random.randint(1, 3))\n\n def run(self):\n for offset in range(0, 21, 10):\n url = self.url.format(offset)\n self.parse_html(url)\n time.sleep(random.randint(1, 2))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MaoyanSpider(object):\n\n def __init__(self):\n self.url = 'https://maoyan.com/board/4?offset={}'\n\n def get_html(self, url):\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = res.read().decode()\n return html\n\n def re_func(self, re_bds, html):\n pattern = re.compile(re_bds, re.S)\n r_list = pattern.findall(html)\n return 
r_list\n\n def parse_html(self, one_url):\n one_html = self.get_html(one_url)\n re_bds = (\n '<div class=\"movie-item-info\">.*?href=\"(.*?)\".*?title=\"(.*?)\".*?<p class=\"star\">(.*?)</p>.*?class=\"releasetime\">(.*?)</p>'\n )\n r_list = self.re_func(re_bds, one_html)\n self.save_html(r_list)\n\n def save_html(self, r_list):\n item = {}\n for r in r_list:\n item['name'] = r[1].strip()\n item['star'] = r[2].strip()[3:]\n item['time'] = r[3].strip()[5:15]\n two_link = 'https://maoyan.com' + r[0]\n item['comment'] = self.get_comment(two_link)\n print(item)\n self.save_image(two_link, item['name'])\n\n def get_comment(self, two_link):\n two_html = self.get_html(two_link)\n with open('test.html', 'w') as f:\n f.write(two_html)\n re_bds = '<div class=\"comment-content\">(.*?)</div>'\n comment_list = self.re_func(re_bds, two_html)\n return comment_list\n\n def save_image(self, two_link, name):\n two_html = self.get_html(two_link)\n re_bds = (\n '<div class=\"img.*?\"><img class=\"default-img\" data-src=\"(.*?)\" alt=\"\"></div>'\n )\n link_list = self.re_func(re_bds, two_html)\n print(link_list)\n directory = '/home/tarena/images/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n for link in link_list:\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=link, headers=headers)\n res = request.urlopen(req)\n html = res.read()\n filename = directory + link.split('@')[0][-10:]\n with open(filename, 'wb') as f:\n f.write(html)\n time.sleep(random.randint(1, 3))\n\n def run(self):\n for offset in range(0, 21, 10):\n url = self.url.format(offset)\n self.parse_html(url)\n time.sleep(random.randint(1, 2))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass MaoyanSpider(object):\n\n def __init__(self):\n self.url = 'https://maoyan.com/board/4?offset={}'\n\n def get_html(self, url):\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = 
res.read().decode()\n return html\n\n def re_func(self, re_bds, html):\n pattern = re.compile(re_bds, re.S)\n r_list = pattern.findall(html)\n return r_list\n\n def parse_html(self, one_url):\n one_html = self.get_html(one_url)\n re_bds = (\n '<div class=\"movie-item-info\">.*?href=\"(.*?)\".*?title=\"(.*?)\".*?<p class=\"star\">(.*?)</p>.*?class=\"releasetime\">(.*?)</p>'\n )\n r_list = self.re_func(re_bds, one_html)\n self.save_html(r_list)\n\n def save_html(self, r_list):\n item = {}\n for r in r_list:\n item['name'] = r[1].strip()\n item['star'] = r[2].strip()[3:]\n item['time'] = r[3].strip()[5:15]\n two_link = 'https://maoyan.com' + r[0]\n item['comment'] = self.get_comment(two_link)\n print(item)\n self.save_image(two_link, item['name'])\n\n def get_comment(self, two_link):\n two_html = self.get_html(two_link)\n with open('test.html', 'w') as f:\n f.write(two_html)\n re_bds = '<div class=\"comment-content\">(.*?)</div>'\n comment_list = self.re_func(re_bds, two_html)\n return comment_list\n\n def save_image(self, two_link, name):\n two_html = self.get_html(two_link)\n re_bds = (\n '<div class=\"img.*?\"><img class=\"default-img\" data-src=\"(.*?)\" alt=\"\"></div>'\n )\n link_list = self.re_func(re_bds, two_html)\n print(link_list)\n directory = '/home/tarena/images/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n for link in link_list:\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=link, headers=headers)\n res = request.urlopen(req)\n html = res.read()\n filename = directory + link.split('@')[0][-10:]\n with open(filename, 'wb') as f:\n f.write(html)\n time.sleep(random.randint(1, 3))\n\n def run(self):\n for offset in range(0, 21, 10):\n url = self.url.format(offset)\n self.parse_html(url)\n time.sleep(random.randint(1, 2))\n\n\nif __name__ == '__main__':\n spider = MaoyanSpider()\n spider.run()\n", "step-4": "from urllib import request\nimport time\nimport random\nfrom useragents import 
ua_list\nimport re\nimport os\n\n\nclass MaoyanSpider(object):\n\n def __init__(self):\n self.url = 'https://maoyan.com/board/4?offset={}'\n\n def get_html(self, url):\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = res.read().decode()\n return html\n\n def re_func(self, re_bds, html):\n pattern = re.compile(re_bds, re.S)\n r_list = pattern.findall(html)\n return r_list\n\n def parse_html(self, one_url):\n one_html = self.get_html(one_url)\n re_bds = (\n '<div class=\"movie-item-info\">.*?href=\"(.*?)\".*?title=\"(.*?)\".*?<p class=\"star\">(.*?)</p>.*?class=\"releasetime\">(.*?)</p>'\n )\n r_list = self.re_func(re_bds, one_html)\n self.save_html(r_list)\n\n def save_html(self, r_list):\n item = {}\n for r in r_list:\n item['name'] = r[1].strip()\n item['star'] = r[2].strip()[3:]\n item['time'] = r[3].strip()[5:15]\n two_link = 'https://maoyan.com' + r[0]\n item['comment'] = self.get_comment(two_link)\n print(item)\n self.save_image(two_link, item['name'])\n\n def get_comment(self, two_link):\n two_html = self.get_html(two_link)\n with open('test.html', 'w') as f:\n f.write(two_html)\n re_bds = '<div class=\"comment-content\">(.*?)</div>'\n comment_list = self.re_func(re_bds, two_html)\n return comment_list\n\n def save_image(self, two_link, name):\n two_html = self.get_html(two_link)\n re_bds = (\n '<div class=\"img.*?\"><img class=\"default-img\" data-src=\"(.*?)\" alt=\"\"></div>'\n )\n link_list = self.re_func(re_bds, two_html)\n print(link_list)\n directory = '/home/tarena/images/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n for link in link_list:\n headers = {'User-Agent': random.choice(ua_list)}\n req = request.Request(url=link, headers=headers)\n res = request.urlopen(req)\n html = res.read()\n filename = directory + link.split('@')[0][-10:]\n with open(filename, 'wb') as f:\n f.write(html)\n time.sleep(random.randint(1, 3))\n\n def 
run(self):\n for offset in range(0, 21, 10):\n url = self.url.format(offset)\n self.parse_html(url)\n time.sleep(random.randint(1, 2))\n\n\nif __name__ == '__main__':\n spider = MaoyanSpider()\n spider.run()\n", "step-5": "from urllib import request\nimport time\nimport random\nfrom useragents import ua_list\nimport re\nimport os\n\nclass MaoyanSpider(object):\n def __init__(self):\n self.url = 'https://maoyan.com/board/4?offset={}'\n\n # 请求功能函数 - html\n def get_html(self,url):\n headers = {\n 'User-Agent':random.choice(ua_list)\n }\n req = request.Request(url=url,headers=headers)\n res = request.urlopen(req)\n html = res.read().decode()\n\n return html\n\n # 解析功能函数\n def re_func(self,re_bds,html):\n pattern = re.compile(re_bds,re.S)\n r_list = pattern.findall(html)\n\n return r_list\n\n # 解析一级页面\n def parse_html(self,one_url):\n one_html = self.get_html(one_url)\n re_bds = '<div class=\"movie-item-info\">.*?href=\"(.*?)\".*?title=\"(.*?)\".*?<p class=\"star\">(.*?)</p>.*?class=\"releasetime\">(.*?)</p>'\n # r_list: [('/films/1203','name','star','time'),()]\n r_list = self.re_func(re_bds,one_html)\n self.save_html(r_list)\n\n def save_html(self,r_list):\n item = {}\n # r: ('/films/1203','name','star','time')\n for r in r_list:\n item['name'] = r[1].strip()\n item['star'] = r[2].strip()[3:]\n item['time'] = r[3].strip()[5:15]\n two_link = 'https://maoyan.com' + r[0]\n item['comment'] = self.get_comment(two_link)\n print(item)\n self.save_image(two_link,item['name'])\n\n # 获取评论的函数\n def get_comment(self,two_link):\n two_html = self.get_html(two_link)\n\n with open('test.html','w') as f:\n f.write(two_html)\n\n re_bds = '<div class=\"comment-content\">(.*?)</div>'\n comment_list = self.re_func(re_bds,two_html)\n\n return comment_list\n\n\n # 保存图片函数\n def save_image(self,two_link,name):\n two_html = self.get_html(two_link)\n\n re_bds = '<div class=\"img.*?\"><img class=\"default-img\" data-src=\"(.*?)\" alt=\"\"></div>'\n # link_list: ['src1','src2','src3']\n link_list 
= self.re_func(re_bds,two_html)\n\n print(link_list)\n\n # 创建对应文件夹\n directory = '/home/tarena/images/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n for link in link_list:\n headers = {'User-Agent':random.choice(ua_list)}\n req = request.Request(url=link,headers=headers)\n res = request.urlopen(req)\n html = res.read()\n\n filename = directory + \\\n link.split('@')[0][-10:]\n with open(filename,'wb') as f:\n f.write(html)\n time.sleep(random.randint(1,3))\n\n def run(self):\n for offset in range(0,21,10):\n url = self.url.format(offset)\n self.parse_html(url)\n time.sleep(random.randint(1,2))\n\nif __name__ == '__main__':\n spider = MaoyanSpider()\n spider.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 6, 9, 10, 11, 12 ] }
[ 6, 9, 10, 11, 12 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/7/10 14:26 # @Author : MengHe # @File : c1.py # @Software: PyCharm import re a = 'Python|Java|C#|C++|Kotlin|JavaScript' r = re.findall('Java', a) print(r) # print(a.index('Python') > -1) # print('Kotlin' in a)
normal
{ "blob_id": "e05f545ca969e0c2330779ed54a33a594d6ebb25", "index": 2501, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(r)\n", "step-3": "<mask token>\na = 'Python|Java|C#|C++|Kotlin|JavaScript'\nr = re.findall('Java', a)\nprint(r)\n", "step-4": "import re\na = 'Python|Java|C#|C++|Kotlin|JavaScript'\nr = re.findall('Java', a)\nprint(r)\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/7/10 14:26\n# @Author : MengHe\n# @File : c1.py\n# @Software: PyCharm\nimport re\n\na = 'Python|Java|C#|C++|Kotlin|JavaScript'\nr = re.findall('Java', a)\nprint(r)\n\n# print(a.index('Python') > -1)\n# print('Kotlin' in a)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(t) <|reserved_special_token_1|> <|reserved_special_token_0|> t = dt.datetime.today() print(t) <|reserved_special_token_1|> <|reserved_special_token_0|> import datetime as dt t = dt.datetime.today() print(t) <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Tue Dec 31 05:48:57 2019 @author: emama """ import datetime as dt t = dt.datetime.today() print(t)
flexible
{ "blob_id": "b1fbc8f3616b70e5d35898fd895c37e838c87dc9", "index": 9293, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(t)\n", "step-3": "<mask token>\nt = dt.datetime.today()\nprint(t)\n", "step-4": "<mask token>\nimport datetime as dt\nt = dt.datetime.today()\nprint(t)\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 31 05:48:57 2019\r\n\r\n@author: emama\r\n\"\"\"\r\n\r\nimport datetime as dt\r\n\r\nt = dt.datetime.today()\r\nprint(t)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
__author__ = 'laispace.com' import sqlite3 dbname = 'alloyteam.db' def createTable(): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute('''CREATE TABLE IF NOT EXISTS posts (url text primary key, title text, date text, authorLink text, authorName text, view text) ''') conn.commit() conn.close() def createPosts(posts): conn = sqlite3.connect(dbname) c = conn.cursor() for post in posts: c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post) # c.executemany('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', posts) conn.commit() conn.close() def readPosts(): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute('SELECT * FROM posts') posts = c.fetchall() conn.commit() conn.close() return posts def dropTable(): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute('DROP table IF EXISTS posts') conn.commit() conn.close()
normal
{ "blob_id": "602df213c0d588404597c566001cd9c96b5034d0", "index": 4530, "step-1": "<mask token>\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n", "step-2": "<mask token>\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n", "step-3": "__author__ = 'laispace.com'\n<mask token>\ndbname = 'alloyteam.db'\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', 
post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n", "step-4": "__author__ = 'laispace.com'\nimport sqlite3\ndbname = 'alloyteam.db'\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n", "step-5": "__author__ = 'laispace.com'\n\nimport sqlite3\n\ndbname = 'alloyteam.db'\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n ''')\n conn.commit()\n conn.close()\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n # c.executemany('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', posts)\n conn.commit()\n conn.close()\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM 
posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# -*- encoding: utf-8 -*- """ Created by eniocc at 11/10/2020 """ import ctypes from py_dss_interface.models.Base import Base class MonitorsS(Base): """ This interface can be used to read/write certain properties of the active DSS object. The structure of the interface is as follows: CStr MonitorsS(int32_t Parameter, CStr Argument); This interface returns a string according to the number sent in the variable “parameter”. The parameter can be one of the following. """ def monitors_file_name(self) -> str: """Returns the name of the CSV file associated with active monitor.""" result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0), ctypes.c_int32(0))) return result.value.decode('ascii') def monitors_read_name(self) -> str: """Returns the active Monitor object by name.""" result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1), ctypes.c_int32(0))) return result.value.decode('ascii') def monitors_write_name(self, argument) -> str: """Sets the active Monitor object by name.""" result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2), argument.encode('ascii'))) return result.value.decode('ascii') def monitors_read_element(self) -> str: """Returns the full name of element being monitored by the active Monitor.""" result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3), ctypes.c_int32(0))) return result.value.decode('ascii') def monitors_write_element(self, argument) -> str: """Sets the full name of element being monitored by the active Monitor.""" result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4), argument.encode('ascii'))) return result.value.decode('ascii')
normal
{ "blob_id": "f6f0dcb806fbc1e14c0907dd500fdc6a609a19f7", "index": 5598, "step-1": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n", "step-2": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n", "step-3": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) ->str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = 
ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) ->str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n", "step-4": "<mask token>\n\n\nclass MonitorsS(Base):\n \"\"\"\n This interface can be used to read/write certain properties of the active DSS object.\n\n The structure of the interface is as follows:\n CStr MonitorsS(int32_t Parameter, CStr Argument);\n\n This interface returns a string according to the number sent in the variable “parameter”. 
The parameter can be\n one of the following.\n \"\"\"\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) ->str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) ->str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n", "step-5": "# -*- encoding: utf-8 -*-\n\"\"\"\n Created by eniocc at 11/10/2020\n\"\"\"\nimport ctypes\n\nfrom py_dss_interface.models.Base import Base\n\n\nclass MonitorsS(Base):\n \"\"\"\n This interface can be used to read/write certain properties of the active DSS object.\n\n The structure of the interface is as follows:\n CStr MonitorsS(int32_t Parameter, CStr Argument);\n\n This interface returns a string according to the number sent in the variable “parameter”. 
The parameter can be\n one of the following.\n \"\"\"\n\n def monitors_file_name(self) -> str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) -> str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) -> str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2), argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) -> str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) -> str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4), argument.encode('ascii')))\n return result.value.decode('ascii')\n", "step-ids": [ 3, 4, 6, 7, 9 ] }
[ 3, 4, 6, 7, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if re.findall(pattern, n): print(n, 'は電話番号の形式です') else: print(n, 'は電話番号の形式ではありません') <|reserved_special_token_1|> <|reserved_special_token_0|> n = input('電話番号を入力してください>>') pattern = ( '[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}') if re.findall(pattern, n): print(n, 'は電話番号の形式です') else: print(n, 'は電話番号の形式ではありません') <|reserved_special_token_1|> import re n = input('電話番号を入力してください>>') pattern = ( '[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}') if re.findall(pattern, n): print(n, 'は電話番号の形式です') else: print(n, 'は電話番号の形式ではありません') <|reserved_special_token_1|> import re n = input("電話番号を入力してください>>") pattern = r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' if re.findall(pattern, n): print(n, "は電話番号の形式です") else: print(n, "は電話番号の形式ではありません")
flexible
{ "blob_id": "7ea81f83f556fcc55c9c9d44bcd63c583829fc08", "index": 8977, "step-1": "<mask token>\n", "step-2": "<mask token>\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n", "step-3": "<mask token>\nn = input('電話番号を入力してください>>')\npattern = (\n '[\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-]{0,1}[0-9]{3,4}')\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n", "step-4": "import re\nn = input('電話番号を入力してください>>')\npattern = (\n '[\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-]{0,1}[0-9]{3,4}')\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n", "step-5": "import re\n\nn = input(\"電話番号を入力してください>>\")\npattern = r'[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}'\nif re.findall(pattern, n):\n print(n, \"は電話番号の形式です\")\nelse:\n print(n, \"は電話番号の形式ではありません\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# from https://web.archive.org/web/20121220025758/http://xkcd.com/actuary.py.txt # script written by Randall Munroe. Most comments by Emily Cain (although there were a few brief ones explaining how the program worked before I looked at it) # Summary of program (by Emily): # this program takes inputs of current ages and genders to calculate the probability any or all of those people will die in a certain time period. # if you input a year (after the current year) or a number of years (less than the current year) the program will calculate the probability that anyone or everyone will die in that time period. Either way, the program also determines the number of years for certain probabilities of anyone or everyone dying. # The program outputs these calculations in an easily readable form. #!/usr/bin/python import sys import datetime # The following description was written by Randall (the programmer). # Calculates death probabilities based on Social Security # actuarial tables for a given group of people. # Run with a list of ages/genders and an optional timespan (or year in the future): # python actuary.py 63m 80m 75f 73m 10 # or: # python actuary.py 63m 80m 75f 73m 2022 # This will give statistics for that group, including # various probabilities over 10 years. Years can be # ommitted and it will still give some statistics. # If "Years" exceeds the current calendar year, it will be interpreted as a date. #this is a list of lists. 
The outer list contains two inner lists, each of which is an actuarial table--one for men and one for women bothtables=[[0.00756, 0.00052, 0.00035, 0.00025, 0.00020, 0.00018, 0.00017, 0.00016, 0.00014, 0.00011, 0.00009, 0.00010, 0.00015, 0.00027, 0.00043, 0.00061, 0.00078, 0.00094, 0.00107, 0.00119, 0.00131, 0.00142, 0.00149, 0.00151, 0.00148, 0.00143, 0.00140, 0.00138, 0.00137, 0.00139, 0.00141, 0.00143, 0.00147, 0.00152, 0.00158, 0.00165, 0.00174, 0.00186, 0.00202, 0.00221, 0.00243, 0.00267, 0.00291, 0.00317, 0.00344, 0.00373, 0.00405, 0.00441, 0.00480, 0.00524, 0.00573, 0.00623, 0.00671, 0.00714, 0.00756, 0.00800, 0.00853, 0.00917, 0.00995, 0.01086, 0.01190, 0.01301, 0.01413, 0.01522, 0.01635, 0.01760, 0.01906, 0.02073, 0.02265, 0.02482, 0.02729, 0.03001, 0.03289, 0.03592, 0.03918, 0.04292, 0.04715, 0.05173, 0.05665, 0.06206, 0.06821, 0.07522, 0.08302, 0.09163, 0.10119, 0.11183, 0.12367, 0.13679, 0.15124, 0.16702, 0.18414, 0.20255, 0.22224, 0.24314, 0.26520, 0.28709, 0.30846, 0.32891, 0.34803, 0.36544, 0.38371, 0.40289, 0.42304, 0.44419, 0.46640, 0.48972, 0.51421, 0.53992, 0.56691, 0.59526, 0.62502, 0.65628, 0.68909, 0.72354, 0.75972, 0.79771, 0.83759, 0.87947, 0.92345, 0.96962], [0.00615, 0.00041, 0.00025, 0.00018, 0.00015, 0.00014, 0.00014, 0.00013, 0.00012, 0.00011, 0.00010, 0.00010, 0.00012, 0.00016, 0.00021, 0.00028, 0.00034, 0.00039, 0.00042, 0.00043, 0.00045, 0.00047, 0.00048, 0.00049, 0.00050, 0.00051, 0.00052, 0.00053, 0.00056, 0.00059, 0.00063, 0.00068, 0.00073, 0.00078, 0.00084, 0.00091, 0.00098, 0.00108, 0.00118, 0.00130, 0.00144, 0.00158, 0.00173, 0.00189, 0.00206, 0.00225, 0.00244, 0.00264, 0.00285, 0.00306, 0.00329, 0.00355, 0.00382, 0.00409, 0.00437, 0.00468, 0.00505, 0.00549, 0.00603, 0.00665, 0.00736, 0.00813, 0.00890, 0.00967, 0.01047, 0.01136, 0.01239, 0.01357, 0.01491, 0.01641, 0.01816, 0.02008, 0.02210, 0.02418, 0.02641, 0.02902, 0.03206, 0.03538, 0.03899, 0.04301, 0.04766, 0.05307, 0.05922, 0.06618, 0.07403, 0.08285, 0.09270, 0.10365, 
0.11574, 0.12899, 0.14343, 0.15907, 0.17591, 0.19393, 0.21312, 0.23254, 0.25193, 0.27097, 0.28933, 0.30670, 0.32510, 0.34460, 0.36528, 0.38720, 0.41043, 0.43505, 0.46116, 0.48883, 0.51816, 0.54925, 0.58220, 0.61714, 0.65416, 0.69341, 0.73502, 0.77912, 0.82587, 0.87542, 0.92345, 0.96962]] def deathprob(age, years): # a formula to determine the probability a given person will die, with a number of years as input #negative ages = female (this is Randall's comment) act=[] #this is a list that will hold the relevant actuarial tables, male or female if age<0: # if age is a negative number the person is female act=bothtables[1] # use the second table (females) age=-1*age # multiply age by -1 to make it positive else: act=bothtables[0] # use the first table (males) while(len(act)<int(age+years+2)): # slower/bloaiter but keeps things clean (Randall's comment) act.append(act[-1]**0.5) # I'm not sure what this does liveprob=1 i=0 iage=int(age) # age as integer fage=age%1 # fraction after age if it's a mixed number? maybe? while i<=years-1: #advance through this formula for each year between now and the date in question thisyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] #the probability they will die this year is equal to this formula liveprob*=1-thisyear # multiply the overall probability they will survive by the probability they will survive this year i+=1 if years%1: # Amortizes risk of dying over a partial year, which is (Randall's comment) # 1-P(living last full year)^(year fraction) (Randall's comment) lastyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] lastyearlive=1-lastyear lastyearlive=lastyearlive**((years%1)) liveprob*=lastyearlive return 1-liveprob # return the probability they will die i.e. 
1 - the probability they wil live def proballdie(ages, years): # probability everyone in the list will die by a certain year, given the list "ages" and the number of years probsliving=[] for i in ages: probsliving.append(1-deathprob(i, years)) prod=1 for i in probsliving: prod*=(1-i) return prod def probanydie(ages, years): #returns the probability that anyone in the list dies probsliving=[] for i in ages: probsliving.append(1-deathprob(i, years)) prod=1 for i in probsliving: prod*=i return 1-prod def calcexp(ages, prob, flag): #calculates life expectancy based on the ages list, the probability of dying (5, 50, 95%), and whether or not it is "flagged" as calculating the probability that all or any die i=0 for interval in (10, 1, 0.1, 0.01): #loops through the numbers at left probs=0 while(probs<prob): #while the variable "probs" is less than the input probability i+=interval #increase i by 10, 1, .1 or .01 if flag==0: #if we want to know the probability that the entire group will die probs=proballdie(ages, i) else: probs=probanydie(ages, i) #if we want to know the probability that any member of the group will die i-=interval #subtract the current interval from i before returning to start the for loop again with the subtracted i return i #returns a float ages=[] # creates an empty list that will hold the ages of everyone you want to know about # print sys.argv[1:] for arg in sys.argv[1:]: #for each argument you have entered except the first one (which is the script name) gender=1 years=1.0 if arg[-1]=='m' or arg[-1]=='M': #If the last character of the argument is M or m, then the person is male and we will use their age as a positive number try: ages.append(1*float(arg[:-1])) #try adding all but the last character of the argument to the ages table. The last character indicates gender, preceding characters indicate age. 
except: print "Error parsing argument", arg elif arg[-1]=='f' or arg[-1]=='F': #if the last character of the argument is F or f, then the person is female and we will use their age as a negative number try: ages.append(-1*float(arg[:-1])) #try adding all but the last character of the argument, times -1 because female, to the ages table. The last character indicates gender, preceding characters indicate age. except: print "Error parsing argument", arg else: #if the input appears to be neither a male or female person with the age, it is probably the time period we want to know about try: years=float(arg) break except: print "Error parsing argument", arg # shows user how to enter input correctly if they do it wrong if not sys.argv[1:]: print "The format is 'actuary.py 15m 80f 23', with a list of ages and a number of years to run the projections." raise SystemExit if not ages: print "No ages specified. Format is 12m, 17f, etc." raise SystemExit # print "Ages:", ages # print "Years:", years (datetime.date.today()+datetime.timedelta(days=365.242191*1)).year #adding date object to a timedelta object to get a date object. finds its year. does ??? with it someone_years=[calcexp(ages, 0.05, 1), # this returns a list of floats, probably. Or strings???? used as strings below calcexp(ages, 0.5, 1), calcexp(ages, 0.95, 1)] someone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[0])).year, # takes the above numbers and uses them to calculate a date based on today's date + total time. (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[1])).year, (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[2])).year] print "There is a 5% chance of someone dying within", someone_years[0], "years (by", str(someone_dates[0])+")." #concatenates to avoid automatic space; must convert to string first. print "There is a 50% chance of someone dying within", someone_years[1], "years (by", str(someone_dates[1])+")." 
print "There is a 95% chance of someone dying within", someone_years[2], "years (by", str(someone_dates[2])+")." print "" if len(ages)>1: #only makes sense to do an everyone statement if there are multiple people. everyone_years=[calcexp(ages, 0.05, 0), calcexp(ages, 0.5, 0), calcexp(ages, 0.95, 0)] everyone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[0])).year, (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[1])).year, (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[2])).year] print "There is a 5% chance of everyone dying within", everyone_years[0], "years (by", str(everyone_dates[0])+")." print "There is a 50% chance of everyone dying within", everyone_years[1], "years (by", str(everyone_dates[1])+")." print "There is a 95% chance of everyone dying within", everyone_years[2], "years (by", str(everyone_dates[2])+")." if years: # if the user has input year yearword="years" if years==1: # changes from plural to singular if "years" is 1, so it says "1 year" instead of "1 years" yearword="year" print "" if years>datetime.date.today().year: # Program assumes years under current year are a number of years, and years over current year refer to the date. If input years is greater than the current year... 
years=years-datetime.date.today().year #...recalculate the "years" variable to be the number of years in the future that year is if len(ages)>1: #if there is more than one person being analyzed, we will look at the probability of everyone dying p=100*proballdie(ages, years) # converts probability into a percentage by multiplying by 100 printable="" # the percentage we will print out if p<0.001: # if the percentage is really low/almost impossible printable="<0.001" elif p>99.99: # if the percentage is really high/almost guaranteed printable=">99.99" else: printable=str(p)[:5] # if the percentage is not at one of the above extremes we want to see the actual percentage in our output print "Probability of all dying in", years, yearword+": ", printable+"%" #outputs the info in an easily readable format p=100*probanydie(ages, years) #regardless of how many people there are we will want to know the probability anyone dies printable="" # the percentage we will print out if p<0.001: printable="<0.001" # if the percentage is really low/almost impossible elif p>99.99: printable=">99.99" # if the percentage is really high/almost guaranteed print p # I don't know why he is choosing to do this, it seems odd/inconsistent with rest of program else: printable=str(p)[:5] # convert p to a string and assign the first 5 characters to printable print "Probability of a death within", years, yearword+":", printable+"%" #outputs the info in an easily readable format raise SystemExit #leaves the program
normal
{ "blob_id": "f0702c8555ef07aac9e667c35b5b5fd85820ec54", "index": 4355, "step-1": "# from https://web.archive.org/web/20121220025758/http://xkcd.com/actuary.py.txt\n\n# script written by Randall Munroe. Most comments by Emily Cain (although there were a few brief ones explaining how the program worked before I looked at it)\n\n# Summary of program (by Emily):\n\n# this program takes inputs of current ages and genders to calculate the probability any or all of those people will die in a certain time period. \n\n# if you input a year (after the current year) or a number of years (less than the current year) the program will calculate the probability that anyone or everyone will die in that time period. Either way, the program also determines the number of years for certain probabilities of anyone or everyone dying. \n\n# The program outputs these calculations in an easily readable form. \n\n\n#!/usr/bin/python\nimport sys\nimport datetime\n\n# The following description was written by Randall (the programmer). \n\n# Calculates death probabilities based on Social Security\n# actuarial tables for a given group of people.\n\n# Run with a list of ages/genders and an optional timespan (or year in the future):\n\n# python actuary.py 63m 80m 75f 73m 10\n\n# or:\n\n# python actuary.py 63m 80m 75f 73m 2022\n\n# This will give statistics for that group, including\n# various probabilities over 10 years. Years can be\n# ommitted and it will still give some statistics.\n# If \"Years\" exceeds the current calendar year, it will be interpreted as a date.\n\n\n#this is a list of lists. 
The outer list contains two inner lists, each of which is an actuarial table--one for men and one for women\nbothtables=[[0.00756, 0.00052, 0.00035, 0.00025, 0.00020, 0.00018, 0.00017, 0.00016, 0.00014, 0.00011, 0.00009, 0.00010, 0.00015, 0.00027, 0.00043, 0.00061, 0.00078, 0.00094, 0.00107, 0.00119, 0.00131, 0.00142, 0.00149, 0.00151, 0.00148, 0.00143, 0.00140, 0.00138, 0.00137, 0.00139, 0.00141, 0.00143, 0.00147, 0.00152, 0.00158, 0.00165, 0.00174, 0.00186, 0.00202, 0.00221, 0.00243, 0.00267, 0.00291, 0.00317, 0.00344, 0.00373, 0.00405, 0.00441, 0.00480, 0.00524, 0.00573, 0.00623, 0.00671, 0.00714, 0.00756, 0.00800, 0.00853, 0.00917, 0.00995, 0.01086, 0.01190, 0.01301, 0.01413, 0.01522, 0.01635, 0.01760, 0.01906, 0.02073, 0.02265, 0.02482, 0.02729, 0.03001, 0.03289, 0.03592, 0.03918, 0.04292, 0.04715, 0.05173, 0.05665, 0.06206, 0.06821, 0.07522, 0.08302, 0.09163, 0.10119, 0.11183, 0.12367, 0.13679, 0.15124, 0.16702, 0.18414, 0.20255, 0.22224, 0.24314, 0.26520, 0.28709, 0.30846, 0.32891, 0.34803, 0.36544, 0.38371, 0.40289, 0.42304, 0.44419, 0.46640, 0.48972, 0.51421, 0.53992, 0.56691, 0.59526, 0.62502, 0.65628, 0.68909, 0.72354, 0.75972, 0.79771, 0.83759, 0.87947, 0.92345, 0.96962], [0.00615, 0.00041, 0.00025, 0.00018, 0.00015, 0.00014, 0.00014, 0.00013, 0.00012, 0.00011, 0.00010, 0.00010, 0.00012, 0.00016, 0.00021, 0.00028, 0.00034, 0.00039, 0.00042, 0.00043, 0.00045, 0.00047, 0.00048, 0.00049, 0.00050, 0.00051, 0.00052, 0.00053, 0.00056, 0.00059, 0.00063, 0.00068, 0.00073, 0.00078, 0.00084, 0.00091, 0.00098, 0.00108, 0.00118, 0.00130, 0.00144, 0.00158, 0.00173, 0.00189, 0.00206, 0.00225, 0.00244, 0.00264, 0.00285, 0.00306, 0.00329, 0.00355, 0.00382, 0.00409, 0.00437, 0.00468, 0.00505, 0.00549, 0.00603, 0.00665, 0.00736, 0.00813, 0.00890, 0.00967, 0.01047, 0.01136, 0.01239, 0.01357, 0.01491, 0.01641, 0.01816, 0.02008, 0.02210, 0.02418, 0.02641, 0.02902, 0.03206, 0.03538, 0.03899, 0.04301, 0.04766, 0.05307, 0.05922, 0.06618, 0.07403, 0.08285, 0.09270, 0.10365, 
0.11574, 0.12899, 0.14343, 0.15907, 0.17591, 0.19393, 0.21312, 0.23254, 0.25193, 0.27097, 0.28933, 0.30670, 0.32510, 0.34460, 0.36528, 0.38720, 0.41043, 0.43505, 0.46116, 0.48883, 0.51816, 0.54925, 0.58220, 0.61714, 0.65416, 0.69341, 0.73502, 0.77912, 0.82587, 0.87542, 0.92345, 0.96962]]\n\ndef deathprob(age, years): # a formula to determine the probability a given person will die, with a number of years as input \n #negative ages = female (this is Randall's comment)\n act=[] #this is a list that will hold the relevant actuarial tables, male or female \n if age<0: # if age is a negative number the person is female \n act=bothtables[1] # use the second table (females)\n age=-1*age # multiply age by -1 to make it positive\n else:\n act=bothtables[0] # use the first table (males)\n while(len(act)<int(age+years+2)): # slower/bloaiter but keeps things clean (Randall's comment)\n act.append(act[-1]**0.5) # I'm not sure what this does \n liveprob=1 \n i=0\n iage=int(age) # age as integer \n fage=age%1 # fraction after age if it's a mixed number? maybe? \n while i<=years-1: #advance through this formula for each year between now and the date in question \n thisyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] #the probability they will die this year is equal to this formula\n liveprob*=1-thisyear # multiply the overall probability they will survive by the probability they will survive this year \n i+=1\n if years%1: # Amortizes risk of dying over a partial year, which is (Randall's comment)\n # 1-P(living last full year)^(year fraction) (Randall's comment)\n lastyear=(1-fage)*act[iage+i]+fage*act[iage+i+1] \n lastyearlive=1-lastyear\n lastyearlive=lastyearlive**((years%1))\n liveprob*=lastyearlive\n return 1-liveprob # return the probability they will die i.e. 
1 - the probability they wil live \n\ndef proballdie(ages, years): # probability everyone in the list will die by a certain year, given the list \"ages\" and the number of years \n probsliving=[]\n for i in ages:\n probsliving.append(1-deathprob(i, years))\n prod=1\n for i in probsliving:\n prod*=(1-i)\n return prod\n\ndef probanydie(ages, years): #returns the probability that anyone in the list dies\n probsliving=[]\n for i in ages: \n probsliving.append(1-deathprob(i, years))\n prod=1\n for i in probsliving:\n prod*=i\n return 1-prod\n\ndef calcexp(ages, prob, flag): #calculates life expectancy based on the ages list, the probability of dying (5, 50, 95%), and whether or not it is \"flagged\" as calculating the probability that all or any die \n i=0\n for interval in (10, 1, 0.1, 0.01): #loops through the numbers at left\n probs=0\n while(probs<prob): #while the variable \"probs\" is less than the input probability\n i+=interval #increase i by 10, 1, .1 or .01\n if flag==0: #if we want to know the probability that the entire group will die \n probs=proballdie(ages, i)\n else:\n probs=probanydie(ages, i) #if we want to know the probability that any member of the group will die\n i-=interval #subtract the current interval from i before returning to start the for loop again with the subtracted i \n return i #returns a float \n\nages=[] # creates an empty list that will hold the ages of everyone you want to know about\n# print sys.argv[1:]\nfor arg in sys.argv[1:]: #for each argument you have entered except the first one (which is the script name)\n gender=1\n years=1.0\n if arg[-1]=='m' or arg[-1]=='M': #If the last character of the argument is M or m, then the person is male and we will use their age as a positive number\n try:\n ages.append(1*float(arg[:-1])) #try adding all but the last character of the argument to the ages table. The last character indicates gender, preceding characters indicate age. 
\n except:\n print \"Error parsing argument\", arg\n elif arg[-1]=='f' or arg[-1]=='F': #if the last character of the argument is F or f, then the person is female and we will use their age as a negative number \n try:\n ages.append(-1*float(arg[:-1])) #try adding all but the last character of the argument, times -1 because female, to the ages table. The last character indicates gender, preceding characters indicate age. \n except:\n print \"Error parsing argument\", arg\n else: #if the input appears to be neither a male or female person with the age, it is probably the time period we want to know about\n try:\n years=float(arg)\n break\n except:\n print \"Error parsing argument\", arg\n\n# shows user how to enter input correctly if they do it wrong\nif not sys.argv[1:]:\n print \"The format is 'actuary.py 15m 80f 23', with a list of ages and a number of years to run the projections.\"\n raise SystemExit\nif not ages:\n print \"No ages specified. Format is 12m, 17f, etc.\"\n raise SystemExit\n\n# print \"Ages:\", ages\n# print \"Years:\", years\n\n(datetime.date.today()+datetime.timedelta(days=365.242191*1)).year #adding date object to a timedelta object to get a date object. finds its year. does ??? with it \nsomeone_years=[calcexp(ages, 0.05, 1), # this returns a list of floats, probably. Or strings???? used as strings below\n calcexp(ages, 0.5, 1),\n calcexp(ages, 0.95, 1)]\nsomeone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[0])).year, # takes the above numbers and uses them to calculate a date based on today's date + total time. \n (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[1])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*someone_years[2])).year]\nprint \"There is a 5% chance of someone dying within\", someone_years[0], \"years (by\", str(someone_dates[0])+\").\" #concatenates to avoid automatic space; must convert to string first. 
\nprint \"There is a 50% chance of someone dying within\", someone_years[1], \"years (by\", str(someone_dates[1])+\").\"\nprint \"There is a 95% chance of someone dying within\", someone_years[2], \"years (by\", str(someone_dates[2])+\").\"\nprint \"\"\n\nif len(ages)>1: #only makes sense to do an everyone statement if there are multiple people. \n everyone_years=[calcexp(ages, 0.05, 0),\n calcexp(ages, 0.5, 0),\n calcexp(ages, 0.95, 0)]\n everyone_dates=[(datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[0])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[1])).year,\n (datetime.date.today()+datetime.timedelta(days=365.242191*everyone_years[2])).year]\n print \"There is a 5% chance of everyone dying within\", everyone_years[0], \"years (by\", str(everyone_dates[0])+\").\"\n print \"There is a 50% chance of everyone dying within\", everyone_years[1], \"years (by\", str(everyone_dates[1])+\").\"\n print \"There is a 95% chance of everyone dying within\", everyone_years[2], \"years (by\", str(everyone_dates[2])+\").\"\n\n\nif years: # if the user has input year \n yearword=\"years\" \n if years==1: # changes from plural to singular if \"years\" is 1, so it says \"1 year\" instead of \"1 years\"\n yearword=\"year\"\n\n print \"\"\n if years>datetime.date.today().year: # Program assumes years under current year are a number of years, and years over current year refer to the date. 
If input years is greater than the current year...\n years=years-datetime.date.today().year #...recalculate the \"years\" variable to be the number of years in the future that year is\n if len(ages)>1: #if there is more than one person being analyzed, we will look at the probability of everyone dying \n p=100*proballdie(ages, years) # converts probability into a percentage by multiplying by 100 \n printable=\"\" # the percentage we will print out \n if p<0.001: # if the percentage is really low/almost impossible \n printable=\"<0.001\"\n elif p>99.99: # if the percentage is really high/almost guaranteed \n printable=\">99.99\" \n else:\n printable=str(p)[:5] # if the percentage is not at one of the above extremes we want to see the actual percentage in our output\n print \"Probability of all dying in\", years, yearword+\": \", printable+\"%\" #outputs the info in an easily readable format\n p=100*probanydie(ages, years) #regardless of how many people there are we will want to know the probability anyone dies \n printable=\"\" # the percentage we will print out \n if p<0.001:\n printable=\"<0.001\" # if the percentage is really low/almost impossible\n elif p>99.99:\n printable=\">99.99\" # if the percentage is really high/almost guaranteed \n print p # I don't know why he is choosing to do this, it seems odd/inconsistent with rest of program \n else:\n printable=str(p)[:5] # convert p to a string and assign the first 5 characters to printable \n print \"Probability of a death within\", years, yearword+\":\", printable+\"%\" #outputs the info in an easily readable format\nraise SystemExit #leaves the program \n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
def ip_address(address): new_address = "" split_address = address.split(".") seprator = "[.]" new_address = seprator.join(split_address) return new_address if __name__ == "__main__": ipaddress = ip_address("192.168.1.1") print(ipaddress)
normal
{ "blob_id": "7ef62e5545930ab13312f8ae1ea70a74386d8bfa", "index": 1231, "step-1": "<mask token>\n", "step-2": "def ip_address(address):\n new_address = ''\n split_address = address.split('.')\n seprator = '[.]'\n new_address = seprator.join(split_address)\n return new_address\n\n\n<mask token>\n", "step-3": "def ip_address(address):\n new_address = ''\n split_address = address.split('.')\n seprator = '[.]'\n new_address = seprator.join(split_address)\n return new_address\n\n\nif __name__ == '__main__':\n ipaddress = ip_address('192.168.1.1')\n print(ipaddress)\n", "step-4": "def ip_address(address):\n new_address = \"\"\n split_address = address.split(\".\")\n seprator = \"[.]\"\n new_address = seprator.join(split_address)\n return new_address\n\n\nif __name__ == \"__main__\":\n ipaddress = ip_address(\"192.168.1.1\")\n print(ipaddress)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for file in file_list_excel: """遍历所有excel文件,删除空行""" file_path = os.path.join(file_dir, file) df = pd.read_excel(file_path) data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any') new_list.append(data) <|reserved_special_token_0|> df_all.to_excel('new_file.xlsx', index=False) print('Ok, 3秒后退出。') time.sleep(3) <|reserved_special_token_1|> <|reserved_special_token_0|> file_dir = os.getcwd() file_list_all = os.listdir(file_dir) file_list_excel = [item for item in file_list_all if '.xlsx' in item or '.xls' in item] new_list = [] for file in file_list_excel: """遍历所有excel文件,删除空行""" file_path = os.path.join(file_dir, file) df = pd.read_excel(file_path) data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any') new_list.append(data) df_all = pd.concat(new_list) df_all.to_excel('new_file.xlsx', index=False) print('Ok, 3秒后退出。') time.sleep(3) <|reserved_special_token_1|> import os import time import pandas as pd file_dir = os.getcwd() file_list_all = os.listdir(file_dir) file_list_excel = [item for item in file_list_all if '.xlsx' in item or '.xls' in item] new_list = [] for file in file_list_excel: """遍历所有excel文件,删除空行""" file_path = os.path.join(file_dir, file) df = pd.read_excel(file_path) data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any') new_list.append(data) df_all = pd.concat(new_list) df_all.to_excel('new_file.xlsx', index=False) print('Ok, 3秒后退出。') time.sleep(3) <|reserved_special_token_1|> # -*- coding: utf-8 -*- import os import time import pandas as pd file_dir = os.getcwd() # 获取当前工作目录 file_list_all = os.listdir(file_dir) # 获取目录下的所有文件名 file_list_excel = [item for item in file_list_all if ('.xlsx' in item) or ('.xls' in item)] # 清洗非excel文件 new_list = [] # 空列表用于存放下面各个清洗后的表格 for file in file_list_excel: '''遍历所有excel文件,删除空行''' file_path = os.path.join(file_dir, file) # 连接而成当前文件的完整路径 df = pd.read_excel(file_path) # 读取当前excel文件 data = pd.DataFrame(df.iloc[:, 
:]).dropna(axis=0, how='any') # 对空行进行删除 new_list.append(data) # 删除空行后存入列表 df_all = pd.concat(new_list) # 将所有删除空行的表格进行合并 df_all.to_excel('new_file.xlsx', index=False) # 将合并后的数据存到文件中 print('Ok, 3秒后退出。') time.sleep(3)
flexible
{ "blob_id": "ea646068d48a9a4b5a578a5fb1399d83a4812b02", "index": 1134, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor file in file_list_excel:\n \"\"\"遍历所有excel文件,删除空行\"\"\"\n file_path = os.path.join(file_dir, file)\n df = pd.read_excel(file_path)\n data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')\n new_list.append(data)\n<mask token>\ndf_all.to_excel('new_file.xlsx', index=False)\nprint('Ok, 3秒后退出。')\ntime.sleep(3)\n", "step-3": "<mask token>\nfile_dir = os.getcwd()\nfile_list_all = os.listdir(file_dir)\nfile_list_excel = [item for item in file_list_all if '.xlsx' in item or \n '.xls' in item]\nnew_list = []\nfor file in file_list_excel:\n \"\"\"遍历所有excel文件,删除空行\"\"\"\n file_path = os.path.join(file_dir, file)\n df = pd.read_excel(file_path)\n data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')\n new_list.append(data)\ndf_all = pd.concat(new_list)\ndf_all.to_excel('new_file.xlsx', index=False)\nprint('Ok, 3秒后退出。')\ntime.sleep(3)\n", "step-4": "import os\nimport time\nimport pandas as pd\nfile_dir = os.getcwd()\nfile_list_all = os.listdir(file_dir)\nfile_list_excel = [item for item in file_list_all if '.xlsx' in item or \n '.xls' in item]\nnew_list = []\nfor file in file_list_excel:\n \"\"\"遍历所有excel文件,删除空行\"\"\"\n file_path = os.path.join(file_dir, file)\n df = pd.read_excel(file_path)\n data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')\n new_list.append(data)\ndf_all = pd.concat(new_list)\ndf_all.to_excel('new_file.xlsx', index=False)\nprint('Ok, 3秒后退出。')\ntime.sleep(3)\n", "step-5": "# -*- coding: utf-8 -*-\nimport os\nimport time\nimport pandas as pd\n\nfile_dir = os.getcwd() # 获取当前工作目录\nfile_list_all = os.listdir(file_dir) # 获取目录下的所有文件名\nfile_list_excel = [item for item in file_list_all if ('.xlsx' in item) or ('.xls' in item)] # 清洗非excel文件\n\nnew_list = [] # 空列表用于存放下面各个清洗后的表格\nfor file in file_list_excel:\n '''遍历所有excel文件,删除空行'''\n file_path = os.path.join(file_dir, file) # 连接而成当前文件的完整路径\n df = 
pd.read_excel(file_path) # 读取当前excel文件\n data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any') # 对空行进行删除\n new_list.append(data) # 删除空行后存入列表\n\ndf_all = pd.concat(new_list) # 将所有删除空行的表格进行合并\ndf_all.to_excel('new_file.xlsx', index=False) # 将合并后的数据存到文件中\n\nprint('Ok, 3秒后退出。')\ntime.sleep(3)\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class VideoClassifier: def __init__(self, train_mode='late_fusion', video_model_path=None, time_step=16, base_path='/user/vlongobardi/AFEW/aligned/', feature_name='emobase2010_100', stride=1): self.time_step = time_step self.train_mode = train_mode self.feature_name = feature_name self.classes = classes self.lb = LabelBinarizer() self.lb.fit_transform(np.array(classes)) self.feature_num = 1582 self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40) self.stride = stride if video_model_path is not None: try: self.model = my_model() self.model.load_weights(video_model_path) print('VideoClassifier loaded successfully', video_model_path) except: print('Exception') else: t_files = glob.glob(base_path + 'Train' + '/*/*csv') v_files = glob.glob(base_path + 'Val' + '/*/*csv') self.csv_fusion = self.generate_feature(t_files, v_files) self.do_training() <|reserved_special_token_0|> <|reserved_special_token_0|> def load_early_csv(self, dataset): csv_early_fusion = {} print('Opening csv: features_path_early_fusion_' + dataset + '_' + self.feature_name + '.csv') with open('features_path_early_fusion_' + dataset + '_' + self. feature_name + '.csv', 'r') as f: f.readline() csv_reader = csv.reader(f) for clip_id, ground_truth, frame_label, audio_label in csv_reader: if clip_id not in csv_early_fusion: csv_early_fusion[clip_id] = [] csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label]) return csv_early_fusion <|reserved_special_token_0|> def early_gen_train(self, list_files, batch_size): c = 0 clip_ids = list(self.csv_fusion['train'].keys()) random.shuffle(clip_ids) while True: labels = [] features = [np.zeros((batch_size, self.time_step, self. feature_num)).astype('float'), np.zeros((batch_size, self. 
time_step, 1024)).astype('float')] for i in range(c, c + batch_size): clip_id = clip_ids[i] video_info = self.csv_fusion['train'][clip_id] ground_truth = video_info[0][0] start = random.randint(0, len(video_info) - self.time_step) for index, elem in enumerate(video_info[start:self. time_step + start]): _, frame_path, audio_path = elem if not isfile(frame_path): start += 1 if start >= len(video_info): raise continue frame_feature = np.load(frame_path) features[0][i - c][index] = np.array(from_arff_to_feture (audio_path)).reshape(self.feature_num) features[1][i - c][index] = frame_feature.reshape(1024) labels.append(ground_truth) c += batch_size if c + batch_size > len(clip_ids): c = 0 random.shuffle(clip_ids) labels = self.lb.transform(np.array(labels)).reshape(( batch_size, 7)) yield features, labels <|reserved_special_token_0|> <|reserved_special_token_0|> def get_validation_dim(self): if self.stride == 2: if 'full' in self.feature_name: return 141 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 114 elif '100' in self.feature_name: return 128 elif self.stride == 1: if 'full' in self.feature_name: return 76 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 63 elif '100' in self.feature_name: return 69 elif self.stride == self.time_step: return 0 def train(self, train_files, val_files, train_data, model): if train_data['opt'] == 'Adam': optimizer = Adam(lr=train_data['lr']) else: optimizer = SGD(lr=train_data['lr']) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.summary() train_gen = train_data['generator1'](train_files, train_data[ 'batch_size']) no_of_training_images = len(train_files) no_of_val_images = self.get_validation_dim() print('no_of_val_images:', no_of_val_images) val_gen = train_data['generator2'](val_files, train_data['batch_size']) model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt' ] + '_Model' + 
str(train_data['model_name'] ) + '_Feature' + self.feature_name + '_' + str(train_data[ 'iteration']) + '_' + self.train_mode model_name += 'stride' + str(self.stride) model_name += '.h5' def custom_scheduler(epoch): if epoch < 50: print(0.1) return 0.1 if epoch < 100: print(0.01) return 0.01 if epoch < 125: print(0.001) return 0.001 else: print(0.0001) return 0.0001 class CheckValCMCallback(keras.callbacks.Callback): def __init__(self, m, dim, validation_files, epoch): super().__init__() self.vc = m self.dim = dim self.val_files = validation_files self.epoch = epoch self.accs = [] def on_epoch_end(self, epoch, logs=None): csv_fusion = self.vc.load_early_csv('val') gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval') acc = self.model.evaluate_generator(gen, self.dim, workers=0) self.accs.append(acc) print('Evaluate:', acc) if self.epoch == epoch + 1: print('Validation_Accuracy =', self.accs) cb = [ModelCheckpoint(filepath=str( 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' + model_name), monitor='val_accuracy', save_weights_only=True), TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode + '/' + self.feature_name, write_graph=True, write_images=True)] cb += [LearningRateScheduler(custom_scheduler)] history = model.fit_generator(train_gen, validation_data=val_gen, epochs=train_data['epoch'], steps_per_epoch= no_of_training_images * 2 // train_data['batch_size'], validation_steps=no_of_val_images, workers=0, verbose=1, callbacks=cb) print('\n\nTrain_Accuracy =', history.history['accuracy']) print('\nVal_Accuracy =', history.history['val_accuracy']) print('\n\nTrain_Loss =', history.history['loss']) print('\nVal_Loss =', history.history['val_loss']) <|reserved_special_token_0|> def print_confusion_matrix(self, stride=1): """ IMPLEMENT FOR EARLY FUSION MISSING """ csv_fusion = {} predictions = [] ground_truths = [] if self.train_mode == 'early_fusion': csv_fusion = self.load_early_csv('val') print('CSV loaded', len(csv_fusion)) gen = 
self.early_gen_new_val(csv_fusion, 1, 'eval', stride) for x in gen: ground_truths.append(self.lb.inverse_transform(x[1])[0]) pred = self.model.predict(x[0]) pred = self.lb.inverse_transform(pred) predictions.append(pred[0]) self.print_stats(ground_truths, predictions, 'Video') else: with open('lables_late_fusion' + self.feature_name + '.csv', 'r' ) as f: f.readline() csv_reader = csv.reader(f) for row in csv_reader: csv_fusion[row[0]] = [row[1], row[2], row[3]] a_p = [] f_p = [] files = glob.glob('/user/vlongobardi/late_feature/' + self. feature_name + '/*/*csv') for file in files: clip_id = basename(file).split('.')[0] ground_truth, frame_pred, audio_pred = csv_fusion[clip_id] sample = np.append(self.lb.transform(np.array([audio_pred]) ), self.lb.transform(np.array([frame_pred]))) pred = self.model.predict(sample.reshape((1, 14))) pred = self.lb.inverse_transform(pred)[0] predictions.append(pred) a_p.append(audio_pred) f_p.append(frame_pred) ground_truths.append(ground_truth) self.print_stats(ground_truths, predictions, 'Video') self.print_stats(ground_truths, a_p, 'Audio') self.print_stats(ground_truths, f_p, 'Frame') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class VideoClassifier: def __init__(self, train_mode='late_fusion', video_model_path=None, time_step=16, base_path='/user/vlongobardi/AFEW/aligned/', feature_name='emobase2010_100', stride=1): self.time_step = time_step self.train_mode = train_mode self.feature_name = feature_name self.classes = classes self.lb = LabelBinarizer() self.lb.fit_transform(np.array(classes)) self.feature_num = 1582 self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40) self.stride = stride if video_model_path is not None: try: self.model = my_model() self.model.load_weights(video_model_path) print('VideoClassifier loaded successfully', video_model_path) except: print('Exception') else: t_files = glob.glob(base_path + 'Train' + '/*/*csv') v_files = glob.glob(base_path + 'Val' 
+ '/*/*csv') self.csv_fusion = self.generate_feature(t_files, v_files) self.do_training() <|reserved_special_token_0|> <|reserved_special_token_0|> def load_early_csv(self, dataset): csv_early_fusion = {} print('Opening csv: features_path_early_fusion_' + dataset + '_' + self.feature_name + '.csv') with open('features_path_early_fusion_' + dataset + '_' + self. feature_name + '.csv', 'r') as f: f.readline() csv_reader = csv.reader(f) for clip_id, ground_truth, frame_label, audio_label in csv_reader: if clip_id not in csv_early_fusion: csv_early_fusion[clip_id] = [] csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label]) return csv_early_fusion def _generate_data_for_early_fusion(self, files, name): if 'full' in self.feature_name: frame_to_discard = 0 else: window_size = int(self.feature_name.split('_')[1]) frame_to_discard = ceil(window_size / 2 / 40) my_csv = {} for file in tqdm(files): clip_id_temp = file.split('.')[0] base_path = clip_id_temp.replace('AFEW/aligned', 'early_feature/framefeature') + '*' frames_features_path = glob.glob(base_path) audio_features_path = glob.glob(base_path.replace( 'early_feature/framefeature', 'early_feature/' + self. feature_name)) frames_features_path.sort(key=lambda x: int(x.split('_')[-1]. 
split('.')[0])) if 'full' not in self.feature_name: audio_features_path.sort(key=lambda x: int(x.split('_')[-1] .split('.')[0])) ground_truth = basename(dirname(clip_id_temp)) clip_id = basename(clip_id_temp) frames_features_path = frames_features_path[frame_to_discard:] if len(frames_features_path) < 16: continue if len(audio_features_path ) < 16 and 'full' not in self.feature_name: continue for index, frame in enumerate(frames_features_path): if clip_id not in my_csv.keys(): my_csv[clip_id] = [] if 'full' not in self.feature_name: my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]]) else: my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]]) with open('features_path_early_fusion_' + name + '_' + self. feature_name + '.csv', 'w') as f: f.write('clip_id, ground_truth, frame_label, audio_label\n') for key in my_csv: for line in my_csv[key]: f.write(key + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\n') return my_csv def early_gen_train(self, list_files, batch_size): c = 0 clip_ids = list(self.csv_fusion['train'].keys()) random.shuffle(clip_ids) while True: labels = [] features = [np.zeros((batch_size, self.time_step, self. feature_num)).astype('float'), np.zeros((batch_size, self. time_step, 1024)).astype('float')] for i in range(c, c + batch_size): clip_id = clip_ids[i] video_info = self.csv_fusion['train'][clip_id] ground_truth = video_info[0][0] start = random.randint(0, len(video_info) - self.time_step) for index, elem in enumerate(video_info[start:self. 
time_step + start]): _, frame_path, audio_path = elem if not isfile(frame_path): start += 1 if start >= len(video_info): raise continue frame_feature = np.load(frame_path) features[0][i - c][index] = np.array(from_arff_to_feture (audio_path)).reshape(self.feature_num) features[1][i - c][index] = frame_feature.reshape(1024) labels.append(ground_truth) c += batch_size if c + batch_size > len(clip_ids): c = 0 random.shuffle(clip_ids) labels = self.lb.transform(np.array(labels)).reshape(( batch_size, 7)) yield features, labels def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1): """ stride 50% sul su tutti i file """ c = 0 labels = features = [] clip_ids = list(list_files.keys()) while True: for clip_id in tqdm(clip_ids): video_info = list_files[clip_id] ground_truth = video_info[0][0] for start in range(0, len(video_info) - self.time_step, self.time_step // stride): if c == 0: labels = [] features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'), np.zeros(( batch_size, self.time_step, 1024)).astype('float')] for index, elem in enumerate(video_info[start:self. time_step + start]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][c][index] = np.array(from_arff_to_feture (audio_path)).reshape(self.feature_num) features[1][c][index] = frame_feature.reshape(1024) labels.append(ground_truth) c += 1 if c == batch_size: c = 0 labels = self.lb.transform(np.array(labels)).reshape(( batch_size, 7)) yield features, labels if mode == 'eval': break def early_gen_test_clip(self, list_files, clip_id, stride=1): """ stride su singolo file, quindi va richiamato per ogni file """ ground_truth = list_files[0][0] start = 0 end = len(list_files) - self.time_step while True: labels = [] features = [np.zeros((1, self.time_step, self.feature_num)). astype('float'), np.zeros((1, self.time_step, 1024)).astype ('float')] for index, elem in enumerate(list_files[start:start + self. 
time_step]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][0][index] = np.array(from_arff_to_feture( audio_path)).reshape(self.feature_num) features[1][0][index] = frame_feature.reshape(1024) labels.append(ground_truth) start += self.time_step // stride if start >= end: break labels = self.lb.transform(np.array(labels)).reshape((1, 7)) yield features, labels def get_validation_dim(self): if self.stride == 2: if 'full' in self.feature_name: return 141 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 114 elif '100' in self.feature_name: return 128 elif self.stride == 1: if 'full' in self.feature_name: return 76 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 63 elif '100' in self.feature_name: return 69 elif self.stride == self.time_step: return 0 def train(self, train_files, val_files, train_data, model): if train_data['opt'] == 'Adam': optimizer = Adam(lr=train_data['lr']) else: optimizer = SGD(lr=train_data['lr']) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.summary() train_gen = train_data['generator1'](train_files, train_data[ 'batch_size']) no_of_training_images = len(train_files) no_of_val_images = self.get_validation_dim() print('no_of_val_images:', no_of_val_images) val_gen = train_data['generator2'](val_files, train_data['batch_size']) model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt' ] + '_Model' + str(train_data['model_name'] ) + '_Feature' + self.feature_name + '_' + str(train_data[ 'iteration']) + '_' + self.train_mode model_name += 'stride' + str(self.stride) model_name += '.h5' def custom_scheduler(epoch): if epoch < 50: print(0.1) return 0.1 if epoch < 100: print(0.01) return 0.01 if epoch < 125: print(0.001) return 0.001 else: print(0.0001) return 0.0001 class CheckValCMCallback(keras.callbacks.Callback): def __init__(self, m, dim, validation_files, epoch): super().__init__() 
self.vc = m self.dim = dim self.val_files = validation_files self.epoch = epoch self.accs = [] def on_epoch_end(self, epoch, logs=None): csv_fusion = self.vc.load_early_csv('val') gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval') acc = self.model.evaluate_generator(gen, self.dim, workers=0) self.accs.append(acc) print('Evaluate:', acc) if self.epoch == epoch + 1: print('Validation_Accuracy =', self.accs) cb = [ModelCheckpoint(filepath=str( 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' + model_name), monitor='val_accuracy', save_weights_only=True), TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode + '/' + self.feature_name, write_graph=True, write_images=True)] cb += [LearningRateScheduler(custom_scheduler)] history = model.fit_generator(train_gen, validation_data=val_gen, epochs=train_data['epoch'], steps_per_epoch= no_of_training_images * 2 // train_data['batch_size'], validation_steps=no_of_val_images, workers=0, verbose=1, callbacks=cb) print('\n\nTrain_Accuracy =', history.history['accuracy']) print('\nVal_Accuracy =', history.history['val_accuracy']) print('\n\nTrain_Loss =', history.history['loss']) print('\nVal_Loss =', history.history['val_loss']) def print_stats(self, ground_truths, predictions, name): cm = confusion_matrix(ground_truths, predictions, self.classes) print('###' + name + ' Results###\n') print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np. 
newaxis], decimals=3), self.classes) print('\n\n') print('Accuracy score: ', accuracy_score(ground_truths, predictions ), '\n\n') print( '#################################################################end###\n\n\n' ) def print_confusion_matrix(self, stride=1): """ IMPLEMENT FOR EARLY FUSION MISSING """ csv_fusion = {} predictions = [] ground_truths = [] if self.train_mode == 'early_fusion': csv_fusion = self.load_early_csv('val') print('CSV loaded', len(csv_fusion)) gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride) for x in gen: ground_truths.append(self.lb.inverse_transform(x[1])[0]) pred = self.model.predict(x[0]) pred = self.lb.inverse_transform(pred) predictions.append(pred[0]) self.print_stats(ground_truths, predictions, 'Video') else: with open('lables_late_fusion' + self.feature_name + '.csv', 'r' ) as f: f.readline() csv_reader = csv.reader(f) for row in csv_reader: csv_fusion[row[0]] = [row[1], row[2], row[3]] a_p = [] f_p = [] files = glob.glob('/user/vlongobardi/late_feature/' + self. 
def my_model():
    """Build the early-fusion network.

    Concatenates per-timestep frame features (1024-d) and audio features
    (1582-d), applies a time-distributed MLP with a 7-way softmax per step,
    then averages the per-step predictions into one clip-level prediction.

    Returns:
        A Keras ``Model`` with inputs ``[audio, frame]`` (each a length-16
        sequence) and a single (7,) softmax output.
    """
    r1, r2 = regularizers.l2(1e-05), regularizers.l2(1e-05)
    frame_input = Input(shape=(16, 1024))
    audio_input = Input(shape=(16, 1582))
    x = Concatenate(name='fusion1')([frame_input, audio_input])
    x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1,
                              name='ff_logit_lstm'))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2,
                              name='ff_logit'))(x)
    # Collapse the time axis by averaging the 16 per-frame predictions.
    x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)
    return Model([audio_input, frame_input], x)


class VideoClassifier:
    """Audio/video emotion classifier over AFEW clips (early or late fusion).

    In early-fusion mode the model consumes aligned sequences of frame
    features (``.npy``, 1024-d) and openSMILE audio features (``.arff``,
    1582-d); in late-fusion mode it combines pre-computed per-modality
    predictions read from csv.
    """

    def __init__(self, train_mode='late_fusion', video_model_path=None, time_step=16,
                 base_path='/user/vlongobardi/AFEW/aligned/',
                 feature_name='emobase2010_100', stride=1):
        """Either load trained weights (``video_model_path`` given) or start training.

        Args:
            train_mode: 'late_fusion' or 'early_fusion'.
            video_model_path: optional path to an .h5 weights file.
            time_step: sequence length fed to the network (16).
            base_path: root of the AFEW Train/Val csv tree.
            feature_name: audio feature set, '<name>_<window_ms>' or '<name>_full'.
            stride: window hop divisor used by the validation generators.
        """
        self.time_step = time_step
        self.train_mode = train_mode
        self.feature_name = feature_name
        self.classes = classes
        self.lb = LabelBinarizer()
        self.lb.fit_transform(np.array(classes))
        self.feature_num = 1582  # openSMILE emobase2010 vector size
        # Frames covered by half an audio window (40 ms per video frame).
        # NOTE(review): assumes feature_name is '<name>_<window_ms>'; a
        # '..._full' name would make int() raise here -- confirm with callers.
        self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)
        self.stride = stride
        if video_model_path is not None:
            try:
                self.model = my_model()
                self.model.load_weights(video_model_path)
                print('VideoClassifier loaded successfully', video_model_path)
            except Exception:  # was a bare ``except:``; never mask SystemExit/KeyboardInterrupt
                print('Exception')
        else:
            t_files = glob.glob(base_path + 'Train' + '/*/*csv')
            v_files = glob.glob(base_path + 'Val' + '/*/*csv')
            self.csv_fusion = self.generate_feature(t_files, v_files)
            self.do_training()

    def do_training(self):
        """Grid-search driver: trains one model per (model, opt, lr, iteration) combo."""
        skips = 0
        iters = 1
        bs = 16
        ep = 150
        opts = ['SGD']
        lrs = [0.01]
        models = [my_model]
        models_name = [x.__name__ for x in models]
        for index, model in enumerate(models):
            for opt in opts:
                for lr in lrs:
                    for iteration in range(iters):
                        if skips > 0:
                            # Lets a restarted run skip already-finished combos.
                            skips -= 1
                            continue
                        train_infos = {'iteration': iteration,
                                       'model_name': models_name[index],
                                       'batch_size': bs, 'epoch': ep,
                                       'lr': lr, 'opt': opt}
                        print("""
################################################################################
############################## ITERATION """ + str(iteration + 1) + ' of ' +
                              str(iters) + """ ###########################
######################################################""" +
                              """ ######################## epochs:""", ep,
                              'batch_size:', bs, '\nmodel:', models_name[index],
                              'in', models_name, '\nopt:', opt, 'in', opts,
                              '\nlr:', lr, 'in', lrs)
                        train_infos['generator1'] = self.early_gen_train
                        train_infos['generator2'] = self.early_gen_new_val
                        t_files, v_files = self.csv_fusion['train'], self.csv_fusion['val']
                        m = model()
                        self.train(t_files, v_files, train_infos, m)

    def generate_feature(self, t_files, v_files):
        """Build (or reload) the per-clip csv indexes pairing frame and audio features."""
        if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):
            print('\n##### GENERATING CSV FOR EARLY FUSION... #####')
            csv_early_fusion = {
                'train': self._generate_data_for_early_fusion(t_files, 'train'),
                'val': self._generate_data_for_early_fusion(v_files, 'val')
            }
            print('\n##### CSV GENERATED! #####')
        else:
            csv_early_fusion = {}
            for name in ['train', 'val']:
                csv_early_fusion[name] = self.load_early_csv(name)
        return csv_early_fusion

    def load_early_csv(self, dataset):
        """Load a generated early-fusion index csv into ``{clip_id: [[gt, frame, audio], ...]}``."""
        csv_early_fusion = {}
        print('Opening csv: features_path_early_fusion_' + dataset + '_' +
              self.feature_name + '.csv')
        with open('features_path_early_fusion_' + dataset + '_' +
                  self.feature_name + '.csv', 'r') as f:
            f.readline()  # skip header row
            csv_reader = csv.reader(f)
            for clip_id, ground_truth, frame_label, audio_label in csv_reader:
                if clip_id not in csv_early_fusion:
                    csv_early_fusion[clip_id] = []
                csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])
        return csv_early_fusion

    def _generate_data_for_early_fusion(self, files, name):
        """Scan feature folders and write the early-fusion index csv for one split.

        Pairs each frame-feature path with the audio-feature path of the same
        index ('full' features reuse the single clip-level audio file) and
        drops clips with fewer than 16 usable frames.
        """
        if 'full' in self.feature_name:
            frame_to_discard = 0
        else:
            # Discard leading frames not covered by a full audio window.
            window_size = int(self.feature_name.split('_')[1])
            frame_to_discard = ceil(window_size / 2 / 40)
        my_csv = {}
        for file in tqdm(files):
            clip_id_temp = file.split('.')[0]
            base_path = clip_id_temp.replace('AFEW/aligned',
                                             'early_feature/framefeature') + '*'
            frames_features_path = glob.glob(base_path)
            audio_features_path = glob.glob(base_path.replace(
                'early_feature/framefeature', 'early_feature/' + self.feature_name))
            # Sort numerically on the trailing frame index of each filename.
            frames_features_path.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if 'full' not in self.feature_name:
                audio_features_path.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
            ground_truth = basename(dirname(clip_id_temp))
            clip_id = basename(clip_id_temp)
            frames_features_path = frames_features_path[frame_to_discard:]
            if len(frames_features_path) < 16:
                continue
            if len(audio_features_path) < 16 and 'full' not in self.feature_name:
                continue
            for index, frame in enumerate(frames_features_path):
                if clip_id not in my_csv.keys():
                    my_csv[clip_id] = []
                if 'full' not in self.feature_name:
                    my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])
                else:
                    my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])
        with open('features_path_early_fusion_' + name + '_' +
                  self.feature_name + '.csv', 'w') as f:
            f.write('clip_id, ground_truth, frame_label, audio_label\n')
            for key in my_csv:
                for line in my_csv[key]:
                    f.write(key + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\n')
        return my_csv

    def early_gen_train(self, list_files, batch_size):
        """Infinite training generator over random 16-step windows.

        Yields ``([audio, frame], labels)`` with shapes (batch, 16, 1582),
        (batch, 16, 1024) and one-hot (batch, 7).
        """
        c = 0
        clip_ids = list(self.csv_fusion['train'].keys())
        random.shuffle(clip_ids)
        while True:
            labels = []
            features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                        np.zeros((batch_size, self.time_step, 1024)).astype('float')]
            for i in range(c, c + batch_size):
                clip_id = clip_ids[i]
                video_info = self.csv_fusion['train'][clip_id]
                ground_truth = video_info[0][0]
                start = random.randint(0, len(video_info) - self.time_step)
                for index, elem in enumerate(video_info[start:self.time_step + start]):
                    _, frame_path, audio_path = elem
                    if not isfile(frame_path):
                        start += 1
                        if start >= len(video_info):
                            # Was a bare ``raise`` with no active exception,
                            # which itself raises an opaque RuntimeError.
                            raise RuntimeError('not enough readable frames for clip ' + clip_id)
                        continue
                    frame_feature = np.load(frame_path)
                    features[0][i - c][index] = np.array(
                        from_arff_to_feture(audio_path)).reshape(self.feature_num)
                    features[1][i - c][index] = frame_feature.reshape(1024)
                labels.append(ground_truth)
            c += batch_size
            if c + batch_size > len(clip_ids):
                # Epoch exhausted: restart from the beginning with a reshuffle.
                c = 0
                random.shuffle(clip_ids)
            labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
            yield features, labels

    def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):
        """Validation generator: sliding windows (hop = time_step // stride) over all clips.

        With ``mode='eval'`` it stops after one pass; otherwise loops forever.
        """
        c = 0
        labels = features = []
        clip_ids = list(list_files.keys())
        while True:
            for clip_id in tqdm(clip_ids):
                video_info = list_files[clip_id]
                ground_truth = video_info[0][0]
                for start in range(0, len(video_info) - self.time_step,
                                   self.time_step // stride):
                    if c == 0:
                        labels = []
                        features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                                    np.zeros((batch_size, self.time_step, 1024)).astype('float')]
                    for index, elem in enumerate(video_info[start:self.time_step + start]):
                        _, frame_path, audio_path = elem
                        frame_feature = np.load(frame_path)
                        features[0][c][index] = np.array(
                            from_arff_to_feture(audio_path)).reshape(self.feature_num)
                        features[1][c][index] = frame_feature.reshape(1024)
                    labels.append(ground_truth)
                    c += 1
                    if c == batch_size:
                        c = 0
                        labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
                        yield features, labels
            if mode == 'eval':
                break

    def early_gen_test_clip(self, list_files, clip_id, stride=1):
        """Per-clip test generator; call once per clip, windows hop by time_step // stride.

        NOTE(review): the ``break`` before the final transform means the last
        window is never yielded -- confirm this is intended.
        """
        ground_truth = list_files[0][0]
        start = 0
        end = len(list_files) - self.time_step
        while True:
            labels = []
            features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),
                        np.zeros((1, self.time_step, 1024)).astype('float')]
            for index, elem in enumerate(list_files[start:start + self.time_step]):
                _, frame_path, audio_path = elem
                frame_feature = np.load(frame_path)
                features[0][0][index] = np.array(
                    from_arff_to_feture(audio_path)).reshape(self.feature_num)
                features[1][0][index] = frame_feature.reshape(1024)
            labels.append(ground_truth)
            start += self.time_step // stride
            if start >= end:
                break
            labels = self.lb.transform(np.array(labels)).reshape((1, 7))
            yield features, labels

    def get_validation_dim(self):
        """Return the precomputed validation step count for the (stride, feature) combo.

        Values were measured offline; returns None for unlisted combinations.
        """
        if self.stride == 2:
            if 'full' in self.feature_name:
                return 141
            elif '600' in self.feature_name:
                return 0
            elif '300' in self.feature_name:
                return 114
            elif '100' in self.feature_name:
                return 128
        elif self.stride == 1:
            if 'full' in self.feature_name:
                return 76
            elif '600' in self.feature_name:
                return 0
            elif '300' in self.feature_name:
                return 63
            elif '100' in self.feature_name:
                return 69
        elif self.stride == self.time_step:
            return 0

    def train(self, train_files, val_files, train_data, model):
        """Compile and fit ``model`` using the generators and hyper-params in ``train_data``."""
        if train_data['opt'] == 'Adam':
            optimizer = Adam(lr=train_data['lr'])
        else:
            optimizer = SGD(lr=train_data['lr'])
        model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        train_gen = train_data['generator1'](train_files, train_data['batch_size'])
        no_of_training_images = len(train_files)
        no_of_val_images = self.get_validation_dim()
        print('no_of_val_images:', no_of_val_images)
        val_gen = train_data['generator2'](val_files, train_data['batch_size'])
        # Encode the full hyper-parameter combo into the checkpoint file name.
        model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'] + \
                     '_Model' + str(train_data['model_name']) + \
                     '_Feature' + self.feature_name + '_' + \
                     str(train_data['iteration']) + '_' + self.train_mode
        model_name += 'stride' + str(self.stride)
        model_name += '.h5'

        def custom_scheduler(epoch):
            # Step-decay LR schedule: 0.1 -> 0.01 -> 0.001 -> 0.0001.
            if epoch < 50:
                print(0.1)
                return 0.1
            if epoch < 100:
                print(0.01)
                return 0.01
            if epoch < 125:
                print(0.001)
                return 0.001
            else:
                print(0.0001)
                return 0.0001

        class CheckValCMCallback(keras.callbacks.Callback):
            # NOTE(review): defined but never added to ``cb`` below -- dead code?
            def __init__(self, m, dim, validation_files, epoch):
                super().__init__()
                self.vc = m
                self.dim = dim
                self.val_files = validation_files
                self.epoch = epoch
                self.accs = []

            def on_epoch_end(self, epoch, logs=None):
                csv_fusion = self.vc.load_early_csv('val')
                gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')
                acc = self.model.evaluate_generator(gen, self.dim, workers=0)
                self.accs.append(acc)
                print('Evaluate:', acc)
                if self.epoch == epoch + 1:
                    print('Validation_Accuracy =', self.accs)

        cb = [ModelCheckpoint(
                  filepath=str('weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' + model_name),
                  monitor='val_accuracy', save_weights_only=True),
              TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode + '/' + self.feature_name,
                          write_graph=True, write_images=True)]
        cb += [LearningRateScheduler(custom_scheduler)]
        history = model.fit_generator(train_gen, validation_data=val_gen,
                                      epochs=train_data['epoch'],
                                      steps_per_epoch=no_of_training_images * 2 // train_data['batch_size'],
                                      validation_steps=no_of_val_images,
                                      workers=0, verbose=1, callbacks=cb)
        print('\n\nTrain_Accuracy =', history.history['accuracy'])
        print('\nVal_Accuracy =', history.history['val_accuracy'])
        print('\n\nTrain_Loss =', history.history['loss'])
        print('\nVal_Loss =', history.history['val_loss'])

    def print_stats(self, ground_truths, predictions, name):
        """Print a row-normalized confusion matrix and accuracy for one modality."""
        cm = confusion_matrix(ground_truths, predictions, self.classes)
        print('###' + name + ' Results###\n')
        print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],
                           decimals=3), self.classes)
        print('\n\n')
        print('Accuracy score: ', accuracy_score(ground_truths, predictions), '\n\n')
        print('#################################################################end###\n\n\n')

    def print_confusion_matrix(self, stride=1):
        """Evaluate on the validation split and print confusion matrices.

        Early fusion: runs the sliding-window generator through the model.
        Late fusion: re-scores stored per-modality predictions from csv.
        """
        csv_fusion = {}
        predictions = []
        ground_truths = []
        if self.train_mode == 'early_fusion':
            csv_fusion = self.load_early_csv('val')
            print('CSV loaded', len(csv_fusion))
            gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)
            for x in gen:
                ground_truths.append(self.lb.inverse_transform(x[1])[0])
                pred = self.model.predict(x[0])
                pred = self.lb.inverse_transform(pred)
                predictions.append(pred[0])
            self.print_stats(ground_truths, predictions, 'Video')
        else:
            with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:
                f.readline()  # skip header
                csv_reader = csv.reader(f)
                for row in csv_reader:
                    csv_fusion[row[0]] = [row[1], row[2], row[3]]
            a_p = []
            f_p = []
            files = glob.glob('/user/vlongobardi/late_feature/' +
                              self.feature_name + '/*/*csv')
            for file in files:
                clip_id = basename(file).split('.')[0]
                ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]
                # Late-fusion input: concatenated one-hot audio + frame predictions (14-d).
                sample = np.append(self.lb.transform(np.array([audio_pred])),
                                   self.lb.transform(np.array([frame_pred])))
                pred = self.model.predict(sample.reshape((1, 14)))
                pred = self.lb.inverse_transform(pred)[0]
                predictions.append(pred)
                a_p.append(audio_pred)
                f_p.append(frame_pred)
                ground_truths.append(ground_truth)
            self.print_stats(ground_truths, predictions, 'Video')
            self.print_stats(ground_truths, a_p, 'Audio')
            self.print_stats(ground_truths, f_p, 'Frame')
'/*/*csv') self.csv_fusion = self.generate_feature(t_files, v_files) self.do_training() def do_training(self): skips = 0 iters = 1 bs = 16 ep = 150 opts = ['SGD'] lrs = [0.01] models = [my_model] models_name = [x.__name__ for x in models] for index, model in enumerate(models): for opt in opts: for lr in lrs: for iteration in range(iters): if skips > 0: skips -= 1 continue train_infos = {'iteration': iteration, 'model_name': models_name[index], 'batch_size': bs, 'epoch': ep, 'lr': lr, 'opt': opt} print( """ ################################################################################ ############################## ITERATION """ + str(iteration + 1) + ' of ' + str(iters) + """ ########################### ######################################################""" + """ ######################## epochs:""", ep, 'batch_size:', bs, '\nmodel:', models_name[ index], 'in', models_name, '\nopt:', opt, 'in', opts, '\nlr:', lr, 'in', lrs) train_infos['generator1'] = self.early_gen_train train_infos['generator2'] = self.early_gen_new_val t_files, v_files = self.csv_fusion['train' ], self.csv_fusion['val'] m = model() self.train(t_files, v_files, train_infos, m) def generate_feature(self, t_files, v_files): if not exists('features_path_early_fusion_train_' + self. feature_name + '.csv'): print('\n##### GENERATING CSV FOR EARLY FUSION... #####') csv_early_fusion = {'train': self. _generate_data_for_early_fusion(t_files, 'train'), 'val': self._generate_data_for_early_fusion(v_files, 'val')} print('\n##### CSV GENERATED! #####') else: csv_early_fusion = {} for name in ['train', 'val']: csv_early_fusion[name] = self.load_early_csv(name) return csv_early_fusion def load_early_csv(self, dataset): csv_early_fusion = {} print('Opening csv: features_path_early_fusion_' + dataset + '_' + self.feature_name + '.csv') with open('features_path_early_fusion_' + dataset + '_' + self. 
feature_name + '.csv', 'r') as f: f.readline() csv_reader = csv.reader(f) for clip_id, ground_truth, frame_label, audio_label in csv_reader: if clip_id not in csv_early_fusion: csv_early_fusion[clip_id] = [] csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label]) return csv_early_fusion def _generate_data_for_early_fusion(self, files, name): if 'full' in self.feature_name: frame_to_discard = 0 else: window_size = int(self.feature_name.split('_')[1]) frame_to_discard = ceil(window_size / 2 / 40) my_csv = {} for file in tqdm(files): clip_id_temp = file.split('.')[0] base_path = clip_id_temp.replace('AFEW/aligned', 'early_feature/framefeature') + '*' frames_features_path = glob.glob(base_path) audio_features_path = glob.glob(base_path.replace( 'early_feature/framefeature', 'early_feature/' + self. feature_name)) frames_features_path.sort(key=lambda x: int(x.split('_')[-1]. split('.')[0])) if 'full' not in self.feature_name: audio_features_path.sort(key=lambda x: int(x.split('_')[-1] .split('.')[0])) ground_truth = basename(dirname(clip_id_temp)) clip_id = basename(clip_id_temp) frames_features_path = frames_features_path[frame_to_discard:] if len(frames_features_path) < 16: continue if len(audio_features_path ) < 16 and 'full' not in self.feature_name: continue for index, frame in enumerate(frames_features_path): if clip_id not in my_csv.keys(): my_csv[clip_id] = [] if 'full' not in self.feature_name: my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]]) else: my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]]) with open('features_path_early_fusion_' + name + '_' + self. 
feature_name + '.csv', 'w') as f: f.write('clip_id, ground_truth, frame_label, audio_label\n') for key in my_csv: for line in my_csv[key]: f.write(key + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\n') return my_csv def early_gen_train(self, list_files, batch_size): c = 0 clip_ids = list(self.csv_fusion['train'].keys()) random.shuffle(clip_ids) while True: labels = [] features = [np.zeros((batch_size, self.time_step, self. feature_num)).astype('float'), np.zeros((batch_size, self. time_step, 1024)).astype('float')] for i in range(c, c + batch_size): clip_id = clip_ids[i] video_info = self.csv_fusion['train'][clip_id] ground_truth = video_info[0][0] start = random.randint(0, len(video_info) - self.time_step) for index, elem in enumerate(video_info[start:self. time_step + start]): _, frame_path, audio_path = elem if not isfile(frame_path): start += 1 if start >= len(video_info): raise continue frame_feature = np.load(frame_path) features[0][i - c][index] = np.array(from_arff_to_feture (audio_path)).reshape(self.feature_num) features[1][i - c][index] = frame_feature.reshape(1024) labels.append(ground_truth) c += batch_size if c + batch_size > len(clip_ids): c = 0 random.shuffle(clip_ids) labels = self.lb.transform(np.array(labels)).reshape(( batch_size, 7)) yield features, labels def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1): """ stride 50% sul su tutti i file """ c = 0 labels = features = [] clip_ids = list(list_files.keys()) while True: for clip_id in tqdm(clip_ids): video_info = list_files[clip_id] ground_truth = video_info[0][0] for start in range(0, len(video_info) - self.time_step, self.time_step // stride): if c == 0: labels = [] features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'), np.zeros(( batch_size, self.time_step, 1024)).astype('float')] for index, elem in enumerate(video_info[start:self. 
time_step + start]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][c][index] = np.array(from_arff_to_feture (audio_path)).reshape(self.feature_num) features[1][c][index] = frame_feature.reshape(1024) labels.append(ground_truth) c += 1 if c == batch_size: c = 0 labels = self.lb.transform(np.array(labels)).reshape(( batch_size, 7)) yield features, labels if mode == 'eval': break def early_gen_test_clip(self, list_files, clip_id, stride=1): """ stride su singolo file, quindi va richiamato per ogni file """ ground_truth = list_files[0][0] start = 0 end = len(list_files) - self.time_step while True: labels = [] features = [np.zeros((1, self.time_step, self.feature_num)). astype('float'), np.zeros((1, self.time_step, 1024)).astype ('float')] for index, elem in enumerate(list_files[start:start + self. time_step]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][0][index] = np.array(from_arff_to_feture( audio_path)).reshape(self.feature_num) features[1][0][index] = frame_feature.reshape(1024) labels.append(ground_truth) start += self.time_step // stride if start >= end: break labels = self.lb.transform(np.array(labels)).reshape((1, 7)) yield features, labels def get_validation_dim(self): if self.stride == 2: if 'full' in self.feature_name: return 141 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 114 elif '100' in self.feature_name: return 128 elif self.stride == 1: if 'full' in self.feature_name: return 76 elif '600' in self.feature_name: return 0 elif '300' in self.feature_name: return 63 elif '100' in self.feature_name: return 69 elif self.stride == self.time_step: return 0 def train(self, train_files, val_files, train_data, model): if train_data['opt'] == 'Adam': optimizer = Adam(lr=train_data['lr']) else: optimizer = SGD(lr=train_data['lr']) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.summary() train_gen = 
train_data['generator1'](train_files, train_data[ 'batch_size']) no_of_training_images = len(train_files) no_of_val_images = self.get_validation_dim() print('no_of_val_images:', no_of_val_images) val_gen = train_data['generator2'](val_files, train_data['batch_size']) model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt' ] + '_Model' + str(train_data['model_name'] ) + '_Feature' + self.feature_name + '_' + str(train_data[ 'iteration']) + '_' + self.train_mode model_name += 'stride' + str(self.stride) model_name += '.h5' def custom_scheduler(epoch): if epoch < 50: print(0.1) return 0.1 if epoch < 100: print(0.01) return 0.01 if epoch < 125: print(0.001) return 0.001 else: print(0.0001) return 0.0001 class CheckValCMCallback(keras.callbacks.Callback): def __init__(self, m, dim, validation_files, epoch): super().__init__() self.vc = m self.dim = dim self.val_files = validation_files self.epoch = epoch self.accs = [] def on_epoch_end(self, epoch, logs=None): csv_fusion = self.vc.load_early_csv('val') gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval') acc = self.model.evaluate_generator(gen, self.dim, workers=0) self.accs.append(acc) print('Evaluate:', acc) if self.epoch == epoch + 1: print('Validation_Accuracy =', self.accs) cb = [ModelCheckpoint(filepath=str( 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' + model_name), monitor='val_accuracy', save_weights_only=True), TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode + '/' + self.feature_name, write_graph=True, write_images=True)] cb += [LearningRateScheduler(custom_scheduler)] history = model.fit_generator(train_gen, validation_data=val_gen, epochs=train_data['epoch'], steps_per_epoch= no_of_training_images * 2 // train_data['batch_size'], validation_steps=no_of_val_images, workers=0, verbose=1, callbacks=cb) print('\n\nTrain_Accuracy =', history.history['accuracy']) print('\nVal_Accuracy =', history.history['val_accuracy']) print('\n\nTrain_Loss =', 
history.history['loss']) print('\nVal_Loss =', history.history['val_loss']) def print_stats(self, ground_truths, predictions, name): cm = confusion_matrix(ground_truths, predictions, self.classes) print('###' + name + ' Results###\n') print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np. newaxis], decimals=3), self.classes) print('\n\n') print('Accuracy score: ', accuracy_score(ground_truths, predictions ), '\n\n') print( '#################################################################end###\n\n\n' ) def print_confusion_matrix(self, stride=1): """ IMPLEMENT FOR EARLY FUSION MISSING """ csv_fusion = {} predictions = [] ground_truths = [] if self.train_mode == 'early_fusion': csv_fusion = self.load_early_csv('val') print('CSV loaded', len(csv_fusion)) gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride) for x in gen: ground_truths.append(self.lb.inverse_transform(x[1])[0]) pred = self.model.predict(x[0]) pred = self.lb.inverse_transform(pred) predictions.append(pred[0]) self.print_stats(ground_truths, predictions, 'Video') else: with open('lables_late_fusion' + self.feature_name + '.csv', 'r' ) as f: f.readline() csv_reader = csv.reader(f) for row in csv_reader: csv_fusion[row[0]] = [row[1], row[2], row[3]] a_p = [] f_p = [] files = glob.glob('/user/vlongobardi/late_feature/' + self. 
feature_name + '/*/*csv') for file in files: clip_id = basename(file).split('.')[0] ground_truth, frame_pred, audio_pred = csv_fusion[clip_id] sample = np.append(self.lb.transform(np.array([audio_pred]) ), self.lb.transform(np.array([frame_pred]))) pred = self.model.predict(sample.reshape((1, 14))) pred = self.lb.inverse_transform(pred)[0] predictions.append(pred) a_p.append(audio_pred) f_p.append(frame_pred) ground_truths.append(ground_truth) self.print_stats(ground_truths, predictions, 'Video') self.print_stats(ground_truths, a_p, 'Audio') self.print_stats(ground_truths, f_p, 'Frame') <|reserved_special_token_0|> <|reserved_special_token_1|> import csv import glob import random import sys from math import ceil, floor from os.path import basename, exists, dirname, isfile import numpy as np import keras from keras import Model, Input, regularizers from keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler from keras.optimizers import Adam, SGD from sklearn.metrics import confusion_matrix, accuracy_score # , classification_report from sklearn.preprocessing import LabelBinarizer from tqdm import tqdm from Dataset.Dataset_Utils.augmenter import NoAug from Dataset.Dataset_Utils.datagen import DataGenerator as DataGen from Dataset.Dataset_Utils.dataset_tools import print_cm from Models.model_sharma import SharmaNet from audio_classifier import AudioClassifier, from_arff_to_feture from frames_classifier import FramesClassifier from test_models import * classes = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"] def my_model(): r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5) frame_input = Input(shape=(16, 1024)) audio_input = Input(shape=(16, 1582)) x = Concatenate(name='fusion1')([frame_input, audio_input]) x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x) x = 
TimeDistributed(Dropout(0.5))(x) x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2, name='ff_logit'))(x) x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x) return Model([audio_input, frame_input], x) class VideoClassifier: def __init__(self, train_mode="late_fusion", video_model_path=None, time_step=16, base_path="/user/vlongobardi/AFEW/aligned/", feature_name="emobase2010_100", stride=1): self.time_step = time_step self.train_mode = train_mode self.feature_name = feature_name self.classes = classes self.lb = LabelBinarizer() self.lb.fit_transform(np.array(classes)) self.feature_num = 1582 self.offset = ceil(int(self.feature_name.split("_")[1]) / 2 / 40) self.stride = stride if video_model_path is not None: try: self.model = my_model() self.model.load_weights(video_model_path) print("VideoClassifier loaded successfully", video_model_path) except: print("Exception") else: t_files = glob.glob(base_path + "Train" + "/*/*csv") v_files = glob.glob(base_path + "Val" + "/*/*csv") self.csv_fusion = self.generate_feature(t_files, v_files) self.do_training() def do_training(self): skips = 0 iters = 1 bs = 16 ep = 150 opts = ["SGD"]#, "Adam"] lrs = [0.01] models = [my_model] models_name = [x.__name__ for x in models] for index, model in enumerate(models): for opt in opts: for lr in lrs: for iteration in range(iters): if skips > 0: skips -= 1 continue train_infos = { "iteration": iteration, "model_name": models_name[index], "batch_size": bs, "epoch": ep, "lr": lr, "opt": opt } print( "\n\n################################################################################\n" "############################## ITERATION " + str(iteration + 1) + " of " + str(iters) + " ###########################\n######################################################" + " ########################\nepochs:", ep, "batch_size:", bs, "\nmodel:", models_name[index], "in", models_name, "\nopt:", opt, "in", opts, "\nlr:", lr, "in", lrs) train_infos["generator1"] = self.early_gen_train 
train_infos["generator2"] = self.early_gen_new_val t_files, v_files = self.csv_fusion["train"], self.csv_fusion["val"] m = model() self.train(t_files, v_files, train_infos, m) def generate_feature(self, t_files, v_files): if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'): print("\n##### GENERATING CSV FOR EARLY FUSION... #####") csv_early_fusion = { "train": self._generate_data_for_early_fusion(t_files, "train"), "val": self._generate_data_for_early_fusion(v_files, "val") } print("\n##### CSV GENERATED! #####") else: csv_early_fusion = {} for name in ["train", "val"]: csv_early_fusion[name] = self.load_early_csv(name) return csv_early_fusion def load_early_csv(self, dataset): csv_early_fusion = {} print("Opening csv: features_path_early_fusion_" + dataset + "_" + self.feature_name + '.csv') with open('features_path_early_fusion_' + dataset + "_" + self.feature_name + '.csv', 'r') as f: f.readline() csv_reader = csv.reader(f) for clip_id, ground_truth, frame_label, audio_label in csv_reader: if clip_id not in csv_early_fusion: csv_early_fusion[clip_id] = [] csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label]) return csv_early_fusion def _generate_data_for_early_fusion(self, files, name): # '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv' # '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat' # '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff' if "full" in self.feature_name: frame_to_discard = 0 else: window_size = int(self.feature_name.split("_")[1]) frame_to_discard = ceil(window_size / 2 / 40) my_csv = {} for file in tqdm(files): clip_id_temp = file.split(".")[0] base_path = clip_id_temp.replace("AFEW/aligned", "early_feature/framefeature") + "*" frames_features_path = glob.glob(base_path) audio_features_path = glob.glob( base_path.replace("early_feature/framefeature", "early_feature/" + self.feature_name)) frames_features_path.sort(key=lambda x: 
int(x.split("_")[-1].split(".")[0])) if "full" not in self.feature_name: audio_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0])) ground_truth = basename(dirname(clip_id_temp)) clip_id = basename(clip_id_temp) # discard video frames based on window size frames_features_path = frames_features_path[frame_to_discard:] if len(frames_features_path) < 16: continue # print("FRAME TOO FEW SAMPLES:", len(frames_features_path), clip_id) if len(audio_features_path) < 16 and "full" not in self.feature_name: continue # print("AUDIO TOO FEW SAMPLES:", len(audio_features_path), clip_id) for index, frame in enumerate(frames_features_path): if clip_id not in my_csv.keys(): my_csv[clip_id] = [] if "full" not in self.feature_name: my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]]) else: my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]]) with open('features_path_early_fusion_' + name + "_" + self.feature_name + '.csv', 'w') as f: f.write("clip_id, ground_truth, frame_label, audio_label\n") for key in my_csv: for line in my_csv[key]: f.write(key + "," + line[0] + "," + line[1] + "," + line[2] + "\n") return my_csv def early_gen_train(self, list_files, batch_size): c = 0 clip_ids = list(self.csv_fusion["train"].keys()) random.shuffle(clip_ids) while True: labels = [] features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'), np.zeros((batch_size, self.time_step, 1024)).astype('float')] for i in range(c, c + batch_size): clip_id = clip_ids[i] video_info = self.csv_fusion["train"][clip_id] ground_truth = video_info[0][0] # first_frame_num = int(video_info[0][1].split("_")[-1].split(".")[0]) start = random.randint(0, len(video_info) - self.time_step) for index, elem in enumerate(video_info[start:self.time_step + start]): _, frame_path, audio_path = elem if not isfile(frame_path): start += 1 if start >= len(video_info): raise continue frame_feature = np.load(frame_path) features[0][i - c][index] = 
np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, ) features[1][i - c][index] = frame_feature.reshape(1024, ) labels.append(ground_truth) c += batch_size if c + batch_size > len(clip_ids): c = 0 random.shuffle(clip_ids) labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7)) yield features, labels def early_gen_new_val(self, list_files, batch_size, mode="val", stride=1): """ stride 50% sul su tutti i file """ c = 0 labels = features = [] clip_ids = list(list_files.keys()) while True: for clip_id in tqdm(clip_ids): video_info = list_files[clip_id] ground_truth = video_info[0][0] for start in range(0, len(video_info) - self.time_step, self.time_step // stride): if c == 0: labels = [] features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'), np.zeros((batch_size, self.time_step, 1024)).astype('float')] for index, elem in enumerate(video_info[start:self.time_step + start]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape( self.feature_num, ) features[1][c][index] = frame_feature.reshape(1024, ) labels.append(ground_truth) c += 1 if c == batch_size: c = 0 labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7)) yield features, labels if mode == "eval": break def early_gen_test_clip(self, list_files, clip_id, stride=1): """ stride su singolo file, quindi va richiamato per ogni file """ ground_truth = list_files[0][0] start = 0 end = len(list_files) - self.time_step while True: labels = [] features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'), np.zeros((1, self.time_step, 1024)).astype('float')] for index, elem in enumerate(list_files[start:start + self.time_step]): _, frame_path, audio_path = elem frame_feature = np.load(frame_path) features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, ) features[1][0][index] = frame_feature.reshape(1024, ) 
labels.append(ground_truth) start += self.time_step // stride if start >= end: break labels = self.lb.transform(np.array(labels)).reshape((1, 7)) yield features, labels def get_validation_dim(self): if self.stride == 2: if "full" in self.feature_name: return 141 elif "600" in self.feature_name: return 0 elif "300" in self.feature_name: return 114 elif "100" in self.feature_name: return 128 elif self.stride == 1: if "full" in self.feature_name: return 76 elif "600" in self.feature_name: return 0 elif "300" in self.feature_name: return 63 elif "100" in self.feature_name: return 69 elif self.stride == self.time_step: return 0 def train(self, train_files, val_files, train_data, model): if train_data["opt"] == "Adam": optimizer = Adam(lr=train_data["lr"]) else: optimizer = SGD(lr=train_data["lr"]) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.summary() train_gen = train_data["generator1"](train_files, train_data["batch_size"]) no_of_training_images = len(train_files) no_of_val_images = self.get_validation_dim() print("no_of_val_images:", no_of_val_images) val_gen = train_data["generator2"](val_files, train_data["batch_size"]) # stride = 1, no overlapping # stride = 2, overlapping: 50% # stride = time_step, stride: 1 model_name = "_lr" + str(train_data["lr"]) + "_Opt" + train_data["opt"] + "_Model" + str( train_data["model_name"]) + "_Feature" + self.feature_name + "_" + str( train_data["iteration"]) + "_" + self.train_mode # + "_modelType" + str(self.model_type) model_name += "stride" + str(self.stride) model_name += ".h5" def custom_scheduler(epoch): if epoch < 50: print(0.1) return 0.1 if epoch < 100: print(0.01) return 0.01 if epoch < 125: print(0.001) return 0.001 else: print(0.0001) return 0.0001 #print(0.1 / 10 ** (floor(epoch / 40) + 1)) #return 0.1 / 10 ** (floor(epoch / 40) + 1) class CheckValCMCallback(keras.callbacks.Callback): def __init__(self, m, dim, validation_files, epoch): super().__init__() self.vc = m 
self.dim = dim self.val_files = validation_files self.epoch = epoch self.accs = [] def on_epoch_end(self, epoch, logs=None): csv_fusion = self.vc.load_early_csv("val") # gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval") # predictions = [] # ground_truths = [] # for x in gen: # ground_truths.append(self.vc.lb.inverse_transform(x[1])[0]) # pred = self.model.predict(x[0]) # pred = self.vc.lb.inverse_transform(pred) # predictions.append(pred[0]) # self.vc.print_stats(ground_truths, predictions, "Video" + str(epoch)) gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval") acc = self.model.evaluate_generator(gen, self.dim, workers=0) self.accs.append(acc) print("Evaluate:", acc) if self.epoch == epoch + 1: print("Validation_Accuracy =", self.accs) cb = [ModelCheckpoint( filepath=str( "weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}" + model_name), monitor="val_accuracy", save_weights_only=True), TensorBoard(log_dir="NewFusionLogs_sched/" + self.train_mode + "/" + self.feature_name, write_graph=True, write_images=True)] cb += [LearningRateScheduler(custom_scheduler)] #cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data["epoch"])] history = model.fit_generator(train_gen, validation_data=val_gen, epochs=train_data["epoch"], steps_per_epoch=(no_of_training_images * 2 // train_data["batch_size"]), validation_steps=(no_of_val_images), workers=0, verbose=1, callbacks=cb) print("\n\nTrain_Accuracy =", history.history['accuracy']) print("\nVal_Accuracy =", history.history['val_accuracy']) print("\n\nTrain_Loss =", history.history['loss']) print("\nVal_Loss =", history.history['val_loss']) def print_stats(self, ground_truths, predictions, name): cm = confusion_matrix(ground_truths, predictions, self.classes) print("###" + name + " Results###\n") # print_cm(cm, self.classes) # print("\n\n") print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes) print("\n\n") print("Accuracy score: ", 
accuracy_score(ground_truths, predictions), "\n\n") # print("Report") # print(classification_report(ground_truths, predictions)) print("#################################################################end###\n\n\n") def print_confusion_matrix(self, stride=1): """ IMPLEMENT FOR EARLY FUSION MISSING """ csv_fusion = {} predictions = [] ground_truths = [] if self.train_mode == "early_fusion": csv_fusion = self.load_early_csv("val") print("CSV loaded", len(csv_fusion)) gen = self.early_gen_new_val(csv_fusion, 1, "eval", stride) for x in gen: ground_truths.append(self.lb.inverse_transform(x[1])[0]) pred = self.model.predict(x[0]) pred = self.lb.inverse_transform(pred) predictions.append(pred[0]) # print("\ngt, pred", self.lb.inverse_transform(x[1]), pred) self.print_stats(ground_truths, predictions, "Video") else: with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f: f.readline() csv_reader = csv.reader(f) for row in csv_reader: csv_fusion[row[0]] = [row[1], row[2], row[3]] a_p = [] f_p = [] files = glob.glob("/user/vlongobardi/late_feature/" + self.feature_name + "/*/*csv") for file in files: clip_id = basename(file).split(".")[0] ground_truth, frame_pred, audio_pred = csv_fusion[clip_id] sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred]))) pred = self.model.predict(sample.reshape((1, 14))) pred = self.lb.inverse_transform(pred)[0] predictions.append(pred) a_p.append(audio_pred) f_p.append(frame_pred) ground_truths.append(ground_truth) self.print_stats(ground_truths, predictions, "Video") self.print_stats(ground_truths, a_p, "Audio") self.print_stats(ground_truths, f_p, "Frame") if __name__ == "__main__": if sys.argv[1] == "late": print("LATE") model_path = [ "audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5", "audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5", 
"audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5", "audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5" ] for mp in model_path: vc = VideoClassifier(train_mode="late_fusion", audio_model_path=mp) elif sys.argv[1] == "early": # mt = int(sys.argv[2]) print("EARLY") # , Model_type:", mt) arff_paths = {"e1": "emobase2010_100", "i1": "IS09_emotion_100", "e3": "emobase2010_300", "i3": "IS09_emotion_300", "e6": "emobase2010_600", "i6": "IS09_emotion_600", "ef": "emobase2010_full", "if": "IS09_emotion_full"} vc = VideoClassifier(train_mode="early_fusion", feature_name=arff_paths[sys.argv[2]]) # , model_type=mt)
flexible
{ "blob_id": "c925bed2f4d8120e156caebbe8e6bf9d6a51ee37", "index": 3330, "step-1": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n <mask token>\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = 
clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n <mask token>\n <mask token>\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + 
str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n <mask token>\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if 
self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n 
print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in 
my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = 
video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in 
self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', 
save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = 
glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': 
iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n \"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! 
#####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n 
my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, 
self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 
0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += 
[LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = 
csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-05), regularizers.l2(1e-05)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1,\n name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=\n r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, 
v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n \"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! 
#####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n 
my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, 
self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 
0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += 
[LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = 
csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n", "step-5": "import csv\nimport glob\nimport random\nimport sys\nfrom math import ceil, floor\nfrom os.path import basename, exists, dirname, isfile\n\nimport numpy as np\nimport keras\nfrom keras import Model, Input, regularizers\nfrom keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler\nfrom keras.optimizers import Adam, SGD\nfrom sklearn.metrics import confusion_matrix, accuracy_score # , classification_report\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tqdm import tqdm\n\nfrom Dataset.Dataset_Utils.augmenter import NoAug\nfrom Dataset.Dataset_Utils.datagen import DataGenerator as DataGen\nfrom Dataset.Dataset_Utils.dataset_tools import print_cm\nfrom Models.model_sharma import SharmaNet\nfrom audio_classifier import AudioClassifier, from_arff_to_feture\nfrom frames_classifier import FramesClassifier\nfrom test_models import *\n\nclasses = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Neutral\", \"Sad\", \"Surprise\"]\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', 
kernel_regularizer=r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode=\"late_fusion\", video_model_path=None, time_step=16,\n base_path=\"/user/vlongobardi/AFEW/aligned/\", feature_name=\"emobase2010_100\", stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split(\"_\")[1]) / 2 / 40)\n self.stride = stride\n\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print(\"VideoClassifier loaded successfully\", video_model_path)\n except:\n print(\"Exception\")\n else:\n t_files = glob.glob(base_path + \"Train\" + \"/*/*csv\")\n v_files = glob.glob(base_path + \"Val\" + \"/*/*csv\")\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = [\"SGD\"]#, \"Adam\"]\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n\n if skips > 0:\n skips -= 1\n continue\n\n train_infos = {\n \"iteration\": iteration, \"model_name\": models_name[index],\n \"batch_size\": bs, \"epoch\": ep, \"lr\": lr, \"opt\": opt\n }\n\n print(\n \"\\n\\n################################################################################\\n\"\n \"############################## ITERATION \" + str(iteration + 1) + \" of \" + str(iters) +\n \" ###########################\\n######################################################\" +\n \" ########################\\nepochs:\", ep, \"batch_size:\", bs, \"\\nmodel:\", models_name[index],\n \"in\", models_name, 
\"\\nopt:\", opt, \"in\", opts, \"\\nlr:\", lr, \"in\", lrs)\n\n train_infos[\"generator1\"] = self.early_gen_train\n train_infos[\"generator2\"] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion[\"train\"], self.csv_fusion[\"val\"]\n m = model()\n\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):\n print(\"\\n##### GENERATING CSV FOR EARLY FUSION... #####\")\n csv_early_fusion = {\n \"train\": self._generate_data_for_early_fusion(t_files, \"train\"),\n \"val\": self._generate_data_for_early_fusion(v_files, \"val\")\n }\n print(\"\\n##### CSV GENERATED! #####\")\n else:\n csv_early_fusion = {}\n for name in [\"train\", \"val\"]:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print(\"Opening csv: features_path_early_fusion_\" + dataset + \"_\" + self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + \"_\" + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n # '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'\n # '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'\n # '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'\n if \"full\" in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split(\"_\")[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split(\".\")[0]\n base_path = clip_id_temp.replace(\"AFEW/aligned\", 
\"early_feature/framefeature\") + \"*\"\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(\n base_path.replace(\"early_feature/framefeature\", \"early_feature/\" + self.feature_name))\n frames_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n if \"full\" not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n\n # discard video frames based on window size\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n # print(\"FRAME TOO FEW SAMPLES:\", len(frames_features_path), clip_id)\n if len(audio_features_path) < 16 and \"full\" not in self.feature_name:\n continue\n # print(\"AUDIO TOO FEW SAMPLES:\", len(audio_features_path), clip_id)\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if \"full\" not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])\n with open('features_path_early_fusion_' + name + \"_\" + self.feature_name + '.csv', 'w') as f:\n f.write(\"clip_id, ground_truth, frame_label, audio_label\\n\")\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + \",\" + line[0] + \",\" + line[1] + \",\" + line[2] + \"\\n\")\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion[\"train\"].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion[\"train\"][clip_id]\n ground_truth = video_info[0][0]\n\n # 
first_frame_num = int(video_info[0][1].split(\"_\")[-1].split(\".\")[0])\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][i - c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode=\"val\", stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n\n for start in range(0, len(video_info) - self.time_step, self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(\n self.feature_num, )\n features[1][c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n if mode == \"eval\":\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = 
list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),\n np.zeros((1, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(list_files[start:start + self.time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][0][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if \"full\" in self.feature_name:\n return 141\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 114\n elif \"100\" in self.feature_name:\n return 128\n elif self.stride == 1:\n if \"full\" in self.feature_name:\n return 76\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 63\n elif \"100\" in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data[\"opt\"] == \"Adam\":\n optimizer = Adam(lr=train_data[\"lr\"])\n else:\n optimizer = SGD(lr=train_data[\"lr\"])\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n\n train_gen = train_data[\"generator1\"](train_files, train_data[\"batch_size\"])\n no_of_training_images = len(train_files)\n\n no_of_val_images = self.get_validation_dim()\n print(\"no_of_val_images:\", no_of_val_images)\n val_gen = train_data[\"generator2\"](val_files, train_data[\"batch_size\"])\n\n # stride = 1, no overlapping\n # stride = 2, overlapping: 50%\n # stride = time_step, stride: 1\n\n model_name = \"_lr\" + str(train_data[\"lr\"]) + 
\"_Opt\" + train_data[\"opt\"] + \"_Model\" + str(\n train_data[\"model_name\"]) + \"_Feature\" + self.feature_name + \"_\" + str(\n train_data[\"iteration\"]) + \"_\" + self.train_mode # + \"_modelType\" + str(self.model_type)\n model_name += \"stride\" + str(self.stride)\n model_name += \".h5\"\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n #print(0.1 / 10 ** (floor(epoch / 40) + 1))\n #return 0.1 / 10 ** (floor(epoch / 40) + 1)\n\n class CheckValCMCallback(keras.callbacks.Callback):\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv(\"val\")\n # gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n # predictions = []\n # ground_truths = []\n # for x in gen:\n # ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])\n # pred = self.model.predict(x[0])\n # pred = self.vc.lb.inverse_transform(pred)\n # predictions.append(pred[0])\n # self.vc.print_stats(ground_truths, predictions, \"Video\" + str(epoch))\n gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print(\"Evaluate:\", acc)\n\n if self.epoch == epoch + 1:\n print(\"Validation_Accuracy =\", self.accs)\n\n cb = [ModelCheckpoint(\n filepath=str(\n \"weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}\" + model_name),\n monitor=\"val_accuracy\", save_weights_only=True),\n TensorBoard(log_dir=\"NewFusionLogs_sched/\" + self.train_mode + \"/\" + self.feature_name, write_graph=True,\n write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n #cb += [CheckValCMCallback(self, no_of_val_images, val_files, 
train_data[\"epoch\"])]\n history = model.fit_generator(train_gen,\n validation_data=val_gen,\n epochs=train_data[\"epoch\"],\n steps_per_epoch=(no_of_training_images * 2 // train_data[\"batch_size\"]),\n validation_steps=(no_of_val_images),\n workers=0, verbose=1, callbacks=cb)\n print(\"\\n\\nTrain_Accuracy =\", history.history['accuracy'])\n print(\"\\nVal_Accuracy =\", history.history['val_accuracy'])\n print(\"\\n\\nTrain_Loss =\", history.history['loss'])\n print(\"\\nVal_Loss =\", history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print(\"###\" + name + \" Results###\\n\")\n # print_cm(cm, self.classes)\n # print(\"\\n\\n\")\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)\n print(\"\\n\\n\")\n print(\"Accuracy score: \", accuracy_score(ground_truths, predictions), \"\\n\\n\")\n # print(\"Report\")\n # print(classification_report(ground_truths, predictions))\n print(\"#################################################################end###\\n\\n\\n\")\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == \"early_fusion\":\n csv_fusion = self.load_early_csv(\"val\")\n print(\"CSV loaded\", len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, \"eval\", stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n # print(\"\\ngt, pred\", self.lb.inverse_transform(x[1]), pred)\n self.print_stats(ground_truths, predictions, \"Video\")\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = 
[]\n files = glob.glob(\"/user/vlongobardi/late_feature/\" + self.feature_name + \"/*/*csv\")\n for file in files:\n clip_id = basename(file).split(\".\")[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n\n self.print_stats(ground_truths, predictions, \"Video\")\n self.print_stats(ground_truths, a_p, \"Audio\")\n self.print_stats(ground_truths, f_p, \"Frame\")\n\n\nif __name__ == \"__main__\":\n if sys.argv[1] == \"late\":\n print(\"LATE\")\n model_path = [\n \"audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5\",\n \"audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5\",\n \"audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5\",\n \"audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5\"\n ]\n for mp in model_path:\n vc = VideoClassifier(train_mode=\"late_fusion\", audio_model_path=mp)\n elif sys.argv[1] == \"early\":\n # mt = int(sys.argv[2])\n print(\"EARLY\") # , Model_type:\", mt)\n arff_paths = {\"e1\": \"emobase2010_100\", \"i1\": \"IS09_emotion_100\",\n \"e3\": \"emobase2010_300\", \"i3\": \"IS09_emotion_300\",\n \"e6\": \"emobase2010_600\", \"i6\": \"IS09_emotion_600\",\n \"ef\": \"emobase2010_full\", \"if\": \"IS09_emotion_full\"}\n vc = VideoClassifier(train_mode=\"early_fusion\", feature_name=arff_paths[sys.argv[2]]) # , model_type=mt)\n", "step-ids": [ 7, 11, 13, 14, 18 ] }
[ 7, 11, 13, 14, 18 ]
from .context import mango from solana.publickey import PublicKey def test_token_lookup(): data = { "tokens": [ { "address": "So11111111111111111111111111111111111111112", "symbol": "SOL", "name": "Wrapped SOL", "decimals": 9, }, { "address": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", "symbol": "USDC", "name": "USD Coin", "decimals": 6, }, { "address": "9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E", "symbol": "BTC", "name": "Wrapped Bitcoin (Sollet)", "decimals": 6, }, { "address": "2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk", "symbol": "ETH", "name": "Wrapped Ethereum (Sollet)", "decimals": 6, }] } actual = mango.SplTokenLookup("test-filename", data) assert actual is not None assert actual.logger is not None assert actual.find_by_symbol("ETH") is not None assert actual.find_by_symbol("ETH").name == "Wrapped Ethereum (Sollet)" assert actual.find_by_symbol("BTC") is not None assert actual.find_by_symbol("BTC").name == "Wrapped Bitcoin (Sollet)" def test_token_lookups_with_full_data(): token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.DefaultDataFilepath) assert token_lookup.find_by_symbol("BTC").mint == PublicKey("9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E") assert token_lookup.find_by_symbol("ETH").mint == PublicKey("2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk") assert token_lookup.find_by_mint("AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq").symbol == "SRM-SOL" assert token_lookup.find_by_mint("Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB").symbol == "USDT"
normal
{ "blob_id": "5e7a589af69a604021ed9558fcce721a8e254fee", "index": 5269, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n", "step-3": "<mask token>\n\n\ndef test_token_lookup():\n data = {'tokens': [{'address':\n 'So11111111111111111111111111111111111111112', 'symbol': 'SOL',\n 'name': 'Wrapped SOL', 'decimals': 9}, {'address':\n 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',\n 'name': 'USD Coin', 'decimals': 6}, {'address':\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',\n 'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',\n 'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}\n actual = mango.SplTokenLookup('test-filename', data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol('ETH') is not None\n assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'\n assert actual.find_by_symbol('BTC') is not None\n assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n 
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n", "step-4": "from .context import mango\nfrom solana.publickey import PublicKey\n\n\ndef test_token_lookup():\n data = {'tokens': [{'address':\n 'So11111111111111111111111111111111111111112', 'symbol': 'SOL',\n 'name': 'Wrapped SOL', 'decimals': 9}, {'address':\n 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',\n 'name': 'USD Coin', 'decimals': 6}, {'address':\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',\n 'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',\n 'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}\n actual = mango.SplTokenLookup('test-filename', data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol('ETH') is not None\n assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'\n assert actual.find_by_symbol('BTC') is not None\n assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n", "step-5": "from .context import mango\n\nfrom solana.publickey import PublicKey\n\n\ndef test_token_lookup():\n data = {\n \"tokens\": [\n {\n \"address\": 
\"So11111111111111111111111111111111111111112\",\n \"symbol\": \"SOL\",\n \"name\": \"Wrapped SOL\",\n \"decimals\": 9,\n },\n {\n \"address\": \"EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v\",\n \"symbol\": \"USDC\",\n \"name\": \"USD Coin\",\n \"decimals\": 6,\n },\n {\n \"address\": \"9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E\",\n \"symbol\": \"BTC\",\n \"name\": \"Wrapped Bitcoin (Sollet)\",\n \"decimals\": 6,\n },\n {\n \"address\": \"2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk\",\n \"symbol\": \"ETH\",\n \"name\": \"Wrapped Ethereum (Sollet)\",\n \"decimals\": 6,\n }]\n }\n actual = mango.SplTokenLookup(\"test-filename\", data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol(\"ETH\") is not None\n assert actual.find_by_symbol(\"ETH\").name == \"Wrapped Ethereum (Sollet)\"\n assert actual.find_by_symbol(\"BTC\") is not None\n assert actual.find_by_symbol(\"BTC\").name == \"Wrapped Bitcoin (Sollet)\"\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.DefaultDataFilepath)\n assert token_lookup.find_by_symbol(\"BTC\").mint == PublicKey(\"9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E\")\n assert token_lookup.find_by_symbol(\"ETH\").mint == PublicKey(\"2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk\")\n assert token_lookup.find_by_mint(\"AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq\").symbol == \"SRM-SOL\"\n assert token_lookup.find_by_mint(\"Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB\").symbol == \"USDT\"\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import torch from torch import nn from torch.nn import functional as F from models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3 class AttentionGatedUnet3D(nn.Module): """ Attention Gated Unet for 3D semantic segmentation. Args: config: Must contain following attributes: num_classes (int): Number of output classes in the mask; in_channels (int): Number of channels in the input image; feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block; is_deconv (bool, optional): whether to use DeConvolutions; is_batchnorm (bool, optional): whether to use Batch Normalization; Attributes: num_classes (int): Number of classes in the output mask in_channels (int): Number of channels in the input image is_batchnorm (bool) is_deconv (bool) feature_scale (int) """ def __init__(self, config): super(AttentionGatedUnet3D, self).__init__() assert hasattr(config, "num_classes") assert hasattr(config, "in_channels") if not hasattr(config, "feature_scale"): print("feature_scale not specified in config, setting to default 4") config.feature_scale = 4 if not hasattr(config, "is_deconv"): print("is_deconv not specified in config, setting to default True") config.is_deconv = True if not hasattr(config, "is_batchnorm"): print("is_batchnorm not specified in config, setting to default True") config.is_batchnorm = True self.num_classes = config.num_classes self.in_channels = config.in_channels self.is_deconv = config.is_deconv self.is_batchnorm = config.is_batchnorm self.feature_scale = config.feature_scale nonlocal_mode = 'concatenation' attention_dsample = (2, 2, 2) filters = [64, 128, 256, 512, 1024] filters = [int(x / self.feature_scale) for x in filters] # downsampling self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm) self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2)) self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm) self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 
2, 2)) self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm) self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2)) self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm) self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2)) self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm) self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm) # attention blocks self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1], nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample) self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2], nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample) self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3], nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample) # upsampling self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv) self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv) self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv) self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv) # deep supervision self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes, scale_factor=8) self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes, scale_factor=4) self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes, scale_factor=2) self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.num_classes, kernel_size=1) # final conv (without any concat) self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1) # initialise weights for m in self.modules(): if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('Linear') 
!= -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('BatchNorm') != -1: nn.init.normal(m.weight.data, 1.0, 0.02) nn.init.constant(m.bias.data, 0.0) def forward(self, inputs): # Feature Extraction conv1 = self.conv1(inputs) maxpool1 = self.maxpool1(conv1) conv2 = self.conv2(maxpool1) maxpool2 = self.maxpool2(conv2) conv3 = self.conv3(maxpool2) maxpool3 = self.maxpool3(conv3) conv4 = self.conv4(maxpool3) maxpool4 = self.maxpool4(conv4) # Gating Signal Generation center = self.center(maxpool4) gating = self.gating(center) # Attention Mechanism # Upscaling Part (Decoder) g_conv4, att4 = self.attentionblock4(conv4, gating) up4 = self.up_concat4(g_conv4, center) g_conv3, att3 = self.attentionblock3(conv3, up4) up3 = self.up_concat3(g_conv3, up4) g_conv2, att2 = self.attentionblock2(conv2, up3) up2 = self.up_concat2(g_conv2, up3) up1 = self.up_concat1(conv1, up2) # Deep Supervision dsv4 = self.dsv4(up4) dsv3 = self.dsv3(up3) dsv2 = self.dsv2(up2) dsv1 = self.dsv1(up1) final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1)) pred = F.softmax(final, dim=1) return pred # @staticmethod # def apply_argmax_softmax(pred): # log_p = F.softmax(pred, dim=1) # return log_p
normal
{ "blob_id": "55a392d63838cbef027f9cf525999c41416e3575", "index": 3875, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass AttentionGatedUnet3D(nn.Module):\n <mask token>\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n 
self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n num_classes, kernel_size=1)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = 
self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n", "step-3": "<mask token>\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n 
attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n num_classes, kernel_size=1)\n 
self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n", "step-4": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether 
to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n 
gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n num_classes, kernel_size=1)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 
= self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n", "step-5": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, \"num_classes\")\n assert hasattr(config, \"in_channels\")\n\n if not hasattr(config, \"feature_scale\"):\n print(\"feature_scale not specified in config, setting to default 4\")\n config.feature_scale = 4\n\n if not hasattr(config, \"is_deconv\"):\n print(\"is_deconv not specified in config, setting to default True\")\n config.is_deconv = True\n\n if not hasattr(config, \"is_batchnorm\"):\n print(\"is_batchnorm not specified in config, setting to default True\")\n 
config.is_batchnorm = True\n\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n\n nonlocal_mode = 'concatenation'\n attention_dsample = (2, 2, 2)\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1),\n is_batchnorm=self.is_batchnorm)\n\n # attention blocks\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n\n # upsampling\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n\n # deep supervision\n 
self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes, scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes, scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes, scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.num_classes, kernel_size=1)\n\n # final conv (without any concat)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n\n # initialise weights\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n # Feature Extraction\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n\n # Gating Signal Generation\n center = self.center(maxpool4)\n gating = self.gating(center)\n\n # Attention Mechanism\n # Upscaling Part (Decoder)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n\n # Deep Supervision\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n\n# @staticmethod\n# def 
apply_argmax_softmax(pred):\n# log_p = F.softmax(pred, dim=1)\n\n# return log_p\n", "step-ids": [ 0, 3, 4, 5, 6 ] }
[ 0, 3, 4, 5, 6 ]
import re s=input('enter the string:') def end_num(s): text = re.compile(r".*[0-9]$") if text.match(s): return 'Yes!Number is present at the end of string' else: return 'No!Number is not present at the end of string' print(end_num(s))
normal
{ "blob_id": "94334f91b1556c05dce0ed6f23c074bb8875f185", "index": 2505, "step-1": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n", "step-3": "<mask token>\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n", "step-4": "import re\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n", "step-5": "import re\r\ns=input('enter the string:')\r\ndef end_num(s):\r\n text = re.compile(r\".*[0-9]$\")\r\n if text.match(s):\r\n return 'Yes!Number is present at the end of string'\r\n else:\r\n return 'No!Number is not present at the end of string'\r\n\r\nprint(end_num(s))\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def fun(st, n): suffix = [(0) for i in range(n)] prefix = [(0) for i in range(n)] count = 0 for i, val in enumerate(st): if val == '*': if i == 0: prefix[i] = 0 count += 1 else: prefix[i] = prefix[i - 1] count += 1 elif i == 0: prefix[i] = 0 count += 0 else: prefix[i] = prefix[i - 1] + count count += 0 count = 0 for i in range(n - 1, -1, -1): val = st[i] if val == '*': if i == n - 1: suffix[i] = 0 count += 1 else: suffix[i] = suffix[i + 1] count += 1 elif i == n - 1: suffix[i] = 0 count += 0 else: suffix[i] = suffix[i + 1] + count count += 0 ans = 10 ** 12 for i in range(n): if i != n - 1: ans = min(ans, prefix[i] + suffix[i + 1]) else: ans = min(ans, prefix[i]) print(ans) <|reserved_special_token_0|> <|reserved_special_token_1|> def fun(st, n): suffix = [(0) for i in range(n)] prefix = [(0) for i in range(n)] count = 0 for i, val in enumerate(st): if val == '*': if i == 0: prefix[i] = 0 count += 1 else: prefix[i] = prefix[i - 1] count += 1 elif i == 0: prefix[i] = 0 count += 0 else: prefix[i] = prefix[i - 1] + count count += 0 count = 0 for i in range(n - 1, -1, -1): val = st[i] if val == '*': if i == n - 1: suffix[i] = 0 count += 1 else: suffix[i] = suffix[i + 1] count += 1 elif i == n - 1: suffix[i] = 0 count += 0 else: suffix[i] = suffix[i + 1] + count count += 0 ans = 10 ** 12 for i in range(n): if i != n - 1: ans = min(ans, prefix[i] + suffix[i + 1]) else: ans = min(ans, prefix[i]) print(ans) <|reserved_special_token_0|> for _ in range(T): n = int(input()) st = input() fun(st, n) <|reserved_special_token_1|> def fun(st, n): suffix = [(0) for i in range(n)] prefix = [(0) for i in range(n)] count = 0 for i, val in enumerate(st): if val == '*': if i == 0: prefix[i] = 0 count += 1 else: prefix[i] = prefix[i - 1] count += 1 elif i == 0: prefix[i] = 0 count += 0 else: prefix[i] = prefix[i - 1] + count count += 0 count = 0 for i in range(n - 1, -1, -1): val = st[i] if val == '*': if i == n - 1: suffix[i] = 0 
count += 1 else: suffix[i] = suffix[i + 1] count += 1 elif i == n - 1: suffix[i] = 0 count += 0 else: suffix[i] = suffix[i + 1] + count count += 0 ans = 10 ** 12 for i in range(n): if i != n - 1: ans = min(ans, prefix[i] + suffix[i + 1]) else: ans = min(ans, prefix[i]) print(ans) T = int(input()) for _ in range(T): n = int(input()) st = input() fun(st, n) <|reserved_special_token_1|> def fun(st,n): suffix=[0 for i in range(n)] prefix=[0 for i in range(n)] count=0 for i,val in enumerate(st): if(val=='*'): if(i==0): prefix[i]=0 count+=1 else: prefix[i]=prefix[i-1] count+=1 else: if(i==0): prefix[i]=0 count+=0 else: prefix[i]=prefix[i-1]+count count+=0 count=0 for i in range(n-1,-1,-1): val=st[i] if(val=='*'): if(i==n-1): suffix[i]=0 count+=1 else: suffix[i]=suffix[i+1] count+=1 else: if(i==n-1): suffix[i]=0 count+=0 else: suffix[i]=suffix[i+1]+count count+=0 ans=10**12 for i in range(n): if(i!=n-1): ans=min(ans,prefix[i]+suffix[i+1]) else: ans=min(ans,prefix[i]) print(ans) T = int(input()) for _ in range(T): n=int(input()) st=input() fun(st,n)
flexible
{ "blob_id": "77c7ca3391426d1e56e15a93ef3e6227a45140fc", "index": 2829, "step-1": "<mask token>\n", "step-2": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count += 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\n<mask token>\n", "step-3": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count += 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\n<mask token>\nfor _ in range(T):\n n = int(input())\n st = input()\n fun(st, n)\n", "step-4": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count 
+= 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\nT = int(input())\nfor _ in range(T):\n n = int(input())\n st = input()\n fun(st, n)\n", "step-5": "def fun(st,n):\n suffix=[0 for i in range(n)]\n prefix=[0 for i in range(n)]\n count=0\n for i,val in enumerate(st):\n if(val=='*'):\n if(i==0):\n prefix[i]=0\n count+=1\n else:\n prefix[i]=prefix[i-1]\n count+=1\n else:\n if(i==0):\n prefix[i]=0\n count+=0\n else:\n prefix[i]=prefix[i-1]+count\n count+=0\n count=0\n for i in range(n-1,-1,-1):\n val=st[i]\n if(val=='*'):\n if(i==n-1):\n suffix[i]=0\n count+=1\n else:\n suffix[i]=suffix[i+1]\n count+=1\n else:\n if(i==n-1):\n suffix[i]=0\n count+=0\n else:\n suffix[i]=suffix[i+1]+count\n count+=0\n ans=10**12\n for i in range(n):\n if(i!=n-1):\n ans=min(ans,prefix[i]+suffix[i+1])\n else:\n ans=min(ans,prefix[i])\n print(ans)\n\nT = int(input())\nfor _ in range(T):\n n=int(input())\n st=input()\n fun(st,n)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> app.register_blueprint(test_controller, url_prefix='/test') app.register_blueprint(user_controller, url_prefix='/') if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> from setup import app, manager from Users.controller import user_controller from Test.controller import test_controller app.register_blueprint(test_controller, url_prefix='/test') app.register_blueprint(user_controller, url_prefix='/') if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> from setup import app, manager from Users.controller import user_controller from Test.controller import test_controller app.register_blueprint(test_controller, url_prefix="/test") #registeting test_controller blueprint with the main "app" and asking it to handle all url that begins with "/test". For eg: http://127.0.0.1/test/anythingcanbehere/orhere/orhere all such urls will go the test_conrtoller file. For now we just have to defined endpoints "test_get", "test_post". Anything else will result in 404 not fond error. app.register_blueprint(user_controller, url_prefix="/") if __name__ == "__main__": app.run(debug=True) #manager.run()
flexible
{ "blob_id": "afa22db946f77e9b33a443657592c20fbea21eb1", "index": 6146, "step-1": "<mask token>\n", "step-2": "<mask token>\napp.register_blueprint(test_controller, url_prefix='/test')\napp.register_blueprint(user_controller, url_prefix='/')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "from setup import app, manager\nfrom Users.controller import user_controller\nfrom Test.controller import test_controller\napp.register_blueprint(test_controller, url_prefix='/test')\napp.register_blueprint(user_controller, url_prefix='/')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from setup import app, manager\nfrom Users.controller import user_controller\nfrom Test.controller import test_controller\n\napp.register_blueprint(test_controller, url_prefix=\"/test\") #registeting test_controller blueprint with the main \"app\" and asking it to handle all url that begins with \"/test\". For eg: http://127.0.0.1/test/anythingcanbehere/orhere/orhere all such urls will go the test_conrtoller file. For now we just have to defined endpoints \"test_get\", \"test_post\". Anything else will result in 404 not fond error.\napp.register_blueprint(user_controller, url_prefix=\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n #manager.run()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.contrib import admin from TestApp.models import Parcel # Register your models here. class ParcelAdmin(admin.ModelAdmin): list_display = ['billno','shippername','rate'] admin.site.register(Parcel,ParcelAdmin)
normal
{ "blob_id": "a550b9406e9dd301b863744bb28bc81fac0cd80c", "index": 9607, "step-1": "<mask token>\n\n\nclass ParcelAdmin(admin.ModelAdmin):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ParcelAdmin(admin.ModelAdmin):\n list_display = ['billno', 'shippername', 'rate']\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ParcelAdmin(admin.ModelAdmin):\n list_display = ['billno', 'shippername', 'rate']\n\n\nadmin.site.register(Parcel, ParcelAdmin)\n", "step-4": "from django.contrib import admin\nfrom TestApp.models import Parcel\n\n\nclass ParcelAdmin(admin.ModelAdmin):\n list_display = ['billno', 'shippername', 'rate']\n\n\nadmin.site.register(Parcel, ParcelAdmin)\n", "step-5": "from django.contrib import admin\nfrom TestApp.models import Parcel\n\n# Register your models here.\nclass ParcelAdmin(admin.ModelAdmin):\n list_display = ['billno','shippername','rate']\nadmin.site.register(Parcel,ParcelAdmin)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence import telegram import logging logging.basicConfig(format='%(asctime)s %(message)s\n', level=logging.INFO,filename='log.json') logger = logging.getLogger(__name__) def main(): # my_persistence = PicklePersistence(filename="users") #incomment if you need persistence # updater = Updater("",persistence=my_persistence,use_context=True) updater = Updater("",use_context=True) dp = updater.dispatcher jobs = updater.job_queue dp.add_error_handler(error) updater.start_polling() updater.idle() if __name__=="__main__": main()
normal
{ "blob_id": "0a90f29a4e18c2aed23cb31b4239d44d23526327", "index": 9133, "step-1": "<mask token>\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\n<mask token>\n", "step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\n<mask token>\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence\nimport telegram\nimport logging\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence\nimport telegram\nimport logging\n\n\nlogging.basicConfig(format='%(asctime)s %(message)s\\n',\n level=logging.INFO,filename='log.json')\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n\n # my_persistence = PicklePersistence(filename=\"users\") #incomment if you need persistence \n \n # updater = 
Updater(\"\",persistence=my_persistence,use_context=True)\n updater = Updater(\"\",use_context=True)\n\n dp = updater.dispatcher\n jobs = updater.job_queue\n \n\n \n dp.add_error_handler(error)\n\n updater.start_polling()\n \n updater.idle()\n\n\n\nif __name__==\"__main__\":\n main()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class IndexView(generic.ListView): template_name = 'players/players.html' context_object_name = 'players' def get_queryset(self): return list(chain(Player.objects.all(), Player._meta.get_fields())) <|reserved_special_token_0|> @permission_required('admin.can_addlog_entry') def player_league_delete(request, league): Player.objects.filter(league=league).delete() return redirect('players') <|reserved_special_token_1|> <|reserved_special_token_0|> class IndexView(generic.ListView): template_name = 'players/players.html' context_object_name = 'players' def get_queryset(self): return list(chain(Player.objects.all(), Player._meta.get_fields())) def rate(stats): sum = 0.0 sum += float(stats[4]) / float(stats[3]) * 90 / 30 sum += float(stats[5]) / float(stats[3]) * 90 / 40 sum += float(stats[6]) / float(stats[3]) * 90 / 2 sum += float(stats[7]) / float(stats[3]) * 90 / 1 sum += float(stats[8]) / float(stats[3]) * 90 / 3 sum += float(stats[9]) / float(stats[3]) * 90 / 1.5 sum += float(stats[10]) / float(stats[3]) * 90 / 5 sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2 sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5 sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5 sum += float(stats[14]) / float(stats[3]) * 90 / 0.5 sum += float(stats[15]) / float(stats[3]) * 90 / 11 sum += float(stats[16]) / float(stats[3]) * 90 / 4 sum += float(stats[17]) / float(stats[3]) * 90 / 1 sum += float(stats[18]) / float(stats[3]) * 90 / 2 sum += float(stats[19]) / float(stats[3]) * 90 / 1 sum += float(stats[20]) / float(stats[3]) * 90 / 1 sum += float(stats[21]) / float(stats[3]) * 90 / 1 sum += float(stats[22]) / float(stats[3]) * 90 / 2.5 sum += float(stats[23]) / float(stats[3]) * 90 / 1 sum += float(stats[24]) / float(stats[3]) * 90 / 2 sum += float(stats[25]) / float(stats[3]) * 90 / 1 sum += float(stats[26]) / float(stats[3]) * 90 / 5 sum += float(stats[27]) / float(stats[3]) * 90 / 0.5 sum += float(stats[28]) / float(stats[3]) * 90 / 10 return sum 
@permission_required('admin.can_addlog_entry') def player_upload(request): template = 'players/player_upload.html' prompt = {'order': ''} if request.method == 'GET': return render(request, template, prompt) csv_file = request.FILES['file'] if not csv_file.name.endswith('.csv'): messages.error(request, 'This is not a csv file') data_set = csv_file.read().decode('UTF-8') io_string = io.StringIO(data_set) next(io_string) for column in csv.reader(io_string, delimiter=':', quotechar='|'): for i, stat in enumerate(column): if i in [0, 1]: continue column[i] = column[i].replace('Åš', 'Ś') column[i] = column[i].replace(',', '.') column[i] = column[i].replace('km', '') column[i] = column[i].replace('Â\xa0', '') column[i] = column[i].replace('-', '0') if int(column[3]) < 180: continue if column[32] == '0': continue if not League.objects.filter(name=column[32]): League.objects.update_or_create(name=column[32]) if not Team.objects.filter(name=column[31]): Team.objects.update_or_create(league_id=League.objects.filter( name=column[32])[0].id, name=column[31]) _, created = Player.objects.update_or_create(team_id=2, name=column [0], age=column[2], position=column[1], minutes=column[3], accurate_passes=float(column[4]) / float(column[3]) * 90, passes=float(column[5]) / float(column[3]) * 90, created_situations=float(column[6]) / float(column[3]) * 90, key_passes=float(column[7]) / float(column[3]) * 90, dribble= float(column[8]) / float(column[3]) * 90, fouls_on=float(column [9]) / float(column[3]) * 90, offsides=float(column[10]) / float(column[3]) * 90, mistakes=float(column[11]) / float( column[3]) * 90, culpable_goals=float(column[12]) / float( column[3]) * 90, accurate_cross=float(column[13]) / float( column[3]) * 90, assists=float(column[14]) / float(column[3]) * 90, heads=float(column[15]) / float(column[3]) * 90, tackles= float(column[16]) / float(column[3]) * 90, key_heads=float( column[17]) / float(column[3]) * 90, interceptions=float(column [18]) / float(column[3]) * 90, 
catch_saves=float(column[19]) / float(column[3]) * 90, saves=float(column[20]) / float(column[3 ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) * 90, complete_tackles=float(column[22]) / float(column[3]) * 90, accurate_shots=float(column[23]) / float(column[3]) * 90, shots =float(column[24]) / float(column[3]) * 90, key_tackles=float( column[25]) / float(column[3]) * 90, win_heads=float(column[26] ) / float(column[3]) * 90, goals=float(column[27]) / float( column[3]) * 90, crosses=float(column[28]) / float(column[3]) * 90, rating=float(column[29]), club=column[31], league=column[32 ], rate=rate(column)) context = {} return render(request, template, context) <|reserved_special_token_0|> @permission_required('admin.can_addlog_entry') def player_club_delete(request, club): Player.objects.filter(club=club).delete() return redirect('players') @permission_required('admin.can_addlog_entry') def player_league_delete(request, league): Player.objects.filter(league=league).delete() return redirect('players') <|reserved_special_token_1|> <|reserved_special_token_0|> class IndexView(generic.ListView): template_name = 'players/players.html' context_object_name = 'players' def get_queryset(self): return list(chain(Player.objects.all(), Player._meta.get_fields())) def rate(stats): sum = 0.0 sum += float(stats[4]) / float(stats[3]) * 90 / 30 sum += float(stats[5]) / float(stats[3]) * 90 / 40 sum += float(stats[6]) / float(stats[3]) * 90 / 2 sum += float(stats[7]) / float(stats[3]) * 90 / 1 sum += float(stats[8]) / float(stats[3]) * 90 / 3 sum += float(stats[9]) / float(stats[3]) * 90 / 1.5 sum += float(stats[10]) / float(stats[3]) * 90 / 5 sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2 sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5 sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5 sum += float(stats[14]) / float(stats[3]) * 90 / 0.5 sum += float(stats[15]) / float(stats[3]) * 90 / 11 sum += float(stats[16]) / float(stats[3]) * 90 / 4 sum += 
float(stats[17]) / float(stats[3]) * 90 / 1 sum += float(stats[18]) / float(stats[3]) * 90 / 2 sum += float(stats[19]) / float(stats[3]) * 90 / 1 sum += float(stats[20]) / float(stats[3]) * 90 / 1 sum += float(stats[21]) / float(stats[3]) * 90 / 1 sum += float(stats[22]) / float(stats[3]) * 90 / 2.5 sum += float(stats[23]) / float(stats[3]) * 90 / 1 sum += float(stats[24]) / float(stats[3]) * 90 / 2 sum += float(stats[25]) / float(stats[3]) * 90 / 1 sum += float(stats[26]) / float(stats[3]) * 90 / 5 sum += float(stats[27]) / float(stats[3]) * 90 / 0.5 sum += float(stats[28]) / float(stats[3]) * 90 / 10 return sum @permission_required('admin.can_addlog_entry') def player_upload(request): template = 'players/player_upload.html' prompt = {'order': ''} if request.method == 'GET': return render(request, template, prompt) csv_file = request.FILES['file'] if not csv_file.name.endswith('.csv'): messages.error(request, 'This is not a csv file') data_set = csv_file.read().decode('UTF-8') io_string = io.StringIO(data_set) next(io_string) for column in csv.reader(io_string, delimiter=':', quotechar='|'): for i, stat in enumerate(column): if i in [0, 1]: continue column[i] = column[i].replace('Åš', 'Ś') column[i] = column[i].replace(',', '.') column[i] = column[i].replace('km', '') column[i] = column[i].replace('Â\xa0', '') column[i] = column[i].replace('-', '0') if int(column[3]) < 180: continue if column[32] == '0': continue if not League.objects.filter(name=column[32]): League.objects.update_or_create(name=column[32]) if not Team.objects.filter(name=column[31]): Team.objects.update_or_create(league_id=League.objects.filter( name=column[32])[0].id, name=column[31]) _, created = Player.objects.update_or_create(team_id=2, name=column [0], age=column[2], position=column[1], minutes=column[3], accurate_passes=float(column[4]) / float(column[3]) * 90, passes=float(column[5]) / float(column[3]) * 90, created_situations=float(column[6]) / float(column[3]) * 90, 
key_passes=float(column[7]) / float(column[3]) * 90, dribble= float(column[8]) / float(column[3]) * 90, fouls_on=float(column [9]) / float(column[3]) * 90, offsides=float(column[10]) / float(column[3]) * 90, mistakes=float(column[11]) / float( column[3]) * 90, culpable_goals=float(column[12]) / float( column[3]) * 90, accurate_cross=float(column[13]) / float( column[3]) * 90, assists=float(column[14]) / float(column[3]) * 90, heads=float(column[15]) / float(column[3]) * 90, tackles= float(column[16]) / float(column[3]) * 90, key_heads=float( column[17]) / float(column[3]) * 90, interceptions=float(column [18]) / float(column[3]) * 90, catch_saves=float(column[19]) / float(column[3]) * 90, saves=float(column[20]) / float(column[3 ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) * 90, complete_tackles=float(column[22]) / float(column[3]) * 90, accurate_shots=float(column[23]) / float(column[3]) * 90, shots =float(column[24]) / float(column[3]) * 90, key_tackles=float( column[25]) / float(column[3]) * 90, win_heads=float(column[26] ) / float(column[3]) * 90, goals=float(column[27]) / float( column[3]) * 90, crosses=float(column[28]) / float(column[3]) * 90, rating=float(column[29]), club=column[31], league=column[32 ], rate=rate(column)) context = {} return render(request, template, context) @permission_required('admin.can_addlog_entry') def player_delete(request): Player.objects.all().delete() return redirect('player_upload') @permission_required('admin.can_addlog_entry') def player_club_delete(request, club): Player.objects.filter(club=club).delete() return redirect('players') @permission_required('admin.can_addlog_entry') def player_league_delete(request, league): Player.objects.filter(league=league).delete() return redirect('players') <|reserved_special_token_1|> import csv, io from django.shortcuts import render, redirect from django.contrib import messages from django.contrib.auth.decorators import permission_required from django.views import 
generic from itertools import chain from .models import Player, League, Team class IndexView(generic.ListView): template_name = 'players/players.html' context_object_name = 'players' def get_queryset(self): return list(chain(Player.objects.all(), Player._meta.get_fields())) def rate(stats): sum = 0.0 sum += float(stats[4]) / float(stats[3]) * 90 / 30 sum += float(stats[5]) / float(stats[3]) * 90 / 40 sum += float(stats[6]) / float(stats[3]) * 90 / 2 sum += float(stats[7]) / float(stats[3]) * 90 / 1 sum += float(stats[8]) / float(stats[3]) * 90 / 3 sum += float(stats[9]) / float(stats[3]) * 90 / 1.5 sum += float(stats[10]) / float(stats[3]) * 90 / 5 sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2 sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5 sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5 sum += float(stats[14]) / float(stats[3]) * 90 / 0.5 sum += float(stats[15]) / float(stats[3]) * 90 / 11 sum += float(stats[16]) / float(stats[3]) * 90 / 4 sum += float(stats[17]) / float(stats[3]) * 90 / 1 sum += float(stats[18]) / float(stats[3]) * 90 / 2 sum += float(stats[19]) / float(stats[3]) * 90 / 1 sum += float(stats[20]) / float(stats[3]) * 90 / 1 sum += float(stats[21]) / float(stats[3]) * 90 / 1 sum += float(stats[22]) / float(stats[3]) * 90 / 2.5 sum += float(stats[23]) / float(stats[3]) * 90 / 1 sum += float(stats[24]) / float(stats[3]) * 90 / 2 sum += float(stats[25]) / float(stats[3]) * 90 / 1 sum += float(stats[26]) / float(stats[3]) * 90 / 5 sum += float(stats[27]) / float(stats[3]) * 90 / 0.5 sum += float(stats[28]) / float(stats[3]) * 90 / 10 return sum @permission_required('admin.can_addlog_entry') def player_upload(request): template = 'players/player_upload.html' prompt = {'order': ''} if request.method == 'GET': return render(request, template, prompt) csv_file = request.FILES['file'] if not csv_file.name.endswith('.csv'): messages.error(request, 'This is not a csv file') data_set = csv_file.read().decode('UTF-8') io_string = 
io.StringIO(data_set) next(io_string) for column in csv.reader(io_string, delimiter=':', quotechar='|'): for i, stat in enumerate(column): if i in [0, 1]: continue column[i] = column[i].replace('Åš', 'Ś') column[i] = column[i].replace(',', '.') column[i] = column[i].replace('km', '') column[i] = column[i].replace('Â\xa0', '') column[i] = column[i].replace('-', '0') if int(column[3]) < 180: continue if column[32] == '0': continue if not League.objects.filter(name=column[32]): League.objects.update_or_create(name=column[32]) if not Team.objects.filter(name=column[31]): Team.objects.update_or_create(league_id=League.objects.filter( name=column[32])[0].id, name=column[31]) _, created = Player.objects.update_or_create(team_id=2, name=column [0], age=column[2], position=column[1], minutes=column[3], accurate_passes=float(column[4]) / float(column[3]) * 90, passes=float(column[5]) / float(column[3]) * 90, created_situations=float(column[6]) / float(column[3]) * 90, key_passes=float(column[7]) / float(column[3]) * 90, dribble= float(column[8]) / float(column[3]) * 90, fouls_on=float(column [9]) / float(column[3]) * 90, offsides=float(column[10]) / float(column[3]) * 90, mistakes=float(column[11]) / float( column[3]) * 90, culpable_goals=float(column[12]) / float( column[3]) * 90, accurate_cross=float(column[13]) / float( column[3]) * 90, assists=float(column[14]) / float(column[3]) * 90, heads=float(column[15]) / float(column[3]) * 90, tackles= float(column[16]) / float(column[3]) * 90, key_heads=float( column[17]) / float(column[3]) * 90, interceptions=float(column [18]) / float(column[3]) * 90, catch_saves=float(column[19]) / float(column[3]) * 90, saves=float(column[20]) / float(column[3 ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) * 90, complete_tackles=float(column[22]) / float(column[3]) * 90, accurate_shots=float(column[23]) / float(column[3]) * 90, shots =float(column[24]) / float(column[3]) * 90, key_tackles=float( column[25]) / float(column[3]) * 
90, win_heads=float(column[26] ) / float(column[3]) * 90, goals=float(column[27]) / float( column[3]) * 90, crosses=float(column[28]) / float(column[3]) * 90, rating=float(column[29]), club=column[31], league=column[32 ], rate=rate(column)) context = {} return render(request, template, context) @permission_required('admin.can_addlog_entry') def player_delete(request): Player.objects.all().delete() return redirect('player_upload') @permission_required('admin.can_addlog_entry') def player_club_delete(request, club): Player.objects.filter(club=club).delete() return redirect('players') @permission_required('admin.can_addlog_entry') def player_league_delete(request, league): Player.objects.filter(league=league).delete() return redirect('players') <|reserved_special_token_1|> import csv, io from django.shortcuts import render, redirect from django.contrib import messages from django.contrib.auth.decorators import permission_required from django.views import generic from itertools import chain from .models import Player, League, Team class IndexView(generic.ListView): template_name = 'players/players.html' context_object_name = 'players' def get_queryset(self): return list(chain(Player.objects.all(), Player._meta.get_fields())) def rate(stats): sum = 0. 
sum += float(stats[4]) / float(stats[3]) * 90 / 30 sum += float(stats[5]) / float(stats[3]) * 90 / 40 sum += float(stats[6]) / float(stats[3]) * 90 / 2 sum += float(stats[7]) / float(stats[3]) * 90 / 1 sum += float(stats[8]) / float(stats[3]) * 90 / 3 sum += float(stats[9]) / float(stats[3]) * 90 / 1.5 sum += float(stats[10]) / float(stats[3]) * 90 / 5 sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2 sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5 sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5 sum += float(stats[14]) / float(stats[3]) * 90 / 0.5 sum += float(stats[15]) / float(stats[3]) * 90 / 11 sum += float(stats[16]) / float(stats[3]) * 90 / 4 sum += float(stats[17]) / float(stats[3]) * 90 / 1 sum += float(stats[18]) / float(stats[3]) * 90 / 2 sum += float(stats[19]) / float(stats[3]) * 90 / 1 sum += float(stats[20]) / float(stats[3]) * 90 / 1 sum += float(stats[21]) / float(stats[3]) * 90 / 1 sum += float(stats[22]) / float(stats[3]) * 90 / 2.5 sum += float(stats[23]) / float(stats[3]) * 90 / 1 sum += float(stats[24]) / float(stats[3]) * 90 / 2 sum += float(stats[25]) / float(stats[3]) * 90 / 1 sum += float(stats[26]) / float(stats[3]) * 90 / 5 sum += float(stats[27]) / float(stats[3]) * 90 / 0.5 sum += float(stats[28]) / float(stats[3]) * 90 / 10 return sum @permission_required('admin.can_addlog_entry') def player_upload(request): template = 'players/player_upload.html' prompt = { 'order': '' } if request.method == "GET": return render(request, template, prompt) csv_file = request.FILES['file'] if not csv_file.name.endswith('.csv'): messages.error(request, 'This is not a csv file') data_set = csv_file.read().decode('UTF-8') io_string = io.StringIO(data_set) next(io_string) for column in csv.reader(io_string, delimiter=':', quotechar='|'): for i, stat in enumerate(column): if i in [0, 1]: continue column[i] = column[i].replace('Åš', 'Ś') column[i] = column[i].replace(',', '.') column[i] = column[i].replace('km', '') column[i] = 
column[i].replace('Â\xa0', '') column[i] = column[i].replace('-', '0') if int(column[3]) < 180: continue if column[32] == '0': continue if not League.objects.filter(name=column[32]): League.objects.update_or_create( name=column[32] ) if not Team.objects.filter(name=column[31]): Team.objects.update_or_create( league_id=League.objects.filter(name=column[32])[0].id, name=column[31] ) _, created = Player.objects.update_or_create( team_id=2, name=column[0], age=column[2], position=column[1], minutes=column[3], accurate_passes=float(column[4])/float(column[3])*90, passes=float(column[5])/float(column[3])*90, created_situations=float(column[6])/float(column[3])*90, key_passes=float(column[7])/float(column[3])*90, dribble=float(column[8])/float(column[3])*90, fouls_on=float(column[9])/float(column[3])*90, offsides=float(column[10])/float(column[3])*90, mistakes=float(column[11])/float(column[3])*90, culpable_goals=float(column[12])/float(column[3])*90, accurate_cross=float(column[13])/float(column[3])*90, assists=float(column[14])/float(column[3])*90, heads=float(column[15])/float(column[3])*90, tackles=float(column[16])/float(column[3])*90, key_heads=float(column[17])/float(column[3])*90, interceptions=float(column[18])/float(column[3])*90, catch_saves=float(column[19])/float(column[3])*90, saves=float(column[20])/float(column[3])*90, saves_on_corner=float(column[21])/float(column[3])*90, complete_tackles=float(column[22])/float(column[3])*90, accurate_shots=float(column[23])/float(column[3])*90, shots=float(column[24])/float(column[3])*90, key_tackles=float(column[25])/float(column[3])*90, win_heads=float(column[26])/float(column[3])*90, goals=float(column[27])/float(column[3])*90, crosses=float(column[28])/float(column[3])*90, rating=float(column[29]), club=column[31], league=column[32], rate=rate(column) ) context = {} return render(request, template, context) @permission_required('admin.can_addlog_entry') def player_delete(request): Player.objects.all().delete() 
return redirect('player_upload') @permission_required('admin.can_addlog_entry') def player_club_delete(request, club): Player.objects.filter(club=club).delete() return redirect('players') @permission_required('admin.can_addlog_entry') def player_league_delete(request, league): Player.objects.filter(league=league).delete() return redirect('players')
flexible
{ "blob_id": "bce794616889b80c152a8ebec8d02e49a96684e9", "index": 2955, "step-1": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\n<mask token>\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n", "step-2": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += float(stats[26]) / float(stats[3]) * 90 
/ 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, assists=float(column[14]) / float(column[3]) *\n 
90, heads=float(column[15]) / float(column[3]) * 90, tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n<mask token>\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n", "step-3": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= 
float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n 
Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, assists=float(column[14]) / float(column[3]) *\n 90, heads=float(column[15]) / float(column[3]) * 90, tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_delete(request):\n Player.objects.all().delete()\n return redirect('player_upload')\n\n\n@permission_required('admin.can_addlog_entry')\ndef 
player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n", "step-4": "import csv, io\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.views import generic\nfrom itertools import chain\nfrom .models import Player, League, Team\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += 
float(stats[26]) / float(stats[3]) * 90 / 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, 
assists=float(column[14]) / float(column[3]) *\n 90, heads=float(column[15]) / float(column[3]) * 90, tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_delete(request):\n Player.objects.all().delete()\n return redirect('player_upload')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n", "step-5": "import csv, io\r\nfrom django.shortcuts import render, redirect\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import permission_required\r\nfrom django.views import generic\r\nfrom itertools import chain\r\n\r\nfrom .models import Player, League, Team\r\n\r\n\r\nclass IndexView(generic.ListView):\r\n template_name = 'players/players.html'\r\n context_object_name = 'players'\r\n\r\n def get_queryset(self):\r\n return list(chain(Player.objects.all(), 
Player._meta.get_fields()))\r\n\r\n\r\ndef rate(stats):\r\n sum = 0.\r\n\r\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\r\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\r\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\r\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\r\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\r\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\r\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\r\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\r\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\r\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\r\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\r\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\r\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\r\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\r\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\r\n\r\n return sum\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_upload(request):\r\n template = 'players/player_upload.html'\r\n\r\n prompt = {\r\n 'order': ''\r\n }\r\n\r\n if request.method == \"GET\":\r\n return render(request, template, prompt)\r\n\r\n csv_file = request.FILES['file']\r\n\r\n if not csv_file.name.endswith('.csv'):\r\n messages.error(request, 'This is not a csv file')\r\n\r\n data_set = csv_file.read().decode('UTF-8')\r\n io_string = io.StringIO(data_set)\r\n next(io_string)\r\n for 
column in csv.reader(io_string, delimiter=':', quotechar='|'):\r\n for i, stat in enumerate(column):\r\n if i in [0, 1]:\r\n continue\r\n\r\n column[i] = column[i].replace('Åš', 'Ś')\r\n column[i] = column[i].replace(',', '.')\r\n column[i] = column[i].replace('km', '')\r\n column[i] = column[i].replace('Â\\xa0', '')\r\n column[i] = column[i].replace('-', '0')\r\n if int(column[3]) < 180:\r\n continue\r\n if column[32] == '0':\r\n continue\r\n if not League.objects.filter(name=column[32]):\r\n League.objects.update_or_create(\r\n name=column[32]\r\n )\r\n if not Team.objects.filter(name=column[31]):\r\n Team.objects.update_or_create(\r\n league_id=League.objects.filter(name=column[32])[0].id,\r\n name=column[31]\r\n )\r\n _, created = Player.objects.update_or_create(\r\n team_id=2,\r\n name=column[0],\r\n age=column[2],\r\n position=column[1],\r\n minutes=column[3],\r\n accurate_passes=float(column[4])/float(column[3])*90,\r\n passes=float(column[5])/float(column[3])*90,\r\n created_situations=float(column[6])/float(column[3])*90,\r\n key_passes=float(column[7])/float(column[3])*90,\r\n dribble=float(column[8])/float(column[3])*90,\r\n fouls_on=float(column[9])/float(column[3])*90,\r\n offsides=float(column[10])/float(column[3])*90,\r\n mistakes=float(column[11])/float(column[3])*90,\r\n culpable_goals=float(column[12])/float(column[3])*90,\r\n accurate_cross=float(column[13])/float(column[3])*90,\r\n assists=float(column[14])/float(column[3])*90,\r\n heads=float(column[15])/float(column[3])*90,\r\n tackles=float(column[16])/float(column[3])*90,\r\n key_heads=float(column[17])/float(column[3])*90,\r\n interceptions=float(column[18])/float(column[3])*90,\r\n catch_saves=float(column[19])/float(column[3])*90,\r\n saves=float(column[20])/float(column[3])*90,\r\n saves_on_corner=float(column[21])/float(column[3])*90,\r\n complete_tackles=float(column[22])/float(column[3])*90,\r\n accurate_shots=float(column[23])/float(column[3])*90,\r\n 
shots=float(column[24])/float(column[3])*90,\r\n key_tackles=float(column[25])/float(column[3])*90,\r\n win_heads=float(column[26])/float(column[3])*90,\r\n goals=float(column[27])/float(column[3])*90,\r\n crosses=float(column[28])/float(column[3])*90,\r\n rating=float(column[29]),\r\n club=column[31],\r\n league=column[32],\r\n rate=rate(column)\r\n )\r\n\r\n context = {}\r\n return render(request, template, context)\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_delete(request):\r\n Player.objects.all().delete()\r\n return redirect('player_upload')\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_club_delete(request, club):\r\n Player.objects.filter(club=club).delete()\r\n return redirect('players')\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_league_delete(request, league):\r\n Player.objects.filter(league=league).delete()\r\n return redirect('players')\r\n", "step-ids": [ 4, 7, 8, 9, 10 ] }
[ 4, 7, 8, 9, 10 ]
""" Subfunction A31 is responsible for inputting the component parameters and then using the information about the component to determine the pressure drop across that component ---------------------------------------------------------- Using data structure from /SysEng/jsonParameterFileFormat/ recall that each cell is only present if there is data stored and thus we can call "if "parameterName" in dict.keys()" to see if it is there. """ #Need math function import math class A31: def __init__(self,dict): #dict is for dictionary self.dict = dict #Now we set several new local variables for ease of calling them later self.CID = self.dict["CID"] self.val = self.dict["values"] self.calc = self.val["calculated"] self.comp = self.val["component"] self.fluid = self.val["fluid"] # Create a new key for the pressure drop self.calc["pressureDrop"] = {} #We also need to define 'g' for this method (in SI) self.g = 9.81 # #Set up the logic tree to see what we need to do # #This method of finding the pressure drop for each different type # of component is WAY underoptimized. Feel free to improve it! 
:) if self.CID == 'LNE': self.calc['pressureDrop']["value"] = self.lineCalc() elif self.CID == 'BND': self.calc['pressureDrop']["value"] = self.bendCalc() elif self.CID == 'VLV': self.calc['pressureDrop']["value"] = False elif self.CID == 'ORF': self.calc['pressureDrop']["value"] = False elif self.CID == 'INJ': self.calc['pressureDrop']["value"] = False elif self.CID == 'CAT': self.calc['pressureDrop']["value"] = False elif self.CID == 'BND': self.calc['pressureDrop']["value"] = False elif self.CID == 'SPL': self.calc['pressureDrop']["value"] = False elif self.CID == 'JON': self.calc['pressureDrop']["value"] = False elif self.CID == 'EXP': self.calc['pressureDrop']["value"] = self.expansionCalc() elif self.CID == 'CON': self.calc['pressureDrop']["value"] = self.contractionCalc() if self.calc['pressureDrop']["value"] == False: raise NotImplementedError('Calcuations for a '+ str(self.dict['CID'])+' have not yet '+ 'been implemented in this' + 'pre-alpha state.') else: self.calc["pressureDrop"]["unit"] = "Pa" self.dict["values"]["calculated"]["pressureDrop"] = self.calc["pressureDrop"] def expansionCalc(self): q = self.calc['dynamicPressure'] kt = self.calc['ktLosses'] pDrop = kt * q return(pDrop) def contractionCalc(self): f = self.calc['frictionFactor'] kt = self.calc['ktLosses'] A1 = self.comp['upstreamArea']["value"] A2 = self.comp['downstreamArea']["value"] q = self.calc['dynamicPressure'] D1 = 2 * math.sqrt(A1/math.pi) D2 = 2 * math.sqrt(A2/math.pi) cL = self.comp['contractionLength'] if self.comp['contractionAngledOrCurved']["value"] == 'angle': angle = self.comp['angle']["value"] if angle < math.pi/4: pDrop = ( kt + 4*f * ( cL / ( (D1 + D2) / 2 ) ) ) * q else: pDrop = kt * q else: pDrop = kt * q return(pDrop) def lineCalc(self): # Create some local variables for ease of use rho = self.fluid["density"]["value"] q = self.calc["dynamicPressure"] g = self.g z = self.comp["height"]["value"] f = self.calc["frictionFactor"] x = self.comp["length"]["value"] Dh = 
self.comp["hydraulicDiameter"]["value"] pDrop = rho*g*z + q * ((4*f*x)/Dh) return(pDrop) def bendCalc(self): rho = self.fluid['density']["value"] g = self.g z = self.comp['height']["value"] f = self.calc['frictionFactor'] x = self.comp['length']["value"] Dh = self.comp['hydraulicDiameter']["value"] kt = self.calc['ktLosses'] pDrop = rho*g*z + q * ( ((4*f*x)/Dh) + kt ) return(pDrop)
normal
{ "blob_id": "4b8038ddea60f371aa8da168ea4456372d6f0388", "index": 2357, "step-1": "<mask token>\n\n\nclass A31:\n\n def __init__(self, dict):\n self.dict = dict\n self.CID = self.dict['CID']\n self.val = self.dict['values']\n self.calc = self.val['calculated']\n self.comp = self.val['component']\n self.fluid = self.val['fluid']\n self.calc['pressureDrop'] = {}\n self.g = 9.81\n if self.CID == 'LNE':\n self.calc['pressureDrop']['value'] = self.lineCalc()\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = self.bendCalc()\n elif self.CID == 'VLV':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'ORF':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'INJ':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'CAT':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'SPL':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'JON':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'EXP':\n self.calc['pressureDrop']['value'] = self.expansionCalc()\n elif self.CID == 'CON':\n self.calc['pressureDrop']['value'] = self.contractionCalc()\n if self.calc['pressureDrop']['value'] == False:\n raise NotImplementedError('Calcuations for a ' + str(self.dict[\n 'CID']) + ' have not yet ' + 'been implemented in this' +\n 'pre-alpha state.')\n else:\n self.calc['pressureDrop']['unit'] = 'Pa'\n self.dict['values']['calculated']['pressureDrop'] = self.calc[\n 'pressureDrop']\n <mask token>\n\n def contractionCalc(self):\n f = self.calc['frictionFactor']\n kt = self.calc['ktLosses']\n A1 = self.comp['upstreamArea']['value']\n A2 = self.comp['downstreamArea']['value']\n q = self.calc['dynamicPressure']\n D1 = 2 * math.sqrt(A1 / math.pi)\n D2 = 2 * math.sqrt(A2 / math.pi)\n cL = self.comp['contractionLength']\n if self.comp['contractionAngledOrCurved']['value'] == 'angle':\n angle = self.comp['angle']['value']\n if angle < 
math.pi / 4:\n pDrop = (kt + 4 * f * (cL / ((D1 + D2) / 2))) * q\n else:\n pDrop = kt * q\n else:\n pDrop = kt * q\n return pDrop\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass A31:\n\n def __init__(self, dict):\n self.dict = dict\n self.CID = self.dict['CID']\n self.val = self.dict['values']\n self.calc = self.val['calculated']\n self.comp = self.val['component']\n self.fluid = self.val['fluid']\n self.calc['pressureDrop'] = {}\n self.g = 9.81\n if self.CID == 'LNE':\n self.calc['pressureDrop']['value'] = self.lineCalc()\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = self.bendCalc()\n elif self.CID == 'VLV':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'ORF':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'INJ':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'CAT':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'SPL':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'JON':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'EXP':\n self.calc['pressureDrop']['value'] = self.expansionCalc()\n elif self.CID == 'CON':\n self.calc['pressureDrop']['value'] = self.contractionCalc()\n if self.calc['pressureDrop']['value'] == False:\n raise NotImplementedError('Calcuations for a ' + str(self.dict[\n 'CID']) + ' have not yet ' + 'been implemented in this' +\n 'pre-alpha state.')\n else:\n self.calc['pressureDrop']['unit'] = 'Pa'\n self.dict['values']['calculated']['pressureDrop'] = self.calc[\n 'pressureDrop']\n <mask token>\n\n def contractionCalc(self):\n f = self.calc['frictionFactor']\n kt = self.calc['ktLosses']\n A1 = self.comp['upstreamArea']['value']\n A2 = self.comp['downstreamArea']['value']\n q = self.calc['dynamicPressure']\n D1 = 2 * math.sqrt(A1 / math.pi)\n D2 = 2 * math.sqrt(A2 / math.pi)\n cL = self.comp['contractionLength']\n if 
self.comp['contractionAngledOrCurved']['value'] == 'angle':\n angle = self.comp['angle']['value']\n if angle < math.pi / 4:\n pDrop = (kt + 4 * f * (cL / ((D1 + D2) / 2))) * q\n else:\n pDrop = kt * q\n else:\n pDrop = kt * q\n return pDrop\n\n def lineCalc(self):\n rho = self.fluid['density']['value']\n q = self.calc['dynamicPressure']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n pDrop = rho * g * z + q * (4 * f * x / Dh)\n return pDrop\n\n def bendCalc(self):\n rho = self.fluid['density']['value']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n kt = self.calc['ktLosses']\n pDrop = rho * g * z + q * (4 * f * x / Dh + kt)\n return pDrop\n", "step-3": "<mask token>\n\n\nclass A31:\n\n def __init__(self, dict):\n self.dict = dict\n self.CID = self.dict['CID']\n self.val = self.dict['values']\n self.calc = self.val['calculated']\n self.comp = self.val['component']\n self.fluid = self.val['fluid']\n self.calc['pressureDrop'] = {}\n self.g = 9.81\n if self.CID == 'LNE':\n self.calc['pressureDrop']['value'] = self.lineCalc()\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = self.bendCalc()\n elif self.CID == 'VLV':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'ORF':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'INJ':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'CAT':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'SPL':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'JON':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'EXP':\n self.calc['pressureDrop']['value'] = self.expansionCalc()\n elif self.CID == 'CON':\n 
self.calc['pressureDrop']['value'] = self.contractionCalc()\n if self.calc['pressureDrop']['value'] == False:\n raise NotImplementedError('Calcuations for a ' + str(self.dict[\n 'CID']) + ' have not yet ' + 'been implemented in this' +\n 'pre-alpha state.')\n else:\n self.calc['pressureDrop']['unit'] = 'Pa'\n self.dict['values']['calculated']['pressureDrop'] = self.calc[\n 'pressureDrop']\n\n def expansionCalc(self):\n q = self.calc['dynamicPressure']\n kt = self.calc['ktLosses']\n pDrop = kt * q\n return pDrop\n\n def contractionCalc(self):\n f = self.calc['frictionFactor']\n kt = self.calc['ktLosses']\n A1 = self.comp['upstreamArea']['value']\n A2 = self.comp['downstreamArea']['value']\n q = self.calc['dynamicPressure']\n D1 = 2 * math.sqrt(A1 / math.pi)\n D2 = 2 * math.sqrt(A2 / math.pi)\n cL = self.comp['contractionLength']\n if self.comp['contractionAngledOrCurved']['value'] == 'angle':\n angle = self.comp['angle']['value']\n if angle < math.pi / 4:\n pDrop = (kt + 4 * f * (cL / ((D1 + D2) / 2))) * q\n else:\n pDrop = kt * q\n else:\n pDrop = kt * q\n return pDrop\n\n def lineCalc(self):\n rho = self.fluid['density']['value']\n q = self.calc['dynamicPressure']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n pDrop = rho * g * z + q * (4 * f * x / Dh)\n return pDrop\n\n def bendCalc(self):\n rho = self.fluid['density']['value']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n kt = self.calc['ktLosses']\n pDrop = rho * g * z + q * (4 * f * x / Dh + kt)\n return pDrop\n", "step-4": "<mask token>\nimport math\n\n\nclass A31:\n\n def __init__(self, dict):\n self.dict = dict\n self.CID = self.dict['CID']\n self.val = self.dict['values']\n self.calc = self.val['calculated']\n self.comp = self.val['component']\n self.fluid = 
self.val['fluid']\n self.calc['pressureDrop'] = {}\n self.g = 9.81\n if self.CID == 'LNE':\n self.calc['pressureDrop']['value'] = self.lineCalc()\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = self.bendCalc()\n elif self.CID == 'VLV':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'ORF':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'INJ':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'CAT':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'BND':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'SPL':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'JON':\n self.calc['pressureDrop']['value'] = False\n elif self.CID == 'EXP':\n self.calc['pressureDrop']['value'] = self.expansionCalc()\n elif self.CID == 'CON':\n self.calc['pressureDrop']['value'] = self.contractionCalc()\n if self.calc['pressureDrop']['value'] == False:\n raise NotImplementedError('Calcuations for a ' + str(self.dict[\n 'CID']) + ' have not yet ' + 'been implemented in this' +\n 'pre-alpha state.')\n else:\n self.calc['pressureDrop']['unit'] = 'Pa'\n self.dict['values']['calculated']['pressureDrop'] = self.calc[\n 'pressureDrop']\n\n def expansionCalc(self):\n q = self.calc['dynamicPressure']\n kt = self.calc['ktLosses']\n pDrop = kt * q\n return pDrop\n\n def contractionCalc(self):\n f = self.calc['frictionFactor']\n kt = self.calc['ktLosses']\n A1 = self.comp['upstreamArea']['value']\n A2 = self.comp['downstreamArea']['value']\n q = self.calc['dynamicPressure']\n D1 = 2 * math.sqrt(A1 / math.pi)\n D2 = 2 * math.sqrt(A2 / math.pi)\n cL = self.comp['contractionLength']\n if self.comp['contractionAngledOrCurved']['value'] == 'angle':\n angle = self.comp['angle']['value']\n if angle < math.pi / 4:\n pDrop = (kt + 4 * f * (cL / ((D1 + D2) / 2))) * q\n else:\n pDrop = kt * q\n else:\n pDrop = kt * q\n return pDrop\n\n def lineCalc(self):\n rho = self.fluid['density']['value']\n q = 
self.calc['dynamicPressure']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n pDrop = rho * g * z + q * (4 * f * x / Dh)\n return pDrop\n\n def bendCalc(self):\n rho = self.fluid['density']['value']\n g = self.g\n z = self.comp['height']['value']\n f = self.calc['frictionFactor']\n x = self.comp['length']['value']\n Dh = self.comp['hydraulicDiameter']['value']\n kt = self.calc['ktLosses']\n pDrop = rho * g * z + q * (4 * f * x / Dh + kt)\n return pDrop\n", "step-5": "\"\"\"\nSubfunction A31 is responsible for inputting the component parameters\nand then using the information about the component to determine\nthe pressure drop across that component\n----------------------------------------------------------\nUsing data structure from /SysEng/jsonParameterFileFormat/ recall that each\ncell is only present if there is data stored and thus\nwe can call \"if \"parameterName\" in dict.keys()\" to see if it is there.\n\"\"\"\n\n#Need math function\nimport math\n\n\nclass A31:\n def __init__(self,dict): #dict is for dictionary\n self.dict = dict\n #Now we set several new local variables for ease of calling them later\n self.CID = self.dict[\"CID\"]\n self.val = self.dict[\"values\"]\n self.calc = self.val[\"calculated\"]\n self.comp = self.val[\"component\"]\n self.fluid = self.val[\"fluid\"]\n # Create a new key for the pressure drop\n self.calc[\"pressureDrop\"] = {}\n #We also need to define 'g' for this method (in SI)\n self.g = 9.81 \n #\n #Set up the logic tree to see what we need to do\n #\n #This method of finding the pressure drop for each different type\n # of component is WAY underoptimized. Feel free to improve it! 
:)\n if self.CID == 'LNE':\n self.calc['pressureDrop'][\"value\"] = self.lineCalc()\n elif self.CID == 'BND':\n self.calc['pressureDrop'][\"value\"] = self.bendCalc()\n elif self.CID == 'VLV':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'ORF':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'INJ':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'CAT':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'BND':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'SPL':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'JON':\n self.calc['pressureDrop'][\"value\"] = False\n elif self.CID == 'EXP':\n self.calc['pressureDrop'][\"value\"] = self.expansionCalc()\n elif self.CID == 'CON':\n self.calc['pressureDrop'][\"value\"] = self.contractionCalc()\n if self.calc['pressureDrop'][\"value\"] == False:\n raise NotImplementedError('Calcuations for a '+\n str(self.dict['CID'])+' have not yet '+\n 'been implemented in this' +\n 'pre-alpha state.')\n else:\n self.calc[\"pressureDrop\"][\"unit\"] = \"Pa\"\n self.dict[\"values\"][\"calculated\"][\"pressureDrop\"] = self.calc[\"pressureDrop\"]\n\n def expansionCalc(self):\n q = self.calc['dynamicPressure']\n kt = self.calc['ktLosses']\n pDrop = kt * q\n return(pDrop)\n\n def contractionCalc(self):\n f = self.calc['frictionFactor']\n kt = self.calc['ktLosses']\n A1 = self.comp['upstreamArea'][\"value\"]\n A2 = self.comp['downstreamArea'][\"value\"]\n q = self.calc['dynamicPressure']\n D1 = 2 * math.sqrt(A1/math.pi)\n D2 = 2 * math.sqrt(A2/math.pi)\n cL = self.comp['contractionLength']\n if self.comp['contractionAngledOrCurved'][\"value\"] == 'angle':\n angle = self.comp['angle'][\"value\"]\n if angle < math.pi/4:\n pDrop = (\n kt + 4*f * (\n cL / (\n (D1 + D2) / 2\n )\n )\n ) * q\n else:\n pDrop = kt * q\n else:\n pDrop = kt * q\n return(pDrop) \n\n def lineCalc(self):\n # Create some local variables for ease of use\n rho = 
self.fluid[\"density\"][\"value\"]\n q = self.calc[\"dynamicPressure\"]\n g = self.g\n z = self.comp[\"height\"][\"value\"]\n f = self.calc[\"frictionFactor\"]\n x = self.comp[\"length\"][\"value\"]\n Dh = self.comp[\"hydraulicDiameter\"][\"value\"]\n pDrop = rho*g*z + q * ((4*f*x)/Dh)\n return(pDrop)\n\n def bendCalc(self):\n rho = self.fluid['density'][\"value\"]\n g = self.g\n z = self.comp['height'][\"value\"]\n f = self.calc['frictionFactor']\n x = self.comp['length'][\"value\"]\n Dh = self.comp['hydraulicDiameter'][\"value\"]\n kt = self.calc['ktLosses']\n pDrop = rho*g*z + q * (\n ((4*f*x)/Dh) + kt\n )\n return(pDrop)\n \n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
#!/usr/bin/python """ An extensible private pypi index. NOTES ON PACKAGE NAMES ---------------------- MPyPi tries the following when it does not find a package with the given name in the index: - replaces all _ with - and - lowercases the package name """ from __future__ import print_function from __future__ import unicode_literals import cgi import re from .util import PY2, PY3 if PY2: from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer else: from http.server import BaseHTTPRequestHandler, HTTPServer # --- format strings ENTRY_FMT = """<a href="{url}">{name}</a><br/>\n""" PAGE_FMT = """<html><head><title>Simple MPyPi Index</title><meta name="api-version" value="2" /></head><body>\n""" PKG_PAGE_FMT = """<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\n""" # ------------------------------------------------------------------------------ # Snippet from pip._vendor.packaging.core # ------------------------------------------------------------------------------ _canonicalize_regex = re.compile(r"[-_.]+") def canonicalize_name(name): # This is taken from PEP 503. 
return _canonicalize_regex.sub("-", name).lower() # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # INTERNALLY USED FUNCTIONS # ------------------------------------------------------------------------------ # --- page formatting functions def page_index(packages): yield PAGE_FMT for p in packages: name = p.name url = name yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name) def page_package(package): yield PKG_PAGE_FMT.format(name=package.name) for (name, link) in package.links: yield ENTRY_FMT.format(name=name, url=link) def msg_404(pkg_name): return '<html><body> Package <b>{}</b> does not exist.</body></html>\n'.format(cgi.escape(pkg_name)) def make_request_handler(index): """ Arguments --------- index: dict-like - allows key lookups - has a values() function that returns a list of package instances. - supports get """ root_paths = {'', '/'} class PyPiRequestHandler(BaseHTTPRequestHandler): def get_package(self, package_name): package = index.get(package_name) return package def write_unicode(self, text): self.wfile.write(bytearray(text, encoding='utf-8')) def do_GET(self): print("GET", self.path) if self.path in root_paths: self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # serve index page for line in page_index(index.values()): self.write_unicode(line) else: # follow pip standard of using lowercase names package_name = self.path.strip('/') package = self.get_package(package_name) if not package: self.send_response(404) self.send_header('Content-type','text/html') self.end_headers() self.write_unicode(msg_404(package_name)) return # serve package page self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # serve index page for line in page_package(package): self.write_unicode(line) return PyPiRequestHandler def main(packages, index=None, host='', port=7890): # 
optionally create an index if index is None: index = {} for p in packages: index[canonicalize_name(p.name)] = p try: server = HTTPServer((host, port), make_request_handler(index)) print('Started mpypi on port {}'.format(port)) server.serve_forever() except KeyboardInterrupt: print('^C received, shutting down the web server') server.socket.close() if __name__ == '__main__': main([])
normal
{ "blob_id": "bd25b97de78f04510e43f13d356eb6c0025e223d", "index": 8121, "step-1": "<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\n<mask token>\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except 
KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] 
= p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\n<mask token>\n", "step-3": "<mask token>\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\n<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n 
self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\nif __name__ == '__main__':\n main([])\n", "step-4": "<mask token>\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\nENTRY_FMT = '<a href=\"{url}\">{name}</a><br/>\\n'\nPAGE_FMT = \"\"\"<html><head><title>Simple MPyPi Index</title><meta name=\"api-version\" value=\"2\" /></head><body>\n\"\"\"\nPKG_PAGE_FMT = \"\"\"<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\n\"\"\"\n_canonicalize_regex = re.compile('[-_.]+')\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def 
get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\nif __name__ == '__main__':\n main([])\n", "step-5": "#!/usr/bin/python\n\"\"\"\nAn extensible private pypi index.\n\nNOTES ON PACKAGE NAMES\n----------------------\nMPyPi tries the following when it does not find a package \nwith the given name in the index:\n - replaces all _ with - and\n - lowercases the package name\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport cgi\nimport re\n\nfrom .util import PY2, PY3\n\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\n\n# --- format strings\nENTRY_FMT = \"\"\"<a href=\"{url}\">{name}</a><br/>\\n\"\"\"\nPAGE_FMT = \"\"\"<html><head><title>Simple 
MPyPi Index</title><meta name=\"api-version\" value=\"2\" /></head><body>\\n\"\"\"\nPKG_PAGE_FMT = \"\"\"<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\\n\"\"\"\n\n\n# ------------------------------------------------------------------------------ \n# Snippet from pip._vendor.packaging.core\n# ------------------------------------------------------------------------------ \n_canonicalize_regex = re.compile(r\"[-_.]+\")\n\ndef canonicalize_name(name):\n # This is taken from PEP 503.\n return _canonicalize_regex.sub(\"-\", name).lower()\n# ------------------------------------------------------------------------------ \n\n# ------------------------------------------------------------------------------ \n# INTERNALLY USED FUNCTIONS\n# ------------------------------------------------------------------------------ \n# --- page formatting functions\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for (name, link) in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\ndef msg_404(pkg_name):\n return '<html><body> Package <b>{}</b> does not exist.</body></html>\\n'.format(cgi.escape(pkg_name))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print(\"GET\", self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type','text/html')\n 
self.end_headers()\n\n # serve index page\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n # follow pip standard of using lowercase names\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n\n if not package:\n self.send_response(404)\n self.send_header('Content-type','text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n # serve package page\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n\n # serve index page\n for line in page_package(package):\n self.write_unicode(line)\n\n return PyPiRequestHandler \n\ndef main(packages, index=None, host='', port=7890):\n # optionally create an index\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\nif __name__ == '__main__':\n main([])\n", "step-ids": [ 5, 6, 7, 8, 10 ] }
[ 5, 6, 7, 8, 10 ]
<|reserved_special_token_0|> def execute_cmd(cmd): """ Sockets: https://docs.python.org/2/library/socket.html How to use the socket s: # Establish socket connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Reading: data = s.recv(1024) # Receives 1024 bytes from IP/Port print(data) # Prints data Sending: s.send("something to send ") # Send a newline at the end of your command """ regex = re.match( '^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$' , cmd) val = regex.group(1) if val == 'shell': path = '/' while True: usr_in = raw_input(path + '>') if usr_in == 'exit': break command = ';' + ' cd ' + path + '; ' + usr_in if 'cd' in usr_in: reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in) if reg.group(1) == '': path = '/' elif reg.group(1)[0] == '/': path = reg.group(1) else: path += reg.group(1) if path[-1] != '/': path += '/' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) print('%s' % data) s.close() elif val == 'pull': command = '; ' + 'cat ' + regex.group(2) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) s.close() f = open(regex.group(3), 'w') f.write(data) f.close() elif val == 'quit': return -1 elif val == 'help': print('shell - Drop into an interactive shell - exit with "exit"') print('pull <remote path> <local path> - download files') print('help - show the help menu') print('quit - quit this program') else: print('invalid command') return 0 <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def execute_cmd(cmd): """ Sockets: https://docs.python.org/2/library/socket.html How to use the socket s: # Establish socket connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Reading: 
data = s.recv(1024) # Receives 1024 bytes from IP/Port print(data) # Prints data Sending: s.send("something to send ") # Send a newline at the end of your command """ regex = re.match( '^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$' , cmd) val = regex.group(1) if val == 'shell': path = '/' while True: usr_in = raw_input(path + '>') if usr_in == 'exit': break command = ';' + ' cd ' + path + '; ' + usr_in if 'cd' in usr_in: reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in) if reg.group(1) == '': path = '/' elif reg.group(1)[0] == '/': path = reg.group(1) else: path += reg.group(1) if path[-1] != '/': path += '/' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) print('%s' % data) s.close() elif val == 'pull': command = '; ' + 'cat ' + regex.group(2) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) s.close() f = open(regex.group(3), 'w') f.write(data) f.close() elif val == 'quit': return -1 elif val == 'help': print('shell - Drop into an interactive shell - exit with "exit"') print('pull <remote path> <local path> - download files') print('help - show the help menu') print('quit - quit this program') else: print('invalid command') return 0 if __name__ == '__main__': while True: cmd = raw_input('>') if execute_cmd(cmd) == -1: break <|reserved_special_token_1|> <|reserved_special_token_0|> host = 'cornerstoneairlines.co' port = 45 def execute_cmd(cmd): """ Sockets: https://docs.python.org/2/library/socket.html How to use the socket s: # Establish socket connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Reading: data = s.recv(1024) # Receives 1024 bytes from IP/Port print(data) # Prints data Sending: s.send("something to send ") # Send a newline at the 
end of your command """ regex = re.match( '^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$' , cmd) val = regex.group(1) if val == 'shell': path = '/' while True: usr_in = raw_input(path + '>') if usr_in == 'exit': break command = ';' + ' cd ' + path + '; ' + usr_in if 'cd' in usr_in: reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in) if reg.group(1) == '': path = '/' elif reg.group(1)[0] == '/': path = reg.group(1) else: path += reg.group(1) if path[-1] != '/': path += '/' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) print('%s' % data) s.close() elif val == 'pull': command = '; ' + 'cat ' + regex.group(2) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) s.close() f = open(regex.group(3), 'w') f.write(data) f.close() elif val == 'quit': return -1 elif val == 'help': print('shell - Drop into an interactive shell - exit with "exit"') print('pull <remote path> <local path> - download files') print('help - show the help menu') print('quit - quit this program') else: print('invalid command') return 0 if __name__ == '__main__': while True: cmd = raw_input('>') if execute_cmd(cmd) == -1: break <|reserved_special_token_1|> <|reserved_special_token_0|> import socket import re import time host = 'cornerstoneairlines.co' port = 45 def execute_cmd(cmd): """ Sockets: https://docs.python.org/2/library/socket.html How to use the socket s: # Establish socket connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Reading: data = s.recv(1024) # Receives 1024 bytes from IP/Port print(data) # Prints data Sending: s.send("something to send ") # Send a newline at the end of your command """ regex = re.match( 
'^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$' , cmd) val = regex.group(1) if val == 'shell': path = '/' while True: usr_in = raw_input(path + '>') if usr_in == 'exit': break command = ';' + ' cd ' + path + '; ' + usr_in if 'cd' in usr_in: reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in) if reg.group(1) == '': path = '/' elif reg.group(1)[0] == '/': path = reg.group(1) else: path += reg.group(1) if path[-1] != '/': path += '/' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) print('%s' % data) s.close() elif val == 'pull': command = '; ' + 'cat ' + regex.group(2) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) data = s.recv(1024) s.close() f = open(regex.group(3), 'w') f.write(data) f.close() elif val == 'quit': return -1 elif val == 'help': print('shell - Drop into an interactive shell - exit with "exit"') print('pull <remote path> <local path> - download files') print('help - show the help menu') print('quit - quit this program') else: print('invalid command') return 0 if __name__ == '__main__': while True: cmd = raw_input('>') if execute_cmd(cmd) == -1: break <|reserved_special_token_1|> """ Use the same techniques such as (but not limited to): 1) Sockets 2) File I/O 3) raw_input() from the OSINT HW to complete this assignment. Good luck! 
""" import socket import re import time host = "cornerstoneairlines.co" # IP address here port = 45 # Port here def execute_cmd(cmd): """ Sockets: https://docs.python.org/2/library/socket.html How to use the socket s: # Establish socket connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Reading: data = s.recv(1024) # Receives 1024 bytes from IP/Port print(data) # Prints data Sending: s.send("something to send\n") # Send a newline \n at the end of your command """ regex = re.match('^\s*(\w*)\s*([A-Za-z0-9.\/\-\_]*)\s*([A-Za-z0-9.\/\-\_]*)\s*$', cmd) val = regex.group(1) # print('val: %s' % val) if val == 'shell': path = '/' while True: usr_in = raw_input(path + ">") if usr_in == 'exit': break command = ';' + ' cd ' + path + '; ' + usr_in if ('cd' in usr_in): # print('here') reg = re.match('^\s*cd\s*([A-Za-z0-9.\/\-\_]*)\s*$', usr_in) if (reg.group(1) == ''): path = '/' elif (reg.group(1)[0] == '/'): path = reg.group(1) else: path += reg.group(1) if (path[-1] != '/'): path += '/' # print('command: "%s"' % command) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) # print('%s' % data) s.send(command + '\n') time.sleep(2) # print('"%s" sent' % command) data = s.recv(1024) print('%s' % data) s.close() elif val == 'pull': command = '; ' + 'cat ' + regex.group(2) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) data = s.recv(1024) time.sleep(2) s.send(command + '\n') time.sleep(2) # print('"%s" sent' % command) data = s.recv(1024) # print('%s' % data) s.close() f = open(regex.group(3), 'w') f.write(data) f.close() elif val == 'quit': return -1 elif val == 'help': print('shell - Drop into an interactive shell - exit with "exit"') print('pull <remote path> <local path> - download files') print('help - show the help menu') print('quit - quit this program') else: print('invalid command') return 0 if __name__ == '__main__': while True: cmd = 
raw_input('>') if execute_cmd(cmd) == -1: break
flexible
{ "blob_id": "e0f25addad8af4541f1404b76d4798d2223d9715", "index": 5116, "step-1": "<mask token>\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\n<mask token>\n", "step-2": "<mask 
token>\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n", "step-3": "<mask 
token>\nhost = 'cornerstoneairlines.co'\nport = 45\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) 
== -1:\n break\n", "step-4": "<mask token>\nimport socket\nimport re\nimport time\nhost = 'cornerstoneairlines.co'\nport = 45\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif 
__name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n", "step-5": "\"\"\"\n Use the same techniques such as (but not limited to):\n 1) Sockets\n 2) File I/O\n 3) raw_input()\n\n from the OSINT HW to complete this assignment. Good luck!\n\"\"\"\n\nimport socket\nimport re\nimport time\n\nhost = \"cornerstoneairlines.co\" # IP address here\nport = 45 # Port here\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\\n\") # Send a newline \\n at the end of your command\n \"\"\"\n regex = re.match('^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', cmd)\n val = regex.group(1)\n# print('val: %s' % val)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + \">\")\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if ('cd' in usr_in):\n# print('here')\n reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in)\n if (reg.group(1) == ''):\n path = '/'\n elif (reg.group(1)[0] == '/'):\n path = reg.group(1)\n else:\n path += reg.group(1)\n if (path[-1] != '/'):\n path += '/'\n# print('command: \"%s\"' % command)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n# print('%s' % data)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % command)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % 
command)\n data = s.recv(1024)\n# print('%s' % data)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n\n return 0\n\n\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
''' Factory for creating and running ssimulations against optimization tools Author: Matthew Barber <mfmbarber@gmail.com> ''' from .strategy_annealer import StrategyAnnealer from .strategy_deap import StrategyDeap class CalulateStrategyWith: @staticmethod def Annealing(car, include_initial_tyre=False, iterations=100000): ''' Use simulated annealing to determine the best strategy Args: car (Car): An initial car to test with include_initial_tyre (bool): Include the initial tyre in moves iterations (int): Iteration limit Returns: Car ''' sim = StrategyAnnealer(car) sim.setIncludeInitialTyreInMove(include_initial_tyre) sim.steps = iterations state, e = sim.anneal() return state @staticmethod def geneticAlgorithm(car, include_initial_tyre=False, generations=1000): ''' Use genetic evolution to determine the best strategy Args: car (Car): An initial car to test with include_initial_tyre (bool): Include the initial tyre in moves generations (int): Evolution generation limit Returns: Car ''' return StrategyDeap(car, include_initial_tyre, generations).run()
normal
{ "blob_id": "1cab38721e6b96a9877bd67cbddaa4d6b4e53d1b", "index": 8175, "step-1": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n", "step-3": "<mask token>\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n", "step-4": "<mask token>\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre 
(bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n", "step-5": "'''\n Factory for creating and running ssimulations against optimization tools\n\n Author:\n Matthew Barber <mfmbarber@gmail.com>\n'''\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n '''\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n '''\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n '''\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n '''\n return StrategyDeap(car, include_initial_tyre, generations).run()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Print one row of stars, stepping the counter by 3
# (iterates 4 times: 0, 3, 6, 9).
x = 0
while x < 10:
    print("★", end=" ")
    x += 3
print()
print("------------------------")
# Nested loops: print a 10 x 10 grid of stars, one row per line.
row = 0
while row < 10:
    col = 0
    while col < 10:
        print("★", end=" ")
        col += 1
    print()
    row += 1
normal
{ "blob_id": "b360ba7412bd10e2818511cee81302d407f88fd1", "index": 1895, "step-1": "<mask token>\n", "step-2": "for x in range(0, 10, 3):\n print('★', end=' ')\nprint()\nprint('------------------------')\nfor y in range(0, 10):\n for x in range(0, 10):\n print('★', end=' ')\n print()\n", "step-3": "# 3번 반복하고 싶은 경우\r\n\r\n# 별 10개를 한줄로\r\nfor x in range(0, 10, 3): # 3번째 숫자는 증감할 양을 정해줌.\r\n # print(x)\r\n print(\"★\", end=\" \")\r\nprint()\r\nprint(\"------------------------\")\r\n#이중 for문\r\nfor y in range(0, 10):\r\n for x in range(0, 10):\r\n # print(x)\r\n print(\"★\", end=\" \")\r\n print()", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
class Solution(object):
    def findDisappearedNumbers(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        # Flag-array variant: mark every value present in ``nums``, then
        # report each 1-based index whose flag was never set.
        seen = [0] * len(nums)
        for value in nums:
            seen[value - 1] = 1
        missing = []
        for idx, flag in enumerate(seen):
            if flag == 0:
                missing.append(idx + 1)
        return missing


class Solution(object):
    def findDisappearedNumbers(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        # In-place variant: for every value v seen, negate nums[v-1]; a
        # still-positive entry afterwards means its (1-based) index never
        # appeared in the input.  NOTE: mutates ``nums``.
        for value in nums:
            slot = abs(value) - 1
            nums[slot] = -abs(nums[slot])
        return [pos + 1 for pos, entry in enumerate(nums) if entry > 0]
normal
{ "blob_id": "87504fb88cbbf810ad8bab08bc59284d2cf37cce", "index": 850, "step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n", "step-3": "class Solution(object):\n <mask token>\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n", "step-4": "class Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n ns = [0] * len(nums)\n for i in range(0, len(nums), 1):\n ns[nums[i] - 1] = 1\n ret = []\n for j in range(0, len(ns), 1):\n if ns[j] == 0:\n ret.append(j + 1)\n return ret\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n", "step-5": "class Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n ns = [0]*len(nums)\n for i in range(0, len(nums), 1):\n ns[nums[i]-1] = 1\n \n ret = []\n for j in range(0, len(ns), 1):\n if(ns[j] == 0): ret.append(j+1)\n return ret\n\nclass Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = - abs(nums[index])\n\n return [i + 1 for i in 
range(0, len(nums), 1) if nums[i] > 0]", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Training/generation hyperparameters for the poem model.

learningRateBase = 0.001  # initial learning rate
learningRateDecreaseStep = 80  # decay interval (presumably in steps/epochs -- confirm in the trainer)
epochNum = 100  # total number of training epochs
generateNum = 3  # how many poems to generate at inference time
batchSize = 16  # mini-batch size

trainPoems = "./data/poems.txt"  # path to the training corpus
checkpointsPath = "./model/"  # directory where model checkpoints are written
normal
{ "blob_id": "2fb299f5454c251dc1c77c2597ee23bf414c716e", "index": 4845, "step-1": "<mask token>\n", "step-2": "learningRateBase = 0.001\nlearningRateDecreaseStep = 80\nepochNum = 100\ngenerateNum = 3\nbatchSize = 16\ntrainPoems = './data/poems.txt'\ncheckpointsPath = './model/'\n", "step-3": "learningRateBase = 0.001\nlearningRateDecreaseStep = 80\nepochNum = 100\ngenerateNum = 3\nbatchSize = 16\n\ntrainPoems = \"./data/poems.txt\"\ncheckpointsPath = \"./model/\"", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'KEY.ui' # # Created by: PyQt5 UI code generator 5.11.3 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_KEY(object): def setupUi(self, KEY): KEY.setObjectName("KEY") KEY.resize(419, 106) self.Key1 = QtWidgets.QLineEdit(KEY) self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31)) self.Key1.setText("") self.Key1.setObjectName("Key1") self.Key2 = QtWidgets.QLineEdit(KEY) self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31)) self.Key2.setObjectName("Key2") self.layoutWidget = QtWidgets.QWidget(KEY) self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.label = QtWidgets.QLabel(self.layoutWidget) self.label.setObjectName("label") self.verticalLayout.addWidget(self.label) self.label_2 = QtWidgets.QLabel(self.layoutWidget) self.label_2.setObjectName("label_2") self.verticalLayout.addWidget(self.label_2) self.enter = QtWidgets.QPushButton(KEY) self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31)) self.enter.setObjectName("enter") self.quxiao = QtWidgets.QPushButton(KEY) self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31)) self.quxiao.setObjectName("quxiao") self.retranslateUi(KEY) self.quxiao.clicked.connect(KEY.close) QtCore.QMetaObject.connectSlotsByName(KEY) def retranslateUi(self, KEY): _translate = QtCore.QCoreApplication.translate KEY.setWindowTitle(_translate("KEY", "KEY")) self.label.setText(_translate("KEY", "Keys 1")) self.label_2.setText(_translate("KEY", "Keys 2")) self.enter.setText(_translate("KEY", "确定")) self.quxiao.setText(_translate("KEY", "取消"))
normal
{ "blob_id": "1dab0084666588f61d0f9f95f88f06ed9d884e5b", "index": 3892, "step-1": "<mask token>\n\n\nclass Ui_KEY(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = 
QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate('KEY', 'KEY'))\n self.label.setText(_translate('KEY', 'Keys 1'))\n self.label_2.setText(_translate('KEY', 'Keys 2'))\n self.enter.setText(_translate('KEY', '确定'))\n self.quxiao.setText(_translate('KEY', '取消'))\n", "step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 
0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate('KEY', 'KEY'))\n self.label.setText(_translate('KEY', 'Keys 1'))\n self.label_2.setText(_translate('KEY', 'Keys 2'))\n self.enter.setText(_translate('KEY', '确定'))\n self.quxiao.setText(_translate('KEY', '取消'))\n", "step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'KEY.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_KEY(object):\n def setupUi(self, KEY):\n KEY.setObjectName(\"KEY\")\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText(\"\")\n self.Key1.setObjectName(\"Key1\")\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName(\"Key2\")\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName(\"label\")\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName(\"enter\")\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName(\"quxiao\")\n\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate(\"KEY\", \"KEY\"))\n self.label.setText(_translate(\"KEY\", \"Keys 1\"))\n self.label_2.setText(_translate(\"KEY\", \"Keys 2\"))\n self.enter.setText(_translate(\"KEY\", \"确定\"))\n self.quxiao.setText(_translate(\"KEY\", \"取消\"))\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems),                          #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others        #
#                                                                             #
# All rights reserved. This program and the accompanying materials            #
# are made available under the terms of the Apache License, Version 2.0       #
# which accompanies this distribution, and is available at                    #
# http://www.apache.org/licenses/LICENSE-2.0                                  #
###############################################################################
from unittest.mock import MagicMock, patch

from scan.fetchers.kube.kube_fetch_containers import KubeFetchContainers
from scan.test.fetch.kube_fetch.kube_test_base import KubeTestBase
from scan.test.fetch.kube_fetch.test_data.kube_access import KUBE_CONFIG
from scan.test.fetch.kube_fetch.test_data.kube_fetch_containers import \
    POD_DOCUMENT, CONTAINERS_FOLDER_ID, PODS_RESPONSE_NO_MATCH, \
    EXPECTED_CONTAINER_DOC
from scan.test.fetch.kube_fetch.test_data.kube_fetch_pods import PODS_RESPONSE, \
    EMPTY_RESPONSE


class TestKubeFetchContainers(KubeTestBase):
    """Unit tests for ``KubeFetchContainers`` using mocked inventory/API data."""

    class DummyConfig(object):
        # Minimal stand-in for the real configuration object: the fetcher
        # only reads the ``environment`` mapping from it.
        def __init__(self, _environment):
            self.environment = _environment

    def setUp(self):
        super().setUp()

        # Patch out the CLI-access configuration so no real config is loaded.
        self.conf_patcher = patch(
            'utils.cli_access.Configuration'
        )
        self.conf_class = self.conf_patcher.start()

        self.fetcher = KubeFetchContainers(KUBE_CONFIG)
        self.fetcher.configuration = TestKubeFetchContainers.DummyConfig({
            'environment_type': 'Kubernetes'
        })

    @staticmethod
    def _get_by_id(environment, item_id):
        # Inventory lookup stub: only the known pod document resolves;
        # everything else returns None.
        if environment:
            pass
        if item_id == POD_DOCUMENT['id']:
            return POD_DOCUMENT
        return None

    def test_get_flannel(self):
        # With the Flannel mechanism driver and a pod present both in the
        # inventory and in the API response, exactly one container document
        # is produced and it contains the expected fields.
        self.fetcher.configuration.environment['mechanism_drivers'] = \
            ['Flannel']
        self.inv.get_by_id.side_effect = self._get_by_id
        self.fetcher.run = MagicMock(return_value="[]")
        response = self._get_response(payload=PODS_RESPONSE,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)

        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(1, len(containers))
        self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])

    def test_get_no_db_pod(self):
        # No pod document in the inventory -> no containers.
        self.inv.get_by_id.return_value = None
        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def test_get_no_kube_pods(self):
        # Empty pod list from the Kubernetes API -> no containers.
        self.inv.get_by_id.side_effect = self._get_by_id
        response = self._get_response(payload=EMPTY_RESPONSE,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)

        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def test_get_no_matching_pod(self):
        # API pods that do not match the inventory pod document -> no containers.
        self.inv.get_by_id.side_effect = self._get_by_id
        response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)

        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def tearDown(self):
        self.conf_patcher.stop()
        super().tearDown()
normal
{ "blob_id": "d60810ea0b19cc9163ce526e6a5a54da9c8b3f68", "index": 3595, "step-1": "<mask token>\n\n\nclass TestKubeFetchContainers(KubeTestBase):\n\n\n class DummyConfig(object):\n\n def __init__(self, _environment):\n self.environment = _environment\n <mask token>\n <mask token>\n\n def test_get_flannel(self):\n self.fetcher.configuration.environment['mechanism_drivers'] = [\n 'Flannel']\n self.inv.get_by_id.side_effect = self._get_by_id\n self.fetcher.run = MagicMock(return_value='[]')\n response = self._get_response(payload=PODS_RESPONSE, response_type=\n 'V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(1, len(containers))\n self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])\n\n def test_get_no_db_pod(self):\n self.inv.get_by_id.return_value = None\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_kube_pods(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=EMPTY_RESPONSE, response_type\n ='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_matching_pod(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TestKubeFetchContainers(KubeTestBase):\n\n\n class DummyConfig(object):\n\n def __init__(self, _environment):\n self.environment = _environment\n <mask token>\n\n @staticmethod\n def _get_by_id(environment, item_id):\n if environment:\n pass\n if item_id == 
POD_DOCUMENT['id']:\n return POD_DOCUMENT\n return None\n\n def test_get_flannel(self):\n self.fetcher.configuration.environment['mechanism_drivers'] = [\n 'Flannel']\n self.inv.get_by_id.side_effect = self._get_by_id\n self.fetcher.run = MagicMock(return_value='[]')\n response = self._get_response(payload=PODS_RESPONSE, response_type=\n 'V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(1, len(containers))\n self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])\n\n def test_get_no_db_pod(self):\n self.inv.get_by_id.return_value = None\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_kube_pods(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=EMPTY_RESPONSE, response_type\n ='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_matching_pod(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n <mask token>\n", "step-3": "<mask token>\n\n\nclass TestKubeFetchContainers(KubeTestBase):\n\n\n class DummyConfig(object):\n\n def __init__(self, _environment):\n self.environment = _environment\n <mask token>\n\n @staticmethod\n def _get_by_id(environment, item_id):\n if environment:\n pass\n if item_id == POD_DOCUMENT['id']:\n return POD_DOCUMENT\n return None\n\n def test_get_flannel(self):\n self.fetcher.configuration.environment['mechanism_drivers'] = [\n 'Flannel']\n self.inv.get_by_id.side_effect = self._get_by_id\n self.fetcher.run = 
MagicMock(return_value='[]')\n response = self._get_response(payload=PODS_RESPONSE, response_type=\n 'V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(1, len(containers))\n self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])\n\n def test_get_no_db_pod(self):\n self.inv.get_by_id.return_value = None\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_kube_pods(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=EMPTY_RESPONSE, response_type\n ='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_matching_pod(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def tearDown(self):\n self.conf_patcher.stop()\n super().tearDown()\n", "step-4": "<mask token>\n\n\nclass TestKubeFetchContainers(KubeTestBase):\n\n\n class DummyConfig(object):\n\n def __init__(self, _environment):\n self.environment = _environment\n\n def setUp(self):\n super().setUp()\n self.conf_patcher = patch('utils.cli_access.Configuration')\n self.conf_class = self.conf_patcher.start()\n self.fetcher = KubeFetchContainers(KUBE_CONFIG)\n self.fetcher.configuration = TestKubeFetchContainers.DummyConfig({\n 'environment_type': 'Kubernetes'})\n\n @staticmethod\n def _get_by_id(environment, item_id):\n if environment:\n pass\n if item_id == POD_DOCUMENT['id']:\n return POD_DOCUMENT\n return None\n\n def test_get_flannel(self):\n 
self.fetcher.configuration.environment['mechanism_drivers'] = [\n 'Flannel']\n self.inv.get_by_id.side_effect = self._get_by_id\n self.fetcher.run = MagicMock(return_value='[]')\n response = self._get_response(payload=PODS_RESPONSE, response_type=\n 'V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(1, len(containers))\n self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])\n\n def test_get_no_db_pod(self):\n self.inv.get_by_id.return_value = None\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_kube_pods(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=EMPTY_RESPONSE, response_type\n ='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_matching_pod(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def tearDown(self):\n self.conf_patcher.stop()\n super().tearDown()\n", "step-5": "###############################################################################\n# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #\n# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #\n# #\n# All rights reserved. 
This program and the accompanying materials #\n# are made available under the terms of the Apache License, Version 2.0 #\n# which accompanies this distribution, and is available at #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n###############################################################################\nfrom unittest.mock import MagicMock, patch\n\nfrom scan.fetchers.kube.kube_fetch_containers import KubeFetchContainers\nfrom scan.test.fetch.kube_fetch.kube_test_base import KubeTestBase\nfrom scan.test.fetch.kube_fetch.test_data.kube_access import KUBE_CONFIG\nfrom scan.test.fetch.kube_fetch.test_data.kube_fetch_containers import \\\n POD_DOCUMENT, CONTAINERS_FOLDER_ID, PODS_RESPONSE_NO_MATCH, \\\n EXPECTED_CONTAINER_DOC\nfrom scan.test.fetch.kube_fetch.test_data.kube_fetch_pods import PODS_RESPONSE, \\\n EMPTY_RESPONSE\n\n\nclass TestKubeFetchContainers(KubeTestBase):\n\n class DummyConfig(object):\n def __init__(self, _environment):\n self.environment = _environment\n\n def setUp(self):\n super().setUp()\n\n self.conf_patcher = patch(\n 'utils.cli_access.Configuration'\n )\n self.conf_class = self.conf_patcher.start()\n\n self.fetcher = KubeFetchContainers(KUBE_CONFIG)\n self.fetcher.configuration = TestKubeFetchContainers.DummyConfig({\n 'environment_type': 'Kubernetes'\n })\n\n @staticmethod\n def _get_by_id(environment, item_id):\n if environment:\n pass\n if item_id == POD_DOCUMENT['id']:\n return POD_DOCUMENT\n return None\n\n def test_get_flannel(self):\n self.fetcher.configuration.environment['mechanism_drivers'] = \\\n ['Flannel']\n self.inv.get_by_id.side_effect = self._get_by_id\n self.fetcher.run = MagicMock(return_value=\"[]\")\n response = self._get_response(payload=PODS_RESPONSE,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(1, len(containers))\n self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])\n\n 
def test_get_no_db_pod(self):\n self.inv.get_by_id.return_value = None\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_kube_pods(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=EMPTY_RESPONSE,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def test_get_no_matching_pod(self):\n self.inv.get_by_id.side_effect = self._get_by_id\n response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,\n response_type='V1PodList')\n self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)\n\n containers = self.fetcher.get(CONTAINERS_FOLDER_ID)\n self.assertEqual(0, len(containers))\n\n def tearDown(self):\n self.conf_patcher.stop()\n super().tearDown()\n", "step-ids": [ 5, 6, 7, 8, 10 ] }
[ 5, 6, 7, 8, 10 ]
import cv2 import dlib import faceBlendCommon as face from matplotlib import pyplot as plt from scipy.spatial import distance as dist import numpy as np import cmapy import math def eye_aspect_ratio(eye): A = dist.euclidean(eye[1], eye[5]) B = dist.euclidean(eye[2], eye[4]) C = dist.euclidean(eye[0], eye[3]) rad=(A+B)/2 return int(rad) # Load Image im = cv2.imread("imgs/2.jpg") # Detect face landmarks PREDICTOR_PATH = r"./model/shape_predictor_68_face_landmarks.dat" faceDetector = dlib.get_frontal_face_detector() landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH) landmarks = face.getLandmarks(faceDetector, landmarkDetector, im) def createEyeMask(eyeLandmarks, im): leftEyePoints = eyeLandmarks eyeMask = np.zeros_like(im) cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255)) eyeMask = np.uint8(eyeMask) return eyeMask def findIris(eyeMask, im, thresh): r = im[:,:,2] _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV) kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4)) morph = cv2.dilate(binaryIm, kernel, 1) morph = cv2.merge((morph, morph, morph)) morph = morph.astype(float)/255 eyeMask = eyeMask.astype(float)/255 iris = cv2.multiply(eyeMask, morph) return iris def findCentroid(iris): M = cv2.moments(iris[:,:,0]) cX = int(M["m10"] / M["m00"]) cY = int(M["m01"] / M["m00"]) centroid = (cX,cY) return centroid def createIrisMask(iris, centroid,rad): cnts, _ = cv2.findContours(np.uint8(iris[:,:,0]), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) flag = 10000 final_cnt = None for cnt in cnts: (x,y),radius = cv2.minEnclosingCircle(cnt) distance = abs(centroid[0]-x)+abs(centroid[1]-y) if distance < flag : flag = distance final_cnt = cnt else: continue (x,y),radius = cv2.minEnclosingCircle(final_cnt) center = (int(x),int(y)) center = centroid # radius = int(radius-(radius//4)) radius=(rad//2)+2 print(radius) irisMask = np.zeros_like(iris) inverseIrisMask = np.ones_like(iris)*255 cv2.circle(irisMask,center,radius,(255, 255, 255),-1) 
cv2.circle(inverseIrisMask,center,radius,(0, 0, 0),-1) # irisMask = cv2.GaussianBlur(irisMask, (5,5), cv2.BORDER_DEFAULT) # inverseIrisMask = cv2.GaussianBlur(inverseIrisMask, (5,5), cv2.BORDER_DEFAULT) return irisMask, inverseIrisMask def changeEyeColor(im, irisMask, inverseIrisMask): imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r')) imCopy = imCopy.astype(float)/255 irisMask = irisMask.astype(float)/255 inverseIrisMask = inverseIrisMask.astype(float)/255 im = im.astype(float)/255 faceWithoutEye = cv2.multiply(inverseIrisMask, im) newIris = cv2.multiply(irisMask, imCopy) result = faceWithoutEye + newIris return result def float642Uint8(im): im2Convert = im.astype(np.float64) / np.amax(im) im2Convert = 255 * im2Convert convertedIm = im2Convert.astype(np.uint8) return convertedIm # Create eye mask using eye landmarks from facial landmark detection leftEyeMask = createEyeMask(landmarks[36:42], im) rightEyeMask = createEyeMask(landmarks[42:48], im) # Find the iris by thresholding the red channel of the image within the boundaries of the eye mask leftIris = findIris(leftEyeMask, im, 100) rightIris = findIris(rightEyeMask, im, 50) # Find the centroid of the binary image of the eye leftIrisCentroid = findCentroid(leftIris) rightIrisCentroid = findCentroid(rightIris) # Generate the iris mask and its inverse mask rad_left=eye_aspect_ratio(landmarks[36:42]) rad_right=eye_aspect_ratio(landmarks[42:48]) rightIrisMask, rightInverseIrisMask = createIrisMask(rightIris, rightIrisCentroid,rad_right) leftIrisMask, leftInverseIrisMask = createIrisMask(leftIris, leftIrisCentroid,rad_left) # Change the eye color and merge it to the original image coloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask) coloredEyesLady = float642Uint8(coloredEyesLady) coloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask, leftInverseIrisMask) coloredEyesLady = float642Uint8(coloredEyesLady) # Present results cv2.imwrite("3.jpg", coloredEyesLady)
normal
{ "blob_id": "65ff3b5137c94890c3293a2ae3f57dee1f60a54c", "index": 9097, "step-1": "<mask token>\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\n<mask token>\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ 
= cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad = (A + B) / 2\n return int(rad)\n\n\n<mask token>\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 
1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n<mask token>\ncv2.imwrite('3.jpg', coloredEyesLady)\n", "step-4": "import cv2\nimport dlib\nimport faceBlendCommon as face\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport cmapy\nimport math\n\n\ndef eye_aspect_ratio(eye):\n A = 
dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad = (A + B) / 2\n return int(rad)\n\n\nim = cv2.imread('imgs/2.jpg')\nPREDICTOR_PATH = './model/shape_predictor_68_face_landmarks.dat'\nfaceDetector = dlib.get_frontal_face_detector()\nlandmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)\nlandmarks = face.getLandmarks(faceDetector, landmarkDetector, im)\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, 
inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\nleftEyeMask = createEyeMask(landmarks[36:42], im)\nrightEyeMask = createEyeMask(landmarks[42:48], im)\nleftIris = findIris(leftEyeMask, im, 100)\nrightIris = findIris(rightEyeMask, im, 50)\nleftIrisCentroid = findCentroid(leftIris)\nrightIrisCentroid = findCentroid(rightIris)\nrad_left = eye_aspect_ratio(landmarks[36:42])\nrad_right = eye_aspect_ratio(landmarks[42:48])\nrightIrisMask, rightInverseIrisMask = createIrisMask(rightIris,\n rightIrisCentroid, rad_right)\nleftIrisMask, leftInverseIrisMask = createIrisMask(leftIris,\n leftIrisCentroid, rad_left)\ncoloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\ncoloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask,\n leftInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\ncv2.imwrite('3.jpg', coloredEyesLady)\n", "step-5": "import cv2\nimport dlib\nimport faceBlendCommon as face\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport cmapy\nimport math\n\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad=(A+B)/2\n return int(rad)\n\n# Load Image\nim = cv2.imread(\"imgs/2.jpg\")\n\n# Detect face landmarks\nPREDICTOR_PATH = r\"./model/shape_predictor_68_face_landmarks.dat\"\nfaceDetector = 
dlib.get_frontal_face_detector()\nlandmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)\nlandmarks = face.getLandmarks(faceDetector, landmarkDetector, im)\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\ndef findIris(eyeMask, im, thresh):\n r = im[:,:,2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float)/255\n eyeMask = eyeMask.astype(float)/255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:,:,0])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n centroid = (cX,cY)\n return centroid\n\ndef createIrisMask(iris, centroid,rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:,:,0]), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0]-x)+abs(centroid[1]-y)\n if distance < flag :\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x,y),radius = cv2.minEnclosingCircle(final_cnt)\n center = (int(x),int(y))\n center = centroid\n # radius = int(radius-(radius//4))\n radius=(rad//2)+2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris)*255\n cv2.circle(irisMask,center,radius,(255, 255, 255),-1)\n cv2.circle(inverseIrisMask,center,radius,(0, 0, 0),-1)\n # irisMask = cv2.GaussianBlur(irisMask, (5,5), cv2.BORDER_DEFAULT)\n # inverseIrisMask = cv2.GaussianBlur(inverseIrisMask, (5,5), cv2.BORDER_DEFAULT)\n return irisMask, inverseIrisMask\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n \n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r')) \n imCopy = 
imCopy.astype(float)/255\n irisMask = irisMask.astype(float)/255\n inverseIrisMask = inverseIrisMask.astype(float)/255\n im = im.astype(float)/255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert \n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n\n# Create eye mask using eye landmarks from facial landmark detection\nleftEyeMask = createEyeMask(landmarks[36:42], im)\nrightEyeMask = createEyeMask(landmarks[42:48], im)\n\n# Find the iris by thresholding the red channel of the image within the boundaries of the eye mask\nleftIris = findIris(leftEyeMask, im, 100)\nrightIris = findIris(rightEyeMask, im, 50)\n\n\n# Find the centroid of the binary image of the eye\nleftIrisCentroid = findCentroid(leftIris)\nrightIrisCentroid = findCentroid(rightIris)\n\n# Generate the iris mask and its inverse mask\nrad_left=eye_aspect_ratio(landmarks[36:42])\nrad_right=eye_aspect_ratio(landmarks[42:48])\n\n\nrightIrisMask, rightInverseIrisMask = createIrisMask(rightIris, rightIrisCentroid,rad_right)\nleftIrisMask, leftInverseIrisMask = createIrisMask(leftIris, leftIrisCentroid,rad_left)\n\n\n# Change the eye color and merge it to the original image\ncoloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\ncoloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask, leftInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\n\n# Present results\ncv2.imwrite(\"3.jpg\", coloredEyesLady)\n", "step-ids": [ 3, 6, 8, 10, 11 ] }
[ 3, 6, 8, 10, 11 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_visual_coding_2p_analysis ---------------------------------- Tests for `visual_coding_2p_analysis` module. """ import pytest @pytest.fixture def decorated_example(): """Sample pytest fixture. See more at: http://doc.pytest.org/en/latest/fixture.html """ def test_example(decorated_example): """Sample pytest test function with the pytest fixture as an argument. """ import visual_coding_2p_analysis
normal
{ "blob_id": "ae3198e68d9479605327b729c01fb15eae87ab98", "index": 3282, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n", "step-4": "<mask token>\nimport pytest\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_visual_coding_2p_analysis\n----------------------------------\n\nTests for `visual_coding_2p_analysis` module.\n\"\"\"\nimport pytest\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys import time import numpy as np import vii import cnn from cnn._utils import (FLOAT_DTYPE, _multi_convolve_image, _opencl_multi_convolve_image, _relu_max_pool_image, _opencl_relu_max_pool_image) GROUPS = 25, 20, 1 def subsample(x, pool_size): # Make sure it works with pool size > 2 !!!! dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)] return x[:dx:2, :dy:2] def probe_time(func): def wrapper(*args, **kwargs): t0 = time.time() res = func(*args, **kwargs) dt = time.time() - t0 print('Time (%s): %f' % (func.__name__, dt)) return res return wrapper @probe_time def cpu_multi_convolve_image(*args): return _multi_convolve_image(*args) @probe_time def cpu_relu_max_pool_image(*args): return _relu_max_pool_image(*args) @probe_time def opencl_multi_convolve_image(*args): return _opencl_multi_convolve_image(*args) @probe_time def opencl_relu_max_pool_image(*args): return _opencl_relu_max_pool_image(*args) ########################################################################### fimg = 'pizza.png' fmod = 'feb2.h5' device = 0 brute_force = False if len(sys.argv) > 1: fimg = sys.argv[1] if len(sys.argv) > 2: fmod = sys.argv[2] if len(sys.argv) > 3: device = int(sys.argv[3]) if device < 0: device = None img = vii.load_image(fimg) classif = cnn.load_image_classifier(fmod) def multi_convolve_image(data, kernel, bias, dil_x, dil_y): if device < 0: return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y) else: return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2])) def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y): if device < 0: return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y) else: return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, *GROUPS) ########################################################################### print('CNN test') x = np.random.randint(img.dims[0] - classif.image_size[0] + 1) y = np.random.randint(img.dims[1] - 
classif.image_size[1] + 1) data = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255 gold = classif.run(data) flow = data for i in range(len(classif.conv_filters)): kernel, bias = classif.get_weights(i) flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :] flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2) flow = flow.flatten() for i in range(len(classif.conv_filters), len(classif.layers)): kernel, bias = classif.get_weights(i) flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias if i < (len(classif.layers) - 1): flow = np.maximum(flow, 0) silver = cnn.softmax(flow) print('error = %f' % np.max(np.abs(gold - silver)))
normal
{ "blob_id": "8ec257d5dfe84e363e3c3aa5adee3470c20d1765", "index": 5866, "step-1": "<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return 
_opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return 
cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\nprint('CNN test')\n<mask token>\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.\n pool_size, 1, 1), 2)\n<mask token>\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < len(classif.layers) - 1:\n flow = np.maximum(flow, 0)\n<mask token>\nprint('error = %f' % np.max(np.abs(gold - silver)))\n", "step-5": "import sys\nimport time\nimport numpy as np\n\nimport vii\n\nimport cnn\nfrom cnn._utils import (FLOAT_DTYPE,\n _multi_convolve_image,\n _opencl_multi_convolve_image,\n _relu_max_pool_image,\n _opencl_relu_max_pool_image)\n\n\nGROUPS = 25, 20, 1\n\ndef subsample(x, pool_size):\n # Make sure it works with pool size > 2 !!!!\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n###########################################################################\n\nfimg = 'pizza.png'\nfmod = 'feb2.h5'\ndevice = 0\nbrute_force = False\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n 
if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\nimg = vii.load_image(fimg)\nclassif = cnn.load_image_classifier(fmod)\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2]))\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, *GROUPS)\n\n\n###########################################################################\n \nprint('CNN test')\n\nx = np.random.randint(img.dims[0] - classif.image_size[0] + 1)\ny = np.random.randint(img.dims[1] - classif.image_size[1] + 1)\n\ndata = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255\ngold = classif.run(data)\n\nflow = data\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)\nflow = flow.flatten()\n\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < (len(classif.layers) - 1):\n flow = np.maximum(flow, 0)\n\nsilver = cnn.softmax(flow)\n\nprint('error = %f' % np.max(np.abs(gold - silver))) \n", "step-ids": [ 2, 6, 7, 9, 12 ] }
[ 2, 6, 7, 9, 12 ]
import numpy as np from sklearn.metrics import mutual_info_score def mimic_binary(max_iter=100, fitness_func=None, space=None): assert fitness_func is not None assert space is not None idx = np.random.permutation(np.arange(len(space))) pool = space[idx[:int(len(space)/2)]] # randomly sample 50% of the oringal space new_pool = [] for i in range(max_iter): print("mimic: {}|{}".format(i+1, max_iter)) theta += delta for j, parent in enumerate(pool): if j in new_pool or fitness_func(parent)<theta: continue best_score = 0 best_child = parent for k, child in enumerate(pool): if k<=j or child in new_pool: continue score = mutual_info(parent, child) if score > best_score and fitness_func(child)>=theta: best_score = score new_pool.append(parent) new_pool.append(child) return None def mutual_info(parent, child): parent = [int(x) for x in parent] child = [int(x) for x in child] return mutual_info_score(parent,child)
normal
{ "blob_id": "360e661d8538a8f40b7546a54e9a9582fa64bd67", "index": 700, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n", "step-3": "<mask token>\n\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n assert fitness_func is not None\n assert space is not None\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space) / 2)]]\n new_pool = []\n for i in range(max_iter):\n print('mimic: {}|{}'.format(i + 1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent) < theta:\n continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k <= j or child in new_pool:\n continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child) >= theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return None\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n", "step-4": "import numpy as np\nfrom sklearn.metrics import mutual_info_score\n\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n assert fitness_func is not None\n assert space is not None\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space) / 2)]]\n new_pool = []\n for i in range(max_iter):\n print('mimic: {}|{}'.format(i + 1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent) < theta:\n continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k <= j or child in new_pool:\n continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child) >= theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return 
None\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n", "step-5": "import numpy as np\nfrom sklearn.metrics import mutual_info_score\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n\n assert fitness_func is not None\n assert space is not None\n\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space)/2)]] # randomly sample 50% of the oringal space\n\n new_pool = []\n\n for i in range(max_iter):\n print(\"mimic: {}|{}\".format(i+1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent)<theta: continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k<=j or child in new_pool: continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child)>=theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return None\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent,child)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def py_make_float_array(cst, op_version=None): """ Creates an array with a single element from a constant. @param cst constant @param op_version unused @return array .. runpython:: :showcode: :warningout: DeprecationWarning from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array print(py_make_float_array(5.5)) """ return numpy.array([cst], dtype=numpy.float32) <|reserved_special_token_0|> def py_mul(*x, op_version=None): """ Function for python operator ``*``. @param x floats @param op_version unused @return `x*y` """ if len(x) == 2: return x[0] * x[1] p = x[0] for y in x[1:]: p *= y return p def py_opp(x, op_version=None): """ Function for python unary operator ``-``. @param x floats @param op_version unused @return `-x` """ return -x <|reserved_special_token_0|> def get_default_context_cpl(): """ Returns a default useful context to compile the converter returned by @see fn translate_fct2onnx. """ ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy} try: from skl2onnx.algebra.complex_functions import onnx_squareform_pdist from skl2onnx.algebra.complex_functions import onnx_cdist ctx['onnx_squareform_pdist'] = onnx_squareform_pdist ctx['onnx_cdist'] = onnx_cdist except ImportError: pass from skl2onnx.algebra import onnx_ops from skl2onnx.algebra.onnx_operator import OnnxOperator d = onnx_ops.__dict__ for k, v in d.items(): try: if k.startswith('Onnx') and issubclass(v, OnnxOperator): ctx[k] = v except TypeError as e: if inspect.isfunction(v): continue raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e return ctx def translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None, output_names=None, dtype=numpy.float32, verbose=0, fLOG=None): """ Translates a function into :epkg:`ONNX`. The code it produces is using classes *OnnxAbs*, *OnnxAdd*, ... 
@param fct function to convert @param context context of the function to convert something like ``{'numpy.transpose': numpy.transpose}``, if *context* is None, it receives a default value returnd by @see fn get_default_context @param cpl compile the function after it was created @param context_cpl context used at compiling time if *context_cpl* is None, it receives a default value returnd by @see fn get_default_context_cpl @param output_names names of the output in the :epkg:`ONNX` graph @param dtype :epkg:`numpy` float type used to produce the model @param verbose integer, display more information @param fLOG logging function @return code or compiled code .. exref:: :title: Convert a function into ONNX code The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. .. runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx2.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z onnx_code = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}) print(onnx_code) Next example goes further and compile the outcome. .. exref:: :title: Convert a function into ONNX code and run The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. The example executes the function, creates an :epkg:`ONNX` then uses @see cl OnnxInference to compute *predictions*. Finally it compares them to the original. .. 
runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx3.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx from mlprodict.plotting.text_plot import onnx_simple_text_plot from mlprodict.onnxrt import OnnxInference from mlprodict.npy.xop import loadop OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop( 'Add', 'Transpose', 'Mul', 'Identity') ctx = {'OnnxAdd': OnnxAdd, 'OnnxTranspose': OnnxTranspose, 'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity} def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32), 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T} original = trs(inputs['x'], inputs['y']) print('original output:', original) onnx_fct = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}, cpl=True, context_cpl=ctx, output_names=['Z']) onnx_code = onnx_fct('x', 'y', op_version=12) onnx_g = onnx_code.to_onnx(inputs, target_opset=12) print("ONNX model") print(onnx_simple_text_plot(onnx_g)) oinf = OnnxInference(onnx_g) res = oinf.run(inputs) print('-----------') print("ONNX inference:", res['Z']) The function to be converted may include python functions which must not be converted. In that case, their name must be prefixed by ``py_``. The execution of the function this one builds produces the following error:: TypeError: Parameter to MergeFrom() must be instance of same class: expected onnx.TensorProto got onnx.AttributeProto. It indicates that constants in the code marges multiple types, usually floats and tensor of floats. Floats should be converted using the following function:: def py_make_float_array(cst): return numpy.array([cst], dtype=numpy.float32) The function replaces empty contexts by default values which covers many :epkg:`numpy` functions. The tutorial :ref:`l-onnx-tutorial` gives an example of how it can be used on a more complex function. 
""" def compile_code(name, code, context=None): """ Compiles a python function with the given context. @param name function name @param code python code @param context context used at compilation @return compiled function """ if context is None: context = {} try: obj = compile(code, '', 'exec') except SyntaxError as e: raise SyntaxError(f'Unable to compile\n{code}') from e context_g = context.copy() context_l = context.copy() exec(obj, context_g, context_l) return context_l[name] if isinstance(fct, str): code = fct elif callable(fct): code = inspect.getsource(fct) else: raise TypeError(f'Unable to guess code from type {type(fct)}.') node = ast.parse(dedent(code)) v = CodeNodeVisitor() v.visit(node) if context is None: context = get_default_context() onnx_code = v.export(context=context, output_names=output_names) if not cpl: return onnx_code if verbose > 0 and fLOG is not None: fLOG('[translate_fct2onnx] python code') fLOG(code) fLOG('[translate_fct2onnx] ONNX code') fLOG(onnx_code) if context_cpl is None: context_cpl = get_default_context_cpl() if 'numpy' not in context_cpl: context_cpl = context_cpl.copy() context_cpl['numpy'] = numpy return compile_code(fct.__name__, onnx_code, context_cpl) <|reserved_special_token_1|> <|reserved_special_token_0|> def py_make_float_array(cst, op_version=None): """ Creates an array with a single element from a constant. @param cst constant @param op_version unused @return array .. runpython:: :showcode: :warningout: DeprecationWarning from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array print(py_make_float_array(5.5)) """ return numpy.array([cst], dtype=numpy.float32) def py_pow(x, p, op_version=None): """ Function for python operator ``**``. @param x float @param p power @param op_version unused @return :math:`x^p` """ return x ** p def py_mul(*x, op_version=None): """ Function for python operator ``*``. 
@param x floats @param op_version unused @return `x*y` """ if len(x) == 2: return x[0] * x[1] p = x[0] for y in x[1:]: p *= y return p def py_opp(x, op_version=None): """ Function for python unary operator ``-``. @param x floats @param op_version unused @return `-x` """ return -x <|reserved_special_token_0|> def get_default_context_cpl(): """ Returns a default useful context to compile the converter returned by @see fn translate_fct2onnx. """ ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy} try: from skl2onnx.algebra.complex_functions import onnx_squareform_pdist from skl2onnx.algebra.complex_functions import onnx_cdist ctx['onnx_squareform_pdist'] = onnx_squareform_pdist ctx['onnx_cdist'] = onnx_cdist except ImportError: pass from skl2onnx.algebra import onnx_ops from skl2onnx.algebra.onnx_operator import OnnxOperator d = onnx_ops.__dict__ for k, v in d.items(): try: if k.startswith('Onnx') and issubclass(v, OnnxOperator): ctx[k] = v except TypeError as e: if inspect.isfunction(v): continue raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e return ctx def translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None, output_names=None, dtype=numpy.float32, verbose=0, fLOG=None): """ Translates a function into :epkg:`ONNX`. The code it produces is using classes *OnnxAbs*, *OnnxAdd*, ... 
@param fct function to convert @param context context of the function to convert something like ``{'numpy.transpose': numpy.transpose}``, if *context* is None, it receives a default value returnd by @see fn get_default_context @param cpl compile the function after it was created @param context_cpl context used at compiling time if *context_cpl* is None, it receives a default value returnd by @see fn get_default_context_cpl @param output_names names of the output in the :epkg:`ONNX` graph @param dtype :epkg:`numpy` float type used to produce the model @param verbose integer, display more information @param fLOG logging function @return code or compiled code .. exref:: :title: Convert a function into ONNX code The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. .. runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx2.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z onnx_code = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}) print(onnx_code) Next example goes further and compile the outcome. .. exref:: :title: Convert a function into ONNX code and run The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. The example executes the function, creates an :epkg:`ONNX` then uses @see cl OnnxInference to compute *predictions*. Finally it compares them to the original. .. 
runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx3.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx from mlprodict.plotting.text_plot import onnx_simple_text_plot from mlprodict.onnxrt import OnnxInference from mlprodict.npy.xop import loadop OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop( 'Add', 'Transpose', 'Mul', 'Identity') ctx = {'OnnxAdd': OnnxAdd, 'OnnxTranspose': OnnxTranspose, 'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity} def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32), 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T} original = trs(inputs['x'], inputs['y']) print('original output:', original) onnx_fct = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}, cpl=True, context_cpl=ctx, output_names=['Z']) onnx_code = onnx_fct('x', 'y', op_version=12) onnx_g = onnx_code.to_onnx(inputs, target_opset=12) print("ONNX model") print(onnx_simple_text_plot(onnx_g)) oinf = OnnxInference(onnx_g) res = oinf.run(inputs) print('-----------') print("ONNX inference:", res['Z']) The function to be converted may include python functions which must not be converted. In that case, their name must be prefixed by ``py_``. The execution of the function this one builds produces the following error:: TypeError: Parameter to MergeFrom() must be instance of same class: expected onnx.TensorProto got onnx.AttributeProto. It indicates that constants in the code marges multiple types, usually floats and tensor of floats. Floats should be converted using the following function:: def py_make_float_array(cst): return numpy.array([cst], dtype=numpy.float32) The function replaces empty contexts by default values which covers many :epkg:`numpy` functions. The tutorial :ref:`l-onnx-tutorial` gives an example of how it can be used on a more complex function. 
""" def compile_code(name, code, context=None): """ Compiles a python function with the given context. @param name function name @param code python code @param context context used at compilation @return compiled function """ if context is None: context = {} try: obj = compile(code, '', 'exec') except SyntaxError as e: raise SyntaxError(f'Unable to compile\n{code}') from e context_g = context.copy() context_l = context.copy() exec(obj, context_g, context_l) return context_l[name] if isinstance(fct, str): code = fct elif callable(fct): code = inspect.getsource(fct) else: raise TypeError(f'Unable to guess code from type {type(fct)}.') node = ast.parse(dedent(code)) v = CodeNodeVisitor() v.visit(node) if context is None: context = get_default_context() onnx_code = v.export(context=context, output_names=output_names) if not cpl: return onnx_code if verbose > 0 and fLOG is not None: fLOG('[translate_fct2onnx] python code') fLOG(code) fLOG('[translate_fct2onnx] ONNX code') fLOG(onnx_code) if context_cpl is None: context_cpl = get_default_context_cpl() if 'numpy' not in context_cpl: context_cpl = context_cpl.copy() context_cpl['numpy'] = numpy return compile_code(fct.__name__, onnx_code, context_cpl) <|reserved_special_token_1|> <|reserved_special_token_0|> def py_make_float_array(cst, op_version=None): """ Creates an array with a single element from a constant. @param cst constant @param op_version unused @return array .. runpython:: :showcode: :warningout: DeprecationWarning from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array print(py_make_float_array(5.5)) """ return numpy.array([cst], dtype=numpy.float32) def py_pow(x, p, op_version=None): """ Function for python operator ``**``. @param x float @param p power @param op_version unused @return :math:`x^p` """ return x ** p def py_mul(*x, op_version=None): """ Function for python operator ``*``. 
@param x floats @param op_version unused @return `x*y` """ if len(x) == 2: return x[0] * x[1] p = x[0] for y in x[1:]: p *= y return p def py_opp(x, op_version=None): """ Function for python unary operator ``-``. @param x floats @param op_version unused @return `-x` """ return -x <|reserved_special_token_0|> def get_default_context(): """ Returns a default context useful for most of the conversion from a function using :epkg:`numpy` into :epkg:`ONNX`. """ context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array, 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'} allow = set( 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose' .split()) for k, v in numpy.__dict__.items(): if k not in allow: continue context[f'numpy.{k}'] = v context[f'np.{k}'] = v return context def get_default_context_cpl(): """ Returns a default useful context to compile the converter returned by @see fn translate_fct2onnx. 
""" ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy} try: from skl2onnx.algebra.complex_functions import onnx_squareform_pdist from skl2onnx.algebra.complex_functions import onnx_cdist ctx['onnx_squareform_pdist'] = onnx_squareform_pdist ctx['onnx_cdist'] = onnx_cdist except ImportError: pass from skl2onnx.algebra import onnx_ops from skl2onnx.algebra.onnx_operator import OnnxOperator d = onnx_ops.__dict__ for k, v in d.items(): try: if k.startswith('Onnx') and issubclass(v, OnnxOperator): ctx[k] = v except TypeError as e: if inspect.isfunction(v): continue raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e return ctx def translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None, output_names=None, dtype=numpy.float32, verbose=0, fLOG=None): """ Translates a function into :epkg:`ONNX`. The code it produces is using classes *OnnxAbs*, *OnnxAdd*, ... @param fct function to convert @param context context of the function to convert something like ``{'numpy.transpose': numpy.transpose}``, if *context* is None, it receives a default value returnd by @see fn get_default_context @param cpl compile the function after it was created @param context_cpl context used at compiling time if *context_cpl* is None, it receives a default value returnd by @see fn get_default_context_cpl @param output_names names of the output in the :epkg:`ONNX` graph @param dtype :epkg:`numpy` float type used to produce the model @param verbose integer, display more information @param fLOG logging function @return code or compiled code .. exref:: :title: Convert a function into ONNX code The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. .. 
runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx2.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z onnx_code = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}) print(onnx_code) Next example goes further and compile the outcome. .. exref:: :title: Convert a function into ONNX code and run The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. The example executes the function, creates an :epkg:`ONNX` then uses @see cl OnnxInference to compute *predictions*. Finally it compares them to the original. .. runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx3.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx from mlprodict.plotting.text_plot import onnx_simple_text_plot from mlprodict.onnxrt import OnnxInference from mlprodict.npy.xop import loadop OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop( 'Add', 'Transpose', 'Mul', 'Identity') ctx = {'OnnxAdd': OnnxAdd, 'OnnxTranspose': OnnxTranspose, 'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity} def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32), 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T} original = trs(inputs['x'], inputs['y']) print('original output:', original) onnx_fct = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}, cpl=True, context_cpl=ctx, output_names=['Z']) onnx_code = onnx_fct('x', 'y', op_version=12) onnx_g = onnx_code.to_onnx(inputs, target_opset=12) print("ONNX model") print(onnx_simple_text_plot(onnx_g)) oinf = OnnxInference(onnx_g) res = oinf.run(inputs) print('-----------') print("ONNX inference:", res['Z']) The function to be converted may include python functions which must not be 
converted. In that case, their name must be prefixed by ``py_``. The execution of the function this one builds produces the following error:: TypeError: Parameter to MergeFrom() must be instance of same class: expected onnx.TensorProto got onnx.AttributeProto. It indicates that constants in the code marges multiple types, usually floats and tensor of floats. Floats should be converted using the following function:: def py_make_float_array(cst): return numpy.array([cst], dtype=numpy.float32) The function replaces empty contexts by default values which covers many :epkg:`numpy` functions. The tutorial :ref:`l-onnx-tutorial` gives an example of how it can be used on a more complex function. """ def compile_code(name, code, context=None): """ Compiles a python function with the given context. @param name function name @param code python code @param context context used at compilation @return compiled function """ if context is None: context = {} try: obj = compile(code, '', 'exec') except SyntaxError as e: raise SyntaxError(f'Unable to compile\n{code}') from e context_g = context.copy() context_l = context.copy() exec(obj, context_g, context_l) return context_l[name] if isinstance(fct, str): code = fct elif callable(fct): code = inspect.getsource(fct) else: raise TypeError(f'Unable to guess code from type {type(fct)}.') node = ast.parse(dedent(code)) v = CodeNodeVisitor() v.visit(node) if context is None: context = get_default_context() onnx_code = v.export(context=context, output_names=output_names) if not cpl: return onnx_code if verbose > 0 and fLOG is not None: fLOG('[translate_fct2onnx] python code') fLOG(code) fLOG('[translate_fct2onnx] ONNX code') fLOG(onnx_code) if context_cpl is None: context_cpl = get_default_context_cpl() if 'numpy' not in context_cpl: context_cpl = context_cpl.copy() context_cpl['numpy'] = numpy return compile_code(fct.__name__, onnx_code, context_cpl) <|reserved_special_token_1|> <|reserved_special_token_0|> import inspect import ast from 
textwrap import dedent import numpy from scipy.spatial.distance import squareform, pdist from .node_visitor_translator import CodeNodeVisitor def py_make_float_array(cst, op_version=None): """ Creates an array with a single element from a constant. @param cst constant @param op_version unused @return array .. runpython:: :showcode: :warningout: DeprecationWarning from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array print(py_make_float_array(5.5)) """ return numpy.array([cst], dtype=numpy.float32) def py_pow(x, p, op_version=None): """ Function for python operator ``**``. @param x float @param p power @param op_version unused @return :math:`x^p` """ return x ** p def py_mul(*x, op_version=None): """ Function for python operator ``*``. @param x floats @param op_version unused @return `x*y` """ if len(x) == 2: return x[0] * x[1] p = x[0] for y in x[1:]: p *= y return p def py_opp(x, op_version=None): """ Function for python unary operator ``-``. @param x floats @param op_version unused @return `-x` """ return -x def squareform_pdist(X, metric='sqeuclidean', op_version=None): """ Replacements for `squareform <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_ and `pdist <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_. """ return squareform(pdist(X, metric=metric)) def get_default_context(): """ Returns a default context useful for most of the conversion from a function using :epkg:`numpy` into :epkg:`ONNX`. 
""" context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array, 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'} allow = set( 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose' .split()) for k, v in numpy.__dict__.items(): if k not in allow: continue context[f'numpy.{k}'] = v context[f'np.{k}'] = v return context def get_default_context_cpl(): """ Returns a default useful context to compile the converter returned by @see fn translate_fct2onnx. """ ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy} try: from skl2onnx.algebra.complex_functions import onnx_squareform_pdist from skl2onnx.algebra.complex_functions import onnx_cdist ctx['onnx_squareform_pdist'] = onnx_squareform_pdist ctx['onnx_cdist'] = onnx_cdist except ImportError: pass from skl2onnx.algebra import onnx_ops from skl2onnx.algebra.onnx_operator import OnnxOperator d = onnx_ops.__dict__ for k, v in d.items(): try: if k.startswith('Onnx') and issubclass(v, OnnxOperator): ctx[k] = v except TypeError as e: if inspect.isfunction(v): continue raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e return ctx def translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None, output_names=None, dtype=numpy.float32, verbose=0, fLOG=None): """ Translates a function into :epkg:`ONNX`. The code it produces is using classes *OnnxAbs*, *OnnxAdd*, ... 
@param fct function to convert @param context context of the function to convert something like ``{'numpy.transpose': numpy.transpose}``, if *context* is None, it receives a default value returnd by @see fn get_default_context @param cpl compile the function after it was created @param context_cpl context used at compiling time if *context_cpl* is None, it receives a default value returnd by @see fn get_default_context_cpl @param output_names names of the output in the :epkg:`ONNX` graph @param dtype :epkg:`numpy` float type used to produce the model @param verbose integer, display more information @param fLOG logging function @return code or compiled code .. exref:: :title: Convert a function into ONNX code The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. .. runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx2.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z onnx_code = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}) print(onnx_code) Next example goes further and compile the outcome. .. exref:: :title: Convert a function into ONNX code and run The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. The example executes the function, creates an :epkg:`ONNX` then uses @see cl OnnxInference to compute *predictions*. Finally it compares them to the original. .. 
runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx3.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx from mlprodict.plotting.text_plot import onnx_simple_text_plot from mlprodict.onnxrt import OnnxInference from mlprodict.npy.xop import loadop OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop( 'Add', 'Transpose', 'Mul', 'Identity') ctx = {'OnnxAdd': OnnxAdd, 'OnnxTranspose': OnnxTranspose, 'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity} def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32), 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T} original = trs(inputs['x'], inputs['y']) print('original output:', original) onnx_fct = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}, cpl=True, context_cpl=ctx, output_names=['Z']) onnx_code = onnx_fct('x', 'y', op_version=12) onnx_g = onnx_code.to_onnx(inputs, target_opset=12) print("ONNX model") print(onnx_simple_text_plot(onnx_g)) oinf = OnnxInference(onnx_g) res = oinf.run(inputs) print('-----------') print("ONNX inference:", res['Z']) The function to be converted may include python functions which must not be converted. In that case, their name must be prefixed by ``py_``. The execution of the function this one builds produces the following error:: TypeError: Parameter to MergeFrom() must be instance of same class: expected onnx.TensorProto got onnx.AttributeProto. It indicates that constants in the code marges multiple types, usually floats and tensor of floats. Floats should be converted using the following function:: def py_make_float_array(cst): return numpy.array([cst], dtype=numpy.float32) The function replaces empty contexts by default values which covers many :epkg:`numpy` functions. The tutorial :ref:`l-onnx-tutorial` gives an example of how it can be used on a more complex function. 
""" def compile_code(name, code, context=None): """ Compiles a python function with the given context. @param name function name @param code python code @param context context used at compilation @return compiled function """ if context is None: context = {} try: obj = compile(code, '', 'exec') except SyntaxError as e: raise SyntaxError(f'Unable to compile\n{code}') from e context_g = context.copy() context_l = context.copy() exec(obj, context_g, context_l) return context_l[name] if isinstance(fct, str): code = fct elif callable(fct): code = inspect.getsource(fct) else: raise TypeError(f'Unable to guess code from type {type(fct)}.') node = ast.parse(dedent(code)) v = CodeNodeVisitor() v.visit(node) if context is None: context = get_default_context() onnx_code = v.export(context=context, output_names=output_names) if not cpl: return onnx_code if verbose > 0 and fLOG is not None: fLOG('[translate_fct2onnx] python code') fLOG(code) fLOG('[translate_fct2onnx] ONNX code') fLOG(onnx_code) if context_cpl is None: context_cpl = get_default_context_cpl() if 'numpy' not in context_cpl: context_cpl = context_cpl.copy() context_cpl['numpy'] = numpy return compile_code(fct.__name__, onnx_code, context_cpl) <|reserved_special_token_1|> """ @file @brief One class which visits a syntax tree. """ import inspect import ast from textwrap import dedent import numpy from scipy.spatial.distance import squareform, pdist from .node_visitor_translator import CodeNodeVisitor def py_make_float_array(cst, op_version=None): """ Creates an array with a single element from a constant. @param cst constant @param op_version unused @return array .. runpython:: :showcode: :warningout: DeprecationWarning from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array print(py_make_float_array(5.5)) """ return numpy.array([cst], dtype=numpy.float32) def py_pow(x, p, op_version=None): """ Function for python operator ``**``. 
@param x float @param p power @param op_version unused @return :math:`x^p` """ return x ** p def py_mul(*x, op_version=None): """ Function for python operator ``*``. @param x floats @param op_version unused @return `x*y` """ if len(x) == 2: return x[0] * x[1] p = x[0] for y in x[1:]: p *= y return p def py_opp(x, op_version=None): """ Function for python unary operator ``-``. @param x floats @param op_version unused @return `-x` """ return -x def squareform_pdist(X, metric='sqeuclidean', op_version=None): """ Replacements for `squareform <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_ and `pdist <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_. """ return squareform(pdist(X, metric=metric)) def get_default_context(): """ Returns a default context useful for most of the conversion from a function using :epkg:`numpy` into :epkg:`ONNX`. """ context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array, 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'} allow = set(('abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divide' 'equal exp floor greater invert less log matmul maximum minimum mod' 'multiply power sign sin sinh sqrt square subtract tan tanh transpose').split()) for k, v in numpy.__dict__.items(): if k not in allow: continue context[f'numpy.{k}'] = v context[f'np.{k}'] = v return context def get_default_context_cpl(): """ Returns a default useful context to compile the converter returned by @see fn translate_fct2onnx. 
""" ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy} try: from skl2onnx.algebra.complex_functions import onnx_squareform_pdist # delayed from skl2onnx.algebra.complex_functions import onnx_cdist # delayed ctx['onnx_squareform_pdist'] = onnx_squareform_pdist ctx['onnx_cdist'] = onnx_cdist except ImportError: # pragma: no cover # Too old version for skl2onnx. pass from skl2onnx.algebra import onnx_ops # delayed from skl2onnx.algebra.onnx_operator import OnnxOperator # delayed d = onnx_ops.__dict__ for k, v in d.items(): try: if k.startswith("Onnx") and issubclass(v, OnnxOperator): ctx[k] = v except TypeError as e: if inspect.isfunction(v): continue raise RuntimeError( # pragma: no cover f"Issue with {k}={v} (type={type(v)})") from e return ctx def translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None, output_names=None, dtype=numpy.float32, verbose=0, fLOG=None): """ Translates a function into :epkg:`ONNX`. The code it produces is using classes *OnnxAbs*, *OnnxAdd*, ... @param fct function to convert @param context context of the function to convert something like ``{'numpy.transpose': numpy.transpose}``, if *context* is None, it receives a default value returnd by @see fn get_default_context @param cpl compile the function after it was created @param context_cpl context used at compiling time if *context_cpl* is None, it receives a default value returnd by @see fn get_default_context_cpl @param output_names names of the output in the :epkg:`ONNX` graph @param dtype :epkg:`numpy` float type used to produce the model @param verbose integer, display more information @param fLOG logging function @return code or compiled code .. exref:: :title: Convert a function into ONNX code The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. .. 
runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx2.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z onnx_code = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}) print(onnx_code) Next example goes further and compile the outcome. .. exref:: :title: Convert a function into ONNX code and run The following code parses a python function and returns another python function which produces an :epkg:`ONNX` graph if executed. The example executes the function, creates an :epkg:`ONNX` then uses @see cl OnnxInference to compute *predictions*. Finally it compares them to the original. .. runpython:: :showcode: :warningout: DeprecationWarning :process: :store_in_file: fct2onnx3.py import numpy from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx from mlprodict.plotting.text_plot import onnx_simple_text_plot from mlprodict.onnxrt import OnnxInference from mlprodict.npy.xop import loadop OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop( 'Add', 'Transpose', 'Mul', 'Identity') ctx = {'OnnxAdd': OnnxAdd, 'OnnxTranspose': OnnxTranspose, 'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity} def trs(x, y): z = x + numpy.transpose(y, axes=[1, 0]) return x * z inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32), 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T} original = trs(inputs['x'], inputs['y']) print('original output:', original) onnx_fct = translate_fct2onnx( trs, context={'numpy.transpose': numpy.transpose}, cpl=True, context_cpl=ctx, output_names=['Z']) onnx_code = onnx_fct('x', 'y', op_version=12) onnx_g = onnx_code.to_onnx(inputs, target_opset=12) print("ONNX model") print(onnx_simple_text_plot(onnx_g)) oinf = OnnxInference(onnx_g) res = oinf.run(inputs) print('-----------') print("ONNX inference:", res['Z']) The function to be converted may include python functions which must not be 
converted. In that case, their name must be prefixed by ``py_``. The execution of the function this one builds produces the following error:: TypeError: Parameter to MergeFrom() must be instance of same class: expected onnx.TensorProto got onnx.AttributeProto. It indicates that constants in the code marges multiple types, usually floats and tensor of floats. Floats should be converted using the following function:: def py_make_float_array(cst): return numpy.array([cst], dtype=numpy.float32) The function replaces empty contexts by default values which covers many :epkg:`numpy` functions. The tutorial :ref:`l-onnx-tutorial` gives an example of how it can be used on a more complex function. """ def compile_code(name, code, context=None): """ Compiles a python function with the given context. @param name function name @param code python code @param context context used at compilation @return compiled function """ if context is None: context = {} # pragma: no cover try: obj = compile(code, "", "exec") except SyntaxError as e: # pragma: no cover raise SyntaxError(f"Unable to compile\n{code}") from e context_g = context.copy() context_l = context.copy() exec(obj, context_g, context_l) # pylint: disable=W0122 return context_l[name] if isinstance(fct, str): code = fct elif callable(fct): code = inspect.getsource(fct) else: raise TypeError( # pragma: no cover f"Unable to guess code from type {type(fct)}.") node = ast.parse(dedent(code)) v = CodeNodeVisitor() v.visit(node) if context is None: context = get_default_context() onnx_code = v.export(context=context, output_names=output_names) if not cpl: return onnx_code if verbose > 0 and fLOG is not None: # pragma: no cover fLOG('[translate_fct2onnx] python code') fLOG(code) fLOG('[translate_fct2onnx] ONNX code') fLOG(onnx_code) if context_cpl is None: context_cpl = get_default_context_cpl() if 'numpy' not in context_cpl: context_cpl = context_cpl.copy() context_cpl['numpy'] = numpy return compile_code(fct.__name__, onnx_code, 
context_cpl)
flexible
{ "blob_id": "fdf6c28e65b50c52550a95c2d991b1eb3ec53a2f", "index": 3540, "step-1": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\n<mask token>\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, 
dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. 
The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n", "step-2": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. 
The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. 
The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n", "step-3": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist',\n 'squareform_pdist': 'squareform_pdist'}\n allow = set(\n 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose'\n .split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions 
import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = 
oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if 
context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n", "step-4": "<mask token>\nimport inspect\nimport ast\nfrom textwrap import dedent\nimport numpy\nfrom scipy.spatial.distance import squareform, pdist\nfrom .node_visitor_translator import CodeNodeVisitor\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\ndef squareform_pdist(X, metric='sqeuclidean', op_version=None):\n \"\"\"\n Replacements for `squareform\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_\n and `pdist\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_.\n \"\"\"\n return squareform(pdist(X, metric=metric))\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n 
context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist',\n 'squareform_pdist': 'squareform_pdist'}\n allow = set(\n 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose'\n .split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. 
The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. 
The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n", "step-5": "\"\"\"\n@file\n@brief One class which visits a syntax tree.\n\"\"\"\nimport inspect\nimport ast\nfrom textwrap import dedent\nimport numpy\nfrom scipy.spatial.distance import squareform, pdist\nfrom .node_visitor_translator import CodeNodeVisitor\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\ndef squareform_pdist(X, metric='sqeuclidean', op_version=None):\n \"\"\"\n Replacements for `squareform\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_\n and `pdist\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_.\n \"\"\"\n return squareform(pdist(X, metric=metric))\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp,\n 'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'}\n allow = set(('abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divide'\n 'equal exp floor greater invert less log matmul maximum minimum mod'\n 'multiply power sign sin sinh sqrt square subtract tan tanh transpose').split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default 
useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array,\n 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp,\n 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist # delayed\n from skl2onnx.algebra.complex_functions import onnx_cdist # delayed\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError: # pragma: no cover\n # Too old version for skl2onnx.\n pass\n\n from skl2onnx.algebra import onnx_ops # delayed\n from skl2onnx.algebra.onnx_operator import OnnxOperator # delayed\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith(\"Onnx\") and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError( # pragma: no cover\n f\"Issue with {k}={v} (type={type(v)})\") from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False,\n context_cpl=None, output_names=None,\n dtype=numpy.float32,\n verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. 
exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = 
onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {} # pragma: no cover\n try:\n obj = compile(code, \"\", \"exec\")\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(f\"Unable to compile\\n{code}\") from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l) # pylint: disable=W0122\n return context_l[name]\n\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError( # pragma: no cover\n f\"Unable to guess code from type {type(fct)}.\")\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = 
get_default_context()\n onnx_code = v.export(context=context,\n output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None: # pragma: no cover\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n", "step-ids": [ 5, 6, 7, 9, 10 ] }
[ 5, 6, 7, 9, 10 ]
<|reserved_special_token_0|> def get_gpu_name(): try: out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'], stdout=subprocess.PIPE).stdout out_list = out_str.decode('utf-8').split('\n') out_list = out_list[1:-1] return out_list except Exception as e: print(e) def read_batch(src): """Unpack the pickle files """ with open(src, 'rb') as f: if sys.version_info.major == 2: data = pickle.load(f) else: data = pickle.load(f, encoding='latin1') return data def shuffle_data(X, y): s = np.arange(len(X)) np.random.shuffle(s) X = X[s] y = y[s] return X, y <|reserved_special_token_0|> def download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'): """Load the training and testing data """ print('Downloading ' + src) fname, h = urlretrieve(src, './delete.me') print('Done.') try: print('Extracting files...') with np.load(fname) as f: x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] print('Done.') finally: os.remove(fname) return x_train, x_test, y_train, y_test <|reserved_special_token_0|> def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): """ Replicates same pre-processing as: https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py I'm not sure if we want to load another version of IMDB that has got words, but if it does have words we would still convert to index in this backend script that is not meant for others to see ... But I'm worried this obfuscates the data a bit? 
""" START_CHAR = 1 OOV_CHAR = 2 INDEX_FROM = 3 x_train, x_test, y_train, y_test = download_imdb() idx = len(x_train) _xs = np.concatenate([x_train, x_test]) _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs] if max_features: print('Trimming to {} max-features'.format(max_features)) _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs] print('Padding to length {}'.format(seq_len)) xs = np.zeros((len(_xs), seq_len), dtype=np.int) for o_idx, obs in enumerate(_xs): obs = obs[-seq_len:] for i_idx in range(len(obs)): if i_idx < seq_len: xs[o_idx][i_idx] = obs[i_idx] if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = np.array(xs[:idx]).astype(np.int32) x_test = np.array(xs[idx:]).astype(np.int32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test <|reserved_special_token_1|> <|reserved_special_token_0|> def get_gpu_name(): try: out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'], stdout=subprocess.PIPE).stdout out_list = out_str.decode('utf-8').split('\n') out_list = out_list[1:-1] return out_list except Exception as e: print(e) def read_batch(src): """Unpack the pickle files """ with open(src, 'rb') as f: if sys.version_info.major == 2: data = pickle.load(f) else: data = pickle.load(f, encoding='latin1') return data def shuffle_data(X, y): s = np.arange(len(X)) np.random.shuffle(s) X = X[s] y = y[s] return X, y <|reserved_special_token_0|> def download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'): """Load the training and testing data """ print('Downloading ' + src) fname, h = urlretrieve(src, './delete.me') print('Done.') try: print('Extracting files...') with np.load(fname) as f: x_train, y_train = f['x_train'], f['y_train'] 
x_test, y_test = f['x_test'], f['y_test'] print('Done.') finally: os.remove(fname) return x_train, x_test, y_train, y_test def cifar_for_library(download_dir, channel_first=True, one_hot=False): x_train, x_test, y_train, y_test = download_cifar(download_dir) x_train = x_train / 255.0 x_test = x_test / 255.0 x_train = x_train.reshape(-1, 3, 32, 32) x_test = x_test.reshape(-1, 3, 32, 32) if not channel_first: x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = x_train.astype(np.float32) x_test = x_test.astype(np.float32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): """ Replicates same pre-processing as: https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py I'm not sure if we want to load another version of IMDB that has got words, but if it does have words we would still convert to index in this backend script that is not meant for others to see ... But I'm worried this obfuscates the data a bit? 
""" START_CHAR = 1 OOV_CHAR = 2 INDEX_FROM = 3 x_train, x_test, y_train, y_test = download_imdb() idx = len(x_train) _xs = np.concatenate([x_train, x_test]) _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs] if max_features: print('Trimming to {} max-features'.format(max_features)) _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs] print('Padding to length {}'.format(seq_len)) xs = np.zeros((len(_xs), seq_len), dtype=np.int) for o_idx, obs in enumerate(_xs): obs = obs[-seq_len:] for i_idx in range(len(obs)): if i_idx < seq_len: xs[o_idx][i_idx] = obs[i_idx] if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = np.array(xs[:idx]).astype(np.int32) x_test = np.array(xs[idx:]).astype(np.int32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test <|reserved_special_token_1|> <|reserved_special_token_0|> def get_gpu_name(): try: out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'], stdout=subprocess.PIPE).stdout out_list = out_str.decode('utf-8').split('\n') out_list = out_list[1:-1] return out_list except Exception as e: print(e) def read_batch(src): """Unpack the pickle files """ with open(src, 'rb') as f: if sys.version_info.major == 2: data = pickle.load(f) else: data = pickle.load(f, encoding='latin1') return data def shuffle_data(X, y): s = np.arange(len(X)) np.random.shuffle(s) X = X[s] y = y[s] return X, y <|reserved_special_token_0|> def download_cifar(download_dir, src= 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'): """Load the training and testing data """ if not os.path.isfile('{}/cifar-10-python.tar.gz'.format(download_dir)): print('Downloading ' + src) fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format( 
download_dir)) print('Done.') print('Extracting files...') with tarfile.open(fname) as tar: tar.extractall(download_dir) print('Done.') print('Preparing train set...') train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'. format(download_dir, i + 1)) for i in range(5)] x_train = np.concatenate([t['data'] for t in train_list]) y_train = np.concatenate([t['labels'] for t in train_list]) print('Preparing test set...') tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir)) x_test = tst['data'] y_test = np.asarray(tst['labels']) print('Done.') return x_train, x_test, y_train, y_test def download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'): """Load the training and testing data """ print('Downloading ' + src) fname, h = urlretrieve(src, './delete.me') print('Done.') try: print('Extracting files...') with np.load(fname) as f: x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] print('Done.') finally: os.remove(fname) return x_train, x_test, y_train, y_test def cifar_for_library(download_dir, channel_first=True, one_hot=False): x_train, x_test, y_train, y_test = download_cifar(download_dir) x_train = x_train / 255.0 x_test = x_test / 255.0 x_train = x_train.reshape(-1, 3, 32, 32) x_test = x_test.reshape(-1, 3, 32, 32) if not channel_first: x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = x_train.astype(np.float32) x_test = x_test.astype(np.float32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): """ Replicates same pre-processing as: 
https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py I'm not sure if we want to load another version of IMDB that has got words, but if it does have words we would still convert to index in this backend script that is not meant for others to see ... But I'm worried this obfuscates the data a bit? """ START_CHAR = 1 OOV_CHAR = 2 INDEX_FROM = 3 x_train, x_test, y_train, y_test = download_imdb() idx = len(x_train) _xs = np.concatenate([x_train, x_test]) _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs] if max_features: print('Trimming to {} max-features'.format(max_features)) _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs] print('Padding to length {}'.format(seq_len)) xs = np.zeros((len(_xs), seq_len), dtype=np.int) for o_idx, obs in enumerate(_xs): obs = obs[-seq_len:] for i_idx in range(len(obs)): if i_idx < seq_len: xs[o_idx][i_idx] = obs[i_idx] if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = np.array(xs[:idx]).astype(np.int32) x_test = np.array(xs[idx:]).astype(np.int32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test <|reserved_special_token_1|> <|reserved_special_token_0|> def get_gpu_name(): try: out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'], stdout=subprocess.PIPE).stdout out_list = out_str.decode('utf-8').split('\n') out_list = out_list[1:-1] return out_list except Exception as e: print(e) def read_batch(src): """Unpack the pickle files """ with open(src, 'rb') as f: if sys.version_info.major == 2: data = pickle.load(f) else: data = pickle.load(f, encoding='latin1') return data def shuffle_data(X, y): s = np.arange(len(X)) np.random.shuffle(s) X = X[s] y = y[s] return X, y def yield_mb(X, y, 
batchsize=64, shuffle=False): assert len(X) == len(y) if shuffle: X, y = shuffle_data(X, y) for i in range(len(X) // batchsize): yield X[i * batchsize:(i + 1) * batchsize], y[i * batchsize:(i + 1) * batchsize] def download_cifar(download_dir, src= 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'): """Load the training and testing data """ if not os.path.isfile('{}/cifar-10-python.tar.gz'.format(download_dir)): print('Downloading ' + src) fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format( download_dir)) print('Done.') print('Extracting files...') with tarfile.open(fname) as tar: tar.extractall(download_dir) print('Done.') print('Preparing train set...') train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'. format(download_dir, i + 1)) for i in range(5)] x_train = np.concatenate([t['data'] for t in train_list]) y_train = np.concatenate([t['labels'] for t in train_list]) print('Preparing test set...') tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir)) x_test = tst['data'] y_test = np.asarray(tst['labels']) print('Done.') return x_train, x_test, y_train, y_test def download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'): """Load the training and testing data """ print('Downloading ' + src) fname, h = urlretrieve(src, './delete.me') print('Done.') try: print('Extracting files...') with np.load(fname) as f: x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] print('Done.') finally: os.remove(fname) return x_train, x_test, y_train, y_test def cifar_for_library(download_dir, channel_first=True, one_hot=False): x_train, x_test, y_train, y_test = download_cifar(download_dir) x_train = x_train / 255.0 x_test = x_test / 255.0 x_train = x_train.reshape(-1, 3, 32, 32) x_test = x_test.reshape(-1, 3, 32, 32) if not channel_first: x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = 
np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = x_train.astype(np.float32) x_test = x_test.astype(np.float32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): """ Replicates same pre-processing as: https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py I'm not sure if we want to load another version of IMDB that has got words, but if it does have words we would still convert to index in this backend script that is not meant for others to see ... But I'm worried this obfuscates the data a bit? """ START_CHAR = 1 OOV_CHAR = 2 INDEX_FROM = 3 x_train, x_test, y_train, y_test = download_imdb() idx = len(x_train) _xs = np.concatenate([x_train, x_test]) _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs] if max_features: print('Trimming to {} max-features'.format(max_features)) _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs] print('Padding to length {}'.format(seq_len)) xs = np.zeros((len(_xs), seq_len), dtype=np.int) for o_idx, obs in enumerate(_xs): obs = obs[-seq_len:] for i_idx in range(len(obs)): if i_idx < seq_len: xs[o_idx][i_idx] = obs[i_idx] if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() x_train = np.array(xs[:idx]).astype(np.int32) x_test = np.array(xs[idx:]).astype(np.int32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test <|reserved_special_token_1|> from sklearn.datasets import fetch_mldata from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import 
train_test_split import numpy as np import os import tarfile import pickle import subprocess import sys if sys.version_info.major == 2: # Backward compatibility with python 2. from six.moves import urllib urlretrieve = urllib.request.urlretrieve else: from urllib.request import urlretrieve def get_gpu_name(): try: out_str = subprocess.run(["nvidia-smi", "--query-gpu=gpu_name", "--format=csv"], stdout=subprocess.PIPE).stdout out_list = out_str.decode("utf-8").split('\n') out_list = out_list[1:-1] return out_list except Exception as e: print(e) def read_batch(src): '''Unpack the pickle files ''' with open(src, 'rb') as f: if sys.version_info.major == 2: data = pickle.load(f) else: data = pickle.load(f, encoding='latin1') return data def shuffle_data(X, y): s = np.arange(len(X)) np.random.shuffle(s) X = X[s] y = y[s] return X, y def yield_mb(X, y, batchsize=64, shuffle=False): assert len(X) == len(y) if shuffle: X, y = shuffle_data(X, y) # Only complete batches are submitted for i in range(len(X)//batchsize): yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize] def download_cifar(download_dir, src="http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"): '''Load the training and testing data ''' if not os.path.isfile("{}/cifar-10-python.tar.gz".format(download_dir)): print ('Downloading ' + src) fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format(download_dir)) print ('Done.') print ('Extracting files...') with tarfile.open(fname) as tar: tar.extractall(download_dir) print ('Done.') print ('Preparing train set...') train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'.format(download_dir, i + 1)) for i in range(5)] x_train = np.concatenate([t['data'] for t in train_list]) y_train = np.concatenate([t['labels'] for t in train_list]) print ('Preparing test set...') tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir)) x_test = tst['data'] y_test = np.asarray(tst['labels']) print ('Done.') return x_train, 
x_test, y_train, y_test def download_imdb(src="https://s3.amazonaws.com/text-datasets/imdb.npz"): '''Load the training and testing data ''' # FLAG: should we host this on azure? print ('Downloading ' + src) fname, h = urlretrieve(src, './delete.me') print ('Done.') try: print ('Extracting files...') with np.load(fname) as f: x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] print ('Done.') finally: os.remove(fname) return x_train, x_test, y_train, y_test def cifar_for_library(download_dir, channel_first=True, one_hot=False): # Raw data x_train, x_test, y_train, y_test = download_cifar(download_dir) # Scale pixel intensity x_train = x_train/255.0 x_test = x_test/255.0 # Reshape x_train = x_train.reshape(-1, 3, 32, 32) x_test = x_test.reshape(-1, 3, 32, 32) # Channel last if not channel_first: x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) # One-hot encode y if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() # dtypes x_train = x_train.astype(np.float32) x_test = x_test.astype(np.float32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): ''' Replicates same pre-processing as: https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py I'm not sure if we want to load another version of IMDB that has got words, but if it does have words we would still convert to index in this backend script that is not meant for others to see ... But I'm worried this obfuscates the data a bit? 
''' # 0 (padding), 1 (start), 2 (OOV) START_CHAR=1 OOV_CHAR=2 INDEX_FROM=3 # Raw data (has been encoded into words already) x_train, x_test, y_train, y_test = download_imdb() # Combine for processing idx = len(x_train) _xs = np.concatenate([x_train, x_test]) # Words will start from INDEX_FROM (shift by 3) _xs = [[START_CHAR] + [w + INDEX_FROM for w in x] for x in _xs] # Max-features - replace words bigger than index with oov_char # E.g. if max_features = 5 then keep 0, 1, 2, 3, 4 i.e. words 3 and 4 if max_features: print("Trimming to {} max-features".format(max_features)) _xs = [[w if (w < max_features) else OOV_CHAR for w in x] for x in _xs] # Pad to same sequences print("Padding to length {}".format(seq_len)) xs = np.zeros((len(_xs), seq_len), dtype=np.int) for o_idx, obs in enumerate(_xs): # Match keras pre-processing of taking last elements obs = obs[-seq_len:] for i_idx in range(len(obs)): if i_idx < seq_len: xs[o_idx][i_idx] = obs[i_idx] # One-hot if one_hot: y_train = np.expand_dims(y_train, axis=-1) y_test = np.expand_dims(y_test, axis=-1) enc = OneHotEncoder(categorical_features='all') fit = enc.fit(y_train) y_train = fit.transform(y_train).toarray() y_test = fit.transform(y_test).toarray() # dtypes x_train = np.array(xs[:idx]).astype(np.int32) x_test = np.array(xs[idx:]).astype(np.int32) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return x_train, x_test, y_train, y_test
flexible
{ "blob_id": "6eec95932ef445ba588f200233495f59c4d77aac", "index": 5396, "step-1": "<mask token>\n\n\ndef get_gpu_name():\n try:\n out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv'], stdout=subprocess.PIPE).stdout\n out_list = out_str.decode('utf-8').split('\\n')\n out_list = out_list[1:-1]\n return out_list\n except Exception as e:\n print(e)\n\n\ndef read_batch(src):\n \"\"\"Unpack the pickle files\n \"\"\"\n with open(src, 'rb') as f:\n if sys.version_info.major == 2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n return data\n\n\ndef shuffle_data(X, y):\n s = np.arange(len(X))\n np.random.shuffle(s)\n X = X[s]\n y = y[s]\n return X, y\n\n\n<mask token>\n\n\ndef download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'):\n \"\"\"Load the training and testing data\n \"\"\"\n print('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print('Done.')\n try:\n print('Extracting files...')\n with np.load(fname) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n print('Done.')\n finally:\n os.remove(fname)\n return x_train, x_test, y_train, y_test\n\n\n<mask token>\n\n\ndef imdb_for_library(seq_len=100, max_features=20000, one_hot=False):\n \"\"\" Replicates same pre-processing as:\n https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py\n \n I'm not sure if we want to load another version of IMDB that has got \n words, but if it does have words we would still convert to index in this \n backend script that is not meant for others to see ... 
\n \n But I'm worried this obfuscates the data a bit?\n \"\"\"\n START_CHAR = 1\n OOV_CHAR = 2\n INDEX_FROM = 3\n x_train, x_test, y_train, y_test = download_imdb()\n idx = len(x_train)\n _xs = np.concatenate([x_train, x_test])\n _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs]\n if max_features:\n print('Trimming to {} max-features'.format(max_features))\n _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs]\n print('Padding to length {}'.format(seq_len))\n xs = np.zeros((len(_xs), seq_len), dtype=np.int)\n for o_idx, obs in enumerate(_xs):\n obs = obs[-seq_len:]\n for i_idx in range(len(obs)):\n if i_idx < seq_len:\n xs[o_idx][i_idx] = obs[i_idx]\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = np.array(xs[:idx]).astype(np.int32)\n x_test = np.array(xs[idx:]).astype(np.int32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n", "step-2": "<mask token>\n\n\ndef get_gpu_name():\n try:\n out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv'], stdout=subprocess.PIPE).stdout\n out_list = out_str.decode('utf-8').split('\\n')\n out_list = out_list[1:-1]\n return out_list\n except Exception as e:\n print(e)\n\n\ndef read_batch(src):\n \"\"\"Unpack the pickle files\n \"\"\"\n with open(src, 'rb') as f:\n if sys.version_info.major == 2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n return data\n\n\ndef shuffle_data(X, y):\n s = np.arange(len(X))\n np.random.shuffle(s)\n X = X[s]\n y = y[s]\n return X, y\n\n\n<mask token>\n\n\ndef download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'):\n \"\"\"Load the training and testing data\n \"\"\"\n print('Downloading ' + src)\n fname, h = 
urlretrieve(src, './delete.me')\n print('Done.')\n try:\n print('Extracting files...')\n with np.load(fname) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n print('Done.')\n finally:\n os.remove(fname)\n return x_train, x_test, y_train, y_test\n\n\ndef cifar_for_library(download_dir, channel_first=True, one_hot=False):\n x_train, x_test, y_train, y_test = download_cifar(download_dir)\n x_train = x_train / 255.0\n x_test = x_test / 255.0\n x_train = x_train.reshape(-1, 3, 32, 32)\n x_test = x_test.reshape(-1, 3, 32, 32)\n if not channel_first:\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = x_train.astype(np.float32)\n x_test = x_test.astype(np.float32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n\n\ndef imdb_for_library(seq_len=100, max_features=20000, one_hot=False):\n \"\"\" Replicates same pre-processing as:\n https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py\n \n I'm not sure if we want to load another version of IMDB that has got \n words, but if it does have words we would still convert to index in this \n backend script that is not meant for others to see ... 
\n \n But I'm worried this obfuscates the data a bit?\n \"\"\"\n START_CHAR = 1\n OOV_CHAR = 2\n INDEX_FROM = 3\n x_train, x_test, y_train, y_test = download_imdb()\n idx = len(x_train)\n _xs = np.concatenate([x_train, x_test])\n _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs]\n if max_features:\n print('Trimming to {} max-features'.format(max_features))\n _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs]\n print('Padding to length {}'.format(seq_len))\n xs = np.zeros((len(_xs), seq_len), dtype=np.int)\n for o_idx, obs in enumerate(_xs):\n obs = obs[-seq_len:]\n for i_idx in range(len(obs)):\n if i_idx < seq_len:\n xs[o_idx][i_idx] = obs[i_idx]\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = np.array(xs[:idx]).astype(np.int32)\n x_test = np.array(xs[idx:]).astype(np.int32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n", "step-3": "<mask token>\n\n\ndef get_gpu_name():\n try:\n out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv'], stdout=subprocess.PIPE).stdout\n out_list = out_str.decode('utf-8').split('\\n')\n out_list = out_list[1:-1]\n return out_list\n except Exception as e:\n print(e)\n\n\ndef read_batch(src):\n \"\"\"Unpack the pickle files\n \"\"\"\n with open(src, 'rb') as f:\n if sys.version_info.major == 2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n return data\n\n\ndef shuffle_data(X, y):\n s = np.arange(len(X))\n np.random.shuffle(s)\n X = X[s]\n y = y[s]\n return X, y\n\n\n<mask token>\n\n\ndef download_cifar(download_dir, src=\n 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'):\n \"\"\"Load the training and testing data\n \"\"\"\n if not 
os.path.isfile('{}/cifar-10-python.tar.gz'.format(download_dir)):\n print('Downloading ' + src)\n fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format(\n download_dir))\n print('Done.')\n print('Extracting files...')\n with tarfile.open(fname) as tar:\n tar.extractall(download_dir)\n print('Done.')\n print('Preparing train set...')\n train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'.\n format(download_dir, i + 1)) for i in range(5)]\n x_train = np.concatenate([t['data'] for t in train_list])\n y_train = np.concatenate([t['labels'] for t in train_list])\n print('Preparing test set...')\n tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir))\n x_test = tst['data']\n y_test = np.asarray(tst['labels'])\n print('Done.')\n return x_train, x_test, y_train, y_test\n\n\ndef download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'):\n \"\"\"Load the training and testing data\n \"\"\"\n print('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print('Done.')\n try:\n print('Extracting files...')\n with np.load(fname) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n print('Done.')\n finally:\n os.remove(fname)\n return x_train, x_test, y_train, y_test\n\n\ndef cifar_for_library(download_dir, channel_first=True, one_hot=False):\n x_train, x_test, y_train, y_test = download_cifar(download_dir)\n x_train = x_train / 255.0\n x_test = x_test / 255.0\n x_train = x_train.reshape(-1, 3, 32, 32)\n x_test = x_test.reshape(-1, 3, 32, 32)\n if not channel_first:\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = x_train.astype(np.float32)\n x_test = 
x_test.astype(np.float32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n\n\ndef imdb_for_library(seq_len=100, max_features=20000, one_hot=False):\n \"\"\" Replicates same pre-processing as:\n https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py\n \n I'm not sure if we want to load another version of IMDB that has got \n words, but if it does have words we would still convert to index in this \n backend script that is not meant for others to see ... \n \n But I'm worried this obfuscates the data a bit?\n \"\"\"\n START_CHAR = 1\n OOV_CHAR = 2\n INDEX_FROM = 3\n x_train, x_test, y_train, y_test = download_imdb()\n idx = len(x_train)\n _xs = np.concatenate([x_train, x_test])\n _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs]\n if max_features:\n print('Trimming to {} max-features'.format(max_features))\n _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs]\n print('Padding to length {}'.format(seq_len))\n xs = np.zeros((len(_xs), seq_len), dtype=np.int)\n for o_idx, obs in enumerate(_xs):\n obs = obs[-seq_len:]\n for i_idx in range(len(obs)):\n if i_idx < seq_len:\n xs[o_idx][i_idx] = obs[i_idx]\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = np.array(xs[:idx]).astype(np.int32)\n x_test = np.array(xs[idx:]).astype(np.int32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n", "step-4": "<mask token>\n\n\ndef get_gpu_name():\n try:\n out_str = subprocess.run(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv'], stdout=subprocess.PIPE).stdout\n out_list = out_str.decode('utf-8').split('\\n')\n out_list = out_list[1:-1]\n return out_list\n except Exception 
as e:\n print(e)\n\n\ndef read_batch(src):\n \"\"\"Unpack the pickle files\n \"\"\"\n with open(src, 'rb') as f:\n if sys.version_info.major == 2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n return data\n\n\ndef shuffle_data(X, y):\n s = np.arange(len(X))\n np.random.shuffle(s)\n X = X[s]\n y = y[s]\n return X, y\n\n\ndef yield_mb(X, y, batchsize=64, shuffle=False):\n assert len(X) == len(y)\n if shuffle:\n X, y = shuffle_data(X, y)\n for i in range(len(X) // batchsize):\n yield X[i * batchsize:(i + 1) * batchsize], y[i * batchsize:(i + 1) *\n batchsize]\n\n\ndef download_cifar(download_dir, src=\n 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'):\n \"\"\"Load the training and testing data\n \"\"\"\n if not os.path.isfile('{}/cifar-10-python.tar.gz'.format(download_dir)):\n print('Downloading ' + src)\n fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format(\n download_dir))\n print('Done.')\n print('Extracting files...')\n with tarfile.open(fname) as tar:\n tar.extractall(download_dir)\n print('Done.')\n print('Preparing train set...')\n train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'.\n format(download_dir, i + 1)) for i in range(5)]\n x_train = np.concatenate([t['data'] for t in train_list])\n y_train = np.concatenate([t['labels'] for t in train_list])\n print('Preparing test set...')\n tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir))\n x_test = tst['data']\n y_test = np.asarray(tst['labels'])\n print('Done.')\n return x_train, x_test, y_train, y_test\n\n\ndef download_imdb(src='https://s3.amazonaws.com/text-datasets/imdb.npz'):\n \"\"\"Load the training and testing data\n \"\"\"\n print('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print('Done.')\n try:\n print('Extracting files...')\n with np.load(fname) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n print('Done.')\n finally:\n 
os.remove(fname)\n return x_train, x_test, y_train, y_test\n\n\ndef cifar_for_library(download_dir, channel_first=True, one_hot=False):\n x_train, x_test, y_train, y_test = download_cifar(download_dir)\n x_train = x_train / 255.0\n x_test = x_test / 255.0\n x_train = x_train.reshape(-1, 3, 32, 32)\n x_test = x_test.reshape(-1, 3, 32, 32)\n if not channel_first:\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = x_train.astype(np.float32)\n x_test = x_test.astype(np.float32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n\n\ndef imdb_for_library(seq_len=100, max_features=20000, one_hot=False):\n \"\"\" Replicates same pre-processing as:\n https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py\n \n I'm not sure if we want to load another version of IMDB that has got \n words, but if it does have words we would still convert to index in this \n backend script that is not meant for others to see ... 
\n \n But I'm worried this obfuscates the data a bit?\n \"\"\"\n START_CHAR = 1\n OOV_CHAR = 2\n INDEX_FROM = 3\n x_train, x_test, y_train, y_test = download_imdb()\n idx = len(x_train)\n _xs = np.concatenate([x_train, x_test])\n _xs = [([START_CHAR] + [(w + INDEX_FROM) for w in x]) for x in _xs]\n if max_features:\n print('Trimming to {} max-features'.format(max_features))\n _xs = [[(w if w < max_features else OOV_CHAR) for w in x] for x in _xs]\n print('Padding to length {}'.format(seq_len))\n xs = np.zeros((len(_xs), seq_len), dtype=np.int)\n for o_idx, obs in enumerate(_xs):\n obs = obs[-seq_len:]\n for i_idx in range(len(obs)):\n if i_idx < seq_len:\n xs[o_idx][i_idx] = obs[i_idx]\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n x_train = np.array(xs[:idx]).astype(np.int32)\n x_test = np.array(xs[idx:]).astype(np.int32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n", "step-5": "from sklearn.datasets import fetch_mldata\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\n\nimport numpy as np\nimport os\nimport tarfile\nimport pickle\nimport subprocess\nimport sys\nif sys.version_info.major == 2:\n # Backward compatibility with python 2.\n from six.moves import urllib\n urlretrieve = urllib.request.urlretrieve\nelse:\n from urllib.request import urlretrieve\n\ndef get_gpu_name():\n try:\n out_str = subprocess.run([\"nvidia-smi\", \"--query-gpu=gpu_name\", \"--format=csv\"], stdout=subprocess.PIPE).stdout\n out_list = out_str.decode(\"utf-8\").split('\\n')\n out_list = out_list[1:-1]\n return out_list\n except Exception as e:\n print(e)\n \ndef read_batch(src):\n '''Unpack the pickle files\n '''\n with open(src, 'rb') as f:\n if 
sys.version_info.major == 2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n return data\n\ndef shuffle_data(X, y):\n s = np.arange(len(X))\n np.random.shuffle(s)\n X = X[s]\n y = y[s]\n return X, y\n\ndef yield_mb(X, y, batchsize=64, shuffle=False):\n assert len(X) == len(y)\n if shuffle:\n X, y = shuffle_data(X, y)\n # Only complete batches are submitted\n for i in range(len(X)//batchsize):\n yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize]\n\ndef download_cifar(download_dir, src=\"http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"):\n '''Load the training and testing data\n '''\n\n if not os.path.isfile(\"{}/cifar-10-python.tar.gz\".format(download_dir)):\n print ('Downloading ' + src)\n fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format(download_dir))\n print ('Done.')\n\n print ('Extracting files...')\n with tarfile.open(fname) as tar:\n tar.extractall(download_dir)\n print ('Done.')\n \n print ('Preparing train set...')\n train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'.format(download_dir, i + 1)) for i in range(5)]\n x_train = np.concatenate([t['data'] for t in train_list])\n y_train = np.concatenate([t['labels'] for t in train_list])\n print ('Preparing test set...')\n tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir))\n x_test = tst['data']\n y_test = np.asarray(tst['labels'])\n print ('Done.')\n \n return x_train, x_test, y_train, y_test\n\ndef download_imdb(src=\"https://s3.amazonaws.com/text-datasets/imdb.npz\"):\n '''Load the training and testing data\n '''\n # FLAG: should we host this on azure?\n print ('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print ('Done.')\n try:\n print ('Extracting files...')\n with np.load(fname) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n print ('Done.')\n finally:\n os.remove(fname)\n return x_train, x_test, y_train, y_test\n\ndef 
cifar_for_library(download_dir, channel_first=True, one_hot=False): \n # Raw data\n x_train, x_test, y_train, y_test = download_cifar(download_dir)\n # Scale pixel intensity\n x_train = x_train/255.0\n x_test = x_test/255.0\n # Reshape\n x_train = x_train.reshape(-1, 3, 32, 32)\n x_test = x_test.reshape(-1, 3, 32, 32) \n # Channel last\n if not channel_first:\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n # One-hot encode y\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n # dtypes\n x_train = x_train.astype(np.float32)\n x_test = x_test.astype(np.float32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n \ndef imdb_for_library(seq_len=100, max_features=20000, one_hot=False):\n ''' Replicates same pre-processing as:\n https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py\n \n I'm not sure if we want to load another version of IMDB that has got \n words, but if it does have words we would still convert to index in this \n backend script that is not meant for others to see ... \n \n But I'm worried this obfuscates the data a bit?\n '''\n # 0 (padding), 1 (start), 2 (OOV)\n START_CHAR=1\n OOV_CHAR=2\n INDEX_FROM=3\n # Raw data (has been encoded into words already)\n x_train, x_test, y_train, y_test = download_imdb()\n # Combine for processing\n idx = len(x_train)\n _xs = np.concatenate([x_train, x_test])\n # Words will start from INDEX_FROM (shift by 3)\n _xs = [[START_CHAR] + [w + INDEX_FROM for w in x] for x in _xs]\n # Max-features - replace words bigger than index with oov_char\n # E.g. if max_features = 5 then keep 0, 1, 2, 3, 4 i.e. 
words 3 and 4\n if max_features:\n print(\"Trimming to {} max-features\".format(max_features))\n _xs = [[w if (w < max_features) else OOV_CHAR for w in x] for x in _xs] \n # Pad to same sequences\n print(\"Padding to length {}\".format(seq_len))\n xs = np.zeros((len(_xs), seq_len), dtype=np.int)\n for o_idx, obs in enumerate(_xs): \n # Match keras pre-processing of taking last elements\n obs = obs[-seq_len:]\n for i_idx in range(len(obs)):\n if i_idx < seq_len:\n xs[o_idx][i_idx] = obs[i_idx]\n # One-hot\n if one_hot:\n y_train = np.expand_dims(y_train, axis=-1)\n y_test = np.expand_dims(y_test, axis=-1)\n enc = OneHotEncoder(categorical_features='all')\n fit = enc.fit(y_train)\n y_train = fit.transform(y_train).toarray()\n y_test = fit.transform(y_test).toarray()\n # dtypes\n x_train = np.array(xs[:idx]).astype(np.int32)\n x_test = np.array(xs[idx:]).astype(np.int32)\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n return x_train, x_test, y_train, y_test\n", "step-ids": [ 5, 6, 7, 8, 11 ] }
[ 5, 6, 7, 8, 11 ]
# Cutting a Rod | DP-13 # Difficulty Level : Medium # Last Updated : 13 Nov, 2020 # Given a rod of length n inches and an array of prices that contains prices of all pieces of size smaller than n. Determine the maximum value obtainable by cutting up the rod and selling the pieces. For example, if length of the rod is 8 and the values of different pieces are given as following, then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6) # length | 1 2 3 4 5 6 7 8 # -------------------------------------------- # price | 1 5 8 9 10 17 17 20 # And if the prices are as following, then the maximum obtainable value is 24 (by cutting in eight pieces of length 1) # length | 1 2 3 4 5 6 7 8 # -------------------------------------------- # price | 3 5 8 9 10 17 17 20 import numpy as np def cut_rod(price, n): if n <= 0: return 0 max_val = -1 val = 0 for i in range(0, n): val = price[i] + cut_rod(price, n - i - 1) if max_val < val: max_val = val # print("i:", i, "n:", n, "max_val:", max_val) return max_val def cut_rod2(price, n): val = [0 for x in range(n+1)] val[0] = 0 for i in range(1, n+1): max_val = -1 for j in range(i): max_val = max(max_val, price[j] + val[i-j-1]) # print("i:", i, "j:", j, "max_val:", max_val, "val:", val) val[i] = max_val # print("i:", i, "val:", val) return val[n] # Driver code arr = [1, 5, 8, 9, 10, 17, 17, 20] arr1 = [3, 5, 8, 9, 10, 17, 17, 20] arr2 = [5, 5, 8, 9, 10, 17, 17, 20] size = len(arr) # print("Maximum Obtainable Value is", cut_rod(arr1, size)) # print("Maximum Obtainable Value is", cut_rod2(arr1, size)) print("Maximum Obtainable Value is", cut_rod2([2, 5, 7, 3, 9], 5)) def rodCut(price, n): if n <= 0: return 0 max_val = -1 # val = 0 for i in range(n): # val = price[i] + rodCut(price, n-1-i) max_val = max(max_val, price[i] + rodCut(price, n-1-i)) return max_val # print("Maximum Obtainable Value is", rodCut(arr1, size))
normal
{ "blob_id": "9cca73ebdf2b05fe29c14dc63ec1b1a7c917b085", "index": 6508, "step-1": "<mask token>\n\n\ndef cut_rod2(price, n):\n val = [(0) for x in range(n + 1)]\n val[0] = 0\n for i in range(1, n + 1):\n max_val = -1\n for j in range(i):\n max_val = max(max_val, price[j] + val[i - j - 1])\n val[i] = max_val\n return val[n]\n\n\n<mask token>\n\n\ndef rodCut(price, n):\n if n <= 0:\n return 0\n max_val = -1\n for i in range(n):\n max_val = max(max_val, price[i] + rodCut(price, n - 1 - i))\n return max_val\n", "step-2": "<mask token>\n\n\ndef cut_rod(price, n):\n if n <= 0:\n return 0\n max_val = -1\n val = 0\n for i in range(0, n):\n val = price[i] + cut_rod(price, n - i - 1)\n if max_val < val:\n max_val = val\n return max_val\n\n\ndef cut_rod2(price, n):\n val = [(0) for x in range(n + 1)]\n val[0] = 0\n for i in range(1, n + 1):\n max_val = -1\n for j in range(i):\n max_val = max(max_val, price[j] + val[i - j - 1])\n val[i] = max_val\n return val[n]\n\n\n<mask token>\n\n\ndef rodCut(price, n):\n if n <= 0:\n return 0\n max_val = -1\n for i in range(n):\n max_val = max(max_val, price[i] + rodCut(price, n - 1 - i))\n return max_val\n", "step-3": "<mask token>\n\n\ndef cut_rod(price, n):\n if n <= 0:\n return 0\n max_val = -1\n val = 0\n for i in range(0, n):\n val = price[i] + cut_rod(price, n - i - 1)\n if max_val < val:\n max_val = val\n return max_val\n\n\ndef cut_rod2(price, n):\n val = [(0) for x in range(n + 1)]\n val[0] = 0\n for i in range(1, n + 1):\n max_val = -1\n for j in range(i):\n max_val = max(max_val, price[j] + val[i - j - 1])\n val[i] = max_val\n return val[n]\n\n\n<mask token>\nprint('Maximum Obtainable Value is', cut_rod2([2, 5, 7, 3, 9], 5))\n\n\ndef rodCut(price, n):\n if n <= 0:\n return 0\n max_val = -1\n for i in range(n):\n max_val = max(max_val, price[i] + rodCut(price, n - 1 - i))\n return max_val\n", "step-4": "<mask token>\n\n\ndef cut_rod(price, n):\n if n <= 0:\n return 0\n max_val = -1\n val = 0\n for i in range(0, n):\n val = 
price[i] + cut_rod(price, n - i - 1)\n if max_val < val:\n max_val = val\n return max_val\n\n\ndef cut_rod2(price, n):\n val = [(0) for x in range(n + 1)]\n val[0] = 0\n for i in range(1, n + 1):\n max_val = -1\n for j in range(i):\n max_val = max(max_val, price[j] + val[i - j - 1])\n val[i] = max_val\n return val[n]\n\n\narr = [1, 5, 8, 9, 10, 17, 17, 20]\narr1 = [3, 5, 8, 9, 10, 17, 17, 20]\narr2 = [5, 5, 8, 9, 10, 17, 17, 20]\nsize = len(arr)\nprint('Maximum Obtainable Value is', cut_rod2([2, 5, 7, 3, 9], 5))\n\n\ndef rodCut(price, n):\n if n <= 0:\n return 0\n max_val = -1\n for i in range(n):\n max_val = max(max_val, price[i] + rodCut(price, n - 1 - i))\n return max_val\n", "step-5": "# Cutting a Rod | DP-13\n# Difficulty Level : Medium\n# Last Updated : 13 Nov, 2020\n\n# Given a rod of length n inches and an array of prices that contains prices of all pieces of size smaller than n. Determine the maximum value obtainable by cutting up the rod and selling the pieces. For example, if length of the rod is 8 and the values of different pieces are given as following, then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)\n\n# length | 1 2 3 4 5 6 7 8\n# --------------------------------------------\n# price | 1 5 8 9 10 17 17 20\n# And if the prices are as following, then the maximum obtainable value is 24 (by cutting in eight pieces of length 1)\n\n# length | 1 2 3 4 5 6 7 8\n# --------------------------------------------\n# price | 3 5 8 9 10 17 17 20\n\nimport numpy as np\n\n\ndef cut_rod(price, n):\n if n <= 0:\n return 0\n max_val = -1\n\n val = 0\n for i in range(0, n):\n val = price[i] + cut_rod(price, n - i - 1)\n if max_val < val:\n max_val = val\n # print(\"i:\", i, \"n:\", n, \"max_val:\", max_val)\n return max_val\n\n\ndef cut_rod2(price, n):\n val = [0 for x in range(n+1)]\n val[0] = 0\n\n for i in range(1, n+1):\n max_val = -1\n for j in range(i):\n max_val = max(max_val, price[j] + val[i-j-1])\n # print(\"i:\", i, 
\"j:\", j, \"max_val:\", max_val, \"val:\", val)\n val[i] = max_val\n # print(\"i:\", i, \"val:\", val)\n\n return val[n]\n\n\n# Driver code\narr = [1, 5, 8, 9, 10, 17, 17, 20]\narr1 = [3, 5, 8, 9, 10, 17, 17, 20]\narr2 = [5, 5, 8, 9, 10, 17, 17, 20]\nsize = len(arr)\n# print(\"Maximum Obtainable Value is\", cut_rod(arr1, size))\n# print(\"Maximum Obtainable Value is\", cut_rod2(arr1, size))\nprint(\"Maximum Obtainable Value is\", cut_rod2([2, 5, 7, 3, 9], 5))\n\n\ndef rodCut(price, n):\n if n <= 0:\n return 0\n max_val = -1\n\n # val = 0\n for i in range(n):\n # val = price[i] + rodCut(price, n-1-i)\n max_val = max(max_val, price[i] + rodCut(price, n-1-i))\n\n return max_val\n\n\n# print(\"Maximum Obtainable Value is\", rodCut(arr1, size))\n", "step-ids": [ 2, 3, 4, 5, 7 ] }
[ 2, 3, 4, 5, 7 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('profiles', '0018_userprofile_membership_fee_pending')] operations = [migrations.RenameField(model_name='userprofile', old_name ='membership_fee_pending', new_name='membership_fee_paid')] <|reserved_special_token_1|> from django.db import migrations class Migration(migrations.Migration): dependencies = [('profiles', '0018_userprofile_membership_fee_pending')] operations = [migrations.RenameField(model_name='userprofile', old_name ='membership_fee_pending', new_name='membership_fee_paid')] <|reserved_special_token_1|> # Generated by Django 3.2.3 on 2021-07-24 12:14 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('profiles', '0018_userprofile_membership_fee_pending'), ] operations = [ migrations.RenameField( model_name='userprofile', old_name='membership_fee_pending', new_name='membership_fee_paid', ), ]
flexible
{ "blob_id": "464980a2f17aeedfa08548d6c4e247f8c047e2cb", "index": 5743, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('profiles', '0018_userprofile_membership_fee_pending')]\n operations = [migrations.RenameField(model_name='userprofile', old_name\n ='membership_fee_pending', new_name='membership_fee_paid')]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('profiles', '0018_userprofile_membership_fee_pending')]\n operations = [migrations.RenameField(model_name='userprofile', old_name\n ='membership_fee_pending', new_name='membership_fee_paid')]\n", "step-5": "# Generated by Django 3.2.3 on 2021-07-24 12:14\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0018_userprofile_membership_fee_pending'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='userprofile',\n old_name='membership_fee_pending',\n new_name='membership_fee_paid',\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c' DEPLOY_SLUG = 'al-qassemi' NUM_SLIDES_AFTER_CONTENT = 2 # Configuration AUDIO = True VIDEO = False FILMSTRIP = False PROGRESS_BAR = False
normal
{ "blob_id": "f398b724fc28bc25ddb8baf492f34075db0c1f61", "index": 7703, "step-1": "<mask token>\n", "step-2": "COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'\nDEPLOY_SLUG = 'al-qassemi'\nNUM_SLIDES_AFTER_CONTENT = 2\nAUDIO = True\nVIDEO = False\nFILMSTRIP = False\nPROGRESS_BAR = False\n", "step-3": "COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'\nDEPLOY_SLUG = 'al-qassemi'\nNUM_SLIDES_AFTER_CONTENT = 2\n\n# Configuration\nAUDIO = True\nVIDEO = False\nFILMSTRIP = False\nPROGRESS_BAR = False", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(max((y - x + 9) // 10, 0)) <|reserved_special_token_1|> x, y = map(int, input().split()) print(max((y - x + 9) // 10, 0))
flexible
{ "blob_id": "c9f3e956d4016846c8efe0382b79882559d6ce64", "index": 1488, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(max((y - x + 9) // 10, 0))\n", "step-3": "x, y = map(int, input().split())\nprint(max((y - x + 9) // 10, 0))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- import config import web import hashlib import sys db = web.database(dbn="mysql", db=config.db, user=config.user, pw=config.passwd) def signIn(user, pw): pwhash = hashlib.md5(pw).hexdigest() uid = db.insert("users", uname=user, passwd=pwhash) return uid # def select(): # db.select(, ) def main(): if len(sys.argv) > 1: user = sys.argv[1] pw = sys.argv[2] signIn(user, pw) if __name__ == "__main__": main() r = db.select("users") for i in r: print i.uname # conn = MySQLdb.connect(host=config.host, user=config.user, passwd=config.passwd, # db=config.db, port=config.port, charset=config.charset) # conn
normal
{ "blob_id": "6d032df195854703f36dce7d27524c8f5089c04d", "index": 2334, "step-1": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport config\r\nimport web\r\nimport hashlib\r\nimport sys\r\n\r\n\r\ndb = web.database(dbn=\"mysql\", db=config.db, user=config.user, pw=config.passwd)\r\n\r\ndef signIn(user, pw):\r\n pwhash = hashlib.md5(pw).hexdigest()\r\n uid = db.insert(\"users\", uname=user, passwd=pwhash)\r\n return uid\r\n\r\n# def select():\r\n# db.select(, )\r\n\r\ndef main():\r\n if len(sys.argv) > 1:\r\n user = sys.argv[1]\r\n pw = sys.argv[2]\r\n signIn(user, pw)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n r = db.select(\"users\")\r\n for i in r:\r\n print i.uname\r\n # conn = MySQLdb.connect(host=config.host, user=config.user, passwd=config.passwd,\r\n # db=config.db, port=config.port, charset=config.charset)\r\n # conn\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# The sort() method sorts the list ascending by default. #syntax # list.sort(reverse=True|False, key=myFunc) # Parameter Description # reverse Optional. reverse=True will sort the list descending. Default is reverse=False # key Optional. A function to specify the sorting criteria(s) cars = ['Ford', 'BMW','ea','Volvo'] cars.sort() print(cars) print() cars.sort(reverse = True) print(cars) print() # Sort the list by the length of the values: def length(data): return len(data) cars.sort(key= length) print(cars) print() cars.sort(key= lambda x : x[1]) print(cars) #each item in the iterable is passed into the function individually # Sort a list of dictionaries based on the "year" value of the dictionaries: cars = [ {'car': 'Ford', 'year': 2005}, {'car': 'Mitsubishi', 'year': 2000}, {'car': 'BMW', 'year': 2019}, {'car': 'VW', 'year': 2011} ] def year(data): return data['year'] cars.sort(key=year) print(cars) print() # Sort the list by the length of the values and reversed: cars = ['Ford', 'Mitsubishi', 'BMW', 'VW'] def length_of_cars(car): return len(car) cars.sort(reverse= True, key= length_of_cars) print(cars)
normal
{ "blob_id": "5ab8d9eab30d72557f1a85b5b82c0df456e3843d", "index": 1740, "step-1": "<mask token>\n\n\ndef length_of_cars(car):\n return len(car)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef length(data):\n return len(data)\n\n\n<mask token>\n\n\ndef year(data):\n return data['year']\n\n\n<mask token>\n\n\ndef length_of_cars(car):\n return len(car)\n\n\n<mask token>\n", "step-3": "<mask token>\ncars.sort()\nprint(cars)\nprint()\ncars.sort(reverse=True)\nprint(cars)\nprint()\n\n\ndef length(data):\n return len(data)\n\n\ncars.sort(key=length)\nprint(cars)\nprint()\ncars.sort(key=lambda x: x[1])\nprint(cars)\n<mask token>\n\n\ndef year(data):\n return data['year']\n\n\ncars.sort(key=year)\nprint(cars)\nprint()\n<mask token>\n\n\ndef length_of_cars(car):\n return len(car)\n\n\ncars.sort(reverse=True, key=length_of_cars)\nprint(cars)\n", "step-4": "cars = ['Ford', 'BMW', 'ea', 'Volvo']\ncars.sort()\nprint(cars)\nprint()\ncars.sort(reverse=True)\nprint(cars)\nprint()\n\n\ndef length(data):\n return len(data)\n\n\ncars.sort(key=length)\nprint(cars)\nprint()\ncars.sort(key=lambda x: x[1])\nprint(cars)\ncars = [{'car': 'Ford', 'year': 2005}, {'car': 'Mitsubishi', 'year': 2000},\n {'car': 'BMW', 'year': 2019}, {'car': 'VW', 'year': 2011}]\n\n\ndef year(data):\n return data['year']\n\n\ncars.sort(key=year)\nprint(cars)\nprint()\ncars = ['Ford', 'Mitsubishi', 'BMW', 'VW']\n\n\ndef length_of_cars(car):\n return len(car)\n\n\ncars.sort(reverse=True, key=length_of_cars)\nprint(cars)\n", "step-5": "# The sort() method sorts the list ascending by default.\n\n\n#syntax\n# list.sort(reverse=True|False, key=myFunc)\n\n# Parameter\t Description\n# reverse\t Optional. reverse=True will sort the list descending. Default is reverse=False\n# key\t Optional. 
A function to specify the sorting criteria(s)\n\ncars = ['Ford', 'BMW','ea','Volvo']\ncars.sort()\nprint(cars)\nprint()\ncars.sort(reverse = True)\nprint(cars)\nprint()\n\n# Sort the list by the length of the values:\n\ndef length(data):\n return len(data)\n\ncars.sort(key= length)\nprint(cars)\n\nprint()\n\ncars.sort(key= lambda x : x[1])\nprint(cars)\n\n#each item in the iterable is passed into the function individually \n\n# Sort a list of dictionaries based on the \"year\" value of the dictionaries:\n\ncars = [\n {'car': 'Ford', 'year': 2005},\n {'car': 'Mitsubishi', 'year': 2000},\n {'car': 'BMW', 'year': 2019},\n {'car': 'VW', 'year': 2011}\n]\n\ndef year(data):\n return data['year']\n\ncars.sort(key=year)\nprint(cars)\nprint()\n\n# Sort the list by the length of the values and reversed:\n\ncars = ['Ford', 'Mitsubishi', 'BMW', 'VW']\n\ndef length_of_cars(car):\n return len(car)\n\ncars.sort(reverse= True, key= length_of_cars)\nprint(cars)\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [migrations.swappable_dependency(settings. AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')] operations = [] <|reserved_special_token_1|> from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [migrations.swappable_dependency(settings. AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')] operations = [] <|reserved_special_token_1|> # Generated by Django 3.2.4 on 2021-08-09 03:22 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242'), ] operations = [ ]
flexible
{ "blob_id": "f7a335db0ddf8a871e98eac54b59c41a40622153", "index": 4566, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')]\n operations = []\n", "step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')]\n operations = []\n", "step-5": "# Generated by Django 3.2.4 on 2021-08-09 03:22\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('employee', '0013_auto_20210808_1242'),\n ]\n\n operations = [\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def preprocess_img(img, size): img = np.rollaxis(img, 0, 3) img = skimage.transform.resize(img, size) img = skimage.color.rgb2gray(img) return img <|reserved_special_token_1|> import numpy as np import skimage def preprocess_img(img, size): img = np.rollaxis(img, 0, 3) img = skimage.transform.resize(img, size) img = skimage.color.rgb2gray(img) return img <|reserved_special_token_1|> import numpy as np import skimage def preprocess_img(img, size): img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3) img = skimage.transform.resize(img, size) img = skimage.color.rgb2gray(img) return img # data = minerl.data.make("MineRLNavigateDense-v0", data_dir="../dataset/navigate") # # # Iterate through a single epoch gathering sequences of at most 32 steps # for current_state, action, reward, next_state, done in data.sarsd_iter(num_epochs=1, max_sequence_len=32): # # Print the POV @ the first step of the sequence # print(current_state['pov'][0]) # # # Print the final reward pf the sequence! # print(reward[-1]) # # # Check if final (next_state) is terminal. # print(done[-1]) # # # ... do something with the data. # print("At the end of trajectories the length can be < max_sequence_len", len(reward))
flexible
{ "blob_id": "9706b9ba81f41b131c364a16bb17a0c1e31e3a04", "index": 6608, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n return img\n", "step-3": "import numpy as np\nimport skimage\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n return img\n", "step-4": "import numpy as np\nimport skimage\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n\n return img\n\n# data = minerl.data.make(\"MineRLNavigateDense-v0\", data_dir=\"../dataset/navigate\")\n#\n# # Iterate through a single epoch gathering sequences of at most 32 steps\n# for current_state, action, reward, next_state, done in data.sarsd_iter(num_epochs=1, max_sequence_len=32):\n# # Print the POV @ the first step of the sequence\n# print(current_state['pov'][0])\n#\n# # Print the final reward pf the sequence!\n# print(reward[-1])\n#\n# # Check if final (next_state) is terminal.\n# print(done[-1])\n#\n# # ... do something with the data.\n# print(\"At the end of trajectories the length can be < max_sequence_len\", len(reward))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import re from pathlib import Path RAW_DUMP_XML = Path("raw_data/Wikipedia.xml") def count_regexp(): """Counts the occurences of the regular expressions you will write. """ # Here's an example regular expression that roughly matches a valid email address. # The ones you write below should be shorter than this email = re.compile("[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\.[a-zA-Z]{2,5}") ###### Write below ######### subheading = re.compile("\=\=+.*\=\=+") link_to_subheading = re.compile("\[\[[\w\'*\-*\:*\(*\)*\_*\s*]+[#][\s*\w\\'*\-*\:*\(*\)*\_*s*]+\|*") doi_citation = re.compile("\{\{[c][ite](?!{{).*[dD][oO][iI]\s*[:|,=\/]*\s*[0-9]+\.[0-9]+.*\}\}") ###### End of your work ######### patterns = { "emails": email, "subheadings": subheading, "links to subheadings": link_to_subheading, "citations with DOI numbers": doi_citation, } with open(RAW_DUMP_XML, encoding="utf-8") as f: dump_text = f.read() for name, pattern in patterns.items(): if pattern is None: continue matches = pattern.findall(dump_text) count = len(matches) example_matches = [matches[i * (count // 5)] for i in range(5)] print("Found {} occurences of {}".format(count, name)) print("Here are examples:") print("\n".join(example_matches)) print("\n") if __name__ == "__main__": count_regexp()
normal
{ "blob_id": "8a4269f2094fa8ab8f6a93e653183dafb141232e", "index": 5717, "step-1": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern 
in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-3": "<mask token>\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-4": "import re\nfrom pathlib import Path\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n 
\"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-5": "import re\r\nfrom pathlib import Path\r\n\r\nRAW_DUMP_XML = Path(\"raw_data/Wikipedia.xml\")\r\n\r\n\r\ndef count_regexp():\r\n \"\"\"Counts the occurences of the regular expressions you will write.\r\n \"\"\"\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern 
is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n count_regexp()\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> f.close() <|reserved_special_token_0|> for element in word_token: if element in token_dictionary: token_dictionary[element] += 1 else: token_dictionary[element] = 1 for key in sorted(token_dictionary.keys()): print('{} {}'.format(key, token_dictionary[key])) print('Tokens: ' + str(len(word_token))) print('Types: ' + str(len(token_dictionary))) <|reserved_special_token_1|> <|reserved_special_token_0|> f = open('q4text.txt') text = f.read() f.close() pattern = ( '[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\\b[A-Z][a-z]+[.]|\\b[A-Za-z]+[\'][a-z]+|[A-Z.]+[A-Z]|\\b[A-Za-z-]+|[.]+|[.,\'"!?:;]' ) word_token = re.findall(pattern, text) token_dictionary = {} for element in word_token: if element in token_dictionary: token_dictionary[element] += 1 else: token_dictionary[element] = 1 for key in sorted(token_dictionary.keys()): print('{} {}'.format(key, token_dictionary[key])) print('Tokens: ' + str(len(word_token))) print('Types: ' + str(len(token_dictionary))) <|reserved_special_token_1|> import re f = open('q4text.txt') text = f.read() f.close() pattern = ( '[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\\b[A-Z][a-z]+[.]|\\b[A-Za-z]+[\'][a-z]+|[A-Z.]+[A-Z]|\\b[A-Za-z-]+|[.]+|[.,\'"!?:;]' ) word_token = re.findall(pattern, text) token_dictionary = {} for element in word_token: if element in token_dictionary: token_dictionary[element] += 1 else: token_dictionary[element] = 1 for key in sorted(token_dictionary.keys()): print('{} {}'.format(key, token_dictionary[key])) print('Tokens: ' + str(len(word_token))) print('Types: ' + str(len(token_dictionary))) <|reserved_special_token_1|> import re f = open('q4text.txt') text = f.read() f.close() pattern = r'''[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\b[A-Z][a-z]+[.]|\b[A-Za-z]+['][a-z]+|[A-Z.]+[A-Z]|\b[A-Za-z-]+|[.]+|[.,'"!?:;]''' word_token = re.findall(pattern, text) token_dictionary = {} for element in word_token: if element in token_dictionary: 
token_dictionary[element] += 1 else: token_dictionary[element] = 1 for key in sorted(token_dictionary.keys()): print("{} {}".format(key, token_dictionary[key])) print('Tokens: ' + str(len(word_token))) print('Types: ' + str(len(token_dictionary)))
flexible
{ "blob_id": "2e27302abbe239c1a6067a9eb52f5a857fff7dd2", "index": 1736, "step-1": "<mask token>\n", "step-2": "<mask token>\nf.close()\n<mask token>\nfor element in word_token:\n if element in token_dictionary:\n token_dictionary[element] += 1\n else:\n token_dictionary[element] = 1\nfor key in sorted(token_dictionary.keys()):\n print('{} {}'.format(key, token_dictionary[key]))\nprint('Tokens: ' + str(len(word_token)))\nprint('Types: ' + str(len(token_dictionary)))\n", "step-3": "<mask token>\nf = open('q4text.txt')\ntext = f.read()\nf.close()\npattern = (\n '[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\\\\b[A-Z][a-z]+[.]|\\\\b[A-Za-z]+[\\'][a-z]+|[A-Z.]+[A-Z]|\\\\b[A-Za-z-]+|[.]+|[.,\\'\"!?:;]'\n )\nword_token = re.findall(pattern, text)\ntoken_dictionary = {}\nfor element in word_token:\n if element in token_dictionary:\n token_dictionary[element] += 1\n else:\n token_dictionary[element] = 1\nfor key in sorted(token_dictionary.keys()):\n print('{} {}'.format(key, token_dictionary[key]))\nprint('Tokens: ' + str(len(word_token)))\nprint('Types: ' + str(len(token_dictionary)))\n", "step-4": "import re\nf = open('q4text.txt')\ntext = f.read()\nf.close()\npattern = (\n '[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\\\\b[A-Z][a-z]+[.]|\\\\b[A-Za-z]+[\\'][a-z]+|[A-Z.]+[A-Z]|\\\\b[A-Za-z-]+|[.]+|[.,\\'\"!?:;]'\n )\nword_token = re.findall(pattern, text)\ntoken_dictionary = {}\nfor element in word_token:\n if element in token_dictionary:\n token_dictionary[element] += 1\n else:\n token_dictionary[element] = 1\nfor key in sorted(token_dictionary.keys()):\n print('{} {}'.format(key, token_dictionary[key]))\nprint('Tokens: ' + str(len(word_token)))\nprint('Types: ' + str(len(token_dictionary)))\n", "step-5": "import re\n\nf = open('q4text.txt')\ntext = f.read()\nf.close()\npattern = r'''[0-9]+[,][0-9]+|[0-9]+[.][0-9]+|[0-9]+|\\b[A-Z][a-z]+[.]|\\b[A-Za-z]+['][a-z]+|[A-Z.]+[A-Z]|\\b[A-Za-z-]+|[.]+|[.,'\"!?:;]'''\n\nword_token = re.findall(pattern, text)\ntoken_dictionary = {}\n\nfor 
element in word_token:\n if element in token_dictionary:\n token_dictionary[element] += 1\n else:\n token_dictionary[element] = 1\n\nfor key in sorted(token_dictionary.keys()):\n print(\"{} {}\".format(key, token_dictionary[key]))\nprint('Tokens: ' + str(len(word_token)))\nprint('Types: ' + str(len(token_dictionary)))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def to_absolute_path(filepath): filepath = os.path.expanduser(filepath) return os.path.abspath(filepath) def is_valid_path(path): abs_path = to_absolute_path(path) if os.path.exists(abs_path) and os.path.isdir(abs_path): return True else: return False <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def to_absolute_path(filepath): filepath = os.path.expanduser(filepath) return os.path.abspath(filepath) def is_valid_path(path): abs_path = to_absolute_path(path) if os.path.exists(abs_path) and os.path.isdir(abs_path): return True else: return False <|reserved_special_token_0|> if query != '' and is_valid_path(new_path): wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle= new_path, arg=f'{new_path}|add') elif query.startswith('/') or query.startswith('~'): wf.setItem(title='Path does not exists, create?', subtitle=new_path, arg=f'{new_path}|create') else: wf.setItem(title=f'Enter {path_to_ask} path', subtitle= 'Type a directory path starting with / or ~', valid=False) wf.addItem() wf.write() <|reserved_special_token_1|> <|reserved_special_token_0|> def to_absolute_path(filepath): filepath = os.path.expanduser(filepath) return os.path.abspath(filepath) def is_valid_path(path): abs_path = to_absolute_path(path) if os.path.exists(abs_path) and os.path.isdir(abs_path): return True else: return False env_source = Tools.getEnv('source') env_target = Tools.getEnv('target') query = Tools.getArgv(1) path_to_ask = 'source' if env_source == '' else 'target' new_path = to_absolute_path(query) wf = Items() if query != '' and is_valid_path(new_path): wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle= new_path, arg=f'{new_path}|add') elif query.startswith('/') or query.startswith('~'): wf.setItem(title='Path does not exists, create?', subtitle=new_path, arg=f'{new_path}|create') else: wf.setItem(title=f'Enter {path_to_ask} path', subtitle= 'Type a directory path starting with / 
or ~', valid=False) wf.addItem() wf.write() <|reserved_special_token_1|> import os from Alfred3 import Items, Tools def to_absolute_path(filepath): filepath = os.path.expanduser(filepath) return os.path.abspath(filepath) def is_valid_path(path): abs_path = to_absolute_path(path) if os.path.exists(abs_path) and os.path.isdir(abs_path): return True else: return False env_source = Tools.getEnv('source') env_target = Tools.getEnv('target') query = Tools.getArgv(1) path_to_ask = 'source' if env_source == '' else 'target' new_path = to_absolute_path(query) wf = Items() if query != '' and is_valid_path(new_path): wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle= new_path, arg=f'{new_path}|add') elif query.startswith('/') or query.startswith('~'): wf.setItem(title='Path does not exists, create?', subtitle=new_path, arg=f'{new_path}|create') else: wf.setItem(title=f'Enter {path_to_ask} path', subtitle= 'Type a directory path starting with / or ~', valid=False) wf.addItem() wf.write() <|reserved_special_token_1|> #!/usr/bin/env python3 import os from Alfred3 import Items, Tools def to_absolute_path(filepath): filepath = os.path.expanduser(filepath) return os.path.abspath(filepath) def is_valid_path(path): abs_path = to_absolute_path(path) if os.path.exists(abs_path) and os.path.isdir(abs_path): return True else: return False env_source = Tools.getEnv("source") env_target = Tools.getEnv("target") query = Tools.getArgv(1) path_to_ask = "source" if env_source == "" else "target" new_path = to_absolute_path(query) wf = Items() if query != "" and is_valid_path(new_path): wf.setItem( title=f"Path exists, add as {path_to_ask} path?", subtitle=new_path, arg=f"{new_path}|add" ) elif query.startswith("/") or query.startswith("~"): wf.setItem( title="Path does not exists, create?", subtitle=new_path, arg=f"{new_path}|create" ) else: wf.setItem( title=f"Enter {path_to_ask} path", subtitle="Type a directory path starting with / or ~", valid=False ) wf.addItem() 
wf.write()
flexible
{ "blob_id": "1cf573863fca660cc1fec71ab64743e7a2dd74d8", "index": 1730, "step-1": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n", "step-3": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n 
arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n", "step-4": "import os\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n", "step-5": "#!/usr/bin/env python3\n\nimport os\n\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv(\"source\")\nenv_target = Tools.getEnv(\"target\")\nquery = Tools.getArgv(1)\n\npath_to_ask = \"source\" if env_source == \"\" else \"target\"\n\nnew_path = to_absolute_path(query)\n\n\nwf = Items()\n\nif query != \"\" and is_valid_path(new_path):\n wf.setItem(\n title=f\"Path exists, add as {path_to_ask} path?\",\n subtitle=new_path,\n arg=f\"{new_path}|add\"\n )\nelif query.startswith(\"/\") or 
query.startswith(\"~\"):\n wf.setItem(\n title=\"Path does not exists, create?\",\n subtitle=new_path,\n arg=f\"{new_path}|create\"\n )\nelse:\n wf.setItem(\n title=f\"Enter {path_to_ask} path\",\n subtitle=\"Type a directory path starting with / or ~\",\n valid=False\n )\nwf.addItem()\nwf.write()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> IMAGE_SIZE = 640, 480 <|reserved_special_token_1|> IMAGE_SIZE=(640, 480)
flexible
{ "blob_id": "af80cb4d4ce5c071efc39e85f89bb412cff6bf6e", "index": 4489, "step-1": "<mask token>\n", "step-2": "IMAGE_SIZE = 640, 480\n", "step-3": "IMAGE_SIZE=(640, 480)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
msg = "eduardo foi a feira" if 'feira' in msg: print('Sim, foi a feira') else: print('não ele não foi a feira')
normal
{ "blob_id": "2a83bc9157e2210da46e58c56fc0b7199856f4c0", "index": 6287, "step-1": "<mask token>\n", "step-2": "<mask token>\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n", "step-3": "msg = 'eduardo foi a feira'\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n", "step-4": "msg = \"eduardo foi a feira\"\n\nif 'feira' in msg:\n print('Sim, foi a feira')\nelse:\n print('não ele não foi a feira')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> log_info(f'Just initialized a bot named {bot.name}') log_ok(f'Bot is given cash: {bot.cash}') log_error('Nothing else to do ! :(') <|reserved_special_token_1|> <|reserved_special_token_0|> bot = TradeBot() log_info(f'Just initialized a bot named {bot.name}') log_ok(f'Bot is given cash: {bot.cash}') log_error('Nothing else to do ! :(') <|reserved_special_token_1|> <|reserved_special_token_0|> from src.CORE.class_TradeBot import TradeBot from src.util.logging import log_ok, log_info, log_error bot = TradeBot() log_info(f'Just initialized a bot named {bot.name}') log_ok(f'Bot is given cash: {bot.cash}') log_error('Nothing else to do ! :(') <|reserved_special_token_1|> ''' Author: Allen Chen This is an example of entry point to CORE. Pay close attention to the import syntax - they're relative to this repo. Don't try to run this by doing 'python3 main.py' under this directory. Try to add your Target in Makefile under the root dir, and call './run YOUR_TARGET_NAME' from root. ''' from src.CORE.class_TradeBot import TradeBot from src.util.logging import log_ok, log_info, log_error bot = TradeBot() log_info(f"Just initialized a bot named {bot.name}") log_ok(f"Bot is given cash: {bot.cash}") log_error("Nothing else to do ! :(")
flexible
{ "blob_id": "18eed41cbc419ecbb215f77235be99f15f86ea9a", "index": 7468, "step-1": "<mask token>\n", "step-2": "<mask token>\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-3": "<mask token>\nbot = TradeBot()\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-4": "<mask token>\nfrom src.CORE.class_TradeBot import TradeBot\nfrom src.util.logging import log_ok, log_info, log_error\nbot = TradeBot()\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-5": "'''\nAuthor: Allen Chen\n\nThis is an example of entry point to CORE. Pay close attention to the import syntax - they're relative to this repo.\nDon't try to run this by doing 'python3 main.py' under this directory. Try to add your Target in Makefile under the root dir,\nand call './run YOUR_TARGET_NAME' from root. \n'''\n\nfrom src.CORE.class_TradeBot import TradeBot\nfrom src.util.logging import log_ok, log_info, log_error\n\n\nbot = TradeBot()\nlog_info(f\"Just initialized a bot named {bot.name}\")\n\nlog_ok(f\"Bot is given cash: {bot.cash}\")\n\nlog_error(\"Nothing else to do ! :(\")", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from sys import stdin Read = stdin.readline INF = int(1e9) n, m = map(int, Read().split()) graph = [[INF] * (n+1) for _ in range(n+1)] for i in range(1, n+1): for j in range(1, n+1): if i == j: graph[i][j] = 0 for _ in range(m): a, b = map(int, Read().split()) graph[a][b] = 1 for k in range(1, n+1): for i in range(1, n+1): for j in range(1, n+1): graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j]) result = 0 for i in range(1, n+1): count = 0 for j in range(1, n+1): if graph[i][j] != INF or graph[j][i] != INF: count += 1 if count == n: result += 1 print(result)
normal
{ "blob_id": "6ec39aa712c8abe610418e410883ff168d73126d", "index": 3292, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n<mask token>\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n", "step-3": "<mask token>\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n", "step-4": "from sys import stdin\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n", "step-5": "from sys import stdin\nRead = stdin.readline\nINF = int(1e9)\n\nn, m = 
map(int, Read().split())\ngraph = [[INF] * (n+1) for _ in range(n+1)]\n\nfor i in range(1, n+1):\n for j in range(1, n+1):\n if i == j:\n graph[i][j] = 0\n\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\n\nfor k in range(1, n+1):\n for i in range(1, n+1):\n for j in range(1, n+1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n\nresult = 0\nfor i in range(1, n+1):\n count = 0\n for j in range(1, n+1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n\n if count == n:\n result += 1\n\nprint(result)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def download_images(links, name): dir_name = name.replace(' ', '_') if not os.path.isdir(dir_name): os.mkdir(dir_name) for i, img_link in enumerate(links): img_path = os.path.join(dir_name, '{:06}.png'.format(i)) ulib.urlretrieve(img_link, img_path) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def find_links(name): name = name.replace(' ', '+') url_str = ( 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + '\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + '\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s') headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36', 'Content-Type': 'application/json'} url_str = url_str.format(name, 0) print(url_str) request = ulib.Request(url_str, None, headers) json_str = ulib.urlopen(request).read() json_str = json.loads(json_str) soup = Bsoup(json_str[1][1], 'lxml') soup_imgs = soup.find_all('img') img_links = [img['src'] for img in soup_imgs] return img_links def download_images(links, name): dir_name = name.replace(' ', '_') if not os.path.isdir(dir_name): os.mkdir(dir_name) for i, img_link in enumerate(links): img_path = os.path.join(dir_name, '{:06}.png'.format(i)) ulib.urlretrieve(img_link, img_path) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def find_links(name): name = name.replace(' ', '+') url_str = ( 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + '\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + '\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s') headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36', 'Content-Type': 'application/json'} url_str = url_str.format(name, 0) print(url_str) request = ulib.Request(url_str, None, headers) json_str = 
ulib.urlopen(request).read() json_str = json.loads(json_str) soup = Bsoup(json_str[1][1], 'lxml') soup_imgs = soup.find_all('img') img_links = [img['src'] for img in soup_imgs] return img_links def download_images(links, name): dir_name = name.replace(' ', '_') if not os.path.isdir(dir_name): os.mkdir(dir_name) for i, img_link in enumerate(links): img_path = os.path.join(dir_name, '{:06}.png'.format(i)) ulib.urlretrieve(img_link, img_path) if __name__ == '__main__': search_str = 'yoyo' links = find_links(search_str) download_images(links, search_str) print('downloding images.... done!!!') <|reserved_special_token_1|> import os import urllib.request as ulib import json from bs4 import BeautifulSoup as Bsoup def find_links(name): name = name.replace(' ', '+') url_str = ( 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + '\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + '\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s') headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36', 'Content-Type': 'application/json'} url_str = url_str.format(name, 0) print(url_str) request = ulib.Request(url_str, None, headers) json_str = ulib.urlopen(request).read() json_str = json.loads(json_str) soup = Bsoup(json_str[1][1], 'lxml') soup_imgs = soup.find_all('img') img_links = [img['src'] for img in soup_imgs] return img_links def download_images(links, name): dir_name = name.replace(' ', '_') if not os.path.isdir(dir_name): os.mkdir(dir_name) for i, img_link in enumerate(links): img_path = os.path.join(dir_name, '{:06}.png'.format(i)) ulib.urlretrieve(img_link, img_path) if __name__ == '__main__': search_str = 'yoyo' links = find_links(search_str) download_images(links, search_str) print('downloding images.... 
done!!!') <|reserved_special_token_1|> import os import urllib.request as ulib import json from bs4 import BeautifulSoup as Bsoup def find_links(name): name = name.replace(" ", "+") url_str = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + \ '\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + \ '\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + \ '\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s' headers = {"User-Agent": "Chrome/65.0.3325.162 Safari/537.36", "Content-Type": "application/json"} url_str = url_str.format(name, 0) print(url_str) request = ulib.Request(url_str, None, headers) json_str = ulib.urlopen(request).read() json_str = json.loads(json_str) soup = Bsoup(json_str[1][1], 'lxml') soup_imgs = soup.find_all("img") img_links = [img["src"] for img in soup_imgs] return img_links def download_images(links, name): dir_name = name.replace(" ", "_") if not os.path.isdir(dir_name): os.mkdir(dir_name) for i, img_link in enumerate(links): img_path = os.path.join(dir_name, "{:06}.png".format(i)) ulib.urlretrieve(img_link, img_path) if __name__ == "__main__": search_str = "yoyo" links = find_links(search_str) download_images(links, search_str) print("downloding images.... done!!!")
flexible
{ "blob_id": "02ffdd1c03cc20883eddc691fc841022b4ff40fd", "index": 1601, "step-1": "<mask token>\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = 
url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\nif __name__ == '__main__':\n search_str = 'yoyo'\n links = find_links(search_str)\n download_images(links, search_str)\n print('downloding images.... done!!!')\n", "step-4": "import os\nimport urllib.request as ulib\nimport json\nfrom bs4 import BeautifulSoup as Bsoup\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\nif __name__ == '__main__':\n search_str = 'yoyo'\n links = find_links(search_str)\n 
download_images(links, search_str)\n print('downloding images.... done!!!')\n", "step-5": "import os\nimport urllib.request as ulib\nimport json\nfrom bs4 import BeautifulSoup as Bsoup\n\n\ndef find_links(name):\n name = name.replace(\" \", \"+\")\n\n url_str = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + \\\n '\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + \\\n '\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + \\\n '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s'\n\n headers = {\"User-Agent\": \"Chrome/65.0.3325.162 Safari/537.36\", \"Content-Type\": \"application/json\"}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all(\"img\")\n img_links = [img[\"src\"] for img in soup_imgs]\n return img_links\n\ndef download_images(links, name):\n dir_name = name.replace(\" \", \"_\")\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, \"{:06}.png\".format(i))\n ulib.urlretrieve(img_link, img_path)\n\nif __name__ == \"__main__\":\n\n search_str = \"yoyo\"\n links = find_links(search_str)\n download_images(links, search_str)\n\n print(\"downloding images.... done!!!\")", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> os.chdir( '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' ) for f in os.listdir(): file_name, file_ext = os.path.splitext(f) file_name = file_name.replace('_DEL', '') os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg')) <|reserved_special_token_1|> <|reserved_special_token_0|> path2 = ( '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' ) os.chdir( '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' ) for f in os.listdir(): file_name, file_ext = os.path.splitext(f) file_name = file_name.replace('_DEL', '') os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg')) <|reserved_special_token_1|> <|reserved_special_token_0|> import os path2 = ( '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' ) os.chdir( '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' ) for f in os.listdir(): file_name, file_ext = os.path.splitext(f) file_name = file_name.replace('_DEL', '') os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg')) <|reserved_special_token_1|> #################################################################################### # About # Date: April 12, 2018 # Notes ''' Code that renames a list of files in a directory MUST Run in Python 3 environment! 
jpeg Drop extra number at the end of unique ID add DEL or INS based on variant type ''' ''' Resources --------- https://gist.github.com/seanh/93666 https://www.youtube.com/watch?v=ve2pmm5JqmI https://www.youtube.com/watch?v=WQVisBzJGLw ''' #################################################################################### import os path2 = '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL' os.chdir('/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL') for f in os.listdir(): file_name, file_ext = os.path.splitext(f) file_name = file_name.replace('_DEL', '') # file_name = file_name.replace(' 1', '') # file_name = file_name.replace(' 2', '') # file_name = file_name.replace(' 3', '') # file_name = file_name.replace(' 4', '') # file_name = file_name.replace(' 5', '') # file_name = file_name.replace(' 6', '') # file_name = file_name.replace(' 7', '') # file_name = file_name.replace(' 8', '') # file_name = file_name.replace(' 9', '') # file_name = file_name.replace(' 10', '') # file_name = file_name.replace(' 11', '') # file_name = file_name.replace(' 12', '') os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))
flexible
{ "blob_id": "d483314fa7e8a2514fd5089b872b9e480e7454f4", "index": 8116, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.chdir(\n '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\n )\nfor f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n file_name = file_name.replace('_DEL', '')\n os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))\n", "step-3": "<mask token>\npath2 = (\n '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\n )\nos.chdir(\n '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\n )\nfor f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n file_name = file_name.replace('_DEL', '')\n os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))\n", "step-4": "<mask token>\nimport os\npath2 = (\n '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\n )\nos.chdir(\n '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\n )\nfor f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n file_name = file_name.replace('_DEL', '')\n os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))\n", "step-5": "####################################################################################\n# About\n# Date: April 12, 2018\n# Notes\n'''\nCode that renames a list of files in a directory\nMUST Run in Python 3 environment!\n\njpeg Drop extra number at the end of unique ID\nadd DEL or INS based on variant type\n'''\n\n'''\nResources\n---------\nhttps://gist.github.com/seanh/93666\nhttps://www.youtube.com/watch?v=ve2pmm5JqmI\nhttps://www.youtube.com/watch?v=WQVisBzJGLw\n'''\n\n####################################################################################\n\nimport os\npath2 = 
'/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'\nos.chdir('/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL')\n\nfor f in os.listdir():\n\tfile_name, file_ext = os.path.splitext(f)\n\tfile_name = file_name.replace('_DEL', '')\n\t# file_name = file_name.replace(' 1', '')\n\t# file_name = file_name.replace(' 2', '')\n\t# file_name = file_name.replace(' 3', '')\n\t# file_name = file_name.replace(' 4', '')\n\t# file_name = file_name.replace(' 5', '')\n\t# file_name = file_name.replace(' 6', '')\n\t# file_name = file_name.replace(' 7', '')\n\t# file_name = file_name.replace(' 8', '')\n\t# file_name = file_name.replace(' 9', '')\n\t# file_name = file_name.replace(' 10', '')\n\t# file_name = file_name.replace(' 11', '')\n\t# file_name = file_name.replace(' 12', '')\n\tos.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
a = int(input("Enter no. of over: ")) print("total ball:",a*6 ) import random comp_runs = random.randint(0,36) print("computer's run:" ,comp_runs) comp_runs = comp_runs+1 print("runs need to win:",comp_runs) chances_1 = a*6 no_of_chances_1 = 0 your_runs = 0 print("-----------------------------------------------\nYour Batting\n") while no_of_chances_1 < chances_1: runs = int(input("Enter Runs for Your Batting Turn: ")) comp_bowl = random.randint(1,6) if runs == comp_bowl: print("Computer Guess: ", comp_bowl) print("You are Out. Your Total Runs= ", your_runs, "\n") break elif runs > 10: print("ALERT!! Support No only till 10\n") continue else: your_runs = your_runs + runs print("Computer Guess: ", comp_bowl) print("Your runs Now are: ", your_runs, "\n") if comp_runs < your_runs: break no_of_chances_1 = no_of_chances_1 + 1 #after the over ends now result time print("\n-----------------------------------------------\nRESULTS: ") if comp_runs < your_runs: print("You won the Game.") elif comp_runs == your_runs: print("The Game is a Tie") else: print("Computer won the Game.")
normal
{ "blob_id": "00312f57e8a78444937f46cecb62a2b684b4fc91", "index": 8779, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('total ball:', a * 6)\n<mask token>\nprint(\"computer's run:\", comp_runs)\n<mask token>\nprint('runs need to win:', comp_runs)\n<mask token>\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n", "step-3": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\n<mask token>\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! 
Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n", "step-4": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\nimport random\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n", "step-5": "a = int(input(\"Enter no. 
of over: \"))\r\nprint(\"total ball:\",a*6 )\r\nimport random\r\n\r\ncomp_runs = random.randint(0,36)\r\nprint(\"computer's run:\" ,comp_runs)\r\ncomp_runs = comp_runs+1\r\nprint(\"runs need to win:\",comp_runs)\r\nchances_1 = a*6\r\nno_of_chances_1 = 0\r\nyour_runs = 0\r\n\r\nprint(\"-----------------------------------------------\\nYour Batting\\n\")\r\nwhile no_of_chances_1 < chances_1:\r\n\r\n runs = int(input(\"Enter Runs for Your Batting Turn: \"))\r\n comp_bowl = random.randint(1,6)\r\n\r\n if runs == comp_bowl:\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"You are Out. Your Total Runs= \", your_runs, \"\\n\")\r\n break\r\n elif runs > 10:\r\n print(\"ALERT!! Support No only till 10\\n\")\r\n continue\r\n else:\r\n your_runs = your_runs + runs\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"Your runs Now are: \", your_runs, \"\\n\")\r\n if comp_runs < your_runs:\r\n break\r\n\r\n no_of_chances_1 = no_of_chances_1 + 1\r\n\r\n#after the over ends now result time\r\n\r\nprint(\"\\n-----------------------------------------------\\nRESULTS: \")\r\n\r\nif comp_runs < your_runs:\r\n print(\"You won the Game.\")\r\n\r\nelif comp_runs == your_runs:\r\n print(\"The Game is a Tie\")\r\n\r\nelse:\r\n print(\"Computer won the Game.\")\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.conf.urls import url from . import views from .import admin urlpatterns = [ url(r'^$', views.showberanda, name='showberanda'), url(r'^sentimenanalisis/$', views.showsentimenanalisis, name='showsentimenanalisis'), url(r'^bantuan/$', views.showbantuan, name='showbantuan'), url(r'^tweets/', views.get_tweets), ]
normal
{ "blob_id": "077c596f71aae22e85589fdaf78d5cdae8085443", "index": 8710, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [url('^$', views.showberanda, name='showberanda'), url(\n '^sentimenanalisis/$', views.showsentimenanalisis, name=\n 'showsentimenanalisis'), url('^bantuan/$', views.showbantuan, name=\n 'showbantuan'), url('^tweets/', views.get_tweets)]\n", "step-3": "from django.conf.urls import url\nfrom . import views\nfrom . import admin\nurlpatterns = [url('^$', views.showberanda, name='showberanda'), url(\n '^sentimenanalisis/$', views.showsentimenanalisis, name=\n 'showsentimenanalisis'), url('^bantuan/$', views.showbantuan, name=\n 'showbantuan'), url('^tweets/', views.get_tweets)]\n", "step-4": "from django.conf.urls import url\nfrom . import views\nfrom .import admin\n\nurlpatterns = [\n url(r'^$', views.showberanda, name='showberanda'),\n url(r'^sentimenanalisis/$', views.showsentimenanalisis, name='showsentimenanalisis'),\n url(r'^bantuan/$', views.showbantuan, name='showbantuan'),\n url(r'^tweets/', views.get_tweets),\n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os from conan import ConanFile from conan.tools.build import check_min_cppstd from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout from conan.tools.files import copy, get, replace_in_file, rmdir from conan.tools.scm import Version from conan.errors import ConanInvalidConfiguration required_conan_version = ">=1.57.0" class RuyConan(ConanFile): name = "ruy" description = "ruy is a matrix multiplication library.\n" \ "Its focus is to cover the matrix multiplication needs of neural network inference engines\n" url = "https://github.com/conan-io/conan-center-index" homepage = "https://github.com/google/ruy" license = "Apache-2.0" topics = ("matrix", "multiplication", "neural", "network", "AI", "tensorflow") settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], "fPIC": [True, False], } default_options = { "shared": False, "fPIC": True, } @property def _minimum_compilers_version(self): return { "Visual Studio": "15", "msvc": "191", "gcc": "5", "clang": "3.4", "apple-clang": "5.1", } def validate(self): if self.settings.compiler.get_safe("cppstd"): check_min_cppstd(self, 14) minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False) if not minimum_version: self.output.warning("Compiler is unknown. Assuming it supports C++14.") elif Version(self.settings.compiler.version) < minimum_version: raise ConanInvalidConfiguration("Build requires support for C++14. 
Minimum version for {} is {}" .format(str(self.settings.compiler), minimum_version)) if str(self.settings.compiler) == "clang" and Version(self.settings.compiler.version) <= 5 and self.settings.build_type == "Debug": raise ConanInvalidConfiguration("Debug builds are not supported on older versions of Clang (<=5)") def config_options(self): if self.settings.os == "Windows": self.options.rm_safe("fPIC") def configure(self): if self.options.shared: self.options.rm_safe("fPIC") def requirements(self): self.requires("cpuinfo/cci.20220228") def layout(self): cmake_layout(self, src_folder="src") def source(self): get(self, **self.conan_data["sources"][self.version], strip_root=True) def generate(self): tc = CMakeToolchain(self) tc.cache_variables["RUY_MINIMAL_BUILD"] = True tc.cache_variables["RUY_FIND_CPUINFO"] = True # Ruy public headers don't have API decorators, # export everything to support shared libraries on Windows tc.variables["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True tc.generate() deps = CMakeDeps(self) deps.generate() def _patch_sources(self): cmakelists = os.path.join(self.source_folder, "CMakeLists.txt") patches = { #Remove the invocation after project(), see https://github.com/google/ruy/issues/328 "cmake_minimum_required(VERSION 3.13)": "", # Ensure `cmake_minimum_required` is called first "# Copyright 2021 Google LLC": "# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)", } for pattern, patch in patches.items(): replace_in_file(self, cmakelists, pattern, patch) # 1. 
Allow Shared builds replace_in_file(self, os.path.join(self.source_folder, "cmake", "ruy_cc_library.cmake"), "add_library(${_NAME} STATIC", "add_library(${_NAME}" ) def build(self): self._patch_sources() cmake = CMake(self) cmake.configure() cmake.build() def package(self): cmake = CMake(self) cmake.install() copy(self, "LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) def package_info(self): self.cpp_info.libs = ["ruy_frontend", "ruy_context", "ruy_trmul", "ruy_thread_pool", "ruy_blocking_counter", "ruy_prepare_packed_matrices", "ruy_ctx", "ruy_allocator", "ruy_prepacked_cache", "ruy_tune", "ruy_wait", "ruy_apply_multiplier", "ruy_block_map", "ruy_context_get_ctx", "ruy_cpuinfo", "ruy_denormal", "ruy_have_built_path_for_avx", "ruy_have_built_path_for_avx2_fma", "ruy_have_built_path_for_avx512", "ruy_kernel_arm", "ruy_kernel_avx", "ruy_kernel_avx2_fma", "ruy_kernel_avx512", "ruy_pack_arm", "ruy_pack_avx", "ruy_pack_avx2_fma", "ruy_pack_avx512", "ruy_system_aligned_alloc", "ruy_profiler_instrumentation", "ruy_profiler_profiler" ] if self.settings.os in ["Linux", "FreeBSD"]: self.cpp_info.system_libs.extend(["m", "pthread"])
normal
{ "blob_id": "fe1c499efe492dbd4f5c9b99bd6339c503c7902b", "index": 5766, "step-1": "<mask token>\n\n\nclass RuyConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n <mask token>\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass RuyConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. 
Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n <mask token>\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 'ruy_prepare_packed_matrices', 'ruy_ctx', 
'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n", "step-3": "<mask token>\n\n\nclass RuyConan(ConanFile):\n name = 'ruy'\n description = \"\"\"ruy is a matrix multiplication library.\nIts focus is to cover the matrix multiplication needs of neural network inference engines\n\"\"\"\n url = 'https://github.com/conan-io/conan-center-index'\n homepage = 'https://github.com/google/ruy'\n license = 'Apache-2.0'\n topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',\n 'tensorflow')\n settings = 'os', 'arch', 'compiler', 'build_type'\n options = {'shared': [True, False], 'fPIC': [True, False]}\n default_options = {'shared': False, 'fPIC': True}\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. 
Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n\n def layout(self):\n cmake_layout(self, src_folder='src')\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n", "step-4": "<mask token>\nrequired_conan_version = '>=1.57.0'\n\n\nclass RuyConan(ConanFile):\n name = 'ruy'\n description = \"\"\"ruy is a matrix multiplication library.\nIts focus is to cover the matrix multiplication needs of neural network inference engines\n\"\"\"\n url = 'https://github.com/conan-io/conan-center-index'\n homepage = 'https://github.com/google/ruy'\n license = 'Apache-2.0'\n topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',\n 'tensorflow')\n settings = 'os', 'arch', 'compiler', 'build_type'\n options = {'shared': [True, False], 'fPIC': [True, False]}\n default_options = {'shared': False, 'fPIC': True}\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. 
Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n\n def layout(self):\n cmake_layout(self, src_folder='src')\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n", "step-5": "import os\nfrom conan import ConanFile\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import copy, get, replace_in_file, rmdir\nfrom conan.tools.scm import Version\nfrom conan.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.57.0\"\n\n\nclass RuyConan(ConanFile):\n name = \"ruy\"\n description = \"ruy is a matrix multiplication library.\\n\" \\\n \"Its focus is to cover the matrix multiplication needs of neural network inference engines\\n\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/google/ruy\"\n license = \"Apache-2.0\"\n topics = (\"matrix\", \"multiplication\", \"neural\", \"network\", \"AI\", \"tensorflow\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_compilers_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\", \n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"5.1\",\n }\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 14)\n\n minimum_version = 
self._minimum_compilers_version.get(str(self.settings.compiler), False)\n if not minimum_version:\n self.output.warning(\"Compiler is unknown. Assuming it supports C++14.\")\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\"Build requires support for C++14. Minimum version for {} is {}\"\n .format(str(self.settings.compiler), minimum_version))\n\n if str(self.settings.compiler) == \"clang\" and Version(self.settings.compiler.version) <= 5 and self.settings.build_type == \"Debug\":\n raise ConanInvalidConfiguration(\"Debug builds are not supported on older versions of Clang (<=5)\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def requirements(self):\n self.requires(\"cpuinfo/cci.20220228\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables[\"RUY_MINIMAL_BUILD\"] = True\n tc.cache_variables[\"RUY_FIND_CPUINFO\"] = True\n # Ruy public headers don't have API decorators,\n # export everything to support shared libraries on Windows\n tc.variables[\"CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS\"] = True\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n patches = {\n #Remove the invocation after project(), see https://github.com/google/ruy/issues/328\n \"cmake_minimum_required(VERSION 3.13)\": \"\",\n # Ensure `cmake_minimum_required` is called first \n \"# Copyright 2021 Google LLC\": \"# Copyright 2021 Google LLC\\ncmake_minimum_required(VERSION 3.13)\", \n }\n\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n\n # 1. 
Allow Shared builds\n replace_in_file(self, os.path.join(self.source_folder, \"cmake\", \"ruy_cc_library.cmake\"),\n \"add_library(${_NAME} STATIC\",\n \"add_library(${_NAME}\"\n )\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, \"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"ruy_frontend\",\n \"ruy_context\",\n \"ruy_trmul\",\n \"ruy_thread_pool\",\n \"ruy_blocking_counter\",\n \"ruy_prepare_packed_matrices\",\n \"ruy_ctx\",\n \"ruy_allocator\",\n \"ruy_prepacked_cache\",\n \"ruy_tune\",\n \"ruy_wait\",\n \"ruy_apply_multiplier\",\n \"ruy_block_map\",\n \"ruy_context_get_ctx\",\n \"ruy_cpuinfo\",\n \"ruy_denormal\",\n \"ruy_have_built_path_for_avx\",\n \"ruy_have_built_path_for_avx2_fma\",\n \"ruy_have_built_path_for_avx512\",\n \"ruy_kernel_arm\",\n \"ruy_kernel_avx\",\n \"ruy_kernel_avx2_fma\",\n \"ruy_kernel_avx512\",\n \"ruy_pack_arm\",\n \"ruy_pack_avx\",\n \"ruy_pack_avx2_fma\",\n \"ruy_pack_avx512\",\n \"ruy_system_aligned_alloc\",\n \"ruy_profiler_instrumentation\",\n \"ruy_profiler_profiler\"\n ]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n", "step-ids": [ 4, 12, 14, 15, 17 ] }
[ 4, 12, 14, 15, 17 ]
# Generated by Django 2.1.2 on 2018-11-05 12:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('PDPAPI', '0011_auto_20181105_1021'), ] operations = [ migrations.RemoveField( model_name='optionvoting', name='totalVotes', ), migrations.AddField( model_name='mcqoption', name='totalVotes', field=models.IntegerField(default=0), ), ]
normal
{ "blob_id": "53519c704ca9aff62140f187d4246208350fa9ba", "index": 4610, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PDPAPI', '0011_auto_20181105_1021')]\n operations = [migrations.RemoveField(model_name='optionvoting', name=\n 'totalVotes'), migrations.AddField(model_name='mcqoption', name=\n 'totalVotes', field=models.IntegerField(default=0))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PDPAPI', '0011_auto_20181105_1021')]\n operations = [migrations.RemoveField(model_name='optionvoting', name=\n 'totalVotes'), migrations.AddField(model_name='mcqoption', name=\n 'totalVotes', field=models.IntegerField(default=0))]\n", "step-5": "# Generated by Django 2.1.2 on 2018-11-05 12:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PDPAPI', '0011_auto_20181105_1021'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='optionvoting',\n name='totalVotes',\n ),\n migrations.AddField(\n model_name='mcqoption',\n name='totalVotes',\n field=models.IntegerField(default=0),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]